/* Generated by Cython 3.2.2 */

/* BEGIN: Cython Metadata
{
    "distutils": {
        "depends": [],
        "extra_compile_args": [
            "-std=c++14",
            "-fpermissive",
            "-Wno-deprecated-declarations",
            "-fno-var-tracking-assignments",
            "-O3"
        ],
        "include_dirs": [
            "/opt/python/cp310-cp310/include",
            "/host//home/runner/_work/cuda-python/cuda-python/cuda_toolkit/include"
        ],
        "language": "c++",
        "library_dirs": [
            "/tmp/build-env-yyn641v7/lib/python3.10/site-packages",
            "/tmp/build-env-yyn641v7/lib",
            "/host//home/runner/_work/cuda-python/cuda-python/cuda_toolkit/lib64",
            "/host//home/runner/_work/cuda-python/cuda-python/cuda_toolkit/lib"
        ],
        "name": "cuda.bindings._nvml",
        "sources": [
            "cuda/bindings/_nvml.pyx"
        ]
    },
    "module_name": "cuda.bindings._nvml"
}
END: Cython Metadata */

#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
/* InitLimitedAPI */
#if defined(Py_LIMITED_API)
  #if !defined(CYTHON_LIMITED_API)
  #define CYTHON_LIMITED_API 1
  #endif
#elif defined(CYTHON_LIMITED_API)
  #ifdef _MSC_VER
  #pragma message ("Limited API usage is enabled with 'CYTHON_LIMITED_API' but 'Py_LIMITED_API' does not define a Python target version. Consider setting 'Py_LIMITED_API' instead.")
  #else
  #warning Limited API usage is enabled with 'CYTHON_LIMITED_API' but 'Py_LIMITED_API' does not define a Python target version. Consider setting 'Py_LIMITED_API' instead.
  #endif
#endif

#include "Python.h"
#ifndef Py_PYTHON_H
    #error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x03080000
    #error Cython requires Python 3.8+.
#else
#define __PYX_ABI_VERSION "3_2_2"
#define CYTHON_HEX_VERSION 0x030202F0
#define CYTHON_FUTURE_DIVISION 1
/* CModulePreamble */
#include <stddef.h>
#ifndef offsetof
  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS)
  #ifndef __stdcall
    #define __stdcall
  #endif
  #ifndef __cdecl
    #define __cdecl
  #endif
  #ifndef __fastcall
    #define __fastcall
  #endif
#endif
#ifndef DL_IMPORT
  #define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
  #define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef PY_LONG_LONG
  #define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
  #define Py_HUGE_VAL HUGE_VAL
#endif
#define __PYX_LIMITED_VERSION_HEX PY_VERSION_HEX
#if defined(GRAALVM_PYTHON)
  /* For very preliminary testing purposes. Most variables are set the same as PyPy.
     The existence of this section does not imply that anything works or is even tested */
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #define CYTHON_COMPILING_IN_LIMITED_API 0
  #define CYTHON_COMPILING_IN_GRAAL 1
  #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 0
  #undef CYTHON_USE_TYPE_SPECS
  #define CYTHON_USE_TYPE_SPECS 0
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #undef CYTHON_AVOID_BORROWED_REFS
  #define CYTHON_AVOID_BORROWED_REFS 1
  #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
  #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #undef CYTHON_ASSUME_SAFE_SIZE
  #define CYTHON_ASSUME_SAFE_SIZE 0
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_GIL
  #define CYTHON_FAST_GIL 0
  #undef CYTHON_METH_FASTCALL
  #define CYTHON_METH_FASTCALL 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #undef CYTHON_PEP489_MULTI_PHASE_INIT
  #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #undef CYTHON_USE_MODULE_STATE
  #define CYTHON_USE_MODULE_STATE 0
  #undef CYTHON_USE_SYS_MONITORING
  #define CYTHON_USE_SYS_MONITORING 0
  #undef CYTHON_USE_TP_FINALIZE
  #define CYTHON_USE_TP_FINALIZE 0
  #undef CYTHON_USE_AM_SEND
  #define CYTHON_USE_AM_SEND 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 1
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
  #endif
  #undef CYTHON_USE_FREELISTS
  #define CYTHON_USE_FREELISTS 0
  #undef CYTHON_IMMORTAL_CONSTANTS
  #define CYTHON_IMMORTAL_CONSTANTS 0
#elif defined(PYPY_VERSION)
  #define CYTHON_COMPILING_IN_PYPY 1
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #define CYTHON_COMPILING_IN_LIMITED_API 0
  #define CYTHON_COMPILING_IN_GRAAL 0
  #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 1
  #ifndef CYTHON_USE_TYPE_SPECS
    #define CYTHON_USE_TYPE_SPECS 0
  #endif
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #undef CYTHON_AVOID_BORROWED_REFS
  #define CYTHON_AVOID_BORROWED_REFS 1
  #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
  #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 1
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #ifndef CYTHON_ASSUME_SAFE_SIZE
    #define CYTHON_ASSUME_SAFE_SIZE 1
  #endif
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_GIL
  #define CYTHON_FAST_GIL 0
  #undef CYTHON_METH_FASTCALL
  #define CYTHON_METH_FASTCALL 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #if PY_VERSION_HEX < 0x03090000
    #undef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT 0
  #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
    #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #endif
  #undef CYTHON_USE_MODULE_STATE
  #define CYTHON_USE_MODULE_STATE 0
  #undef CYTHON_USE_SYS_MONITORING
  #define CYTHON_USE_SYS_MONITORING 0
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE (PYPY_VERSION_NUM >= 0x07030C00)
  #endif
  #undef CYTHON_USE_AM_SEND
  #define CYTHON_USE_AM_SEND 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC (PYPY_VERSION_NUM >= 0x07031100)
  #endif
  #undef CYTHON_USE_FREELISTS
  #define CYTHON_USE_FREELISTS 0
  #undef CYTHON_IMMORTAL_CONSTANTS
  #define CYTHON_IMMORTAL_CONSTANTS 0
#elif defined(CYTHON_LIMITED_API)
  #ifdef Py_LIMITED_API
    #undef __PYX_LIMITED_VERSION_HEX
    #define __PYX_LIMITED_VERSION_HEX Py_LIMITED_API
  #endif
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #define CYTHON_COMPILING_IN_LIMITED_API 1
  #define CYTHON_COMPILING_IN_GRAAL 0
  #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 0
  #undef CYTHON_USE_TYPE_SPECS
  #define CYTHON_USE_TYPE_SPECS 1
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #ifndef CYTHON_USE_UNICODE_WRITER
    #define CYTHON_USE_UNICODE_WRITER 0
  #endif
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
    #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
  #endif
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #undef CYTHON_ASSUME_SAFE_SIZE
  #define CYTHON_ASSUME_SAFE_SIZE 0
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_GIL
  #define CYTHON_FAST_GIL 0
  #undef CYTHON_METH_FASTCALL
  #define CYTHON_METH_FASTCALL (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #endif
  #ifndef CYTHON_USE_MODULE_STATE
    #define CYTHON_USE_MODULE_STATE 0
  #endif
  #undef CYTHON_USE_SYS_MONITORING
  #define CYTHON_USE_SYS_MONITORING 0
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE 0
  #endif
  #ifndef CYTHON_USE_AM_SEND
    #define CYTHON_USE_AM_SEND (__PYX_LIMITED_VERSION_HEX >= 0x030A0000)
  #endif
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
  #endif
  #ifndef CYTHON_USE_FREELISTS
  #define CYTHON_USE_FREELISTS 1
  #endif
  #undef CYTHON_IMMORTAL_CONSTANTS
  #define CYTHON_IMMORTAL_CONSTANTS 0
#else
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_CPYTHON 1
  #define CYTHON_COMPILING_IN_LIMITED_API 0
  #define CYTHON_COMPILING_IN_GRAAL 0
  #ifdef Py_GIL_DISABLED
    #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 1
  #else
    #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #endif
  #if PY_VERSION_HEX < 0x030A0000
    #undef CYTHON_USE_TYPE_SLOTS
    #define CYTHON_USE_TYPE_SLOTS 1
  #elif !defined(CYTHON_USE_TYPE_SLOTS)
    #define CYTHON_USE_TYPE_SLOTS 1
  #endif
  #ifndef CYTHON_USE_TYPE_SPECS
    #define CYTHON_USE_TYPE_SPECS 0
  #endif
  #ifndef CYTHON_USE_PYTYPE_LOOKUP
    #define CYTHON_USE_PYTYPE_LOOKUP 1
  #endif
  #ifndef CYTHON_USE_PYLONG_INTERNALS
    #define CYTHON_USE_PYLONG_INTERNALS 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_USE_PYLIST_INTERNALS
    #define CYTHON_USE_PYLIST_INTERNALS 0
  #elif !defined(CYTHON_USE_PYLIST_INTERNALS)
    #define CYTHON_USE_PYLIST_INTERNALS 1
  #endif
  #ifndef CYTHON_USE_UNICODE_INTERNALS
    #define CYTHON_USE_UNICODE_INTERNALS 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING || PY_VERSION_HEX >= 0x030B00A2
    #undef CYTHON_USE_UNICODE_WRITER
    #define CYTHON_USE_UNICODE_WRITER 0
  #elif !defined(CYTHON_USE_UNICODE_WRITER)
    #define CYTHON_USE_UNICODE_WRITER 1
  #endif
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
    #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 1
  #elif !defined(CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS)
    #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_ASSUME_SAFE_MACROS
    #define CYTHON_ASSUME_SAFE_MACROS 1
  #endif
  #ifndef CYTHON_ASSUME_SAFE_SIZE
    #define CYTHON_ASSUME_SAFE_SIZE 1
  #endif
  #ifndef CYTHON_UNPACK_METHODS
    #define CYTHON_UNPACK_METHODS 1
  #endif
  #ifndef CYTHON_FAST_THREAD_STATE
    #define CYTHON_FAST_THREAD_STATE 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_FAST_GIL
    #define CYTHON_FAST_GIL 0
  #elif !defined(CYTHON_FAST_GIL)
    #define CYTHON_FAST_GIL (PY_VERSION_HEX < 0x030C00A6)
  #endif
  #ifndef CYTHON_METH_FASTCALL
    #define CYTHON_METH_FASTCALL 1
  #endif
  #ifndef CYTHON_FAST_PYCALL
    #define CYTHON_FAST_PYCALL 1
  #endif
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #endif
  #ifndef CYTHON_USE_MODULE_STATE
    #define CYTHON_USE_MODULE_STATE 0
  #endif
  #ifndef CYTHON_USE_SYS_MONITORING
    #define CYTHON_USE_SYS_MONITORING (PY_VERSION_HEX >= 0x030d00B1)
  #endif
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE 1
  #endif
  #ifndef CYTHON_USE_AM_SEND
    #define CYTHON_USE_AM_SEND 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_USE_DICT_VERSIONS
    #define CYTHON_USE_DICT_VERSIONS 0
  #elif !defined(CYTHON_USE_DICT_VERSIONS)
    #define CYTHON_USE_DICT_VERSIONS  (PY_VERSION_HEX < 0x030C00A5 && !CYTHON_USE_MODULE_STATE)
  #endif
  #ifndef CYTHON_USE_EXC_INFO_STACK
    #define CYTHON_USE_EXC_INFO_STACK 1
  #endif
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC 1
  #endif
  #ifndef CYTHON_USE_FREELISTS
    #define CYTHON_USE_FREELISTS (!CYTHON_COMPILING_IN_CPYTHON_FREETHREADING)
  #endif
  #if defined(CYTHON_IMMORTAL_CONSTANTS) && PY_VERSION_HEX < 0x030C0000
    #undef CYTHON_IMMORTAL_CONSTANTS
    #define CYTHON_IMMORTAL_CONSTANTS 0  // definitely won't work
  #elif !defined(CYTHON_IMMORTAL_CONSTANTS)
    #define CYTHON_IMMORTAL_CONSTANTS (PY_VERSION_HEX >= 0x030C0000 && !CYTHON_USE_MODULE_STATE && CYTHON_COMPILING_IN_CPYTHON_FREETHREADING)
  #endif
#endif
#ifndef CYTHON_COMPRESS_STRINGS
  #define CYTHON_COMPRESS_STRINGS 1
#endif
#ifndef CYTHON_FAST_PYCCALL
#define CYTHON_FAST_PYCCALL  CYTHON_FAST_PYCALL
#endif
#ifndef CYTHON_VECTORCALL
#if CYTHON_COMPILING_IN_LIMITED_API
#define CYTHON_VECTORCALL  (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
#else
#define CYTHON_VECTORCALL  (CYTHON_FAST_PYCCALL)
#endif
#endif
#if CYTHON_USE_PYLONG_INTERNALS
  #undef SHIFT
  #undef BASE
  #undef MASK
  #ifdef SIZEOF_VOID_P
    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
  #endif
#endif
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
  #define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
  #if defined(__GNUC__)
    #define CYTHON_RESTRICT __restrict__
  #elif defined(_MSC_VER) && _MSC_VER >= 1400
    #define CYTHON_RESTRICT __restrict
  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define CYTHON_RESTRICT restrict
  #else
    #define CYTHON_RESTRICT
  #endif
#endif
#ifndef CYTHON_UNUSED
  #if defined(__cplusplus)
    /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17
     * but leads to warnings with -pedantic, since it is a C++17 feature */
    #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
      #if __has_cpp_attribute(maybe_unused)
        #define CYTHON_UNUSED [[maybe_unused]]
      #endif
    #endif
  #endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
#     define CYTHON_UNUSED __attribute__ ((__unused__))
#   else
#     define CYTHON_UNUSED
#   endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
#   define CYTHON_UNUSED __attribute__ ((__unused__))
# else
#   define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_UNUSED_VAR
#  if defined(__cplusplus)
     template<class T> void CYTHON_UNUSED_VAR( const T& ) { }
#  else
#    define CYTHON_UNUSED_VAR(x) (void)(x)
#  endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
  #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x)
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON && !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
#  define CYTHON_NCP_UNUSED
# else
#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_USE_CPP_STD_MOVE
  #if defined(__cplusplus) && (\
    __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600))
    #define CYTHON_USE_CPP_STD_MOVE 1
  #else
    #define CYTHON_USE_CPP_STD_MOVE 0
  #endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#include <stdint.h>
typedef uintptr_t  __pyx_uintptr_t;
#ifndef CYTHON_FALLTHROUGH
  #if defined(__cplusplus)
    /* for clang __has_cpp_attribute(fallthrough) is true even before C++17
     * but leads to warnings with -pedantic, since it is a C++17 feature */
    #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
      #if __has_cpp_attribute(fallthrough)
        #define CYTHON_FALLTHROUGH [[fallthrough]]
      #endif
    #endif
    #ifndef CYTHON_FALLTHROUGH
      #if __has_cpp_attribute(clang::fallthrough)
        #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
      #elif __has_cpp_attribute(gnu::fallthrough)
        #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
      #endif
    #endif
  #endif
  #ifndef CYTHON_FALLTHROUGH
    #if __has_attribute(fallthrough)
      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
    #else
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
  #if defined(__clang__) && defined(__apple_build_version__)
    #if __apple_build_version__ < 7000000
      #undef  CYTHON_FALLTHROUGH
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
#endif
#ifndef Py_UNREACHABLE
  /* Fallback for Python headers that predate Py_UNREACHABLE.
     Wrapped in do/while(0) so the two statements stay a single statement:
     with the bare form "assert(0); abort()", a use like
     "if (cond) Py_UNREACHABLE(); else ..." would leave abort() outside the
     if (unconditional) and make the else a syntax error. */
  #define Py_UNREACHABLE()  do { assert(0); abort(); } while (0)
#endif
#ifdef __cplusplus
  /* Compile-time check whether the arithmetic type T is unsigned:
     converting -1 to an unsigned type wraps around to the type's maximum
     value, which compares greater than T(0). For signed types the
     comparison is false. */
  template <typename T>
  struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);};
  #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value)
#else
  #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0)
#endif
#if CYTHON_COMPILING_IN_PYPY == 1
  #define __PYX_NEED_TP_PRINT_SLOT  (PY_VERSION_HEX < 0x030A0000)
#else
  #define __PYX_NEED_TP_PRINT_SLOT  (PY_VERSION_HEX < 0x03090000)
#endif
#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer))

/* CppInitCode */
#ifndef __cplusplus
  #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
  #if defined(__clang__)
    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
  #else
    #define CYTHON_INLINE inline
  #endif
#endif
// Explicitly invoke T's destructor on an existing object without freeing
// its storage (used for C++ objects that Cython constructed in place).
template<typename T>
void __Pyx_call_destructor(T& obj) {
    obj.~T();
}
// A pointer dressed up to behave like a C++ reference, while remaining
// default-constructible and rebindable (which real references are not).
// Cython uses this for temporaries that must act like "T&".
template<typename T>
class __Pyx_FakeReference {
  public:
    __Pyx_FakeReference() : m_ptr(NULL) { }
    // Binding to a const ref deliberately drops constness; the generated
    // code only does this where the referenced object is in fact mutable.
    __Pyx_FakeReference(const T& ref) : m_ptr(const_cast<T*>(&ref)) { }
    T *operator->() { return m_ptr; }
    // NOTE: address-of is overloaded to yield the wrapped pointer itself.
    T *operator&() { return m_ptr; }
    operator T&() { return *m_ptr; }
    // Comparisons go through the referenced values, not the pointers.
    template<typename U> bool operator ==(const U& other) const { return *m_ptr == other; }
    template<typename U> bool operator !=(const U& other) const { return *m_ptr != other; }
    template<typename U> bool operator==(const __Pyx_FakeReference<U>& other) const { return *m_ptr == *other.m_ptr; }
    template<typename U> bool operator!=(const __Pyx_FakeReference<U>& other) const { return *m_ptr != *other.m_ptr; }
  private:
    T *m_ptr;
};

/* PythonCompatibility */
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_DefaultClassType PyType_Type
#if CYTHON_COMPILING_IN_LIMITED_API
    #ifndef CO_OPTIMIZED
    static int CO_OPTIMIZED;
    #endif
    #ifndef CO_NEWLOCALS
    static int CO_NEWLOCALS;
    #endif
    #ifndef CO_VARARGS
    static int CO_VARARGS;
    #endif
    #ifndef CO_VARKEYWORDS
    static int CO_VARKEYWORDS;
    #endif
    #ifndef CO_ASYNC_GENERATOR
    static int CO_ASYNC_GENERATOR;
    #endif
    #ifndef CO_GENERATOR
    static int CO_GENERATOR;
    #endif
    #ifndef CO_COROUTINE
    static int CO_COROUTINE;
    #endif
#else
    #ifndef CO_COROUTINE
      #define CO_COROUTINE 0x80
    #endif
    #ifndef CO_ASYNC_GENERATOR
      #define CO_ASYNC_GENERATOR 0x200
    #endif
#endif
static int __Pyx_init_co_variables(void);
#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE)
  #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type)
#else
  #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type))
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is)
  #define __Pyx_Py_Is(x, y)  Py_Is(x, y)
#else
  #define __Pyx_Py_Is(x, y) ((x) == (y))
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone)
  #define __Pyx_Py_IsNone(ob) Py_IsNone(ob)
#else
  #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None)
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue)
  #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob)
#else
  #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True)
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse)
  #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob)
#else
  #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False)
#endif
#define __Pyx_NoneAsNull(obj)  (__Pyx_Py_IsNone(obj) ? NULL : (obj))
#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY
  #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o)
#else
  #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o)
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
  #define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
  #define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
  #define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef Py_TPFLAGS_SEQUENCE
  #define Py_TPFLAGS_SEQUENCE 0
#endif
#ifndef Py_TPFLAGS_MAPPING
  #define Py_TPFLAGS_MAPPING 0
#endif
#ifndef Py_TPFLAGS_IMMUTABLETYPE
  #define Py_TPFLAGS_IMMUTABLETYPE (1UL << 8)
#endif
#ifndef Py_TPFLAGS_DISALLOW_INSTANTIATION
  #define Py_TPFLAGS_DISALLOW_INSTANTIATION (1UL << 7)
#endif
#ifndef METH_STACKLESS
  #define METH_STACKLESS 0
#endif
#ifndef METH_FASTCALL
  #ifndef METH_FASTCALL
     #define METH_FASTCALL 0x80
  #endif
  typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
  typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
                                                          Py_ssize_t nargs, PyObject *kwnames);
#else
  #if PY_VERSION_HEX >= 0x030d00A4
  #  define __Pyx_PyCFunctionFast PyCFunctionFast
  #  define __Pyx_PyCFunctionFastWithKeywords PyCFunctionFastWithKeywords
  #else
  #  define __Pyx_PyCFunctionFast _PyCFunctionFast
  #  define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
  #endif
#endif
#if CYTHON_METH_FASTCALL
  #define __Pyx_METH_FASTCALL METH_FASTCALL
  #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast
  #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords
#else
  #define __Pyx_METH_FASTCALL METH_VARARGS
  #define __Pyx_PyCFunction_FastCall PyCFunction
  #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords
#endif
#if CYTHON_VECTORCALL
  #define __pyx_vectorcallfunc vectorcallfunc
  #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET  PY_VECTORCALL_ARGUMENTS_OFFSET
  #define __Pyx_PyVectorcall_NARGS(n)  PyVectorcall_NARGS((size_t)(n))
#else
  #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET  0
  #define __Pyx_PyVectorcall_NARGS(n)  ((Py_ssize_t)(n))
#endif
#if PY_VERSION_HEX >= 0x030900B1
#define __Pyx_PyCFunction_CheckExact(func)  PyCFunction_CheckExact(func)
#else
#define __Pyx_PyCFunction_CheckExact(func)  PyCFunction_Check(func)
#endif
#define __Pyx_CyOrPyCFunction_Check(func)  PyCFunction_Check(func)
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func)  (((PyCFunctionObject*)(func))->m_ml->ml_meth)
#elif !CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func)  PyCFunction_GET_FUNCTION(func)
#endif
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_CyOrPyCFunction_GET_FLAGS(func)  (((PyCFunctionObject*)(func))->m_ml->ml_flags)
/* Return the bound self of a (Cython or plain) C-function object, or NULL
   for static methods, whose m_self slot must not be exposed as a self. */
static CYTHON_INLINE PyObject* __Pyx_CyOrPyCFunction_GET_SELF(PyObject *func) {
    if (__Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_STATIC)
        return NULL;
    return ((PyCFunctionObject*)func)->m_self;
}
#endif
/* True iff 'func' is a builtin-function object whose underlying C function
   pointer is exactly 'cfunc'. */
static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void (*cfunc)(void)) {
    if (!PyCFunction_Check(func))
        return 0;
#if CYTHON_COMPILING_IN_LIMITED_API
    /* Limited API: no direct struct access, go through the accessor. */
    return PyCFunction_GetFunction(func) == (PyCFunction) cfunc;
#else
    return PyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc;
#endif
}
#define __Pyx_IsSameCFunction(func, cfunc)   __Pyx__IsSameCFunction(func, cfunc)
#if PY_VERSION_HEX < 0x03090000 || (CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  #define __Pyx_PyType_FromModuleAndSpec(m, s, b)  ((void)m, PyType_FromSpecWithBases(s, b))
  typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *);
#else
  #define __Pyx_PyType_FromModuleAndSpec(m, s, b)  PyType_FromModuleAndSpec(m, s, b)
  #define __Pyx_PyCMethod  PyCMethod
#endif
#ifndef METH_METHOD
  #define METH_METHOD 0x200
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
  #define PyObject_Malloc(s)   PyMem_Malloc(s)
  #define PyObject_Free(p)     PyMem_Free(p)
  #define PyObject_Realloc(p)  PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)
#elif CYTHON_COMPILING_IN_GRAAL && defined(GRAALPY_VERSION_NUM) && GRAALPY_VERSION_NUM > 0x19000000
  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) GraalPyFrame_SetLineNumber((frame), (lineno))
#elif CYTHON_COMPILING_IN_GRAAL
  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) _PyFrame_SetLineNumber((frame), (lineno))
#else
  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)  (frame)->f_lineno = (lineno)
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_PyThreadState_Current PyThreadState_Get()
#elif !CYTHON_FAST_THREAD_STATE
  #define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x030d00A1
  #define __Pyx_PyThreadState_Current PyThreadState_GetUnchecked()
#else
  #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#endif
#if CYTHON_USE_MODULE_STATE
/* Fetch this extension module's per-module state. Once the module has been
   initialised the state block must exist; a NULL result is unrecoverable,
   hence the fatal error rather than a Python exception. */
static CYTHON_INLINE void *__Pyx__PyModule_GetState(PyObject *op)
{
    void *state = PyModule_GetState(op);
    if (state == NULL)
        Py_FatalError("Couldn't find the module state");
    return state;
}
#define __Pyx_PyModule_GetState(o) (__pyx_mstatetype *)__Pyx__PyModule_GetState(o)
#else
#define __Pyx_PyModule_GetState(op) ((void)op,__pyx_mstate_global)
#endif
#define __Pyx_PyObject_GetSlot(obj, name, func_ctype)  __Pyx_PyType_GetSlot(Py_TYPE((PyObject *) obj), name, func_ctype)
#define __Pyx_PyObject_TryGetSlot(obj, name, func_ctype) __Pyx_PyType_TryGetSlot(Py_TYPE(obj), name, func_ctype)
#define __Pyx_PyObject_GetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_GetSubSlot(Py_TYPE(obj), sub, name, func_ctype)
#define __Pyx_PyObject_TryGetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_TryGetSubSlot(Py_TYPE(obj), sub, name, func_ctype)
#if CYTHON_USE_TYPE_SLOTS
  #define __Pyx_PyType_GetSlot(type, name, func_ctype)  ((type)->name)
  #define __Pyx_PyType_TryGetSlot(type, name, func_ctype) __Pyx_PyType_GetSlot(type, name, func_ctype)
  #define __Pyx_PyType_GetSubSlot(type, sub, name, func_ctype) (((type)->sub) ? ((type)->sub->name) : NULL)
  #define __Pyx_PyType_TryGetSubSlot(type, sub, name, func_ctype) __Pyx_PyType_GetSubSlot(type, sub, name, func_ctype)
#else
  #define __Pyx_PyType_GetSlot(type, name, func_ctype)  ((func_ctype) PyType_GetSlot((type), Py_##name))
  #define __Pyx_PyType_TryGetSlot(type, name, func_ctype)\
    ((__PYX_LIMITED_VERSION_HEX >= 0x030A0000 ||\
     (PyType_GetFlags(type) & Py_TPFLAGS_HEAPTYPE) || __Pyx_get_runtime_version() >= 0x030A0000) ?\
     __Pyx_PyType_GetSlot(type, name, func_ctype) : NULL)
  #define __Pyx_PyType_GetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_GetSlot(obj, name, func_ctype)
  #define __Pyx_PyType_TryGetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_TryGetSlot(obj, name, func_ctype)
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n)  ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n)  PyDict_New()
#endif
#define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStrWithError(dict, name)  _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
/* Borrowed-reference dict lookup using the string's cached hash; a missing
   key is not treated as an error, so any lookup exception is cleared. */
static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) {
    PyObject *value = __Pyx_PyDict_GetItemStrWithError(dict, name);
    if (value != NULL)
        return value;
    PyErr_Clear();
    return NULL;
}
#elif !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000
#define __Pyx_PyDict_GetItemStrWithError  PyDict_GetItemWithError
#define __Pyx_PyDict_GetItemStr           PyDict_GetItem
#else
/* Fallback dict lookup for very old interpreters (this branch is only
   compiled on PyPy < 7.2 per the surrounding #elif).  Returns a borrowed
   reference, or NULL if the key is absent. */
static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) {
#if CYTHON_COMPILING_IN_PYPY
    return PyDict_GetItem(dict, name);
#else
    /* NOTE(review): this path pokes legacy dict internals (PyDictEntry,
       ob_shash, ma_lookup) that only existed in Python-2-era headers —
       presumably dead code in a Python 3.8+ build; confirm before touching. */
    PyDictEntry *ep;
    PyDictObject *mp = (PyDictObject*) dict;
    /* The string's hash is assumed to be pre-computed (cached in ob_shash). */
    long hash = ((PyStringObject *) name)->ob_shash;
    assert(hash != -1);
    ep = (mp->ma_lookup)(mp, name, hash);
    if (ep == NULL) {
        return NULL;
    }
    return ep->me_value;
#endif
}
#define __Pyx_PyDict_GetItemStr           PyDict_GetItem
#endif
#if CYTHON_USE_TYPE_SLOTS
  #define __Pyx_PyType_GetFlags(tp)   (((PyTypeObject *)tp)->tp_flags)
  #define __Pyx_PyType_HasFeature(type, feature)  ((__Pyx_PyType_GetFlags(type) & (feature)) != 0)
#else
  #define __Pyx_PyType_GetFlags(tp)   (PyType_GetFlags((PyTypeObject *)tp))
  #define __Pyx_PyType_HasFeature(type, feature)  PyType_HasFeature(type, feature)
#endif
#define __Pyx_PyObject_GetIterNextFunc(iterator)  __Pyx_PyObject_GetSlot(iterator, tp_iternext, iternextfunc)
#if CYTHON_USE_TYPE_SPECS
#define __Pyx_PyHeapTypeObject_GC_Del(obj)  {\
    PyTypeObject *type = Py_TYPE((PyObject*)obj);\
    assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\
    PyObject_GC_Del(obj);\
    Py_DECREF(type);\
}
#else
#define __Pyx_PyHeapTypeObject_GC_Del(obj)  PyObject_GC_Del(obj)
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_PyUnicode_READY(op)       (0)
  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i)
  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   ((void)u, 1114111U)
  #define __Pyx_PyUnicode_KIND(u)         ((void)u, (0))
  #define __Pyx_PyUnicode_DATA(u)         ((void*)u)
  #define __Pyx_PyUnicode_READ(k, d, i)   ((void)k, PyUnicode_ReadChar((PyObject*)(d), i))
  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GetLength(u))
#else
  #if PY_VERSION_HEX >= 0x030C0000
    #define __Pyx_PyUnicode_READY(op)       (0)
  #else
    #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\
                                                0 : _PyUnicode_Ready((PyObject *)(op)))
  #endif
  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   PyUnicode_MAX_CHAR_VALUE(u)
  #define __Pyx_PyUnicode_KIND(u)         ((int)PyUnicode_KIND(u))
  #define __Pyx_PyUnicode_DATA(u)         PyUnicode_DATA(u)
  #define __Pyx_PyUnicode_READ(k, d, i)   PyUnicode_READ(k, d, i)
  #define __Pyx_PyUnicode_WRITE(k, d, i, ch)  PyUnicode_WRITE(k, d, i, (Py_UCS4) ch)
  #if PY_VERSION_HEX >= 0x030C0000
    #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GET_LENGTH(u))
  #else
    #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
    #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
    #else
    #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
    #endif
  #endif
#endif
#if CYTHON_COMPILING_IN_PYPY
  #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
  #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
#else
  #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY
  #if !defined(PyUnicode_DecodeUnicodeEscape)
    #define PyUnicode_DecodeUnicodeEscape(s, size, errors)  PyUnicode_Decode(s, size, "unicode_escape", errors)
  #endif
  #if !defined(PyUnicode_Contains)
    #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
  #endif
  #if !defined(PyByteArray_Check)
    #define PyByteArray_Check(obj)  PyObject_TypeCheck(obj, &PyByteArray_Type)
  #endif
  #if !defined(PyObject_Format)
    #define PyObject_Format(obj, fmt)  PyObject_CallMethod(obj, "__format__", "O", fmt)
  #endif
#endif
#define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  #define __Pyx_PySequence_ListKeepNew(obj)\
    (likely(PyList_CheckExact(obj) && PyUnstable_Object_IsUniquelyReferenced(obj)) ? __Pyx_NewRef(obj) : PySequence_List(obj))
#elif CYTHON_COMPILING_IN_CPYTHON
  #define __Pyx_PySequence_ListKeepNew(obj)\
    (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj))
#else
  #define __Pyx_PySequence_ListKeepNew(obj)  PySequence_List(obj)
#endif
#ifndef PySet_CheckExact
  #define PySet_CheckExact(obj)        __Pyx_IS_TYPE(obj, &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
  #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
  #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
  #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
  #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
enum __Pyx_ReferenceSharing {
  __Pyx_ReferenceSharing_DefinitelyUnique, // We created it so we know it's unshared - no need to check
  __Pyx_ReferenceSharing_OwnStrongReference,
  __Pyx_ReferenceSharing_FunctionArgument,
  __Pyx_ReferenceSharing_SharedReference, // Never trust it to be unshared because it's a global or similar
};
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && PY_VERSION_HEX >= 0x030E0000
#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing)\
    (sharing == __Pyx_ReferenceSharing_DefinitelyUnique ? 1 :\
      (sharing == __Pyx_ReferenceSharing_FunctionArgument ? PyUnstable_Object_IsUniqueReferencedTemporary(o) :\
      (sharing == __Pyx_ReferenceSharing_OwnStrongReference ? PyUnstable_Object_IsUniquelyReferenced(o) : 0)))
#elif (CYTHON_COMPILING_IN_CPYTHON && !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING) || CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing) (((void)sharing), Py_REFCNT(o) == 1)
#else
#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing) (((void)o), ((void)sharing), 0)
#endif
#if CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
  #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    #define __Pyx_PyList_GetItemRef(o, i) PyList_GetItemRef(o, i)
  #elif CYTHON_COMPILING_IN_LIMITED_API || !CYTHON_ASSUME_SAFE_MACROS
    #define __Pyx_PyList_GetItemRef(o, i) (likely((i) >= 0) ? PySequence_GetItem(o, i) : (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
  #else
    #define __Pyx_PyList_GetItemRef(o, i) PySequence_ITEM(o, i)
  #endif
#elif CYTHON_COMPILING_IN_LIMITED_API || !CYTHON_ASSUME_SAFE_MACROS
  #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    #define __Pyx_PyList_GetItemRef(o, i) PyList_GetItemRef(o, i)
  #else
    #define __Pyx_PyList_GetItemRef(o, i) __Pyx_XNewRef(PyList_GetItem(o, i))
  #endif
#else
  #define __Pyx_PyList_GetItemRef(o, i) __Pyx_NewRef(PyList_GET_ITEM(o, i))
#endif
#if CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS && !CYTHON_COMPILING_IN_LIMITED_API && CYTHON_ASSUME_SAFE_MACROS
  #define __Pyx_PyList_GetItemRefFast(o, i, unsafe_shared) (__Pyx_IS_UNIQUELY_REFERENCED(o, unsafe_shared) ?\
    __Pyx_NewRef(PyList_GET_ITEM(o, i)) : __Pyx_PyList_GetItemRef(o, i))
#else
  #define __Pyx_PyList_GetItemRefFast(o, i, unsafe_shared) __Pyx_PyList_GetItemRef(o, i)
#endif
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
#define __Pyx_PyDict_GetItemRef(dict, key, result) PyDict_GetItemRef(dict, key, result)
#elif CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
/* Mapping lookup without borrowed references: on success stores a new
 * reference in *result and returns 1; returns 0 with *result == NULL when
 * the key is absent (KeyError cleared), and -1 on any other error. */
static CYTHON_INLINE int __Pyx_PyDict_GetItemRef(PyObject *dict, PyObject *key, PyObject **result) {
    PyObject *value = PyObject_GetItem(dict, key);
    *result = value;
    if (value != NULL)
        return 1;
    if (!PyErr_ExceptionMatches(PyExc_KeyError))
        return -1;
    PyErr_Clear();
    return 0;
}
#else
/* Dict lookup returning a new (not borrowed) reference in *result:
 * 1 = found, 0 = missing, -1 = error raised during the lookup. */
static CYTHON_INLINE int __Pyx_PyDict_GetItemRef(PyObject *dict, PyObject *key, PyObject **result) {
    PyObject *found = PyDict_GetItemWithError(dict, key);
    if (found == NULL) {
        *result = NULL;
        return PyErr_Occurred() ? -1 : 0;
    }
    Py_INCREF(found);
    *result = found;
    return 1;
}
#endif
#if defined(CYTHON_DEBUG_VISIT_CONST) && CYTHON_DEBUG_VISIT_CONST
  #define __Pyx_VISIT_CONST(obj)  Py_VISIT(obj)
#else
  #define __Pyx_VISIT_CONST(obj)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
  #define __Pyx_PySequence_ITEM(o, i) PySequence_ITEM(o, i)
  #define __Pyx_PySequence_SIZE(seq)  Py_SIZE(seq)
  #define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0))
  #define __Pyx_PyTuple_GET_ITEM(o, i) PyTuple_GET_ITEM(o, i)
  #define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0))
  #define __Pyx_PyList_GET_ITEM(o, i) PyList_GET_ITEM(o, i)
#else
  #define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i)
  #define __Pyx_PySequence_SIZE(seq)  PySequence_Size(seq)
  #define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v)
  #define __Pyx_PyTuple_GET_ITEM(o, i) PyTuple_GetItem(o, i)
  #define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v)
  #define __Pyx_PyList_GET_ITEM(o, i) PyList_GetItem(o, i)
#endif
#if CYTHON_ASSUME_SAFE_SIZE
  #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o)
  #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o)
  #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o)
  #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
  #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o)
  #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GET_LENGTH(o)
#else
  #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_Size(o)
  #define __Pyx_PyList_GET_SIZE(o) PyList_Size(o)
  #define __Pyx_PySet_GET_SIZE(o) PySet_Size(o)
  #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o)
  #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o)
  #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GetLength(o)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_InternFromString)
  #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#define __Pyx_PyLong_FromHash_t PyLong_FromSsize_t
#define __Pyx_PyLong_AsHash_t   __Pyx_PyIndex_AsSsize_t
#if __PYX_LIMITED_VERSION_HEX >= 0x030A0000
    #define __Pyx_PySendResult PySendResult
#else
    typedef enum {
        PYGEN_RETURN = 0,
        PYGEN_ERROR = -1,
        PYGEN_NEXT = 1,
    } __Pyx_PySendResult;
#endif
#if CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX < 0x030A00A3
  typedef __Pyx_PySendResult (*__Pyx_pyiter_sendfunc)(PyObject *iter, PyObject *value, PyObject **result);
#else
  #define __Pyx_pyiter_sendfunc sendfunc
#endif
#if !CYTHON_USE_AM_SEND
#define __PYX_HAS_PY_AM_SEND 0
#elif __PYX_LIMITED_VERSION_HEX >= 0x030A0000
#define __PYX_HAS_PY_AM_SEND 1
#else
#define __PYX_HAS_PY_AM_SEND 2  // our own backported implementation
#endif
#if __PYX_HAS_PY_AM_SEND < 2
    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#else
    typedef struct {
        unaryfunc am_await;
        unaryfunc am_aiter;
        unaryfunc am_anext;
        __Pyx_pyiter_sendfunc am_send;
    } __Pyx_PyAsyncMethodsStruct;
    #define __Pyx_SlotTpAsAsync(s) ((PyAsyncMethods*)(s))
#endif
#if CYTHON_USE_AM_SEND && PY_VERSION_HEX < 0x030A00F0
    #define __Pyx_TPFLAGS_HAVE_AM_SEND (1UL << 21)
#else
    #define __Pyx_TPFLAGS_HAVE_AM_SEND (0)
#endif
#if PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyInterpreterState_Get() PyInterpreterState_Get()
#else
#define __Pyx_PyInterpreterState_Get() PyThreadState_Get()->interp
#endif
#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030A0000
#ifdef __cplusplus
extern "C"
#endif
PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize);
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
/* Fetch `inspect.<name>` and store it as an int in *write_to.
 * Returns nonzero on success, 0 with a Python error set on failure. */
static int __Pyx_init_co_variable(PyObject *inspect, const char* name, int *write_to) {
    PyObject *attr = PyObject_GetAttrString(inspect, name);
    int as_int;
    if (attr == NULL)
        return 0;
    as_int = (int) PyLong_AsLong(attr);
    Py_DECREF(attr);
    *write_to = as_int;
    /* -1 is ambiguous: it signals an error only if an exception is pending. */
    return as_int != -1 || !PyErr_Occurred();
}
/* Limited-API builds cannot see the CO_* code-flag constants from the
 * CPython headers, so read any missing ones from the `inspect` module at
 * module-init time. Returns 0 on success, -1 with a Python error set. */
static int __Pyx_init_co_variables(void) {
    PyObject *inspect;
    int result;
    inspect = PyImport_ImportModule("inspect");
    if (!inspect)
        return -1;  /* FIX: previously a failed import passed NULL on to
                       PyObject_GetAttrString/Py_DECREF; exception is set */
    result =
#if !defined(CO_OPTIMIZED)
        __Pyx_init_co_variable(inspect, "CO_OPTIMIZED", &CO_OPTIMIZED) &&
#endif
#if !defined(CO_NEWLOCALS)
        __Pyx_init_co_variable(inspect, "CO_NEWLOCALS", &CO_NEWLOCALS) &&
#endif
#if !defined(CO_VARARGS)
        __Pyx_init_co_variable(inspect, "CO_VARARGS", &CO_VARARGS) &&
#endif
#if !defined(CO_VARKEYWORDS)
        __Pyx_init_co_variable(inspect, "CO_VARKEYWORDS", &CO_VARKEYWORDS) &&
#endif
#if !defined(CO_ASYNC_GENERATOR)
        __Pyx_init_co_variable(inspect, "CO_ASYNC_GENERATOR", &CO_ASYNC_GENERATOR) &&
#endif
#if !defined(CO_GENERATOR)
        __Pyx_init_co_variable(inspect, "CO_GENERATOR", &CO_GENERATOR) &&
#endif
#if !defined(CO_COROUTINE)
        __Pyx_init_co_variable(inspect, "CO_COROUTINE", &CO_COROUTINE) &&
#endif
        1;
    Py_DECREF(inspect);
    return result ? 0 : -1;
}
#else
/* Outside the Limited API the CO_* flags are compile-time constants from
 * the CPython headers, so there is nothing to initialise at runtime. */
static int __Pyx_init_co_variables(void) {
    return 0;
}
#endif

/* MathInitCode */
#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)
  #ifndef _USE_MATH_DEFINES
    #define _USE_MATH_DEFINES
  #endif
#endif
#include <math.h>
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif

#ifndef CYTHON_CLINE_IN_TRACEBACK_RUNTIME
#define CYTHON_CLINE_IN_TRACEBACK_RUNTIME 0
#endif
#ifndef CYTHON_CLINE_IN_TRACEBACK
#define CYTHON_CLINE_IN_TRACEBACK CYTHON_CLINE_IN_TRACEBACK_RUNTIME
#endif
#if CYTHON_CLINE_IN_TRACEBACK
#define __PYX_MARK_ERR_POS(f_index, lineno)  { __pyx_filename = __pyx_f[f_index]; (void) __pyx_filename; __pyx_lineno = lineno; (void) __pyx_lineno; __pyx_clineno = __LINE__; (void) __pyx_clineno; }
#else
#define __PYX_MARK_ERR_POS(f_index, lineno)  { __pyx_filename = __pyx_f[f_index]; (void) __pyx_filename; __pyx_lineno = lineno; (void) __pyx_lineno; (void) __pyx_clineno; }
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }

#ifdef CYTHON_EXTERN_C
    #undef __PYX_EXTERN_C
    #define __PYX_EXTERN_C CYTHON_EXTERN_C
#elif defined(__PYX_EXTERN_C)
    #ifdef _MSC_VER
    #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
    #else
    #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
    #endif
#else
    #define __PYX_EXTERN_C extern "C++"
#endif

#define __PYX_HAVE__cuda__bindings___nvml
#define __PYX_HAVE_API__cuda__bindings___nvml
/* Early includes */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

    #if PY_MAJOR_VERSION >= 3
      #define __Pyx_PyFloat_FromString(obj)  PyFloat_FromString(obj)
    #else
      #define __Pyx_PyFloat_FromString(obj)  PyFloat_FromString(obj, NULL)
    #endif
    
#include <stddef.h>

    #if PY_MAJOR_VERSION <= 2
    #define PyDict_GetItemWithError _PyDict_GetItemWithError
    #endif

    #if __PYX_LIMITED_VERSION_HEX < 0x030d0000
    /* Backport of PyDict_GetItemStringRef (added in CPython 3.13): build a
     * unicode key from the C string, then delegate to __Pyx_PyDict_GetItemRef.
     * Returns 1/0/-1 like the delegate; *result is NULL unless found. */
    static CYTHON_INLINE int
    __Pyx_CAPI_PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result)
    {
        PyObject *unicode_key = PyUnicode_FromString(key);
        int status = -1;
        if (unicode_key != NULL) {
            status = __Pyx_PyDict_GetItemRef(mp, unicode_key, result);
            Py_DECREF(unicode_key);
        } else {
            *result = NULL;
        }
        return status;
    }
    #else
    #define __Pyx_CAPI_PyDict_GetItemStringRef PyDict_GetItemStringRef
    #endif
    #if PY_VERSION_HEX < 0x030d0000 || (CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030F0000)
    /* Backport of PyDict_SetDefaultRef (CPython 3.13): if `key` is in `d`,
     * return 1 and (when `result` is non-NULL) hand back a new reference to
     * the stored value; otherwise insert `default_value`, return 0, and hand
     * back a new reference to it. Returns -1 on error; when `result` is
     * non-NULL it is always written (NULL on error). */
    static CYTHON_INLINE int
    __Pyx_CAPI_PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value,
                        PyObject **result)
    {
        PyObject *existing;
        int lookup = __Pyx_PyDict_GetItemRef(d, key, &existing);
        if (lookup < 0)
            goto bad;
        if (lookup > 0) {
            /* Key already present: `existing` holds a new reference. */
            if (result)
                *result = existing;
            else
                Py_DECREF(existing);
            return 1;
        }

        /* Key missing: insert the caller-supplied default. */
        if (PyDict_SetItem(d, key, default_value) < 0)
            goto bad;
        if (result) {
            Py_INCREF(default_value);
            *result = default_value;
        }
        return 0;
    bad:
        if (result)
            *result = NULL;
        return -1;
    }
    #else
    #define __Pyx_CAPI_PyDict_SetDefaultRef PyDict_SetDefaultRef
    #endif
    

    #if PY_VERSION_HEX < 0x030d0000
    /* Backport of PyWeakref_GetRef (CPython 3.13): stores a new reference to
     * the referent in *pobj and returns 1; returns 0 with *pobj == NULL when
     * the referent has been collected, and -1 (SystemError) for a bad ref. */
    static CYTHON_INLINE int __Pyx_PyWeakref_GetRef(PyObject *ref, PyObject **pobj)
    {
        PyObject *referent = PyWeakref_GetObject(ref);
        *pobj = NULL;
        if (referent == NULL)
            return -1;  /* SystemError already raised when ref is NULL/invalid */
        if (referent == Py_None)
            return 0;   /* weakly-referenced object is gone */
        Py_INCREF(referent);
        *pobj = referent;
        return 1;
    }
    #else
    #define __Pyx_PyWeakref_GetRef PyWeakref_GetRef
    #endif
    
#include "pythread.h"

    #if (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030600) && !defined(PyContextVar_Get)
    #define PyContextVar_Get(var, d, v)         ((d) ?             ((void)(var), Py_INCREF(d), (v)[0] = (d), 0) :             ((v)[0] = NULL, 0)         )
    #endif
    
#include "ios"
#include "new"
#include "stdexcept"
#include "typeinfo"
#include <vector>
#include <memory>

    /*
     * nullable_unique_ptr<T>: a move-only smart pointer that either owns its
     * pointee (delegating to std::unique_ptr) or merely observes a pointer
     * whose lifetime is managed elsewhere, selected per-instance by the
     * `own_data` flag. Default-constructed instances hold nothing.
     */
    template<typename T>
    class nullable_unique_ptr {
      public:
        nullable_unique_ptr() noexcept = default;

        // A bare nullptr is ambiguous (own it or observe it?); force the
        // explicit two-argument form instead.
        nullable_unique_ptr(std::nullptr_t) noexcept = delete;

        // own_data == true: take ownership of `data`; false: observe only.
        explicit nullable_unique_ptr(T* data, bool own_data):
            own_data_(own_data)
        {
            if (own_data)
                manager_.reset(data);
            else
                raw_data_ = data;
        }

        nullable_unique_ptr(const nullable_unique_ptr&) = delete;

        nullable_unique_ptr& operator=(const nullable_unique_ptr&) = delete;

        nullable_unique_ptr(nullable_unique_ptr&& other) noexcept
        {
            own_data_ = other.own_data_;
            other.own_data_ = false;  // ownership is transferred
            if (own_data_)
            {
                manager_ = std::move(other.manager_);
                raw_data_ = nullptr;  // just in case
            }
            else
            {
                manager_.reset(nullptr);  // just in case
                raw_data_ = other.raw_data_;
            }
        }

        nullable_unique_ptr& operator=(nullable_unique_ptr&& other) noexcept
        {
            // FIX: self-move guard. Without it, `other.own_data_ = false`
            // aliases *this on self-move, steering execution into the
            // non-owning branch which releases the managed object.
            if (this == &other)
                return *this;
            own_data_ = other.own_data_;
            other.own_data_ = false;  // ownership is transferred
            if (own_data_)
            {
                manager_ = std::move(other.manager_);
                raw_data_ = nullptr;  // just in case
            }
            else
            {
                manager_.reset(nullptr);  // release anything previously owned
                raw_data_ = other.raw_data_;
            }
            return *this;
        }

        ~nullable_unique_ptr() = default;

        // Rebind to a new pointer, releasing any currently-owned object.
        void reset(T* data, bool own_data)
        {
            own_data_ = own_data;
            if (own_data_)
            {
                manager_.reset(data);
                raw_data_ = nullptr;
            }
            else
            {
                manager_.reset(nullptr);
                raw_data_ = data;
            }
        }

        void swap(nullable_unique_ptr& other) noexcept
        {
            std::swap(manager_, other.manager_);
            std::swap(raw_data_, other.raw_data_);
            std::swap(own_data_, other.own_data_);
        }

        /*
         * Get the pointer to the underlying object (this is different from data()!).
         */
        T* get() const noexcept
        {
            if (own_data_)
                return manager_.get();
            else
                return raw_data_;
        }

        /*
         * Get the pointer to the underlying buffer (this is different from get()!).
         * NOTE(review): when owning, this calls T::data(), so T must expose a
         * data() member (e.g. std::vector); when observing, the raw pointer is
         * returned unchanged.
         */
        void* data() noexcept
        {
            if (own_data_)
                return manager_.get()->data();
            else
                return raw_data_;
        }

        T& operator*()
        {
            if (own_data_)
                return *manager_;
            else
                return *raw_data_;
        }

      private:
        std::unique_ptr<T> manager_{};   // engaged only when own_data_ is true
        T* raw_data_{nullptr};           // engaged only when own_data_ is false
        bool own_data_{false};           // discriminates the two states above
    };
    
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */

#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif

#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\
    (sizeof(type) < sizeof(Py_ssize_t))  ||\
    (sizeof(type) > sizeof(Py_ssize_t) &&\
          likely(v < (type)PY_SSIZE_T_MAX ||\
                 v == (type)PY_SSIZE_T_MAX)  &&\
          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
                                v == (type)PY_SSIZE_T_MIN)))  ||\
    (sizeof(type) == sizeof(Py_ssize_t) &&\
          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                               v == (type)PY_SSIZE_T_MAX)))  )
/* True iff 0 <= i < limit: the unsigned casts fold the negative-index check
 * and the upper-bound check into a single comparison. */
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    size_t unsigned_i = (size_t) i;
    size_t unsigned_limit = (size_t) limit;
    return unsigned_i < unsigned_limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
    #include <cstdlib>
    #define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
    #define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s);
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char*);
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString        PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if CYTHON_ASSUME_SAFE_MACROS
    #define __Pyx_PyBytes_AsWritableString(s)     ((char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsWritableSString(s)    ((signed char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsWritableUString(s)    ((unsigned char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsString(s)     ((const char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsSString(s)    ((const signed char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsUString(s)    ((const unsigned char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyByteArray_AsString(s) PyByteArray_AS_STRING(s)
#else
    #define __Pyx_PyBytes_AsWritableString(s)     ((char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsWritableSString(s)    ((signed char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsWritableUString(s)    ((unsigned char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsString(s)     ((const char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsSString(s)    ((const signed char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsUString(s)    ((const unsigned char*) PyBytes_AsString(s))
    #define __Pyx_PyByteArray_AsString(s) PyByteArray_AsString(s)
#endif
#define __Pyx_PyObject_AsWritableString(s)    ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s)    ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s)    ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s)    ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s)    ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s)  __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s)   __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s)   __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#define __Pyx_PyUnicode_FromOrdinal(o)       PyUnicode_FromOrdinal((int)o)
#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
/* Return `obj` with its reference count incremented; `obj` must not be NULL. */
static CYTHON_INLINE PyObject *__Pyx_NewRef(PyObject *obj) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030a0000 || defined(Py_NewRef)
    /* Py_NewRef is available since CPython 3.10 (or when provided as a macro). */
    return Py_NewRef(obj);
#else
    Py_INCREF(obj);
    return obj;
#endif
}
/* NULL-tolerant variant of __Pyx_NewRef: returns NULL unchanged, otherwise
 * returns `obj` with its reference count incremented. */
static CYTHON_INLINE PyObject *__Pyx_XNewRef(PyObject *obj) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030a0000 || defined(Py_XNewRef)
    /* Py_XNewRef is available since CPython 3.10 (or when provided as a macro). */
    return Py_XNewRef(obj);
#else
    Py_XINCREF(obj);
    return obj;
#endif
}
static CYTHON_INLINE PyObject *__Pyx_Owned_Py_None(int b);
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Long(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
    (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyLong_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#define __Pyx_PyFloat_AS_DOUBLE(x) PyFloat_AS_DOUBLE(x)
#else
#define __Pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#define __Pyx_PyFloat_AS_DOUBLE(x) PyFloat_AsDouble(x)
#endif
#define __Pyx_PyFloat_AsFloat(x) ((float) __Pyx_PyFloat_AsDouble(x))
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#if CYTHON_USE_PYLONG_INTERNALS
  #if PY_VERSION_HEX >= 0x030C00A7
  #ifndef _PyLong_SIGN_MASK
    #define _PyLong_SIGN_MASK 3
  #endif
  #ifndef _PyLong_NON_SIZE_BITS
    #define _PyLong_NON_SIZE_BITS 3
  #endif
  #define __Pyx_PyLong_Sign(x)  (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK)
  #define __Pyx_PyLong_IsNeg(x)  ((__Pyx_PyLong_Sign(x) & 2) != 0)
  #define __Pyx_PyLong_IsNonNeg(x)  (!__Pyx_PyLong_IsNeg(x))
  #define __Pyx_PyLong_IsZero(x)  (__Pyx_PyLong_Sign(x) & 1)
  #define __Pyx_PyLong_IsPos(x)  (__Pyx_PyLong_Sign(x) == 0)
  #define __Pyx_PyLong_CompactValueUnsigned(x)  (__Pyx_PyLong_Digits(x)[0])
  #define __Pyx_PyLong_DigitCount(x)  ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS))
  #define __Pyx_PyLong_SignedDigitCount(x)\
        ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x))
  #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue)
    #define __Pyx_PyLong_IsCompact(x)     PyUnstable_Long_IsCompact((PyLongObject*) x)
    #define __Pyx_PyLong_CompactValue(x)  PyUnstable_Long_CompactValue((PyLongObject*) x)
  #else
    #define __Pyx_PyLong_IsCompact(x)     (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS))
    #define __Pyx_PyLong_CompactValue(x)  ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0])
  #endif
  typedef Py_ssize_t  __Pyx_compact_pylong;
  typedef size_t  __Pyx_compact_upylong;
  #else
  #define __Pyx_PyLong_IsNeg(x)  (Py_SIZE(x) < 0)
  #define __Pyx_PyLong_IsNonNeg(x)  (Py_SIZE(x) >= 0)
  #define __Pyx_PyLong_IsZero(x)  (Py_SIZE(x) == 0)
  #define __Pyx_PyLong_IsPos(x)  (Py_SIZE(x) > 0)
  #define __Pyx_PyLong_CompactValueUnsigned(x)  ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0])
  #define __Pyx_PyLong_DigitCount(x)  __Pyx_sst_abs(Py_SIZE(x))
  #define __Pyx_PyLong_SignedDigitCount(x)  Py_SIZE(x)
  #define __Pyx_PyLong_IsCompact(x)  (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1)
  #define __Pyx_PyLong_CompactValue(x)\
        ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? -(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0]))
  typedef sdigit  __Pyx_compact_pylong;
  typedef digit  __Pyx_compact_upylong;
  #endif
  #if PY_VERSION_HEX >= 0x030C00A5
  #define __Pyx_PyLong_Digits(x)  (((PyLongObject*)x)->long_value.ob_digit)
  #else
  #define __Pyx_PyLong_Digits(x)  (((PyLongObject*)x)->ob_digit)
  #endif
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
  #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#elif __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
  #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeASCII(c_str, size, NULL)
#else
  #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#endif


/* Test for GCC > 2.95 */
#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif /* __GNUC__ */
/* PretendToInitialize */
#ifdef __cplusplus
#if __cplusplus > 201103L
#include <type_traits>
#endif
/* Default-initialize *ptr so later reads are defined behavior.
 * NOTE(review): under C++14+ the assignment runs only for trivially
 * default-constructible T (non-trivial types are presumed to have been
 * constructed already); pre-C++14 it runs unconditionally. */
template <typename T>
static void __Pyx_pretend_to_initialize(T* ptr) {
#if __cplusplus > 201103L
    if ((std::is_trivially_default_constructible<T>::value))
#endif
        *ptr = T();
    (void)ptr;  /* silence "unused parameter" when the branch is skipped */
}
#else
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) {
    /* In plain C there is nothing to construct; just silence unused warnings. */
    (void)ptr;
}
#endif


#if !CYTHON_USE_MODULE_STATE
static PyObject *__pyx_m = NULL;
#endif
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * const __pyx_cfilenm = __FILE__;
static const char *__pyx_filename;

/* #### Code section: filename_table ### */

static const char* const __pyx_f[] = {
  "cuda/bindings/_nvml.pyx",
  "<stringsource>",
  "cpython/contextvars.pxd",
  "cpython/type.pxd",
  "cpython/bool.pxd",
  "cpython/complex.pxd",
};
/* #### Code section: utility_code_proto_before_types ### */
/* Atomics.proto (used by UnpackUnboundCMethod) */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS
#define __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
#define __pyx_atomic_int_type int
#define __pyx_nonatomic_int_type int
#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
                        (__STDC_VERSION__ >= 201112L) &&\
                        !defined(__STDC_NO_ATOMICS__))
    #include <stdatomic.h>
#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
                    (__cplusplus >= 201103L) ||\
                    (defined(_MSC_VER) && _MSC_VER >= 1700)))
    #include <atomic>
#endif
#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
                        (__STDC_VERSION__ >= 201112L) &&\
                        !defined(__STDC_NO_ATOMICS__) &&\
                       ATOMIC_INT_LOCK_FREE == 2)
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type atomic_int
    #define __pyx_atomic_ptr_type atomic_uintptr_t
    #define __pyx_nonatomic_ptr_type uintptr_t
    #define __pyx_atomic_incr_relaxed(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed)
    #define __pyx_atomic_incr_acq_rel(value) atomic_fetch_add_explicit(value, 1, memory_order_acq_rel)
    #define __pyx_atomic_decr_acq_rel(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel)
    #define __pyx_atomic_sub(value, arg) atomic_fetch_sub(value, arg)
    #define __pyx_atomic_int_cmp_exchange(value, expected, desired) atomic_compare_exchange_strong(value, expected, desired)
    #define __pyx_atomic_load(value) atomic_load(value)
    #define __pyx_atomic_store(value, new_value) atomic_store(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) atomic_load_explicit(value, memory_order_relaxed)
    #define __pyx_atomic_pointer_load_acquire(value) atomic_load_explicit(value, memory_order_acquire)
    #define __pyx_atomic_pointer_exchange(value, new_value) atomic_exchange(value, (__pyx_nonatomic_ptr_type)new_value)
    #define __pyx_atomic_pointer_cmp_exchange(value, expected, desired) atomic_compare_exchange_strong(value, expected, desired)
    #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
        #pragma message ("Using standard C atomics")
    #elif defined(__PYX_DEBUG_ATOMICS)
        #warning "Using standard C atomics"
    #endif
#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
                    (__cplusplus >= 201103L) ||\
\
                    (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\
                    ATOMIC_INT_LOCK_FREE == 2)
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type std::atomic_int
    #define __pyx_atomic_ptr_type std::atomic_uintptr_t
    #define __pyx_nonatomic_ptr_type uintptr_t
    #define __pyx_atomic_incr_relaxed(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed)
    #define __pyx_atomic_incr_acq_rel(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_acq_rel)
    #define __pyx_atomic_decr_acq_rel(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel)
    #define __pyx_atomic_sub(value, arg) std::atomic_fetch_sub(value, arg)
    #define __pyx_atomic_int_cmp_exchange(value, expected, desired) std::atomic_compare_exchange_strong(value, expected, desired)
    #define __pyx_atomic_load(value) std::atomic_load(value)
    #define __pyx_atomic_store(value, new_value) std::atomic_store(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) std::atomic_load_explicit(value, std::memory_order_relaxed)
    #define __pyx_atomic_pointer_load_acquire(value) std::atomic_load_explicit(value, std::memory_order_acquire)
    #define __pyx_atomic_pointer_exchange(value, new_value) std::atomic_exchange(value, (__pyx_nonatomic_ptr_type)new_value)
    #define __pyx_atomic_pointer_cmp_exchange(value, expected, desired) std::atomic_compare_exchange_strong(value, expected, desired)
    #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
        #pragma message ("Using standard C++ atomics")
    #elif defined(__PYX_DEBUG_ATOMICS)
        #warning "Using standard C++ atomics"
    #endif
#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\
                    (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))))
    #define __pyx_atomic_ptr_type void*
    #define __pyx_nonatomic_ptr_type void*
    #define __pyx_atomic_incr_relaxed(value) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_incr_acq_rel(value) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_acq_rel(value) __sync_fetch_and_sub(value, 1)
    #define __pyx_atomic_sub(value, arg) __sync_fetch_and_sub(value, arg)
    static CYTHON_INLINE int __pyx_atomic_int_cmp_exchange(__pyx_atomic_int_type* value, __pyx_nonatomic_int_type* expected, __pyx_nonatomic_int_type desired) {
        __pyx_nonatomic_int_type old = __sync_val_compare_and_swap(value, *expected, desired);
        int result = old == *expected;
        *expected = old;
        return result;
    }
    #define __pyx_atomic_load(value) __sync_fetch_and_add(value, 0)
    #define __pyx_atomic_store(value, new_value) __sync_lock_test_and_set(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) __sync_fetch_and_add(value, 0)
    #define __pyx_atomic_pointer_load_acquire(value) __sync_fetch_and_add(value, 0)
    #define __pyx_atomic_pointer_exchange(value, new_value) __sync_lock_test_and_set(value, (__pyx_atomic_ptr_type)new_value)
    static CYTHON_INLINE int __pyx_atomic_pointer_cmp_exchange(__pyx_atomic_ptr_type* value, __pyx_nonatomic_ptr_type* expected, __pyx_nonatomic_ptr_type desired) {
        __pyx_nonatomic_ptr_type old = __sync_val_compare_and_swap(value, *expected, desired);
        int result = old == *expected;
        *expected = old;
        return result;
    }
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER)
    #include <intrin.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type long
    #define __pyx_atomic_ptr_type void*
    #undef __pyx_nonatomic_int_type
    #define __pyx_nonatomic_int_type long
    #define __pyx_nonatomic_ptr_type void*
    #pragma intrinsic (_InterlockedExchangeAdd, _InterlockedExchange, _InterlockedCompareExchange, _InterlockedCompareExchangePointer, _InterlockedExchangePointer)
    #define __pyx_atomic_incr_relaxed(value) _InterlockedExchangeAdd(value, 1)
    #define __pyx_atomic_incr_acq_rel(value) _InterlockedExchangeAdd(value, 1)
    #define __pyx_atomic_decr_acq_rel(value) _InterlockedExchangeAdd(value, -1)
    #define __pyx_atomic_sub(value, arg) _InterlockedExchangeAdd(value, -arg)
    static CYTHON_INLINE int __pyx_atomic_int_cmp_exchange(__pyx_atomic_int_type* value, __pyx_nonatomic_int_type* expected, __pyx_nonatomic_int_type desired) {
        __pyx_nonatomic_int_type old = _InterlockedCompareExchange(value, desired, *expected);
        int result = old == *expected;
        *expected = old;
        return result;
    }
    #define __pyx_atomic_load(value) _InterlockedExchangeAdd(value, 0)
    #define __pyx_atomic_store(value, new_value) _InterlockedExchange(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) *(void * volatile *)value
    #define __pyx_atomic_pointer_load_acquire(value) _InterlockedCompareExchangePointer(value, 0, 0)
    #define __pyx_atomic_pointer_exchange(value, new_value) _InterlockedExchangePointer(value, (__pyx_atomic_ptr_type)new_value)
    static CYTHON_INLINE int __pyx_atomic_pointer_cmp_exchange(__pyx_atomic_ptr_type* value, __pyx_nonatomic_ptr_type* expected, __pyx_nonatomic_ptr_type desired) {
        __pyx_atomic_ptr_type old = _InterlockedCompareExchangePointer(value, desired, *expected);
        int result = old == *expected;
        *expected = old;
        return result;
    }
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif

/* CriticalSectionsDefinition.proto (used by CriticalSections) */
#if !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
#define __Pyx_PyCriticalSection void*
#define __Pyx_PyCriticalSection2 void*
#define __Pyx_PyCriticalSection_End(cs)
#define __Pyx_PyCriticalSection2_End(cs)
#else
#define __Pyx_PyCriticalSection PyCriticalSection
#define __Pyx_PyCriticalSection2 PyCriticalSection2
#define __Pyx_PyCriticalSection_End PyCriticalSection_End
#define __Pyx_PyCriticalSection2_End PyCriticalSection2_End
#endif

/* CriticalSections.proto (used by ParseKeywordsImpl) */
/* Begin-side counterparts of the critical-section wrappers above.  On builds
 * without free-threading, Begin evaluates to a cast of the section variable
 * to void, silencing unused-variable warnings while doing no locking. */
#if !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
#define __Pyx_PyCriticalSection_Begin(cs, arg) (void)(cs)
#define __Pyx_PyCriticalSection2_Begin(cs, arg1, arg2) (void)(cs)
#else
#define __Pyx_PyCriticalSection_Begin PyCriticalSection_Begin
#define __Pyx_PyCriticalSection2_Begin PyCriticalSection2_Begin
#endif
/* Statement-style critical sections: Py_BEGIN/END_CRITICAL_SECTION exist only
 * from CPython 3.13 (0x030d0000) and are unavailable under the Limited API;
 * older/limited targets get a plain brace pair (scoping only, no locking). */
#if PY_VERSION_HEX < 0x030d0000 || CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_BEGIN_CRITICAL_SECTION(o) {
#define __Pyx_END_CRITICAL_SECTION() }
#else
#define __Pyx_BEGIN_CRITICAL_SECTION Py_BEGIN_CRITICAL_SECTION
#define __Pyx_END_CRITICAL_SECTION Py_END_CRITICAL_SECTION
#endif

/* ForceInitThreads.proto */
/* Default to not forcing thread initialization; a build may predefine this
 * to 1 before inclusion to override. */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif

/* NoFastGil.proto */
/* "Fast GIL" optimization disabled for this build: the __Pyx_* GIL helpers
 * forward straight to the plain PyGILState API, and the FastGIL bookkeeping
 * macros expand to nothing. */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()

/* IncludeStructmemberH.proto (used by FixUpExtensionType) */
#include <structmember.h>

/* BufferFormatStructs.proto */
/* Descriptor structures used when parsing buffer-protocol (PEP 3118) format
 * strings against the C types Cython expects. */
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Static description of one C type as seen by the buffer-format checker. */
typedef struct {
  const char* name;                       /* human-readable type name */
  const struct __Pyx_StructField_* fields; /* member list for struct types; presumably NULL for scalars — TODO confirm */
  size_t size;                            /* sizeof the type, in bytes */
  size_t arraysize[8];                    /* per-dimension extents (up to 8 dims) */
  int ndim;
  char typegroup;                         /* single-char type-category code used by the format parser */
  char is_unsigned;
  int flags;                              /* bit flags, e.g. __PYX_BUF_FLAGS_PACKED_STRUCT */
} __Pyx_TypeInfo;
/* One named member of a struct type: its type, name, and byte offset. */
typedef struct __Pyx_StructField_ {
  const __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
/* Stack element for walking nested struct fields during format parsing. */
typedef struct {
  const __Pyx_StructField* field;
  size_t parent_offset;                   /* cumulative offset of the enclosing struct */
} __Pyx_BufFmt_StackElem;
/* Mutable state of a single format-string parse. */
typedef struct {
  __Pyx_StructField root;                 /* synthetic field wrapping the expected top-level type */
  __Pyx_BufFmt_StackElem* head;           /* top of the nested-struct traversal stack */
  size_t fmt_offset;                      /* current byte offset implied by the format string */
  size_t new_count, enc_count;            /* pending vs. already-encountered repeat counts */
  size_t struct_alignment;
  int is_complex;
  char enc_type;                          /* last type code seen */
  char new_packmode;                      /* packing mode ('@', '=', ...) pending application — TODO confirm meaning */
  char enc_packmode;                      /* packing mode in effect for enc_type */
  char is_valid_array;
} __Pyx_BufFmt_Context;

/* MemviewSliceStruct.proto */
/* A typed-memoryview slice: a borrowed data pointer plus per-dimension
 * geometry, backed by an owning memoryview object for refcounting. */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;  /* owning object; holds the buffer alive */
  char *data;                            /* base pointer of this slice's data */
  Py_ssize_t shape[8];                   /* extent per dimension (max 8 dims) */
  Py_ssize_t strides[8];                 /* byte stride per dimension */
  Py_ssize_t suboffsets[8];              /* PEP 3118 suboffsets for indirect dims */
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m)  (m.shape[0])
/* Per-dimension access-kind flags and contiguity markers used when
 * validating a buffer against a declared memoryview layout. */
#define __Pyx_MEMVIEW_DIRECT   1
#define __Pyx_MEMVIEW_PTR      2
#define __Pyx_MEMVIEW_FULL     4
#define __Pyx_MEMVIEW_CONTIG   8
#define __Pyx_MEMVIEW_STRIDED  16
#define __Pyx_MEMVIEW_FOLLOW   32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
#define __Pyx_MEMSLICE_INIT  { 0, 0, { 0 }, { 0 }, { 0 } }
/* Slice acquisition counting: lock-free atomics when available, otherwise
 * fall back to the *_locked helpers guarded by the memoryview's own lock. */
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_relaxed(__pyx_get_slice_count_pointer(memview))
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_acq_rel(__pyx_get_slice_count_pointer(memview))
#else
    #define __pyx_add_acquisition_count(memview)\
            __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif

/* #### Code section: numeric_typedefs ### */

/* "cy_nvml.pxd":746
 *     unsigned char moduleId
 * 
 * ctypedef unsigned int nvmlDeviceArchitecture_t 'nvmlDeviceArchitecture_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
*/
typedef unsigned int nvmlDeviceArchitecture_t;

/* "cy_nvml.pxd":747
 * 
 * ctypedef unsigned int nvmlDeviceArchitecture_t 'nvmlDeviceArchitecture_t'
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
*/
typedef unsigned int nvmlBusType_t;

/* "cy_nvml.pxd":748
 * ctypedef unsigned int nvmlDeviceArchitecture_t 'nvmlDeviceArchitecture_t'
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
*/
typedef unsigned int nvmlFanControlPolicy_t;

/* "cy_nvml.pxd":749
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
*/
typedef unsigned int nvmlPowerSource_t;

/* "cy_nvml.pxd":750
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'
*/
typedef unsigned char nvmlPowerScopeType_t;

/* "cy_nvml.pxd":751
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'
 * ctypedef struct nvmlVgpuHeterogeneousMode_v1_t 'nvmlVgpuHeterogeneousMode_v1_t':
*/
typedef unsigned int nvmlVgpuTypeId_t;

/* "cy_nvml.pxd":752
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuHeterogeneousMode_v1_t 'nvmlVgpuHeterogeneousMode_v1_t':
 *     unsigned int version
*/
typedef unsigned int nvmlVgpuInstance_t;

/* "cy_nvml.pxd":791
 *     unsigned long long attackerAdvantage
 * 
 * ctypedef unsigned char nvmlGpuFabricState_t 'nvmlGpuFabricState_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlSystemDriverBranchInfo_v1_t 'nvmlSystemDriverBranchInfo_v1_t':
 *     unsigned int version
*/
typedef unsigned char nvmlGpuFabricState_t;

/* "cy_nvml.pxd":796
 *     char branch[80]
 * 
 * ctypedef unsigned int nvmlAffinityScope_t 'nvmlAffinityScope_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlTemperature_v1_t 'nvmlTemperature_v1_t':
 *     unsigned int version
*/
typedef unsigned int nvmlAffinityScope_t;
/* #### Code section: complex_type_declarations ### */
/* #### Code section: type_declarations ### */

/*--- Type declarations ---*/
struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization;
struct __pyx_obj_4cuda_8bindings_5_nvml_Memory;
struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2;
struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory;
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes;
struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;
struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_Value;
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0;
struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample;
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2;
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2;
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4;
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;
struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry;
struct __pyx_obj_4cuda_8bindings_5_nvml_LedState;
struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_EventData;
struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats;
struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats;
struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState;
struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement;
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport;
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;
struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy;
struct __pyx_obj_4cuda_8bindings_5_nvml_Sample;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings;
struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus;
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature;
struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;
struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures;
struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2;
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct nvmlPciInfoExt_v1_t;
typedef struct nvmlPciInfoExt_v1_t nvmlPciInfoExt_v1_t;
struct nvmlCoolerInfo_v1_t;
typedef struct nvmlCoolerInfo_v1_t nvmlCoolerInfo_v1_t;
struct nvmlDramEncryptionInfo_v1_t;
typedef struct nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_v1_t;
struct nvmlMarginTemperature_v1_t;
typedef struct nvmlMarginTemperature_v1_t nvmlMarginTemperature_v1_t;
struct nvmlClockOffset_v1_t;
typedef struct nvmlClockOffset_v1_t nvmlClockOffset_v1_t;
struct nvmlFanSpeedInfo_v1_t;
typedef struct nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_v1_t;
struct nvmlDevicePerfModes_v1_t;
typedef struct nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_v1_t;
struct nvmlDeviceCurrentClockFreqs_v1_t;
typedef struct nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_v1_t;
struct nvmlEccSramErrorStatus_v1_t;
typedef struct nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_v1_t;
struct nvmlPlatformInfo_v2_t;
typedef struct nvmlPlatformInfo_v2_t nvmlPlatformInfo_v2_t;
struct nvmlVgpuHeterogeneousMode_v1_t;
typedef struct nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_v1_t;
struct nvmlVgpuPlacementId_v1_t;
typedef struct nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_v1_t;
struct nvmlVgpuPlacementList_v2_t;
typedef struct nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_v2_t;
struct nvmlVgpuTypeBar1Info_v1_t;
typedef struct nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_v1_t;
struct nvmlVgpuRuntimeState_v1_t;
typedef struct nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_v1_t;
struct nvmlSystemConfComputeSettings_v1_t;
typedef struct nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_v1_t;
struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t;
typedef struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_v1_t;
struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t;
typedef struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_v1_t;
struct nvmlSystemDriverBranchInfo_v1_t;
typedef struct nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_v1_t;
struct nvmlTemperature_v1_t;
typedef struct nvmlTemperature_v1_t nvmlTemperature_v1_t;
struct nvmlNvlinkSupportedBwModes_v1_t;
typedef struct nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_v1_t;
struct nvmlNvlinkGetBwMode_v1_t;
typedef struct nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_v1_t;
struct nvmlNvlinkSetBwMode_v1_t;
typedef struct nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_v1_t;
struct nvmlDeviceCapabilities_v1_t;
typedef struct nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_v1_t;
struct nvmlPowerSmoothingProfile_v1_t;
typedef struct nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_v1_t;
struct nvmlPowerSmoothingState_v1_t;
typedef struct nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_v1_t;
struct nvmlDeviceAddressingMode_v1_t;
typedef struct nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_v1_t;
struct nvmlRepairStatus_v1_t;
typedef struct nvmlRepairStatus_v1_t nvmlRepairStatus_v1_t;
struct nvmlPdi_v1_t;
typedef struct nvmlPdi_v1_t nvmlPdi_v1_t;
struct nvmlPciInfo_t;
typedef struct nvmlPciInfo_t nvmlPciInfo_t;
struct nvmlEccErrorCounts_t;
typedef struct nvmlEccErrorCounts_t nvmlEccErrorCounts_t;
struct nvmlUtilization_t;
typedef struct nvmlUtilization_t nvmlUtilization_t;
struct nvmlMemory_t;
typedef struct nvmlMemory_t nvmlMemory_t;
struct nvmlMemory_v2_t;
typedef struct nvmlMemory_v2_t nvmlMemory_v2_t;
struct nvmlBAR1Memory_t;
typedef struct nvmlBAR1Memory_t nvmlBAR1Memory_t;
struct nvmlProcessInfo_v1_t;
typedef struct nvmlProcessInfo_v1_t nvmlProcessInfo_v1_t;
struct nvmlProcessInfo_v2_t;
typedef struct nvmlProcessInfo_v2_t nvmlProcessInfo_v2_t;
struct nvmlProcessInfo_t;
typedef struct nvmlProcessInfo_t nvmlProcessInfo_t;
struct nvmlProcessDetail_v1_t;
typedef struct nvmlProcessDetail_v1_t nvmlProcessDetail_v1_t;
struct nvmlDeviceAttributes_t;
typedef struct nvmlDeviceAttributes_t nvmlDeviceAttributes_t;
struct nvmlC2cModeInfo_v1_t;
typedef struct nvmlC2cModeInfo_v1_t nvmlC2cModeInfo_v1_t;
struct nvmlRowRemapperHistogramValues_t;
typedef struct nvmlRowRemapperHistogramValues_t nvmlRowRemapperHistogramValues_t;
struct nvmlNvLinkUtilizationControl_t;
typedef struct nvmlNvLinkUtilizationControl_t nvmlNvLinkUtilizationControl_t;
struct nvmlBridgeChipInfo_t;
typedef struct nvmlBridgeChipInfo_t nvmlBridgeChipInfo_t;
union nvmlValue_t;
typedef union nvmlValue_t nvmlValue_t;
struct nvmlViolationTime_t;
typedef struct nvmlViolationTime_t nvmlViolationTime_t;
struct _anon_pod0;
typedef struct _anon_pod0 _anon_pod0;
union nvmlUUIDValue_t;
typedef union nvmlUUIDValue_t nvmlUUIDValue_t;
struct nvmlClkMonFaultInfo_t;
typedef struct nvmlClkMonFaultInfo_t nvmlClkMonFaultInfo_t;
struct nvmlProcessUtilizationSample_t;
typedef struct nvmlProcessUtilizationSample_t nvmlProcessUtilizationSample_t;
struct nvmlProcessUtilizationInfo_v1_t;
typedef struct nvmlProcessUtilizationInfo_v1_t nvmlProcessUtilizationInfo_v1_t;
struct nvmlPlatformInfo_v1_t;
typedef struct nvmlPlatformInfo_v1_t nvmlPlatformInfo_v1_t;
struct _anon_pod1;
typedef struct _anon_pod1 _anon_pod1;
struct nvmlVgpuPlacementList_v1_t;
typedef struct nvmlVgpuPlacementList_v1_t nvmlVgpuPlacementList_v1_t;
struct _anon_pod2;
typedef struct _anon_pod2 _anon_pod2;
struct _anon_pod3;
typedef struct _anon_pod3 _anon_pod3;
struct nvmlVgpuSchedulerLogEntry_t;
typedef struct nvmlVgpuSchedulerLogEntry_t nvmlVgpuSchedulerLogEntry_t;
struct _anon_pod4;
typedef struct _anon_pod4 _anon_pod4;
struct _anon_pod5;
typedef struct _anon_pod5 _anon_pod5;
struct nvmlVgpuSchedulerCapabilities_t;
typedef struct nvmlVgpuSchedulerCapabilities_t nvmlVgpuSchedulerCapabilities_t;
struct nvmlVgpuLicenseExpiry_t;
typedef struct nvmlVgpuLicenseExpiry_t nvmlVgpuLicenseExpiry_t;
struct nvmlGridLicenseExpiry_t;
typedef struct nvmlGridLicenseExpiry_t nvmlGridLicenseExpiry_t;
struct nvmlNvLinkPowerThres_t;
typedef struct nvmlNvLinkPowerThres_t nvmlNvLinkPowerThres_t;
struct nvmlHwbcEntry_t;
typedef struct nvmlHwbcEntry_t nvmlHwbcEntry_t;
struct nvmlLedState_t;
typedef struct nvmlLedState_t nvmlLedState_t;
struct nvmlUnitInfo_t;
typedef struct nvmlUnitInfo_t nvmlUnitInfo_t;
struct nvmlPSUInfo_t;
typedef struct nvmlPSUInfo_t nvmlPSUInfo_t;
struct nvmlUnitFanInfo_t;
typedef struct nvmlUnitFanInfo_t nvmlUnitFanInfo_t;
struct nvmlSystemEventData_v1_t;
typedef struct nvmlSystemEventData_v1_t nvmlSystemEventData_v1_t;
struct nvmlAccountingStats_t;
typedef struct nvmlAccountingStats_t nvmlAccountingStats_t;
struct nvmlFBCStats_t;
typedef struct nvmlFBCStats_t nvmlFBCStats_t;
struct nvmlConfComputeSystemCaps_t;
typedef struct nvmlConfComputeSystemCaps_t nvmlConfComputeSystemCaps_t;
struct nvmlConfComputeSystemState_t;
typedef struct nvmlConfComputeSystemState_t nvmlConfComputeSystemState_t;
struct nvmlConfComputeMemSizeInfo_t;
typedef struct nvmlConfComputeMemSizeInfo_t nvmlConfComputeMemSizeInfo_t;
struct nvmlConfComputeGpuCertificate_t;
typedef struct nvmlConfComputeGpuCertificate_t nvmlConfComputeGpuCertificate_t;
struct nvmlConfComputeGpuAttestationReport_t;
typedef struct nvmlConfComputeGpuAttestationReport_t nvmlConfComputeGpuAttestationReport_t;
struct nvmlVgpuVersion_t;
typedef struct nvmlVgpuVersion_t nvmlVgpuVersion_t;
struct nvmlVgpuMetadata_t;
typedef struct nvmlVgpuMetadata_t nvmlVgpuMetadata_t;
struct nvmlVgpuPgpuCompatibility_t;
typedef struct nvmlVgpuPgpuCompatibility_t nvmlVgpuPgpuCompatibility_t;
struct nvmlGpuInstancePlacement_t;
typedef struct nvmlGpuInstancePlacement_t nvmlGpuInstancePlacement_t;
struct nvmlGpuInstanceProfileInfo_t;
typedef struct nvmlGpuInstanceProfileInfo_t nvmlGpuInstanceProfileInfo_t;
struct nvmlGpuInstanceProfileInfo_v2_t;
typedef struct nvmlGpuInstanceProfileInfo_v2_t nvmlGpuInstanceProfileInfo_v2_t;
struct nvmlGpuInstanceProfileInfo_v3_t;
typedef struct nvmlGpuInstanceProfileInfo_v3_t nvmlGpuInstanceProfileInfo_v3_t;
struct nvmlComputeInstancePlacement_t;
typedef struct nvmlComputeInstancePlacement_t nvmlComputeInstancePlacement_t;
struct nvmlComputeInstanceProfileInfo_t;
typedef struct nvmlComputeInstanceProfileInfo_t nvmlComputeInstanceProfileInfo_t;
struct nvmlComputeInstanceProfileInfo_v2_t;
typedef struct nvmlComputeInstanceProfileInfo_v2_t nvmlComputeInstanceProfileInfo_v2_t;
struct nvmlComputeInstanceProfileInfo_v3_t;
typedef struct nvmlComputeInstanceProfileInfo_v3_t nvmlComputeInstanceProfileInfo_v3_t;
struct _anon_pod6;
typedef struct _anon_pod6 _anon_pod6;
struct nvmlGpmSupport_t;
typedef struct nvmlGpmSupport_t nvmlGpmSupport_t;
struct nvmlMask255_t;
typedef struct nvmlMask255_t nvmlMask255_t;
struct nvmlDevicePowerMizerModes_v1_t;
typedef struct nvmlDevicePowerMizerModes_v1_t nvmlDevicePowerMizerModes_v1_t;
struct nvmlHostname_v1_t;
typedef struct nvmlHostname_v1_t nvmlHostname_v1_t;
struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t;
typedef struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t nvmlEccSramUniqueUncorrectedErrorEntry_v1_t;
struct nvmlNvLinkInfo_v1_t;
typedef struct nvmlNvLinkInfo_v1_t nvmlNvLinkInfo_v1_t;
struct nvmlNvlinkFirmwareVersion_t;
typedef struct nvmlNvlinkFirmwareVersion_t nvmlNvlinkFirmwareVersion_t;
union _anon_pod7;
typedef union _anon_pod7 _anon_pod7;
struct nvmlPowerValue_v2_t;
typedef struct nvmlPowerValue_v2_t nvmlPowerValue_v2_t;
struct nvmlVgpuTypeIdInfo_v1_t;
typedef struct nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_v1_t;
struct nvmlVgpuTypeMaxInstance_v1_t;
typedef struct nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_v1_t;
struct nvmlVgpuCreatablePlacementInfo_v1_t;
typedef struct nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_v1_t;
struct nvmlVgpuProcessUtilizationSample_t;
typedef struct nvmlVgpuProcessUtilizationSample_t nvmlVgpuProcessUtilizationSample_t;
struct nvmlVgpuProcessUtilizationInfo_v1_t;
typedef struct nvmlVgpuProcessUtilizationInfo_v1_t nvmlVgpuProcessUtilizationInfo_v1_t;
struct nvmlActiveVgpuInstanceInfo_v1_t;
typedef struct nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_v1_t;
struct nvmlEncoderSessionInfo_t;
typedef struct nvmlEncoderSessionInfo_t nvmlEncoderSessionInfo_t;
struct nvmlFBCSessionInfo_t;
typedef struct nvmlFBCSessionInfo_t nvmlFBCSessionInfo_t;
struct nvmlGpuFabricInfo_t;
typedef struct nvmlGpuFabricInfo_t nvmlGpuFabricInfo_t;
struct nvmlGpuFabricInfo_v2_t;
typedef struct nvmlGpuFabricInfo_v2_t nvmlGpuFabricInfo_v2_t;
struct nvmlGpuFabricInfo_v3_t;
typedef struct nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfo_v3_t;
struct nvmlEventData_t;
typedef struct nvmlEventData_t nvmlEventData_t;
struct nvmlSystemEventSetCreateRequest_v1_t;
typedef struct nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_v1_t;
struct nvmlSystemEventSetFreeRequest_v1_t;
typedef struct nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_v1_t;
struct nvmlSystemRegisterEventRequest_v1_t;
typedef struct nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_v1_t;
struct nvmlExcludedDeviceInfo_t;
typedef struct nvmlExcludedDeviceInfo_t nvmlExcludedDeviceInfo_t;
struct nvmlProcessDetailList_v1_t;
typedef struct nvmlProcessDetailList_v1_t nvmlProcessDetailList_v1_t;
struct nvmlBridgeChipHierarchy_t;
typedef struct nvmlBridgeChipHierarchy_t nvmlBridgeChipHierarchy_t;
struct nvmlSample_t;
typedef struct nvmlSample_t nvmlSample_t;
struct nvmlVgpuInstanceUtilizationSample_t;
typedef struct nvmlVgpuInstanceUtilizationSample_t nvmlVgpuInstanceUtilizationSample_t;
struct nvmlVgpuInstanceUtilizationInfo_v1_t;
typedef struct nvmlVgpuInstanceUtilizationInfo_v1_t nvmlVgpuInstanceUtilizationInfo_v1_t;
struct nvmlFieldValue_t;
typedef struct nvmlFieldValue_t nvmlFieldValue_t;
struct nvmlGpuThermalSettings_t;
typedef struct nvmlGpuThermalSettings_t nvmlGpuThermalSettings_t;
struct nvmlUUID_v1_t;
typedef struct nvmlUUID_v1_t nvmlUUID_v1_t;
struct nvmlClkMonStatus_t;
typedef struct nvmlClkMonStatus_t nvmlClkMonStatus_t;
struct nvmlProcessesUtilizationInfo_v1_t;
typedef struct nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_v1_t;
/* Opaque forward declarations (plus matching typedefs) for NVML struct/union
 * types referenced by the generated bindings below.  Only pointers to these
 * types are used at this point; the full definitions are provided elsewhere
 * (presumably later in this generated file or by nvml.h — generated code,
 * do not edit by hand). */
struct nvmlGpuDynamicPstatesInfo_t;
typedef struct nvmlGpuDynamicPstatesInfo_t nvmlGpuDynamicPstatesInfo_t;
union nvmlVgpuSchedulerParams_t;
typedef union nvmlVgpuSchedulerParams_t nvmlVgpuSchedulerParams_t;
union nvmlVgpuSchedulerSetParams_t;
typedef union nvmlVgpuSchedulerSetParams_t nvmlVgpuSchedulerSetParams_t;
struct nvmlVgpuLicenseInfo_t;
typedef struct nvmlVgpuLicenseInfo_t nvmlVgpuLicenseInfo_t;
struct nvmlGridLicensableFeature_t;
typedef struct nvmlGridLicensableFeature_t nvmlGridLicensableFeature_t;
struct nvmlUnitFanSpeeds_t;
typedef struct nvmlUnitFanSpeeds_t nvmlUnitFanSpeeds_t;
struct nvmlSystemEventSetWaitRequest_v1_t;
typedef struct nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_v1_t;
struct nvmlVgpuPgpuMetadata_t;
typedef struct nvmlVgpuPgpuMetadata_t nvmlVgpuPgpuMetadata_t;
struct nvmlGpuInstanceInfo_t;
typedef struct nvmlGpuInstanceInfo_t nvmlGpuInstanceInfo_t;
struct nvmlComputeInstanceInfo_t;
typedef struct nvmlComputeInstanceInfo_t nvmlComputeInstanceInfo_t;
struct nvmlGpmMetric_t;
typedef struct nvmlGpmMetric_t nvmlGpmMetric_t;
struct nvmlWorkloadPowerProfileInfo_v1_t;
typedef struct nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_v1_t;
struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t;
typedef struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_v1_t;
struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t;
typedef struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_v1_t;
struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t;
typedef struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_v1_t;
struct nvmlNvlinkFirmwareInfo_t;
typedef struct nvmlNvlinkFirmwareInfo_t nvmlNvlinkFirmwareInfo_t;
struct nvmlPRMTLV_v1_t;
typedef struct nvmlPRMTLV_v1_t nvmlPRMTLV_v1_t;
struct nvmlVgpuProcessesUtilizationInfo_v1_t;
typedef struct nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_v1_t;
struct nvmlVgpuInstancesUtilizationInfo_v1_t;
typedef struct nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_v1_t;
struct nvmlVgpuSchedulerLog_t;
typedef struct nvmlVgpuSchedulerLog_t nvmlVgpuSchedulerLog_t;
struct nvmlVgpuSchedulerGetState_t;
typedef struct nvmlVgpuSchedulerGetState_t nvmlVgpuSchedulerGetState_t;
struct nvmlVgpuSchedulerStateInfo_v1_t;
typedef struct nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_v1_t;
struct nvmlVgpuSchedulerLogInfo_v1_t;
typedef struct nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_v1_t;
struct nvmlVgpuSchedulerSetState_t;
typedef struct nvmlVgpuSchedulerSetState_t nvmlVgpuSchedulerSetState_t;
struct nvmlVgpuSchedulerState_v1_t;
typedef struct nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_v1_t;
struct nvmlGridLicensableFeatures_t;
typedef struct nvmlGridLicensableFeatures_t nvmlGridLicensableFeatures_t;
struct nvmlGpmMetricsGet_t;
typedef struct nvmlGpmMetricsGet_t nvmlGpmMetricsGet_t;
struct nvmlNvLinkInfo_v2_t;
typedef struct nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_v2_t;
struct nvmlWorkloadPowerProfileProfilesInfo_v1_t;
typedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_v1_t;

/* "cy_nvml.pxd":15
 * 
 * # enums
 * ctypedef enum nvmlBridgeChipType_t "nvmlBridgeChipType_t":             # <<<<<<<<<<<<<<
 *     NVML_BRIDGE_CHIP_PLX "NVML_BRIDGE_CHIP_PLX" = 0
 *     NVML_BRIDGE_CHIP_BRO4 "NVML_BRIDGE_CHIP_BRO4" = 1
*/
/* PCIe bridge chip type (PLX or BRO4); mirrors nvmlBridgeChipType_t from cy_nvml.pxd:15. */
enum nvmlBridgeChipType_t {
  NVML_BRIDGE_CHIP_PLX = 0,
  NVML_BRIDGE_CHIP_BRO4 = 1
};
typedef enum nvmlBridgeChipType_t nvmlBridgeChipType_t;

/* "cy_nvml.pxd":19
 *     NVML_BRIDGE_CHIP_BRO4 "NVML_BRIDGE_CHIP_BRO4" = 1
 * 
 * ctypedef enum nvmlNvLinkUtilizationCountUnits_t "nvmlNvLinkUtilizationCountUnits_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_COUNTER_UNIT_CYCLES "NVML_NVLINK_COUNTER_UNIT_CYCLES" = 0
 *     NVML_NVLINK_COUNTER_UNIT_PACKETS "NVML_NVLINK_COUNTER_UNIT_PACKETS" = 1
*/
/* Units for NvLink utilization counters; *_COUNT is the number of valid units. */
enum nvmlNvLinkUtilizationCountUnits_t {
  NVML_NVLINK_COUNTER_UNIT_CYCLES = 0,
  NVML_NVLINK_COUNTER_UNIT_PACKETS = 1,
  NVML_NVLINK_COUNTER_UNIT_BYTES = 2,
  NVML_NVLINK_COUNTER_UNIT_RESERVED = 3,
  NVML_NVLINK_COUNTER_UNIT_COUNT
};
typedef enum nvmlNvLinkUtilizationCountUnits_t nvmlNvLinkUtilizationCountUnits_t;

/* "cy_nvml.pxd":26
 *     NVML_NVLINK_COUNTER_UNIT_COUNT "NVML_NVLINK_COUNTER_UNIT_COUNT"
 * 
 * ctypedef enum nvmlNvLinkUtilizationCountPktTypes_t "nvmlNvLinkUtilizationCountPktTypes_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_COUNTER_PKTFILTER_NOP "NVML_NVLINK_COUNTER_PKTFILTER_NOP" = 0x1
 *     NVML_NVLINK_COUNTER_PKTFILTER_READ "NVML_NVLINK_COUNTER_PKTFILTER_READ" = 0x2
*/
/* Bit flags selecting which NvLink packet types a utilization counter tracks;
 * values are single bits so they can be OR-ed (ALL = 0xFF covers every flag). */
enum nvmlNvLinkUtilizationCountPktTypes_t {
  NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1,
  NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2,
  NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4,
  NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8,
  NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10,
  NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20,
  NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40,
  NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80,
  NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF
};
typedef enum nvmlNvLinkUtilizationCountPktTypes_t nvmlNvLinkUtilizationCountPktTypes_t;

/* "cy_nvml.pxd":37
 *     NVML_NVLINK_COUNTER_PKTFILTER_ALL "NVML_NVLINK_COUNTER_PKTFILTER_ALL" = 0xFF
 * 
 * ctypedef enum nvmlNvLinkCapability_t "nvmlNvLinkCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_CAP_P2P_SUPPORTED "NVML_NVLINK_CAP_P2P_SUPPORTED" = 0
 *     NVML_NVLINK_CAP_SYSMEM_ACCESS "NVML_NVLINK_CAP_SYSMEM_ACCESS" = 1
*/
/* Queryable NvLink capabilities; *_COUNT is the number of defined capabilities. */
enum nvmlNvLinkCapability_t {
  NVML_NVLINK_CAP_P2P_SUPPORTED = 0,
  NVML_NVLINK_CAP_SYSMEM_ACCESS = 1,
  NVML_NVLINK_CAP_P2P_ATOMICS = 2,
  NVML_NVLINK_CAP_SYSMEM_ATOMICS = 3,
  NVML_NVLINK_CAP_SLI_BRIDGE = 4,
  NVML_NVLINK_CAP_VALID = 5,
  NVML_NVLINK_CAP_COUNT
};
typedef enum nvmlNvLinkCapability_t nvmlNvLinkCapability_t;

/* "cy_nvml.pxd":46
 *     NVML_NVLINK_CAP_COUNT "NVML_NVLINK_CAP_COUNT"
 * 
 * ctypedef enum nvmlNvLinkErrorCounter_t "nvmlNvLinkErrorCounter_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_ERROR_DL_REPLAY "NVML_NVLINK_ERROR_DL_REPLAY" = 0
 *     NVML_NVLINK_ERROR_DL_RECOVERY "NVML_NVLINK_ERROR_DL_RECOVERY" = 1
*/
/* NvLink error counter selectors (data-link replay/recovery/CRC/ECC). */
enum nvmlNvLinkErrorCounter_t {
  NVML_NVLINK_ERROR_DL_REPLAY = 0,
  NVML_NVLINK_ERROR_DL_RECOVERY = 1,
  NVML_NVLINK_ERROR_DL_CRC_FLIT = 2,
  NVML_NVLINK_ERROR_DL_CRC_DATA = 3,
  NVML_NVLINK_ERROR_DL_ECC_DATA = 4,
  NVML_NVLINK_ERROR_COUNT
};
typedef enum nvmlNvLinkErrorCounter_t nvmlNvLinkErrorCounter_t;

/* "cy_nvml.pxd":54
 *     NVML_NVLINK_ERROR_COUNT "NVML_NVLINK_ERROR_COUNT"
 * 
 * ctypedef enum nvmlIntNvLinkDeviceType_t "nvmlIntNvLinkDeviceType_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_DEVICE_TYPE_GPU "NVML_NVLINK_DEVICE_TYPE_GPU" = 0x00
 *     NVML_NVLINK_DEVICE_TYPE_IBMNPU "NVML_NVLINK_DEVICE_TYPE_IBMNPU" = 0x01
*/
/* Type of the remote device on an NvLink connection (0xFF = unknown). */
enum nvmlIntNvLinkDeviceType_t {
  NVML_NVLINK_DEVICE_TYPE_GPU = 0x00,
  NVML_NVLINK_DEVICE_TYPE_IBMNPU = 0x01,
  NVML_NVLINK_DEVICE_TYPE_SWITCH = 0x02,
  NVML_NVLINK_DEVICE_TYPE_UNKNOWN = 0xFF
};
typedef enum nvmlIntNvLinkDeviceType_t nvmlIntNvLinkDeviceType_t;

/* "cy_nvml.pxd":60
 *     NVML_NVLINK_DEVICE_TYPE_UNKNOWN "NVML_NVLINK_DEVICE_TYPE_UNKNOWN" = 0xFF
 * 
 * ctypedef enum nvmlGpuTopologyLevel_t "nvmlGpuTopologyLevel_t":             # <<<<<<<<<<<<<<
 *     NVML_TOPOLOGY_INTERNAL "NVML_TOPOLOGY_INTERNAL" = 0
 *     NVML_TOPOLOGY_SINGLE "NVML_TOPOLOGY_SINGLE" = 10
*/
/* GPU topology proximity levels; values are spaced by 10 so intermediate
 * levels can be inserted without renumbering. */
enum nvmlGpuTopologyLevel_t {
  NVML_TOPOLOGY_INTERNAL = 0,
  NVML_TOPOLOGY_SINGLE = 10,
  NVML_TOPOLOGY_MULTIPLE = 20,
  NVML_TOPOLOGY_HOSTBRIDGE = 30,
  NVML_TOPOLOGY_NODE = 40,
  NVML_TOPOLOGY_SYSTEM = 50
};
typedef enum nvmlGpuTopologyLevel_t nvmlGpuTopologyLevel_t;

/* "cy_nvml.pxd":68
 *     NVML_TOPOLOGY_SYSTEM "NVML_TOPOLOGY_SYSTEM" = 50
 * 
 * ctypedef enum nvmlGpuP2PStatus_t "nvmlGpuP2PStatus_t":             # <<<<<<<<<<<<<<
 *     NVML_P2P_STATUS_OK "NVML_P2P_STATUS_OK" = 0
 *     NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED "NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED"
*/
/* Peer-to-peer capability status between two GPUs.
 * NOTE: the misspelled NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED is the historical
 * NVML name; the correctly spelled *_SUPPORTED enumerator is an alias with the
 * same value. Do not "fix" the spelling — it must match nvml.h. */
enum nvmlGpuP2PStatus_t {

  /* "cy_nvml.pxd":71
 *     NVML_P2P_STATUS_OK "NVML_P2P_STATUS_OK" = 0
 *     NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED "NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED"
 *     NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED "NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED" = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED             # <<<<<<<<<<<<<<
 *     NVML_P2P_STATUS_GPU_NOT_SUPPORTED "NVML_P2P_STATUS_GPU_NOT_SUPPORTED"
 *     NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED "NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED"
 */
  NVML_P2P_STATUS_OK = 0,
  NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED,
  NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED,
  NVML_P2P_STATUS_GPU_NOT_SUPPORTED,
  NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED,
  NVML_P2P_STATUS_DISABLED_BY_REGKEY,
  NVML_P2P_STATUS_NOT_SUPPORTED,
  NVML_P2P_STATUS_UNKNOWN
};
typedef enum nvmlGpuP2PStatus_t nvmlGpuP2PStatus_t;

/* "cy_nvml.pxd":78
 *     NVML_P2P_STATUS_UNKNOWN "NVML_P2P_STATUS_UNKNOWN"
 * 
 * ctypedef enum nvmlGpuP2PCapsIndex_t "nvmlGpuP2PCapsIndex_t":             # <<<<<<<<<<<<<<
 *     NVML_P2P_CAPS_INDEX_READ "NVML_P2P_CAPS_INDEX_READ" = 0
 *     NVML_P2P_CAPS_INDEX_WRITE "NVML_P2P_CAPS_INDEX_WRITE" = 1
*/
/* Index selecting which P2P capability to query; *_PROP is an alias of *_PCI. */
enum nvmlGpuP2PCapsIndex_t {

  /* "cy_nvml.pxd":84
 *     NVML_P2P_CAPS_INDEX_ATOMICS "NVML_P2P_CAPS_INDEX_ATOMICS" = 3
 *     NVML_P2P_CAPS_INDEX_PCI "NVML_P2P_CAPS_INDEX_PCI" = 4
 *     NVML_P2P_CAPS_INDEX_PROP "NVML_P2P_CAPS_INDEX_PROP" = NVML_P2P_CAPS_INDEX_PCI             # <<<<<<<<<<<<<<
 *     NVML_P2P_CAPS_INDEX_UNKNOWN "NVML_P2P_CAPS_INDEX_UNKNOWN" = 5
 * 
 */
  NVML_P2P_CAPS_INDEX_READ = 0,
  NVML_P2P_CAPS_INDEX_WRITE = 1,
  NVML_P2P_CAPS_INDEX_NVLINK = 2,
  NVML_P2P_CAPS_INDEX_ATOMICS = 3,
  NVML_P2P_CAPS_INDEX_PCI = 4,
  NVML_P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PCI,
  NVML_P2P_CAPS_INDEX_UNKNOWN = 5
};
typedef enum nvmlGpuP2PCapsIndex_t nvmlGpuP2PCapsIndex_t;

/* "cy_nvml.pxd":87
 *     NVML_P2P_CAPS_INDEX_UNKNOWN "NVML_P2P_CAPS_INDEX_UNKNOWN" = 5
 * 
 * ctypedef enum nvmlSamplingType_t "nvmlSamplingType_t":             # <<<<<<<<<<<<<<
 *     NVML_TOTAL_POWER_SAMPLES "NVML_TOTAL_POWER_SAMPLES" = 0
 *     NVML_GPU_UTILIZATION_SAMPLES "NVML_GPU_UTILIZATION_SAMPLES" = 1
*/
/* Metric types retrievable via NVML's sampling API; *_COUNT is the number of types. */
enum nvmlSamplingType_t {
  NVML_TOTAL_POWER_SAMPLES = 0,
  NVML_GPU_UTILIZATION_SAMPLES = 1,
  NVML_MEMORY_UTILIZATION_SAMPLES = 2,
  NVML_ENC_UTILIZATION_SAMPLES = 3,
  NVML_DEC_UTILIZATION_SAMPLES = 4,
  NVML_PROCESSOR_CLK_SAMPLES = 5,
  NVML_MEMORY_CLK_SAMPLES = 6,
  NVML_MODULE_POWER_SAMPLES = 7,
  NVML_JPG_UTILIZATION_SAMPLES = 8,
  NVML_OFA_UTILIZATION_SAMPLES = 9,
  NVML_SAMPLINGTYPE_COUNT
};
typedef enum nvmlSamplingType_t nvmlSamplingType_t;

/* "cy_nvml.pxd":100
 *     NVML_SAMPLINGTYPE_COUNT "NVML_SAMPLINGTYPE_COUNT"
 * 
 * ctypedef enum nvmlPcieUtilCounter_t "nvmlPcieUtilCounter_t":             # <<<<<<<<<<<<<<
 *     NVML_PCIE_UTIL_TX_BYTES "NVML_PCIE_UTIL_TX_BYTES" = 0
 *     NVML_PCIE_UTIL_RX_BYTES "NVML_PCIE_UTIL_RX_BYTES" = 1
*/
/* PCIe throughput counter direction (transmit/receive bytes). */
enum nvmlPcieUtilCounter_t {
  NVML_PCIE_UTIL_TX_BYTES = 0,
  NVML_PCIE_UTIL_RX_BYTES = 1,
  NVML_PCIE_UTIL_COUNT
};
typedef enum nvmlPcieUtilCounter_t nvmlPcieUtilCounter_t;

/* "cy_nvml.pxd":105
 *     NVML_PCIE_UTIL_COUNT "NVML_PCIE_UTIL_COUNT"
 * 
 * ctypedef enum nvmlValueType_t "nvmlValueType_t":             # <<<<<<<<<<<<<<
 *     NVML_VALUE_TYPE_DOUBLE "NVML_VALUE_TYPE_DOUBLE" = 0
 *     NVML_VALUE_TYPE_UNSIGNED_INT "NVML_VALUE_TYPE_UNSIGNED_INT" = 1
*/
/* Tag describing which member of NVML's value union holds the payload. */
enum nvmlValueType_t {
  NVML_VALUE_TYPE_DOUBLE = 0,
  NVML_VALUE_TYPE_UNSIGNED_INT = 1,
  NVML_VALUE_TYPE_UNSIGNED_LONG = 2,
  NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3,
  NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4,
  NVML_VALUE_TYPE_SIGNED_INT = 5,
  NVML_VALUE_TYPE_UNSIGNED_SHORT = 6,
  NVML_VALUE_TYPE_COUNT
};
typedef enum nvmlValueType_t nvmlValueType_t;

/* "cy_nvml.pxd":115
 *     NVML_VALUE_TYPE_COUNT "NVML_VALUE_TYPE_COUNT"
 * 
 * ctypedef enum nvmlPerfPolicyType_t "nvmlPerfPolicyType_t":             # <<<<<<<<<<<<<<
 *     NVML_PERF_POLICY_POWER "NVML_PERF_POLICY_POWER" = 0
 *     NVML_PERF_POLICY_THERMAL "NVML_PERF_POLICY_THERMAL" = 1
*/
/* Performance-violation policy selectors; note the gap between 5 and 10 is
 * intentional (reserved values in the NVML API). */
enum nvmlPerfPolicyType_t {
  NVML_PERF_POLICY_POWER = 0,
  NVML_PERF_POLICY_THERMAL = 1,
  NVML_PERF_POLICY_SYNC_BOOST = 2,
  NVML_PERF_POLICY_BOARD_LIMIT = 3,
  NVML_PERF_POLICY_LOW_UTILIZATION = 4,
  NVML_PERF_POLICY_RELIABILITY = 5,
  NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10,
  NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11,
  NVML_PERF_POLICY_COUNT
};
typedef enum nvmlPerfPolicyType_t nvmlPerfPolicyType_t;

/* "cy_nvml.pxd":126
 *     NVML_PERF_POLICY_COUNT "NVML_PERF_POLICY_COUNT"
 * 
 * ctypedef enum nvmlThermalTarget_t "nvmlThermalTarget_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_TARGET_NONE "NVML_THERMAL_TARGET_NONE" = 0
 *     NVML_THERMAL_TARGET_GPU "NVML_THERMAL_TARGET_GPU" = 1
*/
/* Thermal sensor targets; UNKNOWN is -1 (the -1L matches the pxd's -(1)). */
enum nvmlThermalTarget_t {
  NVML_THERMAL_TARGET_NONE = 0,
  NVML_THERMAL_TARGET_GPU = 1,
  NVML_THERMAL_TARGET_MEMORY = 2,
  NVML_THERMAL_TARGET_POWER_SUPPLY = 4,
  NVML_THERMAL_TARGET_BOARD = 8,
  NVML_THERMAL_TARGET_VCD_BOARD = 9,
  NVML_THERMAL_TARGET_VCD_INLET = 10,
  NVML_THERMAL_TARGET_VCD_OUTLET = 11,
  NVML_THERMAL_TARGET_ALL = 15,
  NVML_THERMAL_TARGET_UNKNOWN = -1L
};
typedef enum nvmlThermalTarget_t nvmlThermalTarget_t;

/* "cy_nvml.pxd":138
 *     NVML_THERMAL_TARGET_UNKNOWN "NVML_THERMAL_TARGET_UNKNOWN" = -(1)
 * 
 * ctypedef enum nvmlThermalController_t "nvmlThermalController_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_CONTROLLER_NONE "NVML_THERMAL_CONTROLLER_NONE" = 0
 *     NVML_THERMAL_CONTROLLER_GPU_INTERNAL "NVML_THERMAL_CONTROLLER_GPU_INTERNAL"
*/
/* Known thermal controller chips/mechanisms; values 1..17 are sequential,
 * UNKNOWN is -1. */
enum nvmlThermalController_t {
  NVML_THERMAL_CONTROLLER_NONE = 0,
  NVML_THERMAL_CONTROLLER_GPU_INTERNAL,
  NVML_THERMAL_CONTROLLER_ADM1032,
  NVML_THERMAL_CONTROLLER_ADT7461,
  NVML_THERMAL_CONTROLLER_MAX6649,
  NVML_THERMAL_CONTROLLER_MAX1617,
  NVML_THERMAL_CONTROLLER_LM99,
  NVML_THERMAL_CONTROLLER_LM89,
  NVML_THERMAL_CONTROLLER_LM64,
  NVML_THERMAL_CONTROLLER_G781,
  NVML_THERMAL_CONTROLLER_ADT7473,
  NVML_THERMAL_CONTROLLER_SBMAX6649,
  NVML_THERMAL_CONTROLLER_VBIOSEVT,
  NVML_THERMAL_CONTROLLER_OS,
  NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS,
  NVML_THERMAL_CONTROLLER_NVSYSCON_E551,
  NVML_THERMAL_CONTROLLER_MAX6649R,
  NVML_THERMAL_CONTROLLER_ADT7473S,
  NVML_THERMAL_CONTROLLER_UNKNOWN = -1L
};
typedef enum nvmlThermalController_t nvmlThermalController_t;

/* "cy_nvml.pxd":159
 *     NVML_THERMAL_CONTROLLER_UNKNOWN "NVML_THERMAL_CONTROLLER_UNKNOWN" = -(1)
 * 
 * ctypedef enum nvmlCoolerControl_t "nvmlCoolerControl_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_COOLER_SIGNAL_NONE "NVML_THERMAL_COOLER_SIGNAL_NONE" = 0
 *     NVML_THERMAL_COOLER_SIGNAL_TOGGLE "NVML_THERMAL_COOLER_SIGNAL_TOGGLE" = 1
*/
/* Cooler control-signal kinds (none / toggle / variable speed). */
enum nvmlCoolerControl_t {
  NVML_THERMAL_COOLER_SIGNAL_NONE = 0,
  NVML_THERMAL_COOLER_SIGNAL_TOGGLE = 1,
  NVML_THERMAL_COOLER_SIGNAL_VARIABLE = 2,
  NVML_THERMAL_COOLER_SIGNAL_COUNT
};
typedef enum nvmlCoolerControl_t nvmlCoolerControl_t;

/* "cy_nvml.pxd":165
 *     NVML_THERMAL_COOLER_SIGNAL_COUNT "NVML_THERMAL_COOLER_SIGNAL_COUNT"
 * 
 * ctypedef enum nvmlCoolerTarget_t "nvmlCoolerTarget_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_COOLER_TARGET_NONE "NVML_THERMAL_COOLER_TARGET_NONE" = (1 << 0)
 *     NVML_THERMAL_COOLER_TARGET_GPU "NVML_THERMAL_COOLER_TARGET_GPU" = (1 << 1)
*/
/* Bit flags naming what a cooler cools; GPU_RELATED is the OR of the
 * GPU, MEMORY and POWER_SUPPLY bits. */
enum nvmlCoolerTarget_t {

  /* "cy_nvml.pxd":170
 *     NVML_THERMAL_COOLER_TARGET_MEMORY "NVML_THERMAL_COOLER_TARGET_MEMORY" = (1 << 2)
 *     NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY "NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY" = (1 << 3)
 *     NVML_THERMAL_COOLER_TARGET_GPU_RELATED "NVML_THERMAL_COOLER_TARGET_GPU_RELATED" = ((NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY) | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)             # <<<<<<<<<<<<<<
 * 
 * ctypedef enum nvmlUUIDType_t "nvmlUUIDType_t":
 */
  NVML_THERMAL_COOLER_TARGET_NONE = (1 << 0),
  NVML_THERMAL_COOLER_TARGET_GPU = (1 << 1),
  NVML_THERMAL_COOLER_TARGET_MEMORY = (1 << 2),
  NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY = (1 << 3),
  NVML_THERMAL_COOLER_TARGET_GPU_RELATED = ((NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY) | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)
};
typedef enum nvmlCoolerTarget_t nvmlCoolerTarget_t;

/* "cy_nvml.pxd":172
 *     NVML_THERMAL_COOLER_TARGET_GPU_RELATED "NVML_THERMAL_COOLER_TARGET_GPU_RELATED" = ((NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY) | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)
 * 
 * ctypedef enum nvmlUUIDType_t "nvmlUUIDType_t":             # <<<<<<<<<<<<<<
 *     NVML_UUID_TYPE_NONE "NVML_UUID_TYPE_NONE" = 0
 *     NVML_UUID_TYPE_ASCII "NVML_UUID_TYPE_ASCII" = 1
*/
/* UUID representation (none / ASCII string / raw binary). */
enum nvmlUUIDType_t {
  NVML_UUID_TYPE_NONE = 0,
  NVML_UUID_TYPE_ASCII = 1,
  NVML_UUID_TYPE_BINARY = 2
};
typedef enum nvmlUUIDType_t nvmlUUIDType_t;

/* "cy_nvml.pxd":177
 *     NVML_UUID_TYPE_BINARY "NVML_UUID_TYPE_BINARY" = 2
 * 
 * ctypedef enum nvmlEnableState_t "nvmlEnableState_t":             # <<<<<<<<<<<<<<
 *     NVML_FEATURE_DISABLED "NVML_FEATURE_DISABLED" = 0
 *     NVML_FEATURE_ENABLED "NVML_FEATURE_ENABLED" = 1
*/
/* Generic on/off state used by many NVML feature queries and setters. */
enum nvmlEnableState_t {
  NVML_FEATURE_DISABLED = 0,
  NVML_FEATURE_ENABLED = 1
};
typedef enum nvmlEnableState_t nvmlEnableState_t;

/* "cy_nvml.pxd":181
 *     NVML_FEATURE_ENABLED "NVML_FEATURE_ENABLED" = 1
 * 
 * ctypedef enum nvmlBrandType_t "nvmlBrandType_t":             # <<<<<<<<<<<<<<
 *     NVML_BRAND_UNKNOWN "NVML_BRAND_UNKNOWN" = 0
 *     NVML_BRAND_QUADRO "NVML_BRAND_QUADRO" = 1
*/
/* GPU product-brand identifiers.  NVIDIA_VGAMING aliases NVIDIA_CLOUD_GAMING
 * (both 11).  COUNT is explicitly 18: 17 distinct values plus the alias. */
enum nvmlBrandType_t {

  /* "cy_nvml.pxd":194
 *     NVML_BRAND_NVIDIA_VWS "NVML_BRAND_NVIDIA_VWS" = 10
 *     NVML_BRAND_NVIDIA_CLOUD_GAMING "NVML_BRAND_NVIDIA_CLOUD_GAMING" = 11
 *     NVML_BRAND_NVIDIA_VGAMING "NVML_BRAND_NVIDIA_VGAMING" = NVML_BRAND_NVIDIA_CLOUD_GAMING             # <<<<<<<<<<<<<<
 *     NVML_BRAND_QUADRO_RTX "NVML_BRAND_QUADRO_RTX" = 12
 *     NVML_BRAND_NVIDIA_RTX "NVML_BRAND_NVIDIA_RTX" = 13
 */
  NVML_BRAND_UNKNOWN = 0,
  NVML_BRAND_QUADRO = 1,
  NVML_BRAND_TESLA = 2,
  NVML_BRAND_NVS = 3,
  NVML_BRAND_GRID = 4,
  NVML_BRAND_GEFORCE = 5,
  NVML_BRAND_TITAN = 6,
  NVML_BRAND_NVIDIA_VAPPS = 7,
  NVML_BRAND_NVIDIA_VPC = 8,
  NVML_BRAND_NVIDIA_VCS = 9,
  NVML_BRAND_NVIDIA_VWS = 10,
  NVML_BRAND_NVIDIA_CLOUD_GAMING = 11,
  NVML_BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING,
  NVML_BRAND_QUADRO_RTX = 12,
  NVML_BRAND_NVIDIA_RTX = 13,
  NVML_BRAND_NVIDIA = 14,
  NVML_BRAND_GEFORCE_RTX = 15,
  NVML_BRAND_TITAN_RTX = 16,
  NVML_BRAND_COUNT = 18
};
typedef enum nvmlBrandType_t nvmlBrandType_t;

/* "cy_nvml.pxd":202
 *     NVML_BRAND_COUNT "NVML_BRAND_COUNT" = 18
 * 
 * ctypedef enum nvmlTemperatureThresholds_t "nvmlTemperatureThresholds_t":             # <<<<<<<<<<<<<<
 *     NVML_TEMPERATURE_THRESHOLD_SHUTDOWN "NVML_TEMPERATURE_THRESHOLD_SHUTDOWN" = 0
 *     NVML_TEMPERATURE_THRESHOLD_SLOWDOWN "NVML_TEMPERATURE_THRESHOLD_SLOWDOWN" = 1
*/
/* Temperature-threshold selectors (shutdown/slowdown limits, acoustic targets, etc.). */
enum nvmlTemperatureThresholds_t {
  NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0,
  NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1,
  NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2,
  NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3,
  NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = 4,
  NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = 5,
  NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6,
  NVML_TEMPERATURE_THRESHOLD_GPS_CURR = 7,
  NVML_TEMPERATURE_THRESHOLD_COUNT
};
typedef enum nvmlTemperatureThresholds_t nvmlTemperatureThresholds_t;

/* "cy_nvml.pxd":213
 *     NVML_TEMPERATURE_THRESHOLD_COUNT "NVML_TEMPERATURE_THRESHOLD_COUNT"
 * 
 * ctypedef enum nvmlTemperatureSensors_t "nvmlTemperatureSensors_t":             # <<<<<<<<<<<<<<
 *     NVML_TEMPERATURE_GPU "NVML_TEMPERATURE_GPU" = 0
 *     NVML_TEMPERATURE_COUNT "NVML_TEMPERATURE_COUNT"
*/
/* Temperature sensor selectors; only the on-die GPU sensor is defined. */
enum nvmlTemperatureSensors_t {
  NVML_TEMPERATURE_GPU = 0,
  NVML_TEMPERATURE_COUNT
};
typedef enum nvmlTemperatureSensors_t nvmlTemperatureSensors_t;

/* "cy_nvml.pxd":217
 *     NVML_TEMPERATURE_COUNT "NVML_TEMPERATURE_COUNT"
 * 
 * ctypedef enum nvmlComputeMode_t "nvmlComputeMode_t":             # <<<<<<<<<<<<<<
 *     NVML_COMPUTEMODE_DEFAULT "NVML_COMPUTEMODE_DEFAULT" = 0
 *     NVML_COMPUTEMODE_EXCLUSIVE_THREAD "NVML_COMPUTEMODE_EXCLUSIVE_THREAD" = 1
*/
/* Device compute mode (default / exclusive-thread / prohibited / exclusive-process). */
enum nvmlComputeMode_t {
  NVML_COMPUTEMODE_DEFAULT = 0,
  NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1,
  NVML_COMPUTEMODE_PROHIBITED = 2,
  NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3,
  NVML_COMPUTEMODE_COUNT
};
typedef enum nvmlComputeMode_t nvmlComputeMode_t;

/* "cy_nvml.pxd":224
 *     NVML_COMPUTEMODE_COUNT "NVML_COMPUTEMODE_COUNT"
 * 
 * ctypedef enum nvmlMemoryErrorType_t "nvmlMemoryErrorType_t":             # <<<<<<<<<<<<<<
 *     NVML_MEMORY_ERROR_TYPE_CORRECTED "NVML_MEMORY_ERROR_TYPE_CORRECTED" = 0
 *     NVML_MEMORY_ERROR_TYPE_UNCORRECTED "NVML_MEMORY_ERROR_TYPE_UNCORRECTED" = 1
*/
/* ECC memory error class (corrected vs. uncorrected). */
enum nvmlMemoryErrorType_t {
  NVML_MEMORY_ERROR_TYPE_CORRECTED = 0,
  NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1,
  NVML_MEMORY_ERROR_TYPE_COUNT
};
typedef enum nvmlMemoryErrorType_t nvmlMemoryErrorType_t;

/* "cy_nvml.pxd":229
 *     NVML_MEMORY_ERROR_TYPE_COUNT "NVML_MEMORY_ERROR_TYPE_COUNT"
 * 
 * ctypedef enum nvmlNvlinkVersion_t "nvmlNvlinkVersion_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_VERSION_INVALID "NVML_NVLINK_VERSION_INVALID" = 0
 *     NVML_NVLINK_VERSION_1_0 "NVML_NVLINK_VERSION_1_0" = 1
*/
/* NvLink generation identifiers (note: values are ordinals, not version numbers). */
enum nvmlNvlinkVersion_t {
  NVML_NVLINK_VERSION_INVALID = 0,
  NVML_NVLINK_VERSION_1_0 = 1,
  NVML_NVLINK_VERSION_2_0 = 2,
  NVML_NVLINK_VERSION_2_2 = 3,
  NVML_NVLINK_VERSION_3_0 = 4,
  NVML_NVLINK_VERSION_3_1 = 5,
  NVML_NVLINK_VERSION_4_0 = 6,
  NVML_NVLINK_VERSION_5_0 = 7
};
typedef enum nvmlNvlinkVersion_t nvmlNvlinkVersion_t;

/* "cy_nvml.pxd":239
 *     NVML_NVLINK_VERSION_5_0 "NVML_NVLINK_VERSION_5_0" = 7
 * 
 * ctypedef enum nvmlEccCounterType_t "nvmlEccCounterType_t":             # <<<<<<<<<<<<<<
 *     NVML_VOLATILE_ECC "NVML_VOLATILE_ECC" = 0
 *     NVML_AGGREGATE_ECC "NVML_AGGREGATE_ECC" = 1
*/
/* ECC counter scope (volatile = since last reset/reboot, aggregate = lifetime). */
enum nvmlEccCounterType_t {
  NVML_VOLATILE_ECC = 0,
  NVML_AGGREGATE_ECC = 1,
  NVML_ECC_COUNTER_TYPE_COUNT
};
typedef enum nvmlEccCounterType_t nvmlEccCounterType_t;

/* "cy_nvml.pxd":244
 *     NVML_ECC_COUNTER_TYPE_COUNT "NVML_ECC_COUNTER_TYPE_COUNT"
 * 
 * ctypedef enum nvmlClockType_t "nvmlClockType_t":             # <<<<<<<<<<<<<<
 *     NVML_CLOCK_GRAPHICS "NVML_CLOCK_GRAPHICS" = 0
 *     NVML_CLOCK_SM "NVML_CLOCK_SM" = 1
*/
/* Clock domain selectors (graphics/SM/memory/video). */
enum nvmlClockType_t {
  NVML_CLOCK_GRAPHICS = 0,
  NVML_CLOCK_SM = 1,
  NVML_CLOCK_MEM = 2,
  NVML_CLOCK_VIDEO = 3,
  NVML_CLOCK_COUNT
};
typedef enum nvmlClockType_t nvmlClockType_t;

/* "cy_nvml.pxd":251
 *     NVML_CLOCK_COUNT "NVML_CLOCK_COUNT"
 * 
 * ctypedef enum nvmlClockId_t "nvmlClockId_t":             # <<<<<<<<<<<<<<
 *     NVML_CLOCK_ID_CURRENT "NVML_CLOCK_ID_CURRENT" = 0
 *     NVML_CLOCK_ID_APP_CLOCK_TARGET "NVML_CLOCK_ID_APP_CLOCK_TARGET" = 1
*/
/* Which clock value to query within a clock domain (current, app target/default, boost max). */
enum nvmlClockId_t {
  NVML_CLOCK_ID_CURRENT = 0,
  NVML_CLOCK_ID_APP_CLOCK_TARGET = 1,
  NVML_CLOCK_ID_APP_CLOCK_DEFAULT = 2,
  NVML_CLOCK_ID_CUSTOMER_BOOST_MAX = 3,
  NVML_CLOCK_ID_COUNT
};
typedef enum nvmlClockId_t nvmlClockId_t;

/* "cy_nvml.pxd":258
 *     NVML_CLOCK_ID_COUNT "NVML_CLOCK_ID_COUNT"
 * 
 * ctypedef enum nvmlDriverModel_t "nvmlDriverModel_t":             # <<<<<<<<<<<<<<
 *     NVML_DRIVER_WDDM "NVML_DRIVER_WDDM" = 0
 *     NVML_DRIVER_WDM "NVML_DRIVER_WDM" = 1
*/
/* Windows driver model (WDDM/WDM/MCDM); not applicable on Linux. */
enum nvmlDriverModel_t {
  NVML_DRIVER_WDDM = 0,
  NVML_DRIVER_WDM = 1,
  NVML_DRIVER_MCDM = 2
};
typedef enum nvmlDriverModel_t nvmlDriverModel_t;

/* "cy_nvml.pxd":263
 *     NVML_DRIVER_MCDM "NVML_DRIVER_MCDM" = 2
 * 
 * ctypedef enum nvmlPstates_t "nvmlPstates_t":             # <<<<<<<<<<<<<<
 *     NVML_PSTATE_0 "NVML_PSTATE_0" = 0
 *     NVML_PSTATE_1 "NVML_PSTATE_1" = 1
*/
/* Performance states P0 (max performance) through P15 (min); 32 = unknown. */
enum nvmlPstates_t {
  NVML_PSTATE_0 = 0,
  NVML_PSTATE_1 = 1,
  NVML_PSTATE_2 = 2,
  NVML_PSTATE_3 = 3,
  NVML_PSTATE_4 = 4,
  NVML_PSTATE_5 = 5,
  NVML_PSTATE_6 = 6,
  NVML_PSTATE_7 = 7,
  NVML_PSTATE_8 = 8,
  NVML_PSTATE_9 = 9,
  NVML_PSTATE_10 = 10,
  NVML_PSTATE_11 = 11,
  NVML_PSTATE_12 = 12,
  NVML_PSTATE_13 = 13,
  NVML_PSTATE_14 = 14,
  NVML_PSTATE_15 = 15,
  NVML_PSTATE_UNKNOWN = 32
};
typedef enum nvmlPstates_t nvmlPstates_t;

/* "cy_nvml.pxd":282
 *     NVML_PSTATE_UNKNOWN "NVML_PSTATE_UNKNOWN" = 32
 * 
 * ctypedef enum nvmlGpuOperationMode_t "nvmlGpuOperationMode_t":             # <<<<<<<<<<<<<<
 *     NVML_GOM_ALL_ON "NVML_GOM_ALL_ON" = 0
 *     NVML_GOM_COMPUTE "NVML_GOM_COMPUTE" = 1
*/
/* GPU operation mode (all-on / compute-only / low double-precision). */
enum nvmlGpuOperationMode_t {
  NVML_GOM_ALL_ON = 0,
  NVML_GOM_COMPUTE = 1,
  NVML_GOM_LOW_DP = 2
};
typedef enum nvmlGpuOperationMode_t nvmlGpuOperationMode_t;

/* "cy_nvml.pxd":287
 *     NVML_GOM_LOW_DP "NVML_GOM_LOW_DP" = 2
 * 
 * ctypedef enum nvmlInforomObject_t "nvmlInforomObject_t":             # <<<<<<<<<<<<<<
 *     NVML_INFOROM_OEM "NVML_INFOROM_OEM" = 0
 *     NVML_INFOROM_ECC "NVML_INFOROM_ECC" = 1
*/
/* InfoROM object selectors (OEM/ECC/power/DEN sections). */
enum nvmlInforomObject_t {
  NVML_INFOROM_OEM = 0,
  NVML_INFOROM_ECC = 1,
  NVML_INFOROM_POWER = 2,
  NVML_INFOROM_DEN = 3,
  NVML_INFOROM_COUNT
};
typedef enum nvmlInforomObject_t nvmlInforomObject_t;

/* "cy_nvml.pxd":294
 *     NVML_INFOROM_COUNT "NVML_INFOROM_COUNT"
 * 
 * ctypedef enum nvmlReturn_t "nvmlReturn_t":             # <<<<<<<<<<<<<<
 *     NVML_SUCCESS "NVML_SUCCESS" = 0
 *     NVML_ERROR_UNINITIALIZED "NVML_ERROR_UNINITIALIZED" = 1
*/
/* NVML API return codes.  0 is success; 0x3E7 (999) is the generic UNKNOWN.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR (-42) is an extra sentinel added by
 * this binding (leading underscore, not part of nvml.h) used when the NVML
 * shared library itself cannot be loaded. */
enum nvmlReturn_t {
  NVML_SUCCESS = 0,
  NVML_ERROR_UNINITIALIZED = 1,
  NVML_ERROR_INVALID_ARGUMENT = 2,
  NVML_ERROR_NOT_SUPPORTED = 3,
  NVML_ERROR_NO_PERMISSION = 4,
  NVML_ERROR_ALREADY_INITIALIZED = 5,
  NVML_ERROR_NOT_FOUND = 6,
  NVML_ERROR_INSUFFICIENT_SIZE = 7,
  NVML_ERROR_INSUFFICIENT_POWER = 8,
  NVML_ERROR_DRIVER_NOT_LOADED = 9,
  NVML_ERROR_TIMEOUT = 10,
  NVML_ERROR_IRQ_ISSUE = 11,
  NVML_ERROR_LIBRARY_NOT_FOUND = 12,
  NVML_ERROR_FUNCTION_NOT_FOUND = 13,
  NVML_ERROR_CORRUPTED_INFOROM = 14,
  NVML_ERROR_GPU_IS_LOST = 15,
  NVML_ERROR_RESET_REQUIRED = 16,
  NVML_ERROR_OPERATING_SYSTEM = 17,
  NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18,
  NVML_ERROR_IN_USE = 19,
  NVML_ERROR_MEMORY = 20,
  NVML_ERROR_NO_DATA = 21,
  NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22,
  NVML_ERROR_INSUFFICIENT_RESOURCES = 23,
  NVML_ERROR_FREQ_NOT_SUPPORTED = 24,
  NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25,
  NVML_ERROR_DEPRECATED = 26,
  NVML_ERROR_NOT_READY = 27,
  NVML_ERROR_GPU_NOT_FOUND = 28,
  NVML_ERROR_INVALID_STATE = 29,
  NVML_ERROR_RESET_TYPE_NOT_SUPPORTED = 30,
  NVML_ERROR_UNKNOWN = 0x3E7,
  _NVMLRETURN_T_INTERNAL_LOADING_ERROR = -42L
};
typedef enum nvmlReturn_t nvmlReturn_t;

/* "cy_nvml.pxd":329
 *     _NVMLRETURN_T_INTERNAL_LOADING_ERROR "_NVMLRETURN_T_INTERNAL_LOADING_ERROR" = -42
 * 
 * ctypedef enum nvmlMemoryLocation_t "nvmlMemoryLocation_t":             # <<<<<<<<<<<<<<
 *     NVML_MEMORY_LOCATION_L1_CACHE "NVML_MEMORY_LOCATION_L1_CACHE" = 0
 *     NVML_MEMORY_LOCATION_L2_CACHE "NVML_MEMORY_LOCATION_L2_CACHE" = 1
*/
/* On-device memory locations for ECC error reporting.
 * DRAM and DEVICE_MEMORY intentionally share value 2 (alias). */
enum nvmlMemoryLocation_t {
  NVML_MEMORY_LOCATION_L1_CACHE = 0,
  NVML_MEMORY_LOCATION_L2_CACHE = 1,
  NVML_MEMORY_LOCATION_DRAM = 2,
  NVML_MEMORY_LOCATION_DEVICE_MEMORY = 2,
  NVML_MEMORY_LOCATION_REGISTER_FILE = 3,
  NVML_MEMORY_LOCATION_TEXTURE_MEMORY = 4,
  NVML_MEMORY_LOCATION_TEXTURE_SHM = 5,
  NVML_MEMORY_LOCATION_CBU = 6,
  NVML_MEMORY_LOCATION_SRAM = 7,
  NVML_MEMORY_LOCATION_COUNT
};
typedef enum nvmlMemoryLocation_t nvmlMemoryLocation_t;

/* "cy_nvml.pxd":341
 *     NVML_MEMORY_LOCATION_COUNT "NVML_MEMORY_LOCATION_COUNT"
 * 
 * ctypedef enum nvmlPageRetirementCause_t "nvmlPageRetirementCause_t":             # <<<<<<<<<<<<<<
 *     NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS "NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS" = 0
 *     NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR "NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR" = 1
*/
/* Why a memory page was retired (repeated single-bit vs. double-bit ECC errors). */
enum nvmlPageRetirementCause_t {
  NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS = 0,
  NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR = 1,
  NVML_PAGE_RETIREMENT_CAUSE_COUNT
};
typedef enum nvmlPageRetirementCause_t nvmlPageRetirementCause_t;

/* "cy_nvml.pxd":346
 *     NVML_PAGE_RETIREMENT_CAUSE_COUNT "NVML_PAGE_RETIREMENT_CAUSE_COUNT"
 * 
 * ctypedef enum nvmlRestrictedAPI_t "nvmlRestrictedAPI_t":             # <<<<<<<<<<<<<<
 *     NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS "NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS" = 0
 *     NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS "NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS" = 1
*/
/* APIs whose access can be restricted to root/admin. */
enum nvmlRestrictedAPI_t {
  NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS = 0,
  NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS = 1,
  NVML_RESTRICTED_API_COUNT
};
typedef enum nvmlRestrictedAPI_t nvmlRestrictedAPI_t;

/* "cy_nvml.pxd":351
 *     NVML_RESTRICTED_API_COUNT "NVML_RESTRICTED_API_COUNT"
 * 
 * ctypedef enum nvmlGpuUtilizationDomainId_t "nvmlGpuUtilizationDomainId_t":             # <<<<<<<<<<<<<<
 *     NVML_GPU_UTILIZATION_DOMAIN_GPU "NVML_GPU_UTILIZATION_DOMAIN_GPU" = 0
 *     NVML_GPU_UTILIZATION_DOMAIN_FB "NVML_GPU_UTILIZATION_DOMAIN_FB" = 1
*/
/* Utilization domains (graphics engine, framebuffer, video engine, bus). */
enum nvmlGpuUtilizationDomainId_t {
  NVML_GPU_UTILIZATION_DOMAIN_GPU = 0,
  NVML_GPU_UTILIZATION_DOMAIN_FB = 1,
  NVML_GPU_UTILIZATION_DOMAIN_VID = 2,
  NVML_GPU_UTILIZATION_DOMAIN_BUS = 3
};
typedef enum nvmlGpuUtilizationDomainId_t nvmlGpuUtilizationDomainId_t;

/* "cy_nvml.pxd":357
 *     NVML_GPU_UTILIZATION_DOMAIN_BUS "NVML_GPU_UTILIZATION_DOMAIN_BUS" = 3
 * 
 * ctypedef enum nvmlGpuVirtualizationMode_t "nvmlGpuVirtualizationMode_t":             # <<<<<<<<<<<<<<
 *     NVML_GPU_VIRTUALIZATION_MODE_NONE "NVML_GPU_VIRTUALIZATION_MODE_NONE" = 0
 *     NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH "NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH" = 1
*/
/* How the GPU is virtualized (bare metal, passthrough, vGPU guest, host vGPU/vSGA). */
enum nvmlGpuVirtualizationMode_t {
  NVML_GPU_VIRTUALIZATION_MODE_NONE = 0,
  NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1,
  NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2,
  NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3,
  NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4
};
typedef enum nvmlGpuVirtualizationMode_t nvmlGpuVirtualizationMode_t;

/* "cy_nvml.pxd":364
 *     NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA "NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA" = 4
 * 
 * ctypedef enum nvmlHostVgpuMode_t "nvmlHostVgpuMode_t":             # <<<<<<<<<<<<<<
 *     NVML_HOST_VGPU_MODE_NON_SRIOV "NVML_HOST_VGPU_MODE_NON_SRIOV" = 0
 *     NVML_HOST_VGPU_MODE_SRIOV "NVML_HOST_VGPU_MODE_SRIOV" = 1
*/
/* Host vGPU mode (non-SR-IOV vs. SR-IOV). */
enum nvmlHostVgpuMode_t {
  NVML_HOST_VGPU_MODE_NON_SRIOV = 0,
  NVML_HOST_VGPU_MODE_SRIOV = 1
};
typedef enum nvmlHostVgpuMode_t nvmlHostVgpuMode_t;

/* "cy_nvml.pxd":368
 *     NVML_HOST_VGPU_MODE_SRIOV "NVML_HOST_VGPU_MODE_SRIOV" = 1
 * 
 * ctypedef enum nvmlVgpuVmIdType_t "nvmlVgpuVmIdType_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_VM_ID_DOMAIN_ID "NVML_VGPU_VM_ID_DOMAIN_ID" = 0
 *     NVML_VGPU_VM_ID_UUID "NVML_VGPU_VM_ID_UUID" = 1
*/
/* How a vGPU VM is identified (numeric domain ID or UUID). */
enum nvmlVgpuVmIdType_t {
  NVML_VGPU_VM_ID_DOMAIN_ID = 0,
  NVML_VGPU_VM_ID_UUID = 1
};
typedef enum nvmlVgpuVmIdType_t nvmlVgpuVmIdType_t;

/* "cy_nvml.pxd":372
 *     NVML_VGPU_VM_ID_UUID "NVML_VGPU_VM_ID_UUID" = 1
 * 
 * ctypedef enum nvmlVgpuGuestInfoState_t "nvmlVgpuGuestInfoState_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED "NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED" = 0
 *     NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED "NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED" = 1
*/
/* Whether guest-driver info has been initialized for a vGPU instance. */
enum nvmlVgpuGuestInfoState_t {
  NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0,
  NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1
};
typedef enum nvmlVgpuGuestInfoState_t nvmlVgpuGuestInfoState_t;

/* "cy_nvml.pxd":376
 *     NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED "NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED" = 1
 * 
 * ctypedef enum nvmlGridLicenseFeatureCode_t "nvmlGridLicenseFeatureCode_t":             # <<<<<<<<<<<<<<
 *     NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN "NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN" = 0
 *     NVML_GRID_LICENSE_FEATURE_CODE_VGPU "NVML_GRID_LICENSE_FEATURE_CODE_VGPU" = 1
*/
/* GRID license feature codes; VWORKSTATION aliases NVIDIA_RTX (both 2). */
enum nvmlGridLicenseFeatureCode_t {

  /* "cy_nvml.pxd":380
 *     NVML_GRID_LICENSE_FEATURE_CODE_VGPU "NVML_GRID_LICENSE_FEATURE_CODE_VGPU" = 1
 *     NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX "NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX" = 2
 *     NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION "NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION" = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX             # <<<<<<<<<<<<<<
 *     NVML_GRID_LICENSE_FEATURE_CODE_GAMING "NVML_GRID_LICENSE_FEATURE_CODE_GAMING" = 3
 *     NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE "NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE" = 4
 */
  NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0,
  NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1,
  NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2,
  NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX,
  NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3,
  NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4
};
typedef enum nvmlGridLicenseFeatureCode_t nvmlGridLicenseFeatureCode_t;

/* "cy_nvml.pxd":384
 *     NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE "NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE" = 4
 * 
 * ctypedef enum nvmlVgpuCapability_t "nvmlVgpuCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_CAP_NVLINK_P2P "NVML_VGPU_CAP_NVLINK_P2P" = 0
 *     NVML_VGPU_CAP_GPUDIRECT "NVML_VGPU_CAP_GPUDIRECT" = 1
*/
/* Per-vGPU-type capability selectors. */
enum nvmlVgpuCapability_t {
  NVML_VGPU_CAP_NVLINK_P2P = 0,
  NVML_VGPU_CAP_GPUDIRECT = 1,
  NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2,
  NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3,
  NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4,
  NVML_VGPU_CAP_COUNT
};
typedef enum nvmlVgpuCapability_t nvmlVgpuCapability_t;

/* "cy_nvml.pxd":392
 *     NVML_VGPU_CAP_COUNT "NVML_VGPU_CAP_COUNT"
 * 
 * ctypedef enum nvmlVgpuDriverCapability_t "nvmlVgpuDriverCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU "NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU" = 0
 *     NVML_VGPU_DRIVER_CAP_WARM_UPDATE "NVML_VGPU_DRIVER_CAP_WARM_UPDATE" = 1
*/
/* Cython-generated mirror of NVML's nvmlVgpuDriverCapability_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlVgpuDriverCapability_t {
  NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0,
  NVML_VGPU_DRIVER_CAP_WARM_UPDATE = 1,
  NVML_VGPU_DRIVER_CAP_COUNT
};
typedef enum nvmlVgpuDriverCapability_t nvmlVgpuDriverCapability_t;

/* "cy_nvml.pxd":397
 *     NVML_VGPU_DRIVER_CAP_COUNT "NVML_VGPU_DRIVER_CAP_COUNT"
 * 
 * ctypedef enum nvmlDeviceVgpuCapability_t "nvmlDeviceVgpuCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU "NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU" = 0
 *     NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES "NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES" = 1
*/
/* Cython-generated mirror of NVML's nvmlDeviceVgpuCapability_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlDeviceVgpuCapability_t {
  NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0,
  NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1,
  NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2,
  NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = 3,
  NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = 4,
  NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING = 5,
  NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU = 6,
  NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = 7,
  NVML_DEVICE_VGPU_CAP_WARM_UPDATE = 8,
  NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = 9,
  NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = 10,
  NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = 11,
  NVML_DEVICE_VGPU_CAP_COUNT
};
typedef enum nvmlDeviceVgpuCapability_t nvmlDeviceVgpuCapability_t;

/* "cy_nvml.pxd":412
 *     NVML_DEVICE_VGPU_CAP_COUNT "NVML_DEVICE_VGPU_CAP_COUNT"
 * 
 * ctypedef enum nvmlDeviceGpuRecoveryAction_t "nvmlDeviceGpuRecoveryAction_t":             # <<<<<<<<<<<<<<
 *     NVML_GPU_RECOVERY_ACTION_NONE "NVML_GPU_RECOVERY_ACTION_NONE" = 0
 *     NVML_GPU_RECOVERY_ACTION_GPU_RESET "NVML_GPU_RECOVERY_ACTION_GPU_RESET" = 1
*/
/* Cython-generated mirror of NVML's nvmlDeviceGpuRecoveryAction_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlDeviceGpuRecoveryAction_t {
  NVML_GPU_RECOVERY_ACTION_NONE = 0,
  NVML_GPU_RECOVERY_ACTION_GPU_RESET = 1,
  NVML_GPU_RECOVERY_ACTION_NODE_REBOOT = 2,
  NVML_GPU_RECOVERY_ACTION_DRAIN_P2P = 3,
  NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET = 4
};
typedef enum nvmlDeviceGpuRecoveryAction_t nvmlDeviceGpuRecoveryAction_t;

/* "cy_nvml.pxd":419
 *     NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET "NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET" = 4
 * 
 * ctypedef enum nvmlFanState_t "nvmlFanState_t":             # <<<<<<<<<<<<<<
 *     NVML_FAN_NORMAL "NVML_FAN_NORMAL" = 0
 *     NVML_FAN_FAILED "NVML_FAN_FAILED" = 1
*/
/* Cython-generated mirror of NVML's nvmlFanState_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlFanState_t {
  NVML_FAN_NORMAL = 0,
  NVML_FAN_FAILED = 1
};
typedef enum nvmlFanState_t nvmlFanState_t;

/* "cy_nvml.pxd":423
 *     NVML_FAN_FAILED "NVML_FAN_FAILED" = 1
 * 
 * ctypedef enum nvmlLedColor_t "nvmlLedColor_t":             # <<<<<<<<<<<<<<
 *     NVML_LED_COLOR_GREEN "NVML_LED_COLOR_GREEN" = 0
 *     NVML_LED_COLOR_AMBER "NVML_LED_COLOR_AMBER" = 1
*/
/* Cython-generated mirror of NVML's nvmlLedColor_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlLedColor_t {
  NVML_LED_COLOR_GREEN = 0,
  NVML_LED_COLOR_AMBER = 1
};
typedef enum nvmlLedColor_t nvmlLedColor_t;

/* "cy_nvml.pxd":427
 *     NVML_LED_COLOR_AMBER "NVML_LED_COLOR_AMBER" = 1
 * 
 * ctypedef enum nvmlEncoderType_t "nvmlEncoderType_t":             # <<<<<<<<<<<<<<
 *     NVML_ENCODER_QUERY_H264 "NVML_ENCODER_QUERY_H264" = 0x00
 *     NVML_ENCODER_QUERY_HEVC "NVML_ENCODER_QUERY_HEVC" = 0x01
*/
/* Cython-generated mirror of NVML's nvmlEncoderType_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlEncoderType_t {
  NVML_ENCODER_QUERY_H264 = 0x00,
  NVML_ENCODER_QUERY_HEVC = 0x01,
  NVML_ENCODER_QUERY_AV1 = 0x02,
  NVML_ENCODER_QUERY_UNKNOWN = 0xFF
};
typedef enum nvmlEncoderType_t nvmlEncoderType_t;

/* "cy_nvml.pxd":433
 *     NVML_ENCODER_QUERY_UNKNOWN "NVML_ENCODER_QUERY_UNKNOWN" = 0xFF
 * 
 * ctypedef enum nvmlFBCSessionType_t "nvmlFBCSessionType_t":             # <<<<<<<<<<<<<<
 *     NVML_FBC_SESSION_TYPE_UNKNOWN "NVML_FBC_SESSION_TYPE_UNKNOWN" = 0
 *     NVML_FBC_SESSION_TYPE_TOSYS "NVML_FBC_SESSION_TYPE_TOSYS"
*/
/* Cython-generated mirror of NVML's nvmlFBCSessionType_t.
 * Enumerators after UNKNOWN take implicit consecutive values (1..4),
 * matching the unvalued entries in cy_nvml.pxd. Do not edit by hand. */
enum nvmlFBCSessionType_t {
  NVML_FBC_SESSION_TYPE_UNKNOWN = 0,
  NVML_FBC_SESSION_TYPE_TOSYS,
  NVML_FBC_SESSION_TYPE_CUDA,
  NVML_FBC_SESSION_TYPE_VID,
  NVML_FBC_SESSION_TYPE_HWENC
};
typedef enum nvmlFBCSessionType_t nvmlFBCSessionType_t;

/* "cy_nvml.pxd":440
 *     NVML_FBC_SESSION_TYPE_HWENC "NVML_FBC_SESSION_TYPE_HWENC"
 * 
 * ctypedef enum nvmlDetachGpuState_t "nvmlDetachGpuState_t":             # <<<<<<<<<<<<<<
 *     NVML_DETACH_GPU_KEEP "NVML_DETACH_GPU_KEEP" = 0
 *     NVML_DETACH_GPU_REMOVE "NVML_DETACH_GPU_REMOVE"
*/
/* Cython-generated mirror of NVML's nvmlDetachGpuState_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlDetachGpuState_t {
  NVML_DETACH_GPU_KEEP = 0,
  NVML_DETACH_GPU_REMOVE
};
typedef enum nvmlDetachGpuState_t nvmlDetachGpuState_t;

/* "cy_nvml.pxd":444
 *     NVML_DETACH_GPU_REMOVE "NVML_DETACH_GPU_REMOVE"
 * 
 * ctypedef enum nvmlPcieLinkState_t "nvmlPcieLinkState_t":             # <<<<<<<<<<<<<<
 *     NVML_PCIE_LINK_KEEP "NVML_PCIE_LINK_KEEP" = 0
 *     NVML_PCIE_LINK_SHUT_DOWN "NVML_PCIE_LINK_SHUT_DOWN"
*/
/* Cython-generated mirror of NVML's nvmlPcieLinkState_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlPcieLinkState_t {
  NVML_PCIE_LINK_KEEP = 0,
  NVML_PCIE_LINK_SHUT_DOWN
};
typedef enum nvmlPcieLinkState_t nvmlPcieLinkState_t;

/* "cy_nvml.pxd":448
 *     NVML_PCIE_LINK_SHUT_DOWN "NVML_PCIE_LINK_SHUT_DOWN"
 * 
 * ctypedef enum nvmlClockLimitId_t "nvmlClockLimitId_t":             # <<<<<<<<<<<<<<
 *     NVML_CLOCK_LIMIT_ID_RANGE_START "NVML_CLOCK_LIMIT_ID_RANGE_START" = 0xffffff00
 *     NVML_CLOCK_LIMIT_ID_TDP "NVML_CLOCK_LIMIT_ID_TDP"
*/
/* Cython-generated mirror of NVML's nvmlClockLimitId_t.
 * Values start high (0xffffff00) so sentinel IDs don't collide with
 * real clock values; TDP and UNLIMITED follow consecutively.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlClockLimitId_t {
  NVML_CLOCK_LIMIT_ID_RANGE_START = 0xffffff00,
  NVML_CLOCK_LIMIT_ID_TDP,
  NVML_CLOCK_LIMIT_ID_UNLIMITED
};
typedef enum nvmlClockLimitId_t nvmlClockLimitId_t;

/* "cy_nvml.pxd":453
 *     NVML_CLOCK_LIMIT_ID_UNLIMITED "NVML_CLOCK_LIMIT_ID_UNLIMITED"
 * 
 * ctypedef enum nvmlVgpuVmCompatibility_t "nvmlVgpuVmCompatibility_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_VM_COMPATIBILITY_NONE "NVML_VGPU_VM_COMPATIBILITY_NONE" = 0x0
 *     NVML_VGPU_VM_COMPATIBILITY_COLD "NVML_VGPU_VM_COMPATIBILITY_COLD" = 0x1
*/
/* Cython-generated mirror of NVML's nvmlVgpuVmCompatibility_t.
 * Power-of-two values: usable as a bitmask. Keep in sync with nvml.h;
 * do not edit by hand. */
enum nvmlVgpuVmCompatibility_t {
  NVML_VGPU_VM_COMPATIBILITY_NONE = 0x0,
  NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1,
  NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2,
  NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4,
  NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8
};
typedef enum nvmlVgpuVmCompatibility_t nvmlVgpuVmCompatibility_t;

/* "cy_nvml.pxd":460
 *     NVML_VGPU_VM_COMPATIBILITY_LIVE "NVML_VGPU_VM_COMPATIBILITY_LIVE" = 0x8
 * 
 * ctypedef enum nvmlVgpuPgpuCompatibilityLimitCode_t "nvmlVgpuPgpuCompatibilityLimitCode_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_COMPATIBILITY_LIMIT_NONE "NVML_VGPU_COMPATIBILITY_LIMIT_NONE" = 0x0
 *     NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER "NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER" = 0x1
*/
/* Cython-generated mirror of NVML's nvmlVgpuPgpuCompatibilityLimitCode_t.
 * Power-of-two values: usable as a bitmask (OTHER is the top bit).
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlVgpuPgpuCompatibilityLimitCode_t {
  NVML_VGPU_COMPATIBILITY_LIMIT_NONE = 0x0,
  NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1,
  NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2,
  NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4,
  NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000
};
typedef enum nvmlVgpuPgpuCompatibilityLimitCode_t nvmlVgpuPgpuCompatibilityLimitCode_t;

/* "cy_nvml.pxd":467
 *     NVML_VGPU_COMPATIBILITY_LIMIT_OTHER "NVML_VGPU_COMPATIBILITY_LIMIT_OTHER" = 0x80000000
 * 
 * ctypedef enum nvmlGpmMetricId_t "nvmlGpmMetricId_t":             # <<<<<<<<<<<<<<
 *     NVML_GPM_METRIC_GRAPHICS_UTIL "NVML_GPM_METRIC_GRAPHICS_UTIL" = 1
 *     NVML_GPM_METRIC_SM_UTIL "NVML_GPM_METRIC_SM_UTIL" = 2
*/
/* Cython-generated mirror of NVML's nvmlGpmMetricId_t (GPU Performance
 * Monitoring metric identifiers). Values are sparse and grouped in ranges
 * (utilization, PCIe, NVDEC/NVJPG/NVOFA engines, NVLink lanes, C2C links,
 * cache hit/miss, NVENC, per-GR context-switch counters). NVML_GPM_METRIC_MAX
 * (0xD2 == 210) is one past the last defined metric.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlGpmMetricId_t {
  /* Core utilization metrics. */
  NVML_GPM_METRIC_GRAPHICS_UTIL = 1,
  NVML_GPM_METRIC_SM_UTIL = 2,
  NVML_GPM_METRIC_SM_OCCUPANCY = 3,
  NVML_GPM_METRIC_INTEGER_UTIL = 4,
  NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5,
  NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6,
  NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7,
  /* NOTE(review): 8 is unassigned here — presumably reserved in nvml.h. */
  NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9,
  NVML_GPM_METRIC_DRAM_BW_UTIL = 10,
  NVML_GPM_METRIC_FP64_UTIL = 11,
  NVML_GPM_METRIC_FP32_UTIL = 12,
  NVML_GPM_METRIC_FP16_UTIL = 13,
  /* PCIe throughput. */
  NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20,
  NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21,
  /* Per-engine NVDEC utilization (engines 0-7). */
  NVML_GPM_METRIC_NVDEC_0_UTIL = 30,
  NVML_GPM_METRIC_NVDEC_1_UTIL = 31,
  NVML_GPM_METRIC_NVDEC_2_UTIL = 32,
  NVML_GPM_METRIC_NVDEC_3_UTIL = 33,
  NVML_GPM_METRIC_NVDEC_4_UTIL = 34,
  NVML_GPM_METRIC_NVDEC_5_UTIL = 35,
  NVML_GPM_METRIC_NVDEC_6_UTIL = 36,
  NVML_GPM_METRIC_NVDEC_7_UTIL = 37,
  /* Per-engine NVJPG utilization (engines 0-7). */
  NVML_GPM_METRIC_NVJPG_0_UTIL = 40,
  NVML_GPM_METRIC_NVJPG_1_UTIL = 41,
  NVML_GPM_METRIC_NVJPG_2_UTIL = 42,
  NVML_GPM_METRIC_NVJPG_3_UTIL = 43,
  NVML_GPM_METRIC_NVJPG_4_UTIL = 44,
  NVML_GPM_METRIC_NVJPG_5_UTIL = 45,
  NVML_GPM_METRIC_NVJPG_6_UTIL = 46,
  NVML_GPM_METRIC_NVJPG_7_UTIL = 47,
  /* Optical-flow accelerator utilization. */
  NVML_GPM_METRIC_NVOFA_0_UTIL = 50,
  NVML_GPM_METRIC_NVOFA_1_UTIL = 51,
  /* NVLink throughput: total, then per-lane L0-L17 (RX/TX pairs). */
  NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60,
  NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61,
  NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62,
  NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63,
  NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64,
  NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65,
  NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66,
  NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67,
  NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68,
  NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69,
  NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70,
  NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71,
  NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72,
  NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73,
  NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74,
  NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75,
  NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76,
  NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77,
  NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78,
  NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79,
  NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80,
  NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81,
  NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82,
  NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83,
  NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84,
  NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85,
  NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86,
  NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87,
  NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88,
  NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89,
  NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90,
  NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91,
  NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92,
  NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93,
  NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94,
  NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95,
  NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96,
  NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97,
  /* C2C (chip-to-chip) throughput: totals, then per-link 0-13
   * (TOTAL/DATA x TX/RX quads); values continue in hex from 0x64 (=100). */
  NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC = 0x64,
  NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC = 0x65,
  NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC = 0x66,
  NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC = 0x67,
  NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = 0x68,
  NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = 0x69,
  NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = 0x6A,
  NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = 0x6B,
  NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = 0x6C,
  NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = 0x6D,
  NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = 0x6E,
  NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = 0x6F,
  NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = 0x70,
  NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = 0x71,
  NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = 0x72,
  NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = 0x73,
  NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = 0x74,
  NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = 0x75,
  NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = 0x76,
  NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = 0x77,
  NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = 0x78,
  NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = 0x79,
  NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = 0x7A,
  NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = 0x7B,
  NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = 0x7C,
  NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = 0x7D,
  NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = 0x7E,
  NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = 0x7F,
  NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = 0x80,
  NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = 0x81,
  NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = 0x82,
  NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = 0x83,
  NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = 0x84,
  NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = 0x85,
  NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = 0x86,
  NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = 0x87,
  NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = 0x88,
  NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = 0x89,
  NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = 0x8A,
  NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = 0x8B,
  NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = 0x8C,
  NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = 0x8D,
  NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = 0x8E,
  NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = 0x8F,
  NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = 0x90,
  NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = 0x91,
  NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = 0x92,
  NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = 0x93,
  NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = 0x94,
  NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = 0x95,
  NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = 0x96,
  NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = 0x97,
  NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = 0x98,
  NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = 0x99,
  NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = 0x9A,
  NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = 0x9B,
  NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = 0x9C,
  NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = 0x9D,
  NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = 0x9E,
  NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = 0x9F,
  /* Cache hit/miss counters. */
  NVML_GPM_METRIC_HOSTMEM_CACHE_HIT = 0xA0,
  NVML_GPM_METRIC_HOSTMEM_CACHE_MISS = 0xA1,
  NVML_GPM_METRIC_PEERMEM_CACHE_HIT = 0xA2,
  NVML_GPM_METRIC_PEERMEM_CACHE_MISS = 0xA3,
  NVML_GPM_METRIC_DRAM_CACHE_HIT = 0xA4,
  NVML_GPM_METRIC_DRAM_CACHE_MISS = 0xA5,
  /* Per-engine NVENC utilization. */
  NVML_GPM_METRIC_NVENC_0_UTIL = 0xA6,
  NVML_GPM_METRIC_NVENC_1_UTIL = 0xA7,
  NVML_GPM_METRIC_NVENC_2_UTIL = 0xA8,
  NVML_GPM_METRIC_NVENC_3_UTIL = 0xA9,
  /* Context-switch counters, five per GR engine GR0-GR7. */
  NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = 0xAA,
  NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = 0xAB,
  NVML_GPM_METRIC_GR0_CTXSW_REQUESTS = 0xAC,
  NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = 0xAD,
  NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = 0xAE,
  NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = 0xAF,
  NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = 0xB0,
  NVML_GPM_METRIC_GR1_CTXSW_REQUESTS = 0xB1,
  NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = 0xB2,
  NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = 0xB3,
  NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = 0xB4,
  NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = 0xB5,
  NVML_GPM_METRIC_GR2_CTXSW_REQUESTS = 0xB6,
  NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = 0xB7,
  NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = 0xB8,
  NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = 0xB9,
  NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = 0xBA,
  NVML_GPM_METRIC_GR3_CTXSW_REQUESTS = 0xBB,
  NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = 0xBC,
  NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = 0xBD,
  NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = 0xBE,
  NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = 0xBF,
  NVML_GPM_METRIC_GR4_CTXSW_REQUESTS = 0xC0,
  NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = 0xC1,
  NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = 0xC2,
  NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = 0xC3,
  NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = 0xC4,
  NVML_GPM_METRIC_GR5_CTXSW_REQUESTS = 0xC5,
  NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = 0xC6,
  NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = 0xC7,
  NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = 0xC8,
  NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = 0xC9,
  NVML_GPM_METRIC_GR6_CTXSW_REQUESTS = 0xCA,
  NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = 0xCB,
  NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = 0xCC,
  NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = 0xCD,
  NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = 0xCE,
  NVML_GPM_METRIC_GR7_CTXSW_REQUESTS = 0xCF,
  NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = 0xD0,
  NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = 0xD1,
  NVML_GPM_METRIC_MAX = 0xD2
};
typedef enum nvmlGpmMetricId_t nvmlGpmMetricId_t;

/* "cy_nvml.pxd":650
 *     NVML_GPM_METRIC_MAX "NVML_GPM_METRIC_MAX" = 210
 * 
 * ctypedef enum nvmlPowerProfileType_t "nvmlPowerProfileType_t":             # <<<<<<<<<<<<<<
 *     NVML_POWER_PROFILE_MAX_P "NVML_POWER_PROFILE_MAX_P" = 0
 *     NVML_POWER_PROFILE_MAX_Q "NVML_POWER_PROFILE_MAX_Q" = 1
*/
/* Cython-generated mirror of NVML's nvmlPowerProfileType_t (workload power
 * profiles). NVML_POWER_PROFILE_MAX (15) is one past the last profile.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlPowerProfileType_t {
  NVML_POWER_PROFILE_MAX_P = 0,
  NVML_POWER_PROFILE_MAX_Q = 1,
  NVML_POWER_PROFILE_COMPUTE = 2,
  NVML_POWER_PROFILE_MEMORY_BOUND = 3,
  NVML_POWER_PROFILE_NETWORK = 4,
  NVML_POWER_PROFILE_BALANCED = 5,
  NVML_POWER_PROFILE_LLM_INFERENCE = 6,
  NVML_POWER_PROFILE_LLM_TRAINING = 7,
  NVML_POWER_PROFILE_RBM = 8,
  NVML_POWER_PROFILE_DCPCIE = 9,
  NVML_POWER_PROFILE_HMMA_SPARSE = 10,
  NVML_POWER_PROFILE_HMMA_DENSE = 11,
  NVML_POWER_PROFILE_SYNC_BALANCED = 12,
  NVML_POWER_PROFILE_HPC = 13,
  NVML_POWER_PROFILE_MIG = 14,
  NVML_POWER_PROFILE_MAX = 15
};
typedef enum nvmlPowerProfileType_t nvmlPowerProfileType_t;

/* "cy_nvml.pxd":668
 *     NVML_POWER_PROFILE_MAX "NVML_POWER_PROFILE_MAX" = 15
 * 
 * ctypedef enum nvmlDeviceAddressingModeType_t "nvmlDeviceAddressingModeType_t":             # <<<<<<<<<<<<<<
 *     NVML_DEVICE_ADDRESSING_MODE_NONE "NVML_DEVICE_ADDRESSING_MODE_NONE" = 0
 *     NVML_DEVICE_ADDRESSING_MODE_HMM "NVML_DEVICE_ADDRESSING_MODE_HMM" = 1
*/
/* Cython-generated mirror of NVML's nvmlDeviceAddressingModeType_t.
 * Keep in sync with nvml.h; do not edit by hand. */
enum nvmlDeviceAddressingModeType_t {
  NVML_DEVICE_ADDRESSING_MODE_NONE = 0,
  NVML_DEVICE_ADDRESSING_MODE_HMM = 1,
  NVML_DEVICE_ADDRESSING_MODE_ATS = 2
};
typedef enum nvmlDeviceAddressingModeType_t nvmlDeviceAddressingModeType_t;

/* "cy_nvml.pxd":675
 * 
 * # types
 * ctypedef struct nvmlPciInfoExt_v1_t 'nvmlPciInfoExt_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int domain
*/
/* Cython-generated mirror of NVML's nvmlPciInfoExt_v1_t (extended PCI info).
 * Field order/types are ABI-sensitive; keep in sync with nvml.h. */
struct nvmlPciInfoExt_v1_t {
  unsigned int version;             /* struct version (NVML versioned-API convention) */
  unsigned int domain;
  unsigned int bus;
  unsigned int device;
  unsigned int pciDeviceId;
  unsigned int pciSubSystemId;
  unsigned int baseClass;
  unsigned int subClass;
  char busId[32];                   /* NUL-terminated PCI bus-id string */
};

/* "cy_nvml.pxd":686
 *     char busId[32]
 * 
 * ctypedef struct nvmlCoolerInfo_v1_t 'nvmlCoolerInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int index
*/
/* Cython-generated mirror of NVML's nvmlCoolerInfo_v1_t.
 * nvmlCoolerControl_t / nvmlCoolerTarget_t are declared elsewhere in this
 * generated file. ABI-sensitive; keep in sync with nvml.h. */
struct nvmlCoolerInfo_v1_t {
  unsigned int version;
  unsigned int index;
  nvmlCoolerControl_t signalType;
  nvmlCoolerTarget_t target;
};

/* "cy_nvml.pxd":692
 *     nvmlCoolerTarget_t target
 * 
 * ctypedef struct nvmlDramEncryptionInfo_v1_t 'nvmlDramEncryptionInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlEnableState_t encryptionState
*/
/* Cython-generated mirror of NVML's nvmlDramEncryptionInfo_v1_t.
 * ABI-sensitive; keep in sync with nvml.h. */
struct nvmlDramEncryptionInfo_v1_t {
  unsigned int version;
  nvmlEnableState_t encryptionState;
};

/* "cy_nvml.pxd":696
 *     nvmlEnableState_t encryptionState
 * 
 * ctypedef struct nvmlMarginTemperature_v1_t 'nvmlMarginTemperature_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     int marginTemperature
*/
/* Cython-generated mirror of NVML's nvmlMarginTemperature_v1_t.
 * marginTemperature is signed (may be negative). Keep in sync with nvml.h. */
struct nvmlMarginTemperature_v1_t {
  unsigned int version;
  int marginTemperature;
};

/* "cy_nvml.pxd":700
 *     int marginTemperature
 * 
 * ctypedef struct nvmlClockOffset_v1_t 'nvmlClockOffset_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlClockType_t type
*/
/* Cython-generated mirror of NVML's nvmlClockOffset_v1_t.
 * Offsets are signed MHz values. ABI-sensitive; keep in sync with nvml.h. */
struct nvmlClockOffset_v1_t {
  unsigned int version;
  nvmlClockType_t type;
  nvmlPstates_t pstate;
  int clockOffsetMHz;
  int minClockOffsetMHz;
  int maxClockOffsetMHz;
};

/* "cy_nvml.pxd":708
 *     int maxClockOffsetMHz
 * 
 * ctypedef struct nvmlFanSpeedInfo_v1_t 'nvmlFanSpeedInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int fan
*/
/* Cython-generated mirror of NVML's nvmlFanSpeedInfo_v1_t.
 * ABI-sensitive; keep in sync with nvml.h. */
struct nvmlFanSpeedInfo_v1_t {
  unsigned int version;
  unsigned int fan;     /* fan index */
  unsigned int speed;
};

/* "cy_nvml.pxd":713
 *     unsigned int speed
 * 
 * ctypedef struct nvmlDevicePerfModes_v1_t 'nvmlDevicePerfModes_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     char str[2048]
*/
/* Cython-generated mirror of NVML's nvmlDevicePerfModes_v1_t.
 * str is a fixed 2048-byte character buffer. Keep in sync with nvml.h. */
struct nvmlDevicePerfModes_v1_t {
  unsigned int version;
  char str[2048];
};

/* "cy_nvml.pxd":717
 *     char str[2048]
 * 
 * ctypedef struct nvmlDeviceCurrentClockFreqs_v1_t 'nvmlDeviceCurrentClockFreqs_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     char str[2048]
*/
/* Cython-generated mirror of NVML's nvmlDeviceCurrentClockFreqs_v1_t.
 * Same layout as nvmlDevicePerfModes_v1_t. Keep in sync with nvml.h. */
struct nvmlDeviceCurrentClockFreqs_v1_t {
  unsigned int version;
  char str[2048];
};

/* "cy_nvml.pxd":721
 *     char str[2048]
 * 
 * ctypedef struct nvmlEccSramErrorStatus_v1_t 'nvmlEccSramErrorStatus_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long aggregateUncParity
*/
/* Cython-generated mirror of NVML's nvmlEccSramErrorStatus_v1_t.
 * PY_LONG_LONG is CPython's `long long` macro (from Python.h); the .pxd
 * declares these fields as `unsigned long long`. Keep in sync with nvml.h. */
struct nvmlEccSramErrorStatus_v1_t {
  unsigned int version;
  /* Lifetime (aggregate) counters. */
  unsigned PY_LONG_LONG aggregateUncParity;
  unsigned PY_LONG_LONG aggregateUncSecDed;
  unsigned PY_LONG_LONG aggregateCor;
  /* Since-last-reset (volatile) counters. */
  unsigned PY_LONG_LONG volatileUncParity;
  unsigned PY_LONG_LONG volatileUncSecDed;
  unsigned PY_LONG_LONG volatileCor;
  /* Aggregate uncorrectable errors bucketed by unit. */
  unsigned PY_LONG_LONG aggregateUncBucketL2;
  unsigned PY_LONG_LONG aggregateUncBucketSm;
  unsigned PY_LONG_LONG aggregateUncBucketPcie;
  unsigned PY_LONG_LONG aggregateUncBucketMcu;
  unsigned PY_LONG_LONG aggregateUncBucketOther;
  unsigned int bThresholdExceeded;  /* boolean flag */
};

/* "cy_nvml.pxd":736
 *     unsigned int bThresholdExceeded
 * 
 * ctypedef struct nvmlPlatformInfo_v2_t 'nvmlPlatformInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char ibGuid[16]
*/
/* Cython-generated mirror of NVML's nvmlPlatformInfo_v2_t.
 * ABI-sensitive; keep in sync with nvml.h. */
struct nvmlPlatformInfo_v2_t {
  unsigned int version;
  unsigned char ibGuid[16];
  unsigned char chassisSerialNumber[16];
  unsigned char slotNumber;
  unsigned char trayIndex;
  unsigned char hostId;
  unsigned char peerType;
  unsigned char moduleId;
};

/* "cy_nvml.pxd":753
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'
 * ctypedef struct nvmlVgpuHeterogeneousMode_v1_t 'nvmlVgpuHeterogeneousMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int mode
*/
/* Cython-generated mirror of NVML's nvmlVgpuHeterogeneousMode_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlVgpuHeterogeneousMode_v1_t {
  unsigned int version;
  unsigned int mode;
};

/* "cy_nvml.pxd":757
 *     unsigned int mode
 * 
 * ctypedef struct nvmlVgpuPlacementId_v1_t 'nvmlVgpuPlacementId_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int placementId
*/
/* Cython-generated mirror of NVML's nvmlVgpuPlacementId_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlVgpuPlacementId_v1_t {
  unsigned int version;
  unsigned int placementId;
};

/* "cy_nvml.pxd":761
 *     unsigned int placementId
 * 
 * ctypedef struct nvmlVgpuPlacementList_v2_t 'nvmlVgpuPlacementList_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int placementSize
*/
/* Cython-generated mirror of NVML's nvmlVgpuPlacementList_v2_t.
 * placementIds points to a caller-provided array; NOTE(review): ownership
 * and required capacity are defined by the NVML API — confirm in nvml.h. */
struct nvmlVgpuPlacementList_v2_t {
  unsigned int version;
  unsigned int placementSize;
  unsigned int count;
  unsigned int *placementIds;
  unsigned int mode;
};

/* "cy_nvml.pxd":768
 *     unsigned int mode
 * 
 * ctypedef struct nvmlVgpuTypeBar1Info_v1_t 'nvmlVgpuTypeBar1Info_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long bar1Size
*/
/* Cython-generated mirror of NVML's nvmlVgpuTypeBar1Info_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlVgpuTypeBar1Info_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG bar1Size;   /* `unsigned long long` in the .pxd */
};

/* "cy_nvml.pxd":772
 *     unsigned long long bar1Size
 * 
 * ctypedef struct nvmlVgpuRuntimeState_v1_t 'nvmlVgpuRuntimeState_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long size
*/
/* Cython-generated mirror of NVML's nvmlVgpuRuntimeState_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlVgpuRuntimeState_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG size;       /* `unsigned long long` in the .pxd */
};

/* "cy_nvml.pxd":776
 *     unsigned long long size
 * 
 * ctypedef struct nvmlSystemConfComputeSettings_v1_t 'nvmlSystemConfComputeSettings_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int environment
*/
/* Cython-generated mirror of NVML's nvmlSystemConfComputeSettings_v1_t
 * (confidential-computing settings). Keep in sync with nvml.h. */
struct nvmlSystemConfComputeSettings_v1_t {
  unsigned int version;
  unsigned int environment;
  unsigned int ccFeature;
  unsigned int devToolsMode;
  unsigned int multiGpuMode;
};

/* "cy_nvml.pxd":783
 *     unsigned int multiGpuMode
 * 
 * ctypedef struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t 'nvmlConfComputeSetKeyRotationThresholdInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long maxAttackerAdvantage
*/
/* Cython-generated mirror of NVML's
 * nvmlConfComputeSetKeyRotationThresholdInfo_v1_t. Keep in sync with nvml.h. */
struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG maxAttackerAdvantage;
};

/* "cy_nvml.pxd":787
 *     unsigned long long maxAttackerAdvantage
 * 
 * ctypedef struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t 'nvmlConfComputeGetKeyRotationThresholdInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long attackerAdvantage
*/
/* Cython-generated mirror of NVML's
 * nvmlConfComputeGetKeyRotationThresholdInfo_v1_t. Keep in sync with nvml.h. */
struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG attackerAdvantage;
};

/* "cy_nvml.pxd":792
 * 
 * ctypedef unsigned char nvmlGpuFabricState_t 'nvmlGpuFabricState_t'
 * ctypedef struct nvmlSystemDriverBranchInfo_v1_t 'nvmlSystemDriverBranchInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     char branch[80]
*/
/* Cython-generated mirror of NVML's nvmlSystemDriverBranchInfo_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlSystemDriverBranchInfo_v1_t {
  unsigned int version;
  char branch[80];      /* driver branch string buffer */
};

/* "cy_nvml.pxd":797
 * 
 * ctypedef unsigned int nvmlAffinityScope_t 'nvmlAffinityScope_t'
 * ctypedef struct nvmlTemperature_v1_t 'nvmlTemperature_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlTemperatureSensors_t sensorType
*/
/* Cython-generated mirror of NVML's nvmlTemperature_v1_t.
 * temperature is signed. Keep in sync with nvml.h. */
struct nvmlTemperature_v1_t {
  unsigned int version;
  nvmlTemperatureSensors_t sensorType;
  int temperature;
};

/* "cy_nvml.pxd":802
 *     int temperature
 * 
 * ctypedef struct nvmlNvlinkSupportedBwModes_v1_t 'nvmlNvlinkSupportedBwModes_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char bwModes[23]
*/
/* Cython-generated mirror of NVML's nvmlNvlinkSupportedBwModes_v1_t.
 * bwModes holds up to 23 mode codes; totalBwModes is the valid count.
 * Keep in sync with nvml.h. */
struct nvmlNvlinkSupportedBwModes_v1_t {
  unsigned int version;
  unsigned char bwModes[23];
  unsigned char totalBwModes;
};

/* "cy_nvml.pxd":807
 *     unsigned char totalBwModes
 * 
 * ctypedef struct nvmlNvlinkGetBwMode_v1_t 'nvmlNvlinkGetBwMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int bIsBest
*/
/* Cython-generated mirror of NVML's nvmlNvlinkGetBwMode_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlNvlinkGetBwMode_v1_t {
  unsigned int version;
  unsigned int bIsBest;     /* boolean flag */
  unsigned char bwMode;
};

/* "cy_nvml.pxd":812
 *     unsigned char bwMode
 * 
 * ctypedef struct nvmlNvlinkSetBwMode_v1_t 'nvmlNvlinkSetBwMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int bSetBest
*/
/* Cython-generated mirror of NVML's nvmlNvlinkSetBwMode_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlNvlinkSetBwMode_v1_t {
  unsigned int version;
  unsigned int bSetBest;    /* boolean flag */
  unsigned char bwMode;
};

/* "cy_nvml.pxd":817
 *     unsigned char bwMode
 * 
 * ctypedef struct nvmlDeviceCapabilities_v1_t 'nvmlDeviceCapabilities_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int capMask
*/
/* Cython-generated mirror of NVML's nvmlDeviceCapabilities_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlDeviceCapabilities_v1_t {
  unsigned int version;
  unsigned int capMask;     /* capability bitmask */
};

/* "cy_nvml.pxd":821
 *     unsigned int capMask
 * 
 * ctypedef struct nvmlPowerSmoothingProfile_v1_t 'nvmlPowerSmoothingProfile_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int profileId
*/
/* Cython-generated mirror of NVML's nvmlPowerSmoothingProfile_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlPowerSmoothingProfile_v1_t {
  unsigned int version;
  unsigned int profileId;
  unsigned int paramId;
  double value;
};

/* "cy_nvml.pxd":827
 *     double value
 * 
 * ctypedef struct nvmlPowerSmoothingState_v1_t 'nvmlPowerSmoothingState_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlEnableState_t state
*/
/* Cython-generated mirror of NVML's nvmlPowerSmoothingState_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlPowerSmoothingState_v1_t {
  unsigned int version;
  nvmlEnableState_t state;
};

/* "cy_nvml.pxd":831
 *     nvmlEnableState_t state
 * 
 * ctypedef struct nvmlDeviceAddressingMode_v1_t 'nvmlDeviceAddressingMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int value
*/
/* Cython-generated mirror of NVML's nvmlDeviceAddressingMode_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlDeviceAddressingMode_v1_t {
  unsigned int version;
  unsigned int value;
};

/* "cy_nvml.pxd":835
 *     unsigned int value
 * 
 * ctypedef struct nvmlRepairStatus_v1_t 'nvmlRepairStatus_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int bChannelRepairPending
*/
/* Cython-generated mirror of NVML's nvmlRepairStatus_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlRepairStatus_v1_t {
  unsigned int version;
  unsigned int bChannelRepairPending;   /* boolean flag */
  unsigned int bTpcRepairPending;       /* boolean flag */
};

/* "cy_nvml.pxd":840
 *     unsigned int bTpcRepairPending
 * 
 * ctypedef struct nvmlPdi_v1_t 'nvmlPdi_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long value
*/
/* Cython-generated mirror of NVML's nvmlPdi_v1_t.
 * Keep in sync with nvml.h. */
struct nvmlPdi_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG value;      /* `unsigned long long` in the .pxd */
};

/* "cy_nvml.pxd":844
 *     unsigned long long value
 * 
 * ctypedef void* nvmlDevice_t 'nvmlDevice_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
*/
/* Opaque NVML device handle (void* on the C side). */
typedef void *nvmlDevice_t;

/* "cy_nvml.pxd":845
 * 
 * ctypedef void* nvmlDevice_t 'nvmlDevice_t'
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
*/
/* Opaque NVML GPU-instance handle. */
typedef void *nvmlGpuInstance_t;

/* "cy_nvml.pxd":846
 * ctypedef void* nvmlDevice_t 'nvmlDevice_t'
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
*/
/* Opaque NVML unit handle. */
typedef void *nvmlUnit_t;

/* "cy_nvml.pxd":847
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
*/
/* Opaque NVML event-set handle. */
typedef void *nvmlEventSet_t;

/* "cy_nvml.pxd":848
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'
*/
/* Opaque NVML system-event-set handle. */
typedef void *nvmlSystemEventSet_t;

/* "cy_nvml.pxd":849
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'
 * ctypedef struct nvmlPciInfo_t 'nvmlPciInfo_t':
*/
/* Opaque NVML compute-instance handle. */
typedef void *nvmlComputeInstance_t;

/* "cy_nvml.pxd":850
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlPciInfo_t 'nvmlPciInfo_t':
 *     char busIdLegacy[16]
*/
/* Opaque NVML GPM sample handle. */
typedef void *nvmlGpmSample_t;

/* "cy_nvml.pxd":851
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'
 * ctypedef struct nvmlPciInfo_t 'nvmlPciInfo_t':             # <<<<<<<<<<<<<<
 *     char busIdLegacy[16]
 *     unsigned int domain
*/
/* Cython-generated mirror of NVML's nvmlPciInfo_t.
 * ABI-sensitive; keep in sync with nvml.h. */
struct nvmlPciInfo_t {
  char busIdLegacy[16];     /* legacy (shorter) bus-id string buffer */
  unsigned int domain;
  unsigned int bus;
  unsigned int device;
  unsigned int pciDeviceId;
  unsigned int pciSubSystemId;
  char busId[32];           /* full bus-id string buffer */
};

/* "cy_nvml.pxd":860
 *     char busId[32]
 * 
 * ctypedef struct nvmlEccErrorCounts_t 'nvmlEccErrorCounts_t':             # <<<<<<<<<<<<<<
 *     unsigned long long l1Cache
 *     unsigned long long l2Cache
*/
/* Cython-generated mirror of NVML's nvmlEccErrorCounts_t (per-unit ECC
 * error counts). PY_LONG_LONG is CPython's `long long` macro from Python.h;
 * the .pxd declares these as `unsigned long long`. Keep in sync with nvml.h. */
struct nvmlEccErrorCounts_t {
  unsigned PY_LONG_LONG l1Cache;
  unsigned PY_LONG_LONG l2Cache;
  unsigned PY_LONG_LONG deviceMemory;
  unsigned PY_LONG_LONG registerFile;
};

/* "cy_nvml.pxd":866
 *     unsigned long long registerFile
 * 
 * ctypedef struct nvmlUtilization_t 'nvmlUtilization_t':             # <<<<<<<<<<<<<<
 *     unsigned int gpu
 *     unsigned int memory
*/
/* Cython-generated mirror of NVML's nvmlUtilization_t.
 * Keep in sync with nvml.h. */
struct nvmlUtilization_t {
  unsigned int gpu;
  unsigned int memory;
};

/* "cy_nvml.pxd":870
 *     unsigned int memory
 * 
 * ctypedef struct nvmlMemory_t 'nvmlMemory_t':             # <<<<<<<<<<<<<<
 *     unsigned long long total
 *     unsigned long long free
*/
/* Cython-generated mirror of NVML's nvmlMemory_t (v1 memory info, bytes).
 * Keep in sync with nvml.h. */
struct nvmlMemory_t {
  unsigned PY_LONG_LONG total;
  unsigned PY_LONG_LONG free;
  unsigned PY_LONG_LONG used;
};

/* "cy_nvml.pxd":875
 *     unsigned long long used
 * 
 * ctypedef struct nvmlMemory_v2_t 'nvmlMemory_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long total
*/
/* Cython-generated mirror of NVML's nvmlMemory_v2_t.
 * Adds `version` and `reserved` relative to nvmlMemory_t.
 * Keep in sync with nvml.h. */
struct nvmlMemory_v2_t {
  unsigned int version;
  unsigned PY_LONG_LONG total;
  unsigned PY_LONG_LONG reserved;
  unsigned PY_LONG_LONG free;
  unsigned PY_LONG_LONG used;
};

/* "cy_nvml.pxd":882
 *     unsigned long long used
 * 
 * ctypedef struct nvmlBAR1Memory_t 'nvmlBAR1Memory_t':             # <<<<<<<<<<<<<<
 *     unsigned long long bar1Total
 *     unsigned long long bar1Free
*/
/* Generated mirror of NVML's nvmlBAR1Memory_t; layout is ABI — do not hand-edit. */
struct nvmlBAR1Memory_t {
  unsigned PY_LONG_LONG bar1Total;
  unsigned PY_LONG_LONG bar1Free;
  unsigned PY_LONG_LONG bar1Used;
};

/* "cy_nvml.pxd":887
 *     unsigned long long bar1Used
 * 
 * ctypedef struct nvmlProcessInfo_v1_t 'nvmlProcessInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Generated mirror of NVML's nvmlProcessInfo_v1_t (pid + GPU memory only);
 * later versions below append fields. Layout is ABI — do not hand-edit. */
struct nvmlProcessInfo_v1_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
};

/* "cy_nvml.pxd":891
 *     unsigned long long usedGpuMemory
 * 
 * ctypedef struct nvmlProcessInfo_v2_t 'nvmlProcessInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Generated mirror of NVML's nvmlProcessInfo_v2_t — v1 plus MIG instance IDs.
 * Layout is ABI — do not hand-edit. */
struct nvmlProcessInfo_v2_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
};

/* "cy_nvml.pxd":897
 *     unsigned int computeInstanceId
 * 
 * ctypedef struct nvmlProcessInfo_t 'nvmlProcessInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Generated mirror of NVML's unversioned nvmlProcessInfo_t; note it has the
 * same members as nvmlProcessInfo_v2_t. Layout is ABI — do not hand-edit. */
struct nvmlProcessInfo_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
};

/* "cy_nvml.pxd":903
 *     unsigned int computeInstanceId
 * 
 * ctypedef struct nvmlProcessDetail_v1_t 'nvmlProcessDetail_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Generated mirror of NVML's nvmlProcessDetail_v1_t — process info plus a
 * confidential-computing protected-memory counter. Layout is ABI. */
struct nvmlProcessDetail_v1_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
  unsigned PY_LONG_LONG usedGpuCcProtectedMemory;
};

/* "cy_nvml.pxd":910
 *     unsigned long long usedGpuCcProtectedMemory
 * 
 * ctypedef struct nvmlDeviceAttributes_t 'nvmlDeviceAttributes_t':             # <<<<<<<<<<<<<<
 *     unsigned int multiprocessorCount
 *     unsigned int sharedCopyEngineCount
*/
/* Generated mirror of NVML's nvmlDeviceAttributes_t; layout is ABI — do not hand-edit. */
struct nvmlDeviceAttributes_t {
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
  unsigned int gpuInstanceSliceCount;
  unsigned int computeInstanceSliceCount;
  unsigned PY_LONG_LONG memorySizeMB;
};

/* "cy_nvml.pxd":921
 *     unsigned long long memorySizeMB
 * 
 * ctypedef struct nvmlC2cModeInfo_v1_t 'nvmlC2cModeInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int isC2cEnabled
 * 
*/
/* Generated mirror of NVML's nvmlC2cModeInfo_v1_t; layout is ABI — do not hand-edit. */
struct nvmlC2cModeInfo_v1_t {
  unsigned int isC2cEnabled;
};

/* "cy_nvml.pxd":924
 *     unsigned int isC2cEnabled
 * 
 * ctypedef struct nvmlRowRemapperHistogramValues_t 'nvmlRowRemapperHistogramValues_t':             # <<<<<<<<<<<<<<
 *     unsigned int max
 *     unsigned int high
*/
/* Generated mirror of NVML's nvmlRowRemapperHistogramValues_t. Members named
 * `max`/`none` shadow nothing here but avoid adding macros with those names
 * above this point. Layout is ABI — do not hand-edit. */
struct nvmlRowRemapperHistogramValues_t {
  unsigned int max;
  unsigned int high;
  unsigned int partial;
  unsigned int low;
  unsigned int none;
};

/* "cy_nvml.pxd":931
 *     unsigned int none
 * 
 * ctypedef struct nvmlNvLinkUtilizationControl_t 'nvmlNvLinkUtilizationControl_t':             # <<<<<<<<<<<<<<
 *     nvmlNvLinkUtilizationCountUnits_t units
 *     nvmlNvLinkUtilizationCountPktTypes_t pktfilter
*/
/* Generated mirror of NVML's nvmlNvLinkUtilizationControl_t; enum member types
 * are declared earlier in this file. Layout is ABI — do not hand-edit. */
struct nvmlNvLinkUtilizationControl_t {
  nvmlNvLinkUtilizationCountUnits_t units;
  nvmlNvLinkUtilizationCountPktTypes_t pktfilter;
};

/* "cy_nvml.pxd":935
 *     nvmlNvLinkUtilizationCountPktTypes_t pktfilter
 * 
 * ctypedef struct nvmlBridgeChipInfo_t 'nvmlBridgeChipInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlBridgeChipType_t type
 *     unsigned int fwVersion
*/
/* Generated mirror of NVML's nvmlBridgeChipInfo_t. The member named `type` is
 * fine in C but would clash with a macro of that name — none exists here.
 * Layout is ABI — do not hand-edit. */
struct nvmlBridgeChipInfo_t {
  nvmlBridgeChipType_t type;
  unsigned int fwVersion;
};

/* "cy_nvml.pxd":939
 *     unsigned int fwVersion
 * 
 * ctypedef union nvmlValue_t 'nvmlValue_t':             # <<<<<<<<<<<<<<
 *     double dVal
 *     int siVal
*/
/* Generated mirror of NVML's nvmlValue_t — a tagged-by-caller union of scalar
 * sample value types (the active member is chosen by the NVML value-type enum
 * elsewhere). PY_LONG_LONG is CPython's 64-bit `long long` macro. ABI — do not
 * hand-edit. */
union nvmlValue_t {
  double dVal;
  int siVal;
  unsigned int uiVal;
  unsigned long ulVal;
  unsigned PY_LONG_LONG ullVal;
  PY_LONG_LONG sllVal;
  unsigned short usVal;
};

/* "cy_nvml.pxd":948
 *     unsigned short usVal
 * 
 * ctypedef struct nvmlViolationTime_t 'nvmlViolationTime_t':             # <<<<<<<<<<<<<<
 *     unsigned long long referenceTime
 *     unsigned long long violationTime
*/
/* Generated mirror of NVML's nvmlViolationTime_t; layout is ABI — do not hand-edit. */
struct nvmlViolationTime_t {
  unsigned PY_LONG_LONG referenceTime;
  unsigned PY_LONG_LONG violationTime;
};

/* "cy_nvml.pxd":952
 *     unsigned long long violationTime
 * 
 * ctypedef struct _anon_pod0 '_anon_pod0':             # <<<<<<<<<<<<<<
 *     nvmlThermalController_t controller
 *     int defaultMinTemp
*/
/* Cython-generated name for an anonymous POD nested inside an NVML struct
 * (thermal-sensor entry, per the field set). Layout is ABI — do not hand-edit. */
struct _anon_pod0 {
  nvmlThermalController_t controller;
  int defaultMinTemp;
  int defaultMaxTemp;
  int currentTemp;
  nvmlThermalTarget_t target;
};

/* "cy_nvml.pxd":959
 *     nvmlThermalTarget_t target
 * 
 * ctypedef union nvmlUUIDValue_t 'nvmlUUIDValue_t':             # <<<<<<<<<<<<<<
 *     char str[41]
 *     unsigned char bytes[16]
*/
/* Generated mirror of NVML's nvmlUUIDValue_t — UUID as either a 41-byte ASCII
 * string or 16 raw bytes. Layout is ABI — do not hand-edit. */
union nvmlUUIDValue_t {
  char str[41];
  unsigned char bytes[16];
};

/* "cy_nvml.pxd":963
 *     unsigned char bytes[16]
 * 
 * ctypedef struct nvmlClkMonFaultInfo_t 'nvmlClkMonFaultInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int clkApiDomain
 *     unsigned int clkDomainFaultMask
*/
/* Generated mirror of NVML's nvmlClkMonFaultInfo_t; layout is ABI — do not hand-edit. */
struct nvmlClkMonFaultInfo_t {
  unsigned int clkApiDomain;
  unsigned int clkDomainFaultMask;
};

/* "cy_nvml.pxd":967
 *     unsigned int clkDomainFaultMask
 * 
 * ctypedef struct nvmlProcessUtilizationSample_t 'nvmlProcessUtilizationSample_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long timeStamp
*/
/* Generated mirror of NVML's nvmlProcessUtilizationSample_t; layout is ABI. */
struct nvmlProcessUtilizationSample_t {
  unsigned int pid;
  unsigned PY_LONG_LONG timeStamp;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
};

/* "cy_nvml.pxd":975
 *     unsigned int decUtil
 * 
 * ctypedef struct nvmlProcessUtilizationInfo_v1_t 'nvmlProcessUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timeStamp
 *     unsigned int pid
*/
/* Generated mirror of NVML's nvmlProcessUtilizationInfo_v1_t — like the sample
 * struct above plus JPEG/OFA engine counters, with timeStamp first. ABI. */
struct nvmlProcessUtilizationInfo_v1_t {
  unsigned PY_LONG_LONG timeStamp;
  unsigned int pid;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
  unsigned int jpgUtil;
  unsigned int ofaUtil;
};

/* "cy_nvml.pxd":985
 *     unsigned int ofaUtil
 * 
 * ctypedef struct nvmlPlatformInfo_v1_t 'nvmlPlatformInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char ibGuid[16]
*/
/* Generated mirror of NVML's nvmlPlatformInfo_v1_t; layout is ABI — do not hand-edit. */
struct nvmlPlatformInfo_v1_t {
  unsigned int version;
  unsigned char ibGuid[16];
  unsigned char rackGuid[16];
  unsigned char chassisPhysicalSlotNumber;
  unsigned char computeSlotIndex;
  unsigned char nodeIndex;
  unsigned char peerType;
  unsigned char moduleId;
};

/* "cy_nvml.pxd":995
 *     unsigned char moduleId
 * 
 * ctypedef struct _anon_pod1 '_anon_pod1':             # <<<<<<<<<<<<<<
 *     unsigned int bIsPresent
 *     unsigned int percentage
*/
/* Cython-generated name for an anonymous POD nested in an NVML struct
 * (presence + percentage/thresholds, per the field set). ABI — do not hand-edit. */
struct _anon_pod1 {
  unsigned int bIsPresent;
  unsigned int percentage;
  unsigned int incThreshold;
  unsigned int decThreshold;
};

/* "cy_nvml.pxd":1001
 *     unsigned int decThreshold
 * 
 * ctypedef struct nvmlVgpuPlacementList_v1_t 'nvmlVgpuPlacementList_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int placementSize
*/
/* Generated mirror of NVML's nvmlVgpuPlacementList_v1_t. `placementIds` is a
 * caller-provided buffer of `count` entries — NOTE(review): ownership/size
 * contract lives in nvml.h; confirm there. ABI — do not hand-edit. */
struct nvmlVgpuPlacementList_v1_t {
  unsigned int version;
  unsigned int placementSize;
  unsigned int count;
  unsigned int *placementIds;
};

/* "cy_nvml.pxd":1007
 *     unsigned int* placementIds
 * 
 * ctypedef struct _anon_pod2 '_anon_pod2':             # <<<<<<<<<<<<<<
 *     unsigned int avgFactor
 *     unsigned int timeslice
*/
/* Cython-generated name for an anonymous POD nested in an NVML vGPU-scheduler
 * struct. ABI — do not hand-edit. */
struct _anon_pod2 {
  unsigned int avgFactor;
  unsigned int timeslice;
};

/* "cy_nvml.pxd":1011
 *     unsigned int timeslice
 * 
 * ctypedef struct _anon_pod3 '_anon_pod3':             # <<<<<<<<<<<<<<
 *     unsigned int timeslice
 * 
*/
/* Cython-generated name for an anonymous POD nested in an NVML vGPU-scheduler
 * struct. ABI — do not hand-edit. */
struct _anon_pod3 {
  unsigned int timeslice;
};

/* "cy_nvml.pxd":1014
 *     unsigned int timeslice
 * 
 * ctypedef struct nvmlVgpuSchedulerLogEntry_t 'nvmlVgpuSchedulerLogEntry_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timestamp
 *     unsigned long long timeRunTotal
*/
/* Generated mirror of NVML's nvmlVgpuSchedulerLogEntry_t; layout is ABI. */
struct nvmlVgpuSchedulerLogEntry_t {
  unsigned PY_LONG_LONG timestamp;
  unsigned PY_LONG_LONG timeRunTotal;
  unsigned PY_LONG_LONG timeRun;
  unsigned int swRunlistId;
  unsigned PY_LONG_LONG targetTimeSlice;
  unsigned PY_LONG_LONG cumulativePreemptionTime;
};

/* "cy_nvml.pxd":1022
 *     unsigned long long cumulativePreemptionTime
 * 
 * ctypedef struct _anon_pod4 '_anon_pod4':             # <<<<<<<<<<<<<<
 *     unsigned int avgFactor
 *     unsigned int frequency
*/
/* Cython-generated name for an anonymous POD nested in an NVML vGPU-scheduler
 * struct. ABI — do not hand-edit. */
struct _anon_pod4 {
  unsigned int avgFactor;
  unsigned int frequency;
};

/* "cy_nvml.pxd":1026
 *     unsigned int frequency
 * 
 * ctypedef struct _anon_pod5 '_anon_pod5':             # <<<<<<<<<<<<<<
 *     unsigned int timeslice
 * 
*/
/* Cython-generated name for an anonymous POD nested in an NVML vGPU-scheduler
 * struct. ABI — do not hand-edit. */
struct _anon_pod5 {
  unsigned int timeslice;
};

/* "cy_nvml.pxd":1029
 *     unsigned int timeslice
 * 
 * ctypedef struct nvmlVgpuSchedulerCapabilities_t 'nvmlVgpuSchedulerCapabilities_t':             # <<<<<<<<<<<<<<
 *     unsigned int supportedSchedulers[3]
 *     unsigned int maxTimeslice
*/
/* Generated mirror of NVML's nvmlVgpuSchedulerCapabilities_t; layout is ABI. */
struct nvmlVgpuSchedulerCapabilities_t {
  unsigned int supportedSchedulers[3];
  unsigned int maxTimeslice;
  unsigned int minTimeslice;
  unsigned int isArrModeSupported;
  unsigned int maxFrequencyForARR;
  unsigned int minFrequencyForARR;
  unsigned int maxAvgFactorForARR;
  unsigned int minAvgFactorForARR;
};

/* "cy_nvml.pxd":1039
 *     unsigned int minAvgFactorForARR
 * 
 * ctypedef struct nvmlVgpuLicenseExpiry_t 'nvmlVgpuLicenseExpiry_t':             # <<<<<<<<<<<<<<
 *     unsigned int year
 *     unsigned short month
*/
/* Generated mirror of NVML's nvmlVgpuLicenseExpiry_t — calendar timestamp plus
 * a status byte; identical member set to nvmlGridLicenseExpiry_t below. ABI. */
struct nvmlVgpuLicenseExpiry_t {
  unsigned int year;
  unsigned short month;
  unsigned short day;
  unsigned short hour;
  unsigned short min;
  unsigned short sec;
  unsigned char status;
};

/* "cy_nvml.pxd":1048
 *     unsigned char status
 * 
 * ctypedef struct nvmlGridLicenseExpiry_t 'nvmlGridLicenseExpiry_t':             # <<<<<<<<<<<<<<
 *     unsigned int year
 *     unsigned short month
*/
/* Generated mirror of NVML's nvmlGridLicenseExpiry_t — same member set as
 * nvmlVgpuLicenseExpiry_t above but a distinct ABI type. Do not hand-edit. */
struct nvmlGridLicenseExpiry_t {
  unsigned int year;
  unsigned short month;
  unsigned short day;
  unsigned short hour;
  unsigned short min;
  unsigned short sec;
  unsigned char status;
};

/* "cy_nvml.pxd":1057
 *     unsigned char status
 * 
 * ctypedef struct nvmlNvLinkPowerThres_t 'nvmlNvLinkPowerThres_t':             # <<<<<<<<<<<<<<
 *     unsigned int lowPwrThreshold
 * 
*/
/* Generated mirror of NVML's nvmlNvLinkPowerThres_t; layout is ABI — do not hand-edit. */
struct nvmlNvLinkPowerThres_t {
  unsigned int lowPwrThreshold;
};

/* "cy_nvml.pxd":1060
 *     unsigned int lowPwrThreshold
 * 
 * ctypedef struct nvmlHwbcEntry_t 'nvmlHwbcEntry_t':             # <<<<<<<<<<<<<<
 *     unsigned int hwbcId
 *     char firmwareVersion[32]
*/
/* Generated mirror of NVML's nvmlHwbcEntry_t; layout is ABI — do not hand-edit. */
struct nvmlHwbcEntry_t {
  unsigned int hwbcId;
  char firmwareVersion[32];
};

/* "cy_nvml.pxd":1064
 *     char firmwareVersion[32]
 * 
 * ctypedef struct nvmlLedState_t 'nvmlLedState_t':             # <<<<<<<<<<<<<<
 *     char cause[256]
 *     nvmlLedColor_t color
*/
/* Generated mirror of NVML's nvmlLedState_t; layout is ABI — do not hand-edit. */
struct nvmlLedState_t {
  char cause[256];
  nvmlLedColor_t color;
};

/* "cy_nvml.pxd":1068
 *     nvmlLedColor_t color
 * 
 * ctypedef struct nvmlUnitInfo_t 'nvmlUnitInfo_t':             # <<<<<<<<<<<<<<
 *     char name[96]
 *     char id[96]
*/
/* Generated mirror of NVML's nvmlUnitInfo_t (four 96-byte string fields);
 * layout is ABI — do not hand-edit. */
struct nvmlUnitInfo_t {
  char name[96];
  char id[96];
  char serial[96];
  char firmwareVersion[96];
};

/* "cy_nvml.pxd":1074
 *     char firmwareVersion[96]
 * 
 * ctypedef struct nvmlPSUInfo_t 'nvmlPSUInfo_t':             # <<<<<<<<<<<<<<
 *     char state[256]
 *     unsigned int current
*/
/* Generated mirror of NVML's nvmlPSUInfo_t; layout is ABI — do not hand-edit. */
struct nvmlPSUInfo_t {
  char state[256];
  unsigned int current;
  unsigned int voltage;
  unsigned int power;
};

/* "cy_nvml.pxd":1080
 *     unsigned int power
 * 
 * ctypedef struct nvmlUnitFanInfo_t 'nvmlUnitFanInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int speed
 *     nvmlFanState_t state
*/
/* Generated mirror of NVML's nvmlUnitFanInfo_t; layout is ABI — do not hand-edit. */
struct nvmlUnitFanInfo_t {
  unsigned int speed;
  nvmlFanState_t state;
};

/* "cy_nvml.pxd":1084
 *     nvmlFanState_t state
 * 
 * ctypedef struct nvmlSystemEventData_v1_t 'nvmlSystemEventData_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned long long eventType
 *     unsigned int gpuId
*/
/* Generated mirror of NVML's nvmlSystemEventData_v1_t; layout is ABI. */
struct nvmlSystemEventData_v1_t {
  unsigned PY_LONG_LONG eventType;
  unsigned int gpuId;
};

/* "cy_nvml.pxd":1088
 *     unsigned int gpuId
 * 
 * ctypedef struct nvmlAccountingStats_t 'nvmlAccountingStats_t':             # <<<<<<<<<<<<<<
 *     unsigned int gpuUtilization
 *     unsigned int memoryUtilization
*/
/* Generated mirror of NVML's nvmlAccountingStats_t; the trailing reserved[5]
 * pads the ABI for future fields. Do not hand-edit. */
struct nvmlAccountingStats_t {
  unsigned int gpuUtilization;
  unsigned int memoryUtilization;
  unsigned PY_LONG_LONG maxMemoryUsage;
  unsigned PY_LONG_LONG time;
  unsigned PY_LONG_LONG startTime;
  unsigned int isRunning;
  unsigned int reserved[5];
};

/* "cy_nvml.pxd":1097
 *     unsigned int reserved[5]
 * 
 * ctypedef struct nvmlFBCStats_t 'nvmlFBCStats_t':             # <<<<<<<<<<<<<<
 *     unsigned int sessionsCount
 *     unsigned int averageFPS
*/
/* Generated mirror of NVML's nvmlFBCStats_t; layout is ABI — do not hand-edit. */
struct nvmlFBCStats_t {
  unsigned int sessionsCount;
  unsigned int averageFPS;
  unsigned int averageLatency;
};

/* "cy_nvml.pxd":1102
 *     unsigned int averageLatency
 * 
 * ctypedef struct nvmlConfComputeSystemCaps_t 'nvmlConfComputeSystemCaps_t':             # <<<<<<<<<<<<<<
 *     unsigned int cpuCaps
 *     unsigned int gpusCaps
*/
/* Generated mirror of NVML's nvmlConfComputeSystemCaps_t; layout is ABI. */
struct nvmlConfComputeSystemCaps_t {
  unsigned int cpuCaps;
  unsigned int gpusCaps;
};

/* "cy_nvml.pxd":1106
 *     unsigned int gpusCaps
 * 
 * ctypedef struct nvmlConfComputeSystemState_t 'nvmlConfComputeSystemState_t':             # <<<<<<<<<<<<<<
 *     unsigned int environment
 *     unsigned int ccFeature
*/
/* Generated mirror of NVML's nvmlConfComputeSystemState_t; layout is ABI. */
struct nvmlConfComputeSystemState_t {
  unsigned int environment;
  unsigned int ccFeature;
  unsigned int devToolsMode;
};

/* "cy_nvml.pxd":1111
 *     unsigned int devToolsMode
 * 
 * ctypedef struct nvmlConfComputeMemSizeInfo_t 'nvmlConfComputeMemSizeInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned long long protectedMemSizeKib
 *     unsigned long long unprotectedMemSizeKib
*/
/* Generated mirror of NVML's nvmlConfComputeMemSizeInfo_t; layout is ABI. */
struct nvmlConfComputeMemSizeInfo_t {
  unsigned PY_LONG_LONG protectedMemSizeKib;
  unsigned PY_LONG_LONG unprotectedMemSizeKib;
};

/* "cy_nvml.pxd":1115
 *     unsigned long long unprotectedMemSizeKib
 * 
 * ctypedef struct nvmlConfComputeGpuCertificate_t 'nvmlConfComputeGpuCertificate_t':             # <<<<<<<<<<<<<<
 *     unsigned int certChainSize
 *     unsigned int attestationCertChainSize
*/
/* Generated mirror of NVML's nvmlConfComputeGpuCertificate_t. Array sizes match
 * the .pxd (attestationCertChain[0x1400] == 5120). ABI — do not hand-edit. */
struct nvmlConfComputeGpuCertificate_t {
  unsigned int certChainSize;
  unsigned int attestationCertChainSize;
  unsigned char certChain[4096];
  unsigned char attestationCertChain[5120];
};

/* "cy_nvml.pxd":1121
 *     unsigned char attestationCertChain[0x1400]
 * 
 * ctypedef struct nvmlConfComputeGpuAttestationReport_t 'nvmlConfComputeGpuAttestationReport_t':             # <<<<<<<<<<<<<<
 *     unsigned int isCecAttestationReportPresent
 *     unsigned int attestationReportSize
*/
/* Generated mirror of NVML's nvmlConfComputeGpuAttestationReport_t. Array sizes
 * match the .pxd hex constants (0x2000 == 8192, 0x1000 == 4096). ABI. */
struct nvmlConfComputeGpuAttestationReport_t {
  unsigned int isCecAttestationReportPresent;
  unsigned int attestationReportSize;
  unsigned int cecAttestationReportSize;
  unsigned char nonce[32];
  unsigned char attestationReport[8192];
  unsigned char cecAttestationReport[4096];
};

/* "cy_nvml.pxd":1129
 *     unsigned char cecAttestationReport[0x1000]
 * 
 * ctypedef struct nvmlVgpuVersion_t 'nvmlVgpuVersion_t':             # <<<<<<<<<<<<<<
 *     unsigned int minVersion
 *     unsigned int maxVersion
*/
/* Generated mirror of NVML's nvmlVgpuVersion_t; layout is ABI — do not hand-edit. */
struct nvmlVgpuVersion_t {
  unsigned int minVersion;
  unsigned int maxVersion;
};

/* "cy_nvml.pxd":1133
 *     unsigned int maxVersion
 * 
 * ctypedef struct nvmlVgpuMetadata_t 'nvmlVgpuMetadata_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int revision
*/
/* Generated mirror of NVML's nvmlVgpuMetadata_t. NOTE(review): the trailing
 * opaqueData[4] with opaqueDataSize looks like a variable-length tail that NVML
 * allocates larger than 4 bytes — confirm against nvml.h before relying on
 * sizeof(). ABI — do not hand-edit. */
struct nvmlVgpuMetadata_t {
  unsigned int version;
  unsigned int revision;
  nvmlVgpuGuestInfoState_t guestInfoState;
  char guestDriverVersion[80];
  char hostDriverVersion[80];
  unsigned int reserved[6];
  unsigned int vgpuVirtualizationCaps;
  unsigned int guestVgpuVersion;
  unsigned int opaqueDataSize;
  char opaqueData[4];
};

/* "cy_nvml.pxd":1145
 *     char opaqueData[4]
 * 
 * ctypedef struct nvmlVgpuPgpuCompatibility_t 'nvmlVgpuPgpuCompatibility_t':             # <<<<<<<<<<<<<<
 *     nvmlVgpuVmCompatibility_t vgpuVmCompatibility
 *     nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode
*/
/* Generated mirror of NVML's nvmlVgpuPgpuCompatibility_t; layout is ABI. */
struct nvmlVgpuPgpuCompatibility_t {
  nvmlVgpuVmCompatibility_t vgpuVmCompatibility;
  nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode;
};

/* "cy_nvml.pxd":1149
 *     nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode
 * 
 * ctypedef struct nvmlGpuInstancePlacement_t 'nvmlGpuInstancePlacement_t':             # <<<<<<<<<<<<<<
 *     unsigned int start
 *     unsigned int size
*/
/* Generated mirror of NVML's nvmlGpuInstancePlacement_t; layout is ABI. */
struct nvmlGpuInstancePlacement_t {
  unsigned int start;
  unsigned int size;
};

/* "cy_nvml.pxd":1153
 *     unsigned int size
 * 
 * ctypedef struct nvmlGpuInstanceProfileInfo_t 'nvmlGpuInstanceProfileInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int id
 *     unsigned int isP2pSupported
*/
/* Generated mirror of NVML's nvmlGpuInstanceProfileInfo_t (unversioned v1
 * layout; v2/v3 below add version/name/capabilities). ABI — do not hand-edit. */
struct nvmlGpuInstanceProfileInfo_t {
  unsigned int id;
  unsigned int isP2pSupported;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int copyEngineCount;
  unsigned int decoderCount;
  unsigned int encoderCount;
  unsigned int jpegCount;
  unsigned int ofaCount;
  unsigned PY_LONG_LONG memorySizeMB;
};

/* "cy_nvml.pxd":1166
 *     unsigned long long memorySizeMB
 * 
 * ctypedef struct nvmlGpuInstanceProfileInfo_v2_t 'nvmlGpuInstanceProfileInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Generated mirror of NVML's nvmlGpuInstanceProfileInfo_v2_t — v1 plus a
 * leading version field and a trailing 96-char name. ABI — do not hand-edit. */
struct nvmlGpuInstanceProfileInfo_v2_t {
  unsigned int version;
  unsigned int id;
  unsigned int isP2pSupported;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int copyEngineCount;
  unsigned int decoderCount;
  unsigned int encoderCount;
  unsigned int jpegCount;
  unsigned int ofaCount;
  unsigned PY_LONG_LONG memorySizeMB;
  char name[96];
};

/* "cy_nvml.pxd":1181
 *     char name[96]
 * 
 * ctypedef struct nvmlGpuInstanceProfileInfo_v3_t 'nvmlGpuInstanceProfileInfo_v3_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Generated mirror of NVML's nvmlGpuInstanceProfileInfo_v3_t — note v3 drops
 * isP2pSupported (present in v1/v2) and appends `capabilities`. ABI. */
struct nvmlGpuInstanceProfileInfo_v3_t {
  unsigned int version;
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int copyEngineCount;
  unsigned int decoderCount;
  unsigned int encoderCount;
  unsigned int jpegCount;
  unsigned int ofaCount;
  unsigned PY_LONG_LONG memorySizeMB;
  char name[96];
  unsigned int capabilities;
};

/* "cy_nvml.pxd":1196
 *     unsigned int capabilities
 * 
 * ctypedef struct nvmlComputeInstancePlacement_t 'nvmlComputeInstancePlacement_t':             # <<<<<<<<<<<<<<
 *     unsigned int start
 *     unsigned int size
*/
/* Generated mirror of NVML's nvmlComputeInstancePlacement_t; layout is ABI. */
struct nvmlComputeInstancePlacement_t {
  unsigned int start;
  unsigned int size;
};

/* "cy_nvml.pxd":1200
 *     unsigned int size
 * 
 * ctypedef struct nvmlComputeInstanceProfileInfo_t 'nvmlComputeInstanceProfileInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int id
 *     unsigned int sliceCount
*/
/* Generated mirror of NVML's nvmlComputeInstanceProfileInfo_t (unversioned
 * layout; v2/v3 below add version/name/capabilities). ABI — do not hand-edit. */
struct nvmlComputeInstanceProfileInfo_t {
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
};

/* "cy_nvml.pxd":1211
 *     unsigned int sharedOfaCount
 * 
 * ctypedef struct nvmlComputeInstanceProfileInfo_v2_t 'nvmlComputeInstanceProfileInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Generated mirror of NVML's nvmlComputeInstanceProfileInfo_v2_t — v1 plus a
 * leading version field and a trailing 96-char name. ABI — do not hand-edit. */
struct nvmlComputeInstanceProfileInfo_v2_t {
  unsigned int version;
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
  char name[96];
};

/* "cy_nvml.pxd":1224
 *     char name[96]
 * 
 * ctypedef struct nvmlComputeInstanceProfileInfo_v3_t 'nvmlComputeInstanceProfileInfo_v3_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Generated mirror of NVML's nvmlComputeInstanceProfileInfo_v3_t — v2 plus a
 * trailing `capabilities` field. ABI — do not hand-edit. */
struct nvmlComputeInstanceProfileInfo_v3_t {
  unsigned int version;
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
  char name[96];
  unsigned int capabilities;
};

/* "cy_nvml.pxd":1238
 *     unsigned int capabilities
 * 
 * ctypedef struct _anon_pod6 '_anon_pod6':             # <<<<<<<<<<<<<<
 *     char* shortName
 *     char* longName
*/
/* Cython-generated name for an anonymous POD nested in an NVML GPM struct
 * (metric name/unit string pointers, per the field set). ABI — do not hand-edit. */
struct _anon_pod6 {
  char *shortName;
  char *longName;
  char *unit;
};

/* "cy_nvml.pxd":1243
 *     char* unit
 * 
 * ctypedef struct nvmlGpmSupport_t 'nvmlGpmSupport_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int isSupportedDevice
*/
/* Generated mirror of NVML's nvmlGpmSupport_t; layout is ABI — do not hand-edit. */
struct nvmlGpmSupport_t {
  unsigned int version;
  unsigned int isSupportedDevice;
};

/* "cy_nvml.pxd":1247
 *     unsigned int isSupportedDevice
 * 
 * ctypedef struct nvmlMask255_t 'nvmlMask255_t':             # <<<<<<<<<<<<<<
 *     unsigned int mask[8]
 * 
*/
/* Generated mirror of NVML's nvmlMask255_t — 8 x 32-bit words (256-bit mask).
 * ABI — do not hand-edit. */
struct nvmlMask255_t {
  unsigned int mask[8];
};

/* "cy_nvml.pxd":1250
 *     unsigned int mask[8]
 * 
 * ctypedef struct nvmlDevicePowerMizerModes_v1_t 'nvmlDevicePowerMizerModes_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int currentMode
 *     unsigned int mode
*/
/* Generated mirror of NVML's nvmlDevicePowerMizerModes_v1_t; layout is ABI. */
struct nvmlDevicePowerMizerModes_v1_t {
  unsigned int currentMode;
  unsigned int mode;
  unsigned int supportedPowerMizerModes;
};

/* "cy_nvml.pxd":1255
 *     unsigned int supportedPowerMizerModes
 * 
 * ctypedef struct nvmlHostname_v1_t 'nvmlHostname_v1_t':             # <<<<<<<<<<<<<<
 *     char value[64]
 * 
*/
/* Generated mirror of NVML's nvmlHostname_v1_t; layout is ABI — do not hand-edit. */
struct nvmlHostname_v1_t {
  char value[64];
};

/* "cy_nvml.pxd":1258
 *     char value[64]
 * 
 * ctypedef struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t 'nvmlEccSramUniqueUncorrectedErrorEntry_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int unit
 *     unsigned int location
*/
/* Generated mirror of NVML's nvmlEccSramUniqueUncorrectedErrorEntry_v1_t;
 * layout is ABI — do not hand-edit. */
struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t {
  unsigned int unit;
  unsigned int location;
  unsigned int sublocation;
  unsigned int extlocation;
  unsigned int address;
  unsigned int isParity;
  unsigned int count;
};

/* "cy_nvml.pxd":1267
 *     unsigned int count
 * 
 * ctypedef struct nvmlNvLinkInfo_v1_t 'nvmlNvLinkInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int isNvleEnabled
*/
/* Generated mirror of NVML's nvmlNvLinkInfo_v1_t; layout is ABI — do not hand-edit. */
struct nvmlNvLinkInfo_v1_t {
  unsigned int version;
  unsigned int isNvleEnabled;
};

/* "cy_nvml.pxd":1271
 *     unsigned int isNvleEnabled
 * 
 * ctypedef struct nvmlNvlinkFirmwareVersion_t 'nvmlNvlinkFirmwareVersion_t':             # <<<<<<<<<<<<<<
 *     unsigned char ucodeType
 *     unsigned int major
*/
/* Generated mirror of NVML's nvmlNvlinkFirmwareVersion_t; the leading
 * unsigned char followed by unsigned ints implies compiler padding — layout is
 * ABI, do not hand-edit or pack. */
struct nvmlNvlinkFirmwareVersion_t {
  unsigned char ucodeType;
  unsigned int major;
  unsigned int minor;
  unsigned int subMinor;
};

/* "cy_nvml.pxd":1277
 *     unsigned int subMinor
 * 
 * ctypedef union _anon_pod7 '_anon_pod7':             # <<<<<<<<<<<<<<
 *     unsigned char inData[496]
 *     unsigned char outData[496]
*/
/* Cython-generated name for an anonymous 496-byte in/out data union nested in
 * an NVML struct. ABI — do not hand-edit. */
union _anon_pod7 {
  unsigned char inData[496];
  unsigned char outData[496];
};

/* "cy_nvml.pxd":1281
 *     unsigned char outData[496]
 * 
 * ctypedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t 'nvmlPciInfoExt_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t;

/* "cy_nvml.pxd":1282
 * 
 * ctypedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t 'nvmlPciInfoExt_t'
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t;

/* "cy_nvml.pxd":1283
 * ctypedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t 'nvmlPciInfoExt_t'
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t;

/* "cy_nvml.pxd":1284
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t;

/* "cy_nvml.pxd":1285
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlClockOffset_v1_t nvmlClockOffset_t;

/* "cy_nvml.pxd":1286
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t;

/* "cy_nvml.pxd":1287
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t;

/* "cy_nvml.pxd":1288
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t;

/* "cy_nvml.pxd":1289
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'
 * ctypedef struct nvmlPowerValue_v2_t 'nvmlPowerValue_v2_t':
*/
/* Unversioned NVML alias for the current (v1) struct, per cy_nvml.pxd. */
typedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t;

/* "cy_nvml.pxd":1290
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlPowerValue_v2_t 'nvmlPowerValue_v2_t':
 *     unsigned int version
*/
/* Unversioned NVML alias — note this one points at v2, not v1 (per cy_nvml.pxd). */
typedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t;

/* "cy_nvml.pxd":1291
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'
 * ctypedef struct nvmlPowerValue_v2_t 'nvmlPowerValue_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlPowerScopeType_t powerScope
*/
/* Generated mirror of NVML's nvmlPowerValue_v2_t; layout is ABI — do not hand-edit. */
struct nvmlPowerValue_v2_t {
  unsigned int version;
  nvmlPowerScopeType_t powerScope;
  unsigned int powerValueMw;
};

/* "cy_nvml.pxd":1296
 *     unsigned int powerValueMw
 * 
 * ctypedef struct nvmlVgpuTypeIdInfo_v1_t 'nvmlVgpuTypeIdInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int vgpuCount
*/
/* Generated mirror of NVML's nvmlVgpuTypeIdInfo_v1_t. `vgpuTypeIds` points at
 * a buffer of `vgpuCount` entries — NOTE(review): allocation contract is
 * defined in nvml.h; confirm there. ABI — do not hand-edit. */
struct nvmlVgpuTypeIdInfo_v1_t {
  unsigned int version;
  unsigned int vgpuCount;
  nvmlVgpuTypeId_t *vgpuTypeIds;
};

/* "cy_nvml.pxd":1301
 *     nvmlVgpuTypeId_t* vgpuTypeIds
 * 
 * ctypedef struct nvmlVgpuTypeMaxInstance_v1_t 'nvmlVgpuTypeMaxInstance_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlVgpuTypeId_t vgpuTypeId
*/
/* Generated mirror of NVML's nvmlVgpuTypeMaxInstance_v1_t; layout is ABI. */
struct nvmlVgpuTypeMaxInstance_v1_t {
  unsigned int version;
  nvmlVgpuTypeId_t vgpuTypeId;
  unsigned int maxInstancePerGI;
};

/* "cy_nvml.pxd":1306
 *     unsigned int maxInstancePerGI
 * 
 * ctypedef struct nvmlVgpuCreatablePlacementInfo_v1_t 'nvmlVgpuCreatablePlacementInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlVgpuTypeId_t vgpuTypeId
*/
/* Generated mirror of NVML's nvmlVgpuCreatablePlacementInfo_v1_t. `placementIds`
 * points at `count` entries — NOTE(review): confirm buffer ownership in nvml.h.
 * ABI — do not hand-edit. */
struct nvmlVgpuCreatablePlacementInfo_v1_t {
  unsigned int version;
  nvmlVgpuTypeId_t vgpuTypeId;
  unsigned int count;
  unsigned int *placementIds;
  unsigned int placementSize;
};

/* "cy_nvml.pxd":1313
 *     unsigned int placementSize
 * 
 * ctypedef struct nvmlVgpuProcessUtilizationSample_t 'nvmlVgpuProcessUtilizationSample_t':             # <<<<<<<<<<<<<<
 *     nvmlVgpuInstance_t vgpuInstance
 *     unsigned int pid
*/
/* Generated mirror of NVML's nvmlVgpuProcessUtilizationSample_t; layout is ABI. */
struct nvmlVgpuProcessUtilizationSample_t {
  nvmlVgpuInstance_t vgpuInstance;
  unsigned int pid;
  char processName[64];
  unsigned PY_LONG_LONG timeStamp;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
};

/* "cy_nvml.pxd":1323
 *     unsigned int decUtil
 * 
 * ctypedef struct nvmlVgpuProcessUtilizationInfo_v1_t 'nvmlVgpuProcessUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     char processName[64]
 *     unsigned long long timeStamp
*/
/* Generated mirror of NVML's nvmlVgpuProcessUtilizationInfo_v1_t — same data as
 * the sample struct above, reordered, plus JPEG/OFA counters. ABI. */
struct nvmlVgpuProcessUtilizationInfo_v1_t {
  char processName[64];
  unsigned PY_LONG_LONG timeStamp;
  nvmlVgpuInstance_t vgpuInstance;
  unsigned int pid;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
  unsigned int jpgUtil;
  unsigned int ofaUtil;
};

/* "cy_nvml.pxd":1335
 *     unsigned int ofaUtil
 * 
 * ctypedef struct nvmlActiveVgpuInstanceInfo_v1_t 'nvmlActiveVgpuInstanceInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int vgpuCount
*/
/* Generated mirror of NVML's nvmlActiveVgpuInstanceInfo_v1_t. `vgpuInstances`
 * points at `vgpuCount` handles — NOTE(review): confirm buffer contract in
 * nvml.h. ABI — do not hand-edit. */
struct nvmlActiveVgpuInstanceInfo_v1_t {
  unsigned int version;
  unsigned int vgpuCount;
  nvmlVgpuInstance_t *vgpuInstances;
};

/* "cy_nvml.pxd":1340
 *     nvmlVgpuInstance_t* vgpuInstances
 * 
 * ctypedef struct nvmlEncoderSessionInfo_t 'nvmlEncoderSessionInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int sessionId
 *     unsigned int pid
*/
/* Generated mirror of NVML's nvmlEncoderSessionInfo_t; layout is ABI. */
struct nvmlEncoderSessionInfo_t {
  unsigned int sessionId;
  unsigned int pid;
  nvmlVgpuInstance_t vgpuInstance;
  nvmlEncoderType_t codecType;
  unsigned int hResolution;
  unsigned int vResolution;
  unsigned int averageFps;
  unsigned int averageLatency;
};

/* "cy_nvml.pxd":1350
 *     unsigned int averageLatency
 * 
 * ctypedef struct nvmlFBCSessionInfo_t 'nvmlFBCSessionInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int sessionId
 *     unsigned int pid
*/
/* Generated mirror of NVML's frame-buffer-capture (FBC) session info struct
 * (cy_nvml.pxd); layout presumably must match nvml.h — do not hand-edit. */
struct nvmlFBCSessionInfo_t {
  unsigned int sessionId;
  unsigned int pid;
  nvmlVgpuInstance_t vgpuInstance;
  unsigned int displayOrdinal;
  nvmlFBCSessionType_t sessionType;
  unsigned int sessionFlags;
  unsigned int hMaxResolution;
  unsigned int vMaxResolution;
  unsigned int hResolution;
  unsigned int vResolution;
  unsigned int averageFPS;
  unsigned int averageLatency;
};

/* "cy_nvml.pxd":1364
 *     unsigned int averageLatency
 * 
 * ctypedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t 'nvmlVgpuHeterogeneousMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t;

/* "cy_nvml.pxd":1365
 * 
 * ctypedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t 'nvmlVgpuHeterogeneousMode_t'
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t;

/* "cy_nvml.pxd":1366
 * ctypedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t 'nvmlVgpuHeterogeneousMode_t'
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
*/
/* Unversioned NVML alias — note this one tracks v2, not v1. */
typedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t;

/* "cy_nvml.pxd":1367
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t;

/* "cy_nvml.pxd":1368
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t;

/* "cy_nvml.pxd":1369
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t;

/* "cy_nvml.pxd":1370
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'
 * ctypedef struct nvmlGpuFabricInfo_t 'nvmlGpuFabricInfo_t':
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t;

/* "cy_nvml.pxd":1371
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlGpuFabricInfo_t 'nvmlGpuFabricInfo_t':
 *     unsigned char clusterUuid[16]
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t;

/* "cy_nvml.pxd":1372
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'
 * ctypedef struct nvmlGpuFabricInfo_t 'nvmlGpuFabricInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned char clusterUuid[16]
 *     nvmlReturn_t status
*/
/* Generated mirror of NVML's GPU-fabric info struct, original (unversioned)
 * revision (cy_nvml.pxd); layout presumably must match nvml.h — do not
 * hand-edit. */
struct nvmlGpuFabricInfo_t {
  unsigned char clusterUuid[16];
  nvmlReturn_t status;
  unsigned int cliqueId;
  nvmlGpuFabricState_t state;
};

/* "cy_nvml.pxd":1378
 *     nvmlGpuFabricState_t state
 * 
 * ctypedef struct nvmlGpuFabricInfo_v2_t 'nvmlGpuFabricInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char clusterUuid[16]
*/
/* Generated mirror of the v2 GPU-fabric info struct (cy_nvml.pxd): adds a
 * leading version field and trailing healthMask relative to the unversioned
 * form. Do not hand-edit. */
struct nvmlGpuFabricInfo_v2_t {
  unsigned int version;
  unsigned char clusterUuid[16];
  nvmlReturn_t status;
  unsigned int cliqueId;
  nvmlGpuFabricState_t state;
  unsigned int healthMask;
};

/* "cy_nvml.pxd":1386
 *     unsigned int healthMask
 * 
 * ctypedef struct nvmlGpuFabricInfo_v3_t 'nvmlGpuFabricInfo_v3_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char clusterUuid[16]
*/
/* Generated mirror of the v3 GPU-fabric info struct (cy_nvml.pxd): v2 plus a
 * trailing healthSummary byte. Aliased below as nvmlGpuFabricInfoV_t. Do not
 * hand-edit. */
struct nvmlGpuFabricInfo_v3_t {
  unsigned int version;
  unsigned char clusterUuid[16];
  nvmlReturn_t status;
  unsigned int cliqueId;
  nvmlGpuFabricState_t state;
  unsigned int healthMask;
  unsigned char healthSummary;
};

/* "cy_nvml.pxd":1395
 *     unsigned char healthSummary
 * 
 * ctypedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t 'nvmlSystemDriverBranchInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t;

/* "cy_nvml.pxd":1396
 * 
 * ctypedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t 'nvmlSystemDriverBranchInfo_t'
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlTemperature_v1_t nvmlTemperature_t;

/* "cy_nvml.pxd":1397
 * ctypedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t 'nvmlSystemDriverBranchInfo_t'
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t;

/* "cy_nvml.pxd":1398
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t;

/* "cy_nvml.pxd":1399
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t;

/* "cy_nvml.pxd":1400
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t;

/* "cy_nvml.pxd":1401
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t;

/* "cy_nvml.pxd":1402
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t;

/* "cy_nvml.pxd":1403
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t;

/* "cy_nvml.pxd":1404
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'
 * ctypedef struct nvmlEventData_t 'nvmlEventData_t':
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlRepairStatus_v1_t nvmlRepairStatus_t;

/* "cy_nvml.pxd":1405
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlEventData_t 'nvmlEventData_t':
 *     nvmlDevice_t device
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlPdi_v1_t nvmlPdi_t;

/* "cy_nvml.pxd":1406
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'
 * ctypedef struct nvmlEventData_t 'nvmlEventData_t':             # <<<<<<<<<<<<<<
 *     nvmlDevice_t device
 *     unsigned long long eventType
*/
/* Generated mirror of NVML's event payload struct (cy_nvml.pxd);
 * layout presumably must match nvml.h — do not hand-edit. */
struct nvmlEventData_t {
  nvmlDevice_t device;
  unsigned PY_LONG_LONG eventType;  /* `unsigned long long` in the .pxd */
  unsigned PY_LONG_LONG eventData;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
};

/* "cy_nvml.pxd":1413
 *     unsigned int computeInstanceId
 * 
 * ctypedef struct nvmlSystemEventSetCreateRequest_v1_t 'nvmlSystemEventSetCreateRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlSystemEventSet_t set
*/
/* Generated mirror of the v1 system-event-set create request (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlSystemEventSetCreateRequest_v1_t {
  unsigned int version;
  nvmlSystemEventSet_t set;
};

/* "cy_nvml.pxd":1417
 *     nvmlSystemEventSet_t set
 * 
 * ctypedef struct nvmlSystemEventSetFreeRequest_v1_t 'nvmlSystemEventSetFreeRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlSystemEventSet_t set
*/
/* Generated mirror of the v1 system-event-set free request (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlSystemEventSetFreeRequest_v1_t {
  unsigned int version;
  nvmlSystemEventSet_t set;
};

/* "cy_nvml.pxd":1421
 *     nvmlSystemEventSet_t set
 * 
 * ctypedef struct nvmlSystemRegisterEventRequest_v1_t 'nvmlSystemRegisterEventRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long eventTypes
*/
/* Generated mirror of the v1 system event-registration request (cy_nvml.pxd);
 * eventTypes is a bitmask per the NVML API — do not hand-edit. */
struct nvmlSystemRegisterEventRequest_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG eventTypes;
  nvmlSystemEventSet_t set;
};

/* "cy_nvml.pxd":1426
 *     nvmlSystemEventSet_t set
 * 
 * ctypedef struct nvmlExcludedDeviceInfo_t 'nvmlExcludedDeviceInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlPciInfo_t pciInfo
 *     char uuid[80]
*/
/* Generated mirror of NVML's excluded-device info struct (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlExcludedDeviceInfo_t {
  nvmlPciInfo_t pciInfo;
  char uuid[80];
};

/* "cy_nvml.pxd":1430
 *     char uuid[80]
 * 
 * ctypedef struct nvmlProcessDetailList_v1_t 'nvmlProcessDetailList_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int mode
*/
/* Generated mirror of the v1 process-detail list (cy_nvml.pxd). procArray
 * presumably holds numProcArrayEntries elements — verify against NVML docs.
 * Do not hand-edit. */
struct nvmlProcessDetailList_v1_t {
  unsigned int version;
  unsigned int mode;
  unsigned int numProcArrayEntries;
  nvmlProcessDetail_v1_t *procArray;
};

/* "cy_nvml.pxd":1436
 *     nvmlProcessDetail_v1_t* procArray
 * 
 * ctypedef struct nvmlBridgeChipHierarchy_t 'nvmlBridgeChipHierarchy_t':             # <<<<<<<<<<<<<<
 *     unsigned char bridgeCount
 *     nvmlBridgeChipInfo_t bridgeChipInfo[128]
*/
/* Generated mirror of NVML's PCIe bridge-chip hierarchy struct (cy_nvml.pxd);
 * fixed 128-entry table — do not hand-edit. */
struct nvmlBridgeChipHierarchy_t {
  unsigned char bridgeCount;
  nvmlBridgeChipInfo_t bridgeChipInfo[128];
};

/* "cy_nvml.pxd":1440
 *     nvmlBridgeChipInfo_t bridgeChipInfo[128]
 * 
 * ctypedef struct nvmlSample_t 'nvmlSample_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timeStamp
 *     nvmlValue_t sampleValue
*/
/* Generated mirror of NVML's generic timestamped sample (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlSample_t {
  unsigned PY_LONG_LONG timeStamp;
  nvmlValue_t sampleValue;
};

/* "cy_nvml.pxd":1444
 *     nvmlValue_t sampleValue
 * 
 * ctypedef struct nvmlVgpuInstanceUtilizationSample_t 'nvmlVgpuInstanceUtilizationSample_t':             # <<<<<<<<<<<<<<
 *     nvmlVgpuInstance_t vgpuInstance
 *     unsigned long long timeStamp
*/
/* Generated mirror of NVML's per-vGPU-instance utilization sample
 * (cy_nvml.pxd); do not hand-edit. */
struct nvmlVgpuInstanceUtilizationSample_t {
  nvmlVgpuInstance_t vgpuInstance;
  unsigned PY_LONG_LONG timeStamp;
  nvmlValue_t smUtil;
  nvmlValue_t memUtil;
  nvmlValue_t encUtil;
  nvmlValue_t decUtil;
};

/* "cy_nvml.pxd":1452
 *     nvmlValue_t decUtil
 * 
 * ctypedef struct nvmlVgpuInstanceUtilizationInfo_v1_t 'nvmlVgpuInstanceUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timeStamp
 *     nvmlVgpuInstance_t vgpuInstance
*/
/* Generated mirror of the v1 per-vGPU-instance utilization record
 * (cy_nvml.pxd); adds jpgUtil/ofaUtil over the unversioned sample.
 * Do not hand-edit. */
struct nvmlVgpuInstanceUtilizationInfo_v1_t {
  unsigned PY_LONG_LONG timeStamp;
  nvmlVgpuInstance_t vgpuInstance;
  nvmlValue_t smUtil;
  nvmlValue_t memUtil;
  nvmlValue_t encUtil;
  nvmlValue_t decUtil;
  nvmlValue_t jpgUtil;
  nvmlValue_t ofaUtil;
};

/* "cy_nvml.pxd":1462
 *     nvmlValue_t ofaUtil
 * 
 * ctypedef struct nvmlFieldValue_t 'nvmlFieldValue_t':             # <<<<<<<<<<<<<<
 *     unsigned int fieldId
 *     unsigned int scopeId
*/
/* Generated mirror of NVML's field-value query record (cy_nvml.pxd).
 * Note timestamp/latencyUsec are signed (PY_LONG_LONG, i.e. `long long` in
 * the .pxd) unlike the unsigned timestamps elsewhere. Do not hand-edit. */
struct nvmlFieldValue_t {
  unsigned int fieldId;
  unsigned int scopeId;
  PY_LONG_LONG timestamp;
  PY_LONG_LONG latencyUsec;
  nvmlValueType_t valueType;
  nvmlReturn_t nvmlReturn;
  nvmlValue_t value;
};

/* "cy_nvml.pxd":1471
 *     nvmlValue_t value
 * 
 * ctypedef struct nvmlGpuThermalSettings_t 'nvmlGpuThermalSettings_t':             # <<<<<<<<<<<<<<
 *     unsigned int count
 *     _anon_pod0 sensor[3]
*/
/* Generated mirror of NVML's thermal settings struct (cy_nvml.pxd).
 * _anon_pod0 is Cython's name for the anonymous per-sensor struct in nvml.h.
 * Do not hand-edit. */
struct nvmlGpuThermalSettings_t {
  unsigned int count;
  _anon_pod0 sensor[3];
};

/* "cy_nvml.pxd":1475
 *     _anon_pod0 sensor[3]
 * 
 * ctypedef struct nvmlUUID_v1_t 'nvmlUUID_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int type
*/
/* Generated mirror of the v1 UUID struct (cy_nvml.pxd); aliased below as
 * nvmlUUID_t. Do not hand-edit. */
struct nvmlUUID_v1_t {
  unsigned int version;
  unsigned int type;
  nvmlUUIDValue_t value;
};

/* "cy_nvml.pxd":1480
 *     nvmlUUIDValue_t value
 * 
 * ctypedef struct nvmlClkMonStatus_t 'nvmlClkMonStatus_t':             # <<<<<<<<<<<<<<
 *     unsigned int bGlobalStatus
 *     unsigned int clkMonListSize
*/
/* Generated mirror of NVML's clock-monitor status struct (cy_nvml.pxd);
 * fixed 32-entry fault table — do not hand-edit. */
struct nvmlClkMonStatus_t {
  unsigned int bGlobalStatus;
  unsigned int clkMonListSize;
  nvmlClkMonFaultInfo_t clkMonList[32];
};

/* "cy_nvml.pxd":1485
 *     nvmlClkMonFaultInfo_t clkMonList[32]
 * 
 * ctypedef struct nvmlProcessesUtilizationInfo_v1_t 'nvmlProcessesUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int processSamplesCount
*/
/* Generated mirror of the v1 processes-utilization query struct (cy_nvml.pxd).
 * procUtilArray presumably holds processSamplesCount entries — verify against
 * NVML docs. Do not hand-edit. */
struct nvmlProcessesUtilizationInfo_v1_t {
  unsigned int version;
  unsigned int processSamplesCount;
  unsigned PY_LONG_LONG lastSeenTimeStamp;
  nvmlProcessUtilizationInfo_v1_t *procUtilArray;
};

/* "cy_nvml.pxd":1491
 *     nvmlProcessUtilizationInfo_v1_t* procUtilArray
 * 
 * ctypedef struct nvmlGpuDynamicPstatesInfo_t 'nvmlGpuDynamicPstatesInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int flags
 *     _anon_pod1 utilization[8]
*/
/* Generated mirror of NVML's dynamic P-states info struct (cy_nvml.pxd).
 * _anon_pod1 is Cython's name for the anonymous per-domain utilization struct.
 * Do not hand-edit. */
struct nvmlGpuDynamicPstatesInfo_t {
  unsigned int flags;
  _anon_pod1 utilization[8];
};

/* "cy_nvml.pxd":1495
 *     _anon_pod1 utilization[8]
 * 
 * ctypedef union nvmlVgpuSchedulerParams_t 'nvmlVgpuSchedulerParams_t':             # <<<<<<<<<<<<<<
 *     _anon_pod2 vgpuSchedDataWithARR
 *     _anon_pod3 vgpuSchedData
*/
/* Generated mirror of NVML's vGPU scheduler parameter union (cy_nvml.pxd):
 * one arm for adaptive-round-robin (ARR) data, one for plain scheduler data
 * (_anon_pod2/_anon_pod3 are Cython's names for the anonymous structs).
 * Do not hand-edit. */
union nvmlVgpuSchedulerParams_t {
  _anon_pod2 vgpuSchedDataWithARR;
  _anon_pod3 vgpuSchedData;
};

/* "cy_nvml.pxd":1499
 *     _anon_pod3 vgpuSchedData
 * 
 * ctypedef union nvmlVgpuSchedulerSetParams_t 'nvmlVgpuSchedulerSetParams_t':             # <<<<<<<<<<<<<<
 *     _anon_pod4 vgpuSchedDataWithARR
 *     _anon_pod5 vgpuSchedData
*/
/* Generated mirror of NVML's vGPU scheduler set-parameter union (cy_nvml.pxd);
 * mirrors nvmlVgpuSchedulerParams_t but for the setter path. Do not hand-edit. */
union nvmlVgpuSchedulerSetParams_t {
  _anon_pod4 vgpuSchedDataWithARR;
  _anon_pod5 vgpuSchedData;
};

/* "cy_nvml.pxd":1503
 *     _anon_pod5 vgpuSchedData
 * 
 * ctypedef struct nvmlVgpuLicenseInfo_t 'nvmlVgpuLicenseInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned char isLicensed
 *     nvmlVgpuLicenseExpiry_t licenseExpiry
*/
/* Generated mirror of NVML's vGPU license info struct (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlVgpuLicenseInfo_t {
  unsigned char isLicensed;
  nvmlVgpuLicenseExpiry_t licenseExpiry;
  unsigned int currentState;
};

/* "cy_nvml.pxd":1508
 *     unsigned int currentState
 * 
 * ctypedef struct nvmlGridLicensableFeature_t 'nvmlGridLicensableFeature_t':             # <<<<<<<<<<<<<<
 *     nvmlGridLicenseFeatureCode_t featureCode
 *     unsigned int featureState
*/
/* Generated mirror of NVML's GRID licensable-feature struct (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlGridLicensableFeature_t {
  nvmlGridLicenseFeatureCode_t featureCode;
  unsigned int featureState;
  char licenseInfo[128];
  char productName[128];
  unsigned int featureEnabled;
  nvmlGridLicenseExpiry_t licenseExpiry;
};

/* "cy_nvml.pxd":1516
 *     nvmlGridLicenseExpiry_t licenseExpiry
 * 
 * ctypedef struct nvmlUnitFanSpeeds_t 'nvmlUnitFanSpeeds_t':             # <<<<<<<<<<<<<<
 *     nvmlUnitFanInfo_t fans[24]
 *     unsigned int count
*/
/* Generated mirror of NVML's unit fan-speed table (cy_nvml.pxd); fixed
 * 24-entry array with count of valid entries — do not hand-edit. */
struct nvmlUnitFanSpeeds_t {
  nvmlUnitFanInfo_t fans[24];
  unsigned int count;
};

/* "cy_nvml.pxd":1520
 *     unsigned int count
 * 
 * ctypedef struct nvmlSystemEventSetWaitRequest_v1_t 'nvmlSystemEventSetWaitRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int timeoutms
*/
/* Generated mirror of the v1 system-event wait request (cy_nvml.pxd).
 * data presumably points at a caller buffer of dataSize entries with numEvent
 * filled on return — verify against NVML docs. Do not hand-edit. */
struct nvmlSystemEventSetWaitRequest_v1_t {
  unsigned int version;
  unsigned int timeoutms;
  nvmlSystemEventSet_t set;
  nvmlSystemEventData_v1_t *data;
  unsigned int dataSize;
  unsigned int numEvent;
};

/* "cy_nvml.pxd":1528
 *     unsigned int numEvent
 * 
 * ctypedef struct nvmlVgpuPgpuMetadata_t 'nvmlVgpuPgpuMetadata_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int revision
*/
/* Generated mirror of NVML's physical-GPU (pGPU) metadata struct
 * (cy_nvml.pxd). opaqueData[4] is presumably a variable-length trailer sized
 * by opaqueDataSize, in nvml.h's pre-C99 style — verify against NVML docs.
 * Do not hand-edit. */
struct nvmlVgpuPgpuMetadata_t {
  unsigned int version;
  unsigned int revision;
  char hostDriverVersion[80];
  unsigned int pgpuVirtualizationCaps;
  unsigned int reserved[5];
  nvmlVgpuVersion_t hostSupportedVgpuRange;
  unsigned int opaqueDataSize;
  char opaqueData[4];
};

/* "cy_nvml.pxd":1538
 *     char opaqueData[4]
 * 
 * ctypedef struct nvmlGpuInstanceInfo_t 'nvmlGpuInstanceInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlDevice_t device
 *     unsigned int id
*/
/* Generated mirror of NVML's MIG GPU-instance info struct (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlGpuInstanceInfo_t {
  nvmlDevice_t device;
  unsigned int id;
  unsigned int profileId;
  nvmlGpuInstancePlacement_t placement;
};

/* "cy_nvml.pxd":1544
 *     nvmlGpuInstancePlacement_t placement
 * 
 * ctypedef struct nvmlComputeInstanceInfo_t 'nvmlComputeInstanceInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlDevice_t device
 *     nvmlGpuInstance_t gpuInstance
*/
/* Generated mirror of NVML's MIG compute-instance info struct (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlComputeInstanceInfo_t {
  nvmlDevice_t device;
  nvmlGpuInstance_t gpuInstance;
  unsigned int id;
  unsigned int profileId;
  nvmlComputeInstancePlacement_t placement;
};

/* "cy_nvml.pxd":1551
 *     nvmlComputeInstancePlacement_t placement
 * 
 * ctypedef struct nvmlGpmMetric_t 'nvmlGpmMetric_t':             # <<<<<<<<<<<<<<
 *     unsigned int metricId
 *     nvmlReturn_t nvmlReturn
*/
/* Generated mirror of NVML's GPM metric struct (cy_nvml.pxd). _anon_pod6 is
 * Cython's name for the anonymous metricInfo struct in nvml.h. Do not
 * hand-edit. */
struct nvmlGpmMetric_t {
  unsigned int metricId;
  nvmlReturn_t nvmlReturn;
  double value;
  _anon_pod6 metricInfo;
};

/* "cy_nvml.pxd":1557
 *     _anon_pod6 metricInfo
 * 
 * ctypedef struct nvmlWorkloadPowerProfileInfo_v1_t 'nvmlWorkloadPowerProfileInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int profileId
*/
/* Generated mirror of the v1 workload power-profile info struct (cy_nvml.pxd);
 * do not hand-edit. */
struct nvmlWorkloadPowerProfileInfo_v1_t {
  unsigned int version;
  unsigned int profileId;
  unsigned int priority;
  nvmlMask255_t conflictingMask;
};

/* "cy_nvml.pxd":1563
 *     nvmlMask255_t conflictingMask
 * 
 * ctypedef struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t 'nvmlWorkloadPowerProfileCurrentProfiles_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlMask255_t perfProfilesMask
*/
/* Generated mirror of the v1 current workload power-profiles struct
 * (cy_nvml.pxd); three 255-bit profile masks — do not hand-edit. */
struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t {
  unsigned int version;
  nvmlMask255_t perfProfilesMask;
  nvmlMask255_t requestedProfilesMask;
  nvmlMask255_t enforcedProfilesMask;
};

/* "cy_nvml.pxd":1569
 *     nvmlMask255_t enforcedProfilesMask
 * 
 * ctypedef struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t 'nvmlWorkloadPowerProfileRequestedProfiles_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlMask255_t requestedProfilesMask
*/
/* Generated mirror of the v1 requested workload power-profiles struct
 * (cy_nvml.pxd); do not hand-edit. */
struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t {
  unsigned int version;
  nvmlMask255_t requestedProfilesMask;
};

/* "cy_nvml.pxd":1573
 *     nvmlMask255_t requestedProfilesMask
 * 
 * ctypedef struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t 'nvmlEccSramUniqueUncorrectedErrorCounts_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int entryCount
*/
/* Generated mirror of the v1 ECC SRAM unique-uncorrected-error counts struct
 * (cy_nvml.pxd). entries presumably points at entryCount records — verify
 * against NVML docs. Do not hand-edit. */
struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t {
  unsigned int version;
  unsigned int entryCount;
  nvmlEccSramUniqueUncorrectedErrorEntry_v1_t *entries;
};

/* "cy_nvml.pxd":1578
 *     nvmlEccSramUniqueUncorrectedErrorEntry_v1_t* entries
 * 
 * ctypedef struct nvmlNvlinkFirmwareInfo_t 'nvmlNvlinkFirmwareInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlNvlinkFirmwareVersion_t firmwareVersion[100]
 *     unsigned int numValidEntries
*/
/* Generated mirror of NVML's NVLink firmware info struct (cy_nvml.pxd);
 * fixed 100-entry version table with a valid-entry count — do not hand-edit. */
struct nvmlNvlinkFirmwareInfo_t {
  nvmlNvlinkFirmwareVersion_t firmwareVersion[100];
  unsigned int numValidEntries;
};

/* "cy_nvml.pxd":1582
 *     unsigned int numValidEntries
 * 
 * ctypedef struct nvmlPRMTLV_v1_t 'nvmlPRMTLV_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned dataSize
 *     unsigned status
*/
/* Generated mirror of the v1 PRM TLV struct (cy_nvml.pxd). _anon_pod_member0
 * is Cython's placeholder for an anonymous member (_anon_pod7) in the NVML
 * declaration. Do not hand-edit. */
struct nvmlPRMTLV_v1_t {
  unsigned int dataSize;
  unsigned int status;
  _anon_pod7 _anon_pod_member0;
};

/* "cy_nvml.pxd":1587
 *     _anon_pod7 _anon_pod_member0
 * 
 * ctypedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t 'nvmlVgpuTypeIdInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t;

/* "cy_nvml.pxd":1588
 * 
 * ctypedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t 'nvmlVgpuTypeIdInfo_t'
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'
 * ctypedef struct nvmlVgpuProcessesUtilizationInfo_v1_t 'nvmlVgpuProcessesUtilizationInfo_v1_t':
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t;

/* "cy_nvml.pxd":1589
 * ctypedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t 'nvmlVgpuTypeIdInfo_t'
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuProcessesUtilizationInfo_v1_t 'nvmlVgpuProcessesUtilizationInfo_v1_t':
 *     unsigned int version
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t;

/* "cy_nvml.pxd":1590
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'
 * ctypedef struct nvmlVgpuProcessesUtilizationInfo_v1_t 'nvmlVgpuProcessesUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int vgpuProcessCount
*/
/* Generated mirror of the v1 vGPU processes-utilization query struct
 * (cy_nvml.pxd). vgpuProcUtilArray presumably holds vgpuProcessCount entries —
 * verify against NVML docs. Do not hand-edit. */
struct nvmlVgpuProcessesUtilizationInfo_v1_t {
  unsigned int version;
  unsigned int vgpuProcessCount;
  unsigned PY_LONG_LONG lastSeenTimeStamp;
  nvmlVgpuProcessUtilizationInfo_v1_t *vgpuProcUtilArray;
};

/* "cy_nvml.pxd":1596
 *     nvmlVgpuProcessUtilizationInfo_v1_t* vgpuProcUtilArray
 * 
 * ctypedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t 'nvmlActiveVgpuInstanceInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t;

/* "cy_nvml.pxd":1597
 * 
 * ctypedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t 'nvmlActiveVgpuInstanceInfo_t'
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
*/
/* "V"-suffixed alias tracking the newest fabric-info revision (v3 here). */
typedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t;

/* "cy_nvml.pxd":1598
 * ctypedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t 'nvmlActiveVgpuInstanceInfo_t'
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t;

/* "cy_nvml.pxd":1599
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t;

/* "cy_nvml.pxd":1600
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'
 * ctypedef struct nvmlVgpuInstancesUtilizationInfo_v1_t 'nvmlVgpuInstancesUtilizationInfo_v1_t':
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t;

/* "cy_nvml.pxd":1601
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuInstancesUtilizationInfo_v1_t 'nvmlVgpuInstancesUtilizationInfo_v1_t':
 *     unsigned int version
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t;

/* "cy_nvml.pxd":1602
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'
 * ctypedef struct nvmlVgpuInstancesUtilizationInfo_v1_t 'nvmlVgpuInstancesUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlValueType_t sampleValType
*/
/* Generated mirror of the v1 vGPU instances-utilization query struct
 * (cy_nvml.pxd). vgpuUtilArray presumably holds vgpuInstanceCount entries —
 * verify against NVML docs. Do not hand-edit. */
struct nvmlVgpuInstancesUtilizationInfo_v1_t {
  unsigned int version;
  nvmlValueType_t sampleValType;
  unsigned int vgpuInstanceCount;
  unsigned PY_LONG_LONG lastSeenTimeStamp;
  nvmlVgpuInstanceUtilizationInfo_v1_t *vgpuUtilArray;
};

/* "cy_nvml.pxd":1609
 *     nvmlVgpuInstanceUtilizationInfo_v1_t* vgpuUtilArray
 * 
 * ctypedef nvmlUUID_v1_t nvmlUUID_t 'nvmlUUID_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t 'nvmlProcessesUtilizationInfo_t'
 * ctypedef struct nvmlVgpuSchedulerLog_t 'nvmlVgpuSchedulerLog_t':
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlUUID_v1_t nvmlUUID_t;

/* "cy_nvml.pxd":1610
 * 
 * ctypedef nvmlUUID_v1_t nvmlUUID_t 'nvmlUUID_t'
 * ctypedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t 'nvmlProcessesUtilizationInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuSchedulerLog_t 'nvmlVgpuSchedulerLog_t':
 *     unsigned int engineId
*/
/* Unversioned NVML alias for the current (v1) versioned struct. */
typedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t;

/* "cy_nvml.pxd":1611
 * ctypedef nvmlUUID_v1_t nvmlUUID_t 'nvmlUUID_t'
 * ctypedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t 'nvmlProcessesUtilizationInfo_t'
 * ctypedef struct nvmlVgpuSchedulerLog_t 'nvmlVgpuSchedulerLog_t':             # <<<<<<<<<<<<<<
 *     unsigned int engineId
 *     unsigned int schedulerPolicy
*/
/* Cython-emitted declaration of NVML's `nvmlVgpuSchedulerLog_t`
 * (cy_nvml.pxd:1611).  Generated code -- do not hand-edit.
 */
struct nvmlVgpuSchedulerLog_t {
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
  unsigned int entriesCount;
  nvmlVgpuSchedulerLogEntry_t logEntries[200];  /* fixed capacity; entriesCount presumably gives the valid prefix -- confirm against NVML docs */
};

/* "cy_nvml.pxd":1619
 *     nvmlVgpuSchedulerLogEntry_t logEntries[200]
 * 
 * ctypedef struct nvmlVgpuSchedulerGetState_t 'nvmlVgpuSchedulerGetState_t':             # <<<<<<<<<<<<<<
 *     unsigned int schedulerPolicy
 *     unsigned int arrMode
*/
/* Cython-emitted declaration of NVML's `nvmlVgpuSchedulerGetState_t`
 * (cy_nvml.pxd:1619).  Generated code -- do not hand-edit.
 */
struct nvmlVgpuSchedulerGetState_t {
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
};

/* "cy_nvml.pxd":1624
 *     nvmlVgpuSchedulerParams_t schedulerParams
 * 
 * ctypedef struct nvmlVgpuSchedulerStateInfo_v1_t 'nvmlVgpuSchedulerStateInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int engineId
*/
/* Cython-emitted declaration of NVML's `nvmlVgpuSchedulerStateInfo_v1_t`
 * (cy_nvml.pxd:1624).  Same fields as nvmlVgpuSchedulerGetState_t plus a
 * version/engineId prefix.  Generated code -- do not hand-edit.
 */
struct nvmlVgpuSchedulerStateInfo_v1_t {
  unsigned int version;
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
};

/* "cy_nvml.pxd":1631
 *     nvmlVgpuSchedulerParams_t schedulerParams
 * 
 * ctypedef struct nvmlVgpuSchedulerLogInfo_v1_t 'nvmlVgpuSchedulerLogInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int engineId
*/
/* Cython-emitted declaration of NVML's `nvmlVgpuSchedulerLogInfo_v1_t`
 * (cy_nvml.pxd:1631).  Versioned variant carrying the same log payload as
 * nvmlVgpuSchedulerLog_t.  Generated code -- do not hand-edit.
 */
struct nvmlVgpuSchedulerLogInfo_v1_t {
  unsigned int version;
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
  unsigned int entriesCount;
  nvmlVgpuSchedulerLogEntry_t logEntries[200];  /* fixed capacity, same as nvmlVgpuSchedulerLog_t */
};

/* "cy_nvml.pxd":1640
 *     nvmlVgpuSchedulerLogEntry_t logEntries[200]
 * 
 * ctypedef struct nvmlVgpuSchedulerSetState_t 'nvmlVgpuSchedulerSetState_t':             # <<<<<<<<<<<<<<
 *     unsigned int schedulerPolicy
 *     unsigned int enableARRMode
*/
/* Cython-emitted declaration of NVML's `nvmlVgpuSchedulerSetState_t`
 * (cy_nvml.pxd:1640).  Note: uses nvmlVgpuSchedulerSetParams_t (not the
 * ..Params_t used by the Get/state-info structs).  Generated code -- do not
 * hand-edit.
 */
struct nvmlVgpuSchedulerSetState_t {
  unsigned int schedulerPolicy;
  unsigned int enableARRMode;
  nvmlVgpuSchedulerSetParams_t schedulerParams;
};

/* "cy_nvml.pxd":1645
 *     nvmlVgpuSchedulerSetParams_t schedulerParams
 * 
 * ctypedef struct nvmlVgpuSchedulerState_v1_t 'nvmlVgpuSchedulerState_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int engineId
*/
/* Cython-emitted declaration of NVML's `nvmlVgpuSchedulerState_v1_t`
 * (cy_nvml.pxd:1645).  Versioned variant of nvmlVgpuSchedulerSetState_t with
 * a version/engineId prefix.  Generated code -- do not hand-edit.
 */
struct nvmlVgpuSchedulerState_v1_t {
  unsigned int version;
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int enableARRMode;
  nvmlVgpuSchedulerSetParams_t schedulerParams;
};

/* "cy_nvml.pxd":1652
 *     nvmlVgpuSchedulerSetParams_t schedulerParams
 * 
 * ctypedef struct nvmlGridLicensableFeatures_t 'nvmlGridLicensableFeatures_t':             # <<<<<<<<<<<<<<
 *     int isGridLicenseSupported
 *     unsigned int licensableFeaturesCount
*/
/* Cython-emitted declaration of NVML's `nvmlGridLicensableFeatures_t`
 * (cy_nvml.pxd:1652).  Generated code -- do not hand-edit.
 */
struct nvmlGridLicensableFeatures_t {
  int isGridLicenseSupported;  /* signed int in the NVML header (boolean-like) */
  unsigned int licensableFeaturesCount;
  nvmlGridLicensableFeature_t gridLicensableFeatures[3];  /* fixed capacity of 3, per the .pxd declaration */
};

/* "cy_nvml.pxd":1657
 *     nvmlGridLicensableFeature_t gridLicensableFeatures[3]
 * 
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_t 'nvmlSystemEventSetWaitRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlGpmMetricsGet_t 'nvmlGpmMetricsGet_t':
 *     unsigned int version
*/
typedef nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_t;

/* "cy_nvml.pxd":1658
 * 
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_t 'nvmlSystemEventSetWaitRequest_t'
 * ctypedef struct nvmlGpmMetricsGet_t 'nvmlGpmMetricsGet_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int numMetrics
*/
/* Cython-emitted declaration of NVML's `nvmlGpmMetricsGet_t`
 * (cy_nvml.pxd:1658).  Generated code -- do not hand-edit.
 */
struct nvmlGpmMetricsGet_t {
  unsigned int version;
  unsigned int numMetrics;
  nvmlGpmSample_t sample1;  /* two samples, presumably the interval endpoints for GPM metrics -- confirm against NVML docs */
  nvmlGpmSample_t sample2;
  nvmlGpmMetric_t metrics[210];  /* fixed capacity of 210, per the .pxd declaration */
};

/* "cy_nvml.pxd":1665
 *     nvmlGpmMetric_t metrics[210]
 * 
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t 'nvmlWorkloadPowerProfileInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
*/
typedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t;

/* "cy_nvml.pxd":1666
 * 
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t 'nvmlWorkloadPowerProfileInfo_t'
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'
*/
typedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t;

/* "cy_nvml.pxd":1667
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t 'nvmlWorkloadPowerProfileInfo_t'
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'
 * ctypedef struct nvmlNvLinkInfo_v2_t 'nvmlNvLinkInfo_v2_t':
*/
typedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t;

/* "cy_nvml.pxd":1668
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlNvLinkInfo_v2_t 'nvmlNvLinkInfo_v2_t':
 *     unsigned int version
*/
typedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t;

/* "cy_nvml.pxd":1669
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'
 * ctypedef struct nvmlNvLinkInfo_v2_t 'nvmlNvLinkInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int isNvleEnabled
*/
/* Cython-emitted declaration of NVML's `nvmlNvLinkInfo_v2_t`
 * (cy_nvml.pxd:1669).  Generated code -- do not hand-edit.
 */
struct nvmlNvLinkInfo_v2_t {
  unsigned int version;
  unsigned int isNvleEnabled;
  nvmlNvlinkFirmwareInfo_t firmwareInfo;
};

/* "cy_nvml.pxd":1674
 *     nvmlNvlinkFirmwareInfo_t firmwareInfo
 * 
 * ctypedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t 'nvmlVgpuProcessesUtilizationInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
*/
typedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t;

/* "cy_nvml.pxd":1675
 * 
 * ctypedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t 'nvmlVgpuProcessesUtilizationInfo_t'
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
*/
typedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t;

/* "cy_nvml.pxd":1676
 * ctypedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t 'nvmlVgpuProcessesUtilizationInfo_t'
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'
*/
typedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t;

/* "cy_nvml.pxd":1677
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'
 * ctypedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t 'nvmlWorkloadPowerProfileProfilesInfo_v1_t':
*/
typedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t;

/* "cy_nvml.pxd":1678
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t 'nvmlWorkloadPowerProfileProfilesInfo_v1_t':
 *     unsigned int version
*/
typedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t;

/* "cy_nvml.pxd":1679
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'
 * ctypedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t 'nvmlWorkloadPowerProfileProfilesInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlMask255_t perfProfilesMask
*/
/* Cython-emitted declaration of NVML's `nvmlWorkloadPowerProfileProfilesInfo_v1_t`
 * (cy_nvml.pxd:1679).  Generated code -- do not hand-edit.
 */
struct nvmlWorkloadPowerProfileProfilesInfo_v1_t {
  unsigned int version;
  nvmlMask255_t perfProfilesMask;  /* mask presumably selects valid entries of perfProfile[] -- confirm against NVML docs */
  nvmlWorkloadPowerProfileInfo_t perfProfile[255];  /* fixed capacity of 255, per the .pxd declaration */
};

/* "cy_nvml.pxd":1684
 *     nvmlWorkloadPowerProfileInfo_t perfProfile[255]
 * 
 * ctypedef nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_t 'nvmlNvLinkInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_t 'nvmlWorkloadPowerProfileProfilesInfo_t'
 * 
*/
typedef nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_t;

/* "cy_nvml.pxd":1685
 * 
 * ctypedef nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_t 'nvmlNvLinkInfo_t'
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_t 'nvmlWorkloadPowerProfileProfilesInfo_t'             # <<<<<<<<<<<<<<
 * 
 * 
*/
typedef nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_t;
struct __pyx_opt_args_7cpython_11contextvars_get_value;
struct __pyx_opt_args_7cpython_11contextvars_get_value_no_default;

/* "cpython/contextvars.pxd":116
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")
 * cdef inline object get_value(var, default_value=None):             # <<<<<<<<<<<<<<
 *     """Return a new reference to the value of the context variable,
 *     or the default value of the context variable,
*/
/* Cython optional-argument pack for `cpython.contextvars.get_value(var,
 * default_value=None)` (contextvars.pxd:116).  Generated code -- do not
 * hand-edit.
 */
struct __pyx_opt_args_7cpython_11contextvars_get_value {
  int __pyx_n;  /* NOTE(review): appears to count optional args actually supplied (standard Cython convention) -- confirm */
  PyObject *default_value;
};

/* "cpython/contextvars.pxd":134
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")
 * cdef inline object get_value_no_default(var, default_value=None):             # <<<<<<<<<<<<<<
 *     """Return a new reference to the value of the context variable,
 *     or the provided default value if no such value was found.
*/
/* Cython optional-argument pack for `cpython.contextvars.get_value_no_default(
 * var, default_value=None)` (contextvars.pxd:134).  Generated code -- do not
 * hand-edit.
 */
struct __pyx_opt_args_7cpython_11contextvars_get_value_no_default {
  int __pyx_n;  /* NOTE(review): appears to count optional args actually supplied (standard Cython convention) -- confirm */
  PyObject *default_value;
};
template <typename T>
struct __pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource;
struct __pyx_opt_args_4cuda_8bindings_9_internal_5utils_get_buffer_pointer;

/* "_internal/utils.pxd":156
 * 
 * 
 * cdef cppclass nested_resource[T]:             # <<<<<<<<<<<<<<
 *     nullable_unique_ptr[ vector[intptr_t] ] ptrs
 *     nullable_unique_ptr[ vector[vector[T]] ] nested_resource_ptr
*/
/* C++ rendering of the `nested_resource[T]` cppclass from
 * _internal/utils.pxd:156: owns a pointer table and the nested element
 * storage it points into, both via nullable_unique_ptr.  Generated code --
 * do not hand-edit.
 */
template <class T>
struct __pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource {
  nullable_unique_ptr<std::vector<intptr_t> >  ptrs;
  nullable_unique_ptr<std::vector<std::vector<T> > >  nested_resource_ptr;
};

/* "_internal/utils.pxd":167
 * 
 * cdef bint is_nested_sequence(data)
 * cdef void* get_buffer_pointer(buf, Py_ssize_t size, readonly=*) except*             # <<<<<<<<<<<<<<
*/
/* Cython optional-argument pack for `get_buffer_pointer(buf, Py_ssize_t size,
 * readonly=*)` (_internal/utils.pxd:167).  Generated code -- do not hand-edit.
 */
struct __pyx_opt_args_4cuda_8bindings_9_internal_5utils_get_buffer_pointer {
  int __pyx_n;  /* NOTE(review): appears to count optional args actually supplied (standard Cython convention) -- confirm */
  PyObject *readonly;
};

/* "cuda/bindings/_nvml.pxd":16
 * ###############################################################################
 * 
 * ctypedef nvmlDramEncryptionInfo_v1_t DramEncryptionInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t ConfComputeSetKeyRotationThresholdInfo_v1
 * ctypedef nvmlSystemDriverBranchInfo_v1_t SystemDriverBranchInfo_v1
*/
typedef nvmlDramEncryptionInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_DramEncryptionInfo_v1;

/* "cuda/bindings/_nvml.pxd":17
 * 
 * ctypedef nvmlDramEncryptionInfo_v1_t DramEncryptionInfo_v1
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t ConfComputeSetKeyRotationThresholdInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemDriverBranchInfo_v1_t SystemDriverBranchInfo_v1
 * ctypedef nvmlTemperature_v1_t Temperature_v1
*/
typedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_ConfComputeSetKeyRotationThresholdInfo_v1;

/* "cuda/bindings/_nvml.pxd":18
 * ctypedef nvmlDramEncryptionInfo_v1_t DramEncryptionInfo_v1
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t ConfComputeSetKeyRotationThresholdInfo_v1
 * ctypedef nvmlSystemDriverBranchInfo_v1_t SystemDriverBranchInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlTemperature_v1_t Temperature_v1
 * ctypedef nvmlPowerSmoothingProfile_v1_t PowerSmoothingProfile_v1
*/
typedef nvmlSystemDriverBranchInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_SystemDriverBranchInfo_v1;

/* "cuda/bindings/_nvml.pxd":19
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t ConfComputeSetKeyRotationThresholdInfo_v1
 * ctypedef nvmlSystemDriverBranchInfo_v1_t SystemDriverBranchInfo_v1
 * ctypedef nvmlTemperature_v1_t Temperature_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerSmoothingProfile_v1_t PowerSmoothingProfile_v1
 * ctypedef nvmlPowerSmoothingState_v1_t PowerSmoothingState_v1
*/
typedef nvmlTemperature_v1_t __pyx_t_4cuda_8bindings_5_nvml_Temperature_v1;

/* "cuda/bindings/_nvml.pxd":20
 * ctypedef nvmlSystemDriverBranchInfo_v1_t SystemDriverBranchInfo_v1
 * ctypedef nvmlTemperature_v1_t Temperature_v1
 * ctypedef nvmlPowerSmoothingProfile_v1_t PowerSmoothingProfile_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerSmoothingState_v1_t PowerSmoothingState_v1
 * ctypedef nvmlDevice_t Device
*/
typedef nvmlPowerSmoothingProfile_v1_t __pyx_t_4cuda_8bindings_5_nvml_PowerSmoothingProfile_v1;

/* "cuda/bindings/_nvml.pxd":21
 * ctypedef nvmlTemperature_v1_t Temperature_v1
 * ctypedef nvmlPowerSmoothingProfile_v1_t PowerSmoothingProfile_v1
 * ctypedef nvmlPowerSmoothingState_v1_t PowerSmoothingState_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlDevice_t Device
 * ctypedef nvmlGpuInstance_t GpuInstance
*/
typedef nvmlPowerSmoothingState_v1_t __pyx_t_4cuda_8bindings_5_nvml_PowerSmoothingState_v1;

/* "cuda/bindings/_nvml.pxd":22
 * ctypedef nvmlPowerSmoothingProfile_v1_t PowerSmoothingProfile_v1
 * ctypedef nvmlPowerSmoothingState_v1_t PowerSmoothingState_v1
 * ctypedef nvmlDevice_t Device             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuInstance_t GpuInstance
 * ctypedef nvmlUnit_t Unit
*/
typedef nvmlDevice_t __pyx_t_4cuda_8bindings_5_nvml_Device;

/* "cuda/bindings/_nvml.pxd":23
 * ctypedef nvmlPowerSmoothingState_v1_t PowerSmoothingState_v1
 * ctypedef nvmlDevice_t Device
 * ctypedef nvmlGpuInstance_t GpuInstance             # <<<<<<<<<<<<<<
 * ctypedef nvmlUnit_t Unit
 * ctypedef nvmlEventSet_t EventSet
*/
typedef nvmlGpuInstance_t __pyx_t_4cuda_8bindings_5_nvml_GpuInstance;

/* "cuda/bindings/_nvml.pxd":24
 * ctypedef nvmlDevice_t Device
 * ctypedef nvmlGpuInstance_t GpuInstance
 * ctypedef nvmlUnit_t Unit             # <<<<<<<<<<<<<<
 * ctypedef nvmlEventSet_t EventSet
 * ctypedef nvmlSystemEventSet_t SystemEventSet
*/
typedef nvmlUnit_t __pyx_t_4cuda_8bindings_5_nvml_Unit;

/* "cuda/bindings/_nvml.pxd":25
 * ctypedef nvmlGpuInstance_t GpuInstance
 * ctypedef nvmlUnit_t Unit
 * ctypedef nvmlEventSet_t EventSet             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSet_t SystemEventSet
 * ctypedef nvmlComputeInstance_t ComputeInstance
*/
typedef nvmlEventSet_t __pyx_t_4cuda_8bindings_5_nvml_EventSet;

/* "cuda/bindings/_nvml.pxd":26
 * ctypedef nvmlUnit_t Unit
 * ctypedef nvmlEventSet_t EventSet
 * ctypedef nvmlSystemEventSet_t SystemEventSet             # <<<<<<<<<<<<<<
 * ctypedef nvmlComputeInstance_t ComputeInstance
 * ctypedef nvmlGpmSample_t GpmSample
*/
typedef nvmlSystemEventSet_t __pyx_t_4cuda_8bindings_5_nvml_SystemEventSet;

/* "cuda/bindings/_nvml.pxd":27
 * ctypedef nvmlEventSet_t EventSet
 * ctypedef nvmlSystemEventSet_t SystemEventSet
 * ctypedef nvmlComputeInstance_t ComputeInstance             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpmSample_t GpmSample
 * ctypedef nvmlEccErrorCounts_t EccErrorCounts
*/
typedef nvmlComputeInstance_t __pyx_t_4cuda_8bindings_5_nvml_ComputeInstance;

/* "cuda/bindings/_nvml.pxd":28
 * ctypedef nvmlSystemEventSet_t SystemEventSet
 * ctypedef nvmlComputeInstance_t ComputeInstance
 * ctypedef nvmlGpmSample_t GpmSample             # <<<<<<<<<<<<<<
 * ctypedef nvmlEccErrorCounts_t EccErrorCounts
 * ctypedef nvmlProcessInfo_v1_t ProcessInfo_v1
*/
typedef nvmlGpmSample_t __pyx_t_4cuda_8bindings_5_nvml_GpmSample;

/* "cuda/bindings/_nvml.pxd":29
 * ctypedef nvmlComputeInstance_t ComputeInstance
 * ctypedef nvmlGpmSample_t GpmSample
 * ctypedef nvmlEccErrorCounts_t EccErrorCounts             # <<<<<<<<<<<<<<
 * ctypedef nvmlProcessInfo_v1_t ProcessInfo_v1
 * ctypedef nvmlProcessInfo_v2_t ProcessInfo_v2
*/
typedef nvmlEccErrorCounts_t __pyx_t_4cuda_8bindings_5_nvml_EccErrorCounts;

/* "cuda/bindings/_nvml.pxd":30
 * ctypedef nvmlGpmSample_t GpmSample
 * ctypedef nvmlEccErrorCounts_t EccErrorCounts
 * ctypedef nvmlProcessInfo_v1_t ProcessInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlProcessInfo_v2_t ProcessInfo_v2
 * ctypedef nvmlNvLinkUtilizationControl_t NvLinkUtilizationControl
*/
typedef nvmlProcessInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_ProcessInfo_v1;

/* "cuda/bindings/_nvml.pxd":31
 * ctypedef nvmlEccErrorCounts_t EccErrorCounts
 * ctypedef nvmlProcessInfo_v1_t ProcessInfo_v1
 * ctypedef nvmlProcessInfo_v2_t ProcessInfo_v2             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkUtilizationControl_t NvLinkUtilizationControl
 * ctypedef nvmlViolationTime_t ViolationTime
*/
typedef nvmlProcessInfo_v2_t __pyx_t_4cuda_8bindings_5_nvml_ProcessInfo_v2;

/* "cuda/bindings/_nvml.pxd":32
 * ctypedef nvmlProcessInfo_v1_t ProcessInfo_v1
 * ctypedef nvmlProcessInfo_v2_t ProcessInfo_v2
 * ctypedef nvmlNvLinkUtilizationControl_t NvLinkUtilizationControl             # <<<<<<<<<<<<<<
 * ctypedef nvmlViolationTime_t ViolationTime
 * ctypedef nvmlUUIDValue_t UUIDValue
*/
typedef nvmlNvLinkUtilizationControl_t __pyx_t_4cuda_8bindings_5_nvml_NvLinkUtilizationControl;

/* "cuda/bindings/_nvml.pxd":33
 * ctypedef nvmlProcessInfo_v2_t ProcessInfo_v2
 * ctypedef nvmlNvLinkUtilizationControl_t NvLinkUtilizationControl
 * ctypedef nvmlViolationTime_t ViolationTime             # <<<<<<<<<<<<<<
 * ctypedef nvmlUUIDValue_t UUIDValue
 * ctypedef nvmlPlatformInfo_v1_t PlatformInfo_v1
*/
typedef nvmlViolationTime_t __pyx_t_4cuda_8bindings_5_nvml_ViolationTime;

/* "cuda/bindings/_nvml.pxd":34
 * ctypedef nvmlNvLinkUtilizationControl_t NvLinkUtilizationControl
 * ctypedef nvmlViolationTime_t ViolationTime
 * ctypedef nvmlUUIDValue_t UUIDValue             # <<<<<<<<<<<<<<
 * ctypedef nvmlPlatformInfo_v1_t PlatformInfo_v1
 * ctypedef nvmlVgpuPlacementList_v1_t VgpuPlacementList_v1
*/
typedef nvmlUUIDValue_t __pyx_t_4cuda_8bindings_5_nvml_UUIDValue;

/* "cuda/bindings/_nvml.pxd":35
 * ctypedef nvmlViolationTime_t ViolationTime
 * ctypedef nvmlUUIDValue_t UUIDValue
 * ctypedef nvmlPlatformInfo_v1_t PlatformInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuPlacementList_v1_t VgpuPlacementList_v1
 * ctypedef nvmlNvLinkPowerThres_t NvLinkPowerThres
*/
typedef nvmlPlatformInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_PlatformInfo_v1;

/* "cuda/bindings/_nvml.pxd":36
 * ctypedef nvmlUUIDValue_t UUIDValue
 * ctypedef nvmlPlatformInfo_v1_t PlatformInfo_v1
 * ctypedef nvmlVgpuPlacementList_v1_t VgpuPlacementList_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkPowerThres_t NvLinkPowerThres
 * ctypedef nvmlSystemEventData_v1_t SystemEventData_v1
*/
typedef nvmlVgpuPlacementList_v1_t __pyx_t_4cuda_8bindings_5_nvml_VgpuPlacementList_v1;

/* "cuda/bindings/_nvml.pxd":37
 * ctypedef nvmlPlatformInfo_v1_t PlatformInfo_v1
 * ctypedef nvmlVgpuPlacementList_v1_t VgpuPlacementList_v1
 * ctypedef nvmlNvLinkPowerThres_t NvLinkPowerThres             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventData_v1_t SystemEventData_v1
 * ctypedef nvmlGpuInstanceProfileInfo_t GpuInstanceProfileInfo
*/
typedef nvmlNvLinkPowerThres_t __pyx_t_4cuda_8bindings_5_nvml_NvLinkPowerThres;

/* "cuda/bindings/_nvml.pxd":38
 * ctypedef nvmlVgpuPlacementList_v1_t VgpuPlacementList_v1
 * ctypedef nvmlNvLinkPowerThres_t NvLinkPowerThres
 * ctypedef nvmlSystemEventData_v1_t SystemEventData_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuInstanceProfileInfo_t GpuInstanceProfileInfo
 * ctypedef nvmlComputeInstanceProfileInfo_t ComputeInstanceProfileInfo
*/
typedef nvmlSystemEventData_v1_t __pyx_t_4cuda_8bindings_5_nvml_SystemEventData_v1;

/* "cuda/bindings/_nvml.pxd":39
 * ctypedef nvmlNvLinkPowerThres_t NvLinkPowerThres
 * ctypedef nvmlSystemEventData_v1_t SystemEventData_v1
 * ctypedef nvmlGpuInstanceProfileInfo_t GpuInstanceProfileInfo             # <<<<<<<<<<<<<<
 * ctypedef nvmlComputeInstanceProfileInfo_t ComputeInstanceProfileInfo
 * ctypedef nvmlMask255_t Mask255
*/
typedef nvmlGpuInstanceProfileInfo_t __pyx_t_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo;

/* "cuda/bindings/_nvml.pxd":40
 * ctypedef nvmlSystemEventData_v1_t SystemEventData_v1
 * ctypedef nvmlGpuInstanceProfileInfo_t GpuInstanceProfileInfo
 * ctypedef nvmlComputeInstanceProfileInfo_t ComputeInstanceProfileInfo             # <<<<<<<<<<<<<<
 * ctypedef nvmlMask255_t Mask255
 * ctypedef nvmlHostname_v1_t Hostname_v1
*/
typedef nvmlComputeInstanceProfileInfo_t __pyx_t_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo;

/* "cuda/bindings/_nvml.pxd":41
 * ctypedef nvmlGpuInstanceProfileInfo_t GpuInstanceProfileInfo
 * ctypedef nvmlComputeInstanceProfileInfo_t ComputeInstanceProfileInfo
 * ctypedef nvmlMask255_t Mask255             # <<<<<<<<<<<<<<
 * ctypedef nvmlHostname_v1_t Hostname_v1
 * ctypedef nvmlNvLinkInfo_v1_t NvLinkInfo_v1
*/
typedef nvmlMask255_t __pyx_t_4cuda_8bindings_5_nvml_Mask255;

/* "cuda/bindings/_nvml.pxd":42
 * ctypedef nvmlComputeInstanceProfileInfo_t ComputeInstanceProfileInfo
 * ctypedef nvmlMask255_t Mask255
 * ctypedef nvmlHostname_v1_t Hostname_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkInfo_v1_t NvLinkInfo_v1
 * ctypedef nvmlPowerValue_v2_t PowerValue_v2
*/
typedef nvmlHostname_v1_t __pyx_t_4cuda_8bindings_5_nvml_Hostname_v1;

/* "cuda/bindings/_nvml.pxd":43
 * ctypedef nvmlMask255_t Mask255
 * ctypedef nvmlHostname_v1_t Hostname_v1
 * ctypedef nvmlNvLinkInfo_v1_t NvLinkInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerValue_v2_t PowerValue_v2
 * ctypedef nvmlVgpuProcessUtilizationSample_t VgpuProcessUtilizationSample
*/
typedef nvmlNvLinkInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_NvLinkInfo_v1;

/* "cuda/bindings/_nvml.pxd":44
 * ctypedef nvmlHostname_v1_t Hostname_v1
 * ctypedef nvmlNvLinkInfo_v1_t NvLinkInfo_v1
 * ctypedef nvmlPowerValue_v2_t PowerValue_v2             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuProcessUtilizationSample_t VgpuProcessUtilizationSample
 * ctypedef nvmlGpuFabricInfo_t GpuFabricInfo
*/
typedef nvmlPowerValue_v2_t __pyx_t_4cuda_8bindings_5_nvml_PowerValue_v2;

/* "cuda/bindings/_nvml.pxd":45
 * ctypedef nvmlNvLinkInfo_v1_t NvLinkInfo_v1
 * ctypedef nvmlPowerValue_v2_t PowerValue_v2
 * ctypedef nvmlVgpuProcessUtilizationSample_t VgpuProcessUtilizationSample             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuFabricInfo_t GpuFabricInfo
 * ctypedef nvmlGpuFabricInfo_v2_t GpuFabricInfo_v2
*/
typedef nvmlVgpuProcessUtilizationSample_t __pyx_t_4cuda_8bindings_5_nvml_VgpuProcessUtilizationSample;

/* "cuda/bindings/_nvml.pxd":46
 * ctypedef nvmlPowerValue_v2_t PowerValue_v2
 * ctypedef nvmlVgpuProcessUtilizationSample_t VgpuProcessUtilizationSample
 * ctypedef nvmlGpuFabricInfo_t GpuFabricInfo             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuFabricInfo_v2_t GpuFabricInfo_v2
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t SystemEventSetCreateRequest_v1
*/
typedef nvmlGpuFabricInfo_t __pyx_t_4cuda_8bindings_5_nvml_GpuFabricInfo;

/* "cuda/bindings/_nvml.pxd":47
 * ctypedef nvmlVgpuProcessUtilizationSample_t VgpuProcessUtilizationSample
 * ctypedef nvmlGpuFabricInfo_t GpuFabricInfo
 * ctypedef nvmlGpuFabricInfo_v2_t GpuFabricInfo_v2             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t SystemEventSetCreateRequest_v1
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t SystemEventSetFreeRequest_v1
*/
typedef nvmlGpuFabricInfo_v2_t __pyx_t_4cuda_8bindings_5_nvml_GpuFabricInfo_v2;

/* "cuda/bindings/_nvml.pxd":48
 * ctypedef nvmlGpuFabricInfo_t GpuFabricInfo
 * ctypedef nvmlGpuFabricInfo_v2_t GpuFabricInfo_v2
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t SystemEventSetCreateRequest_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t SystemEventSetFreeRequest_v1
 * ctypedef nvmlSystemRegisterEventRequest_v1_t SystemRegisterEventRequest_v1
*/
typedef nvmlSystemEventSetCreateRequest_v1_t __pyx_t_4cuda_8bindings_5_nvml_SystemEventSetCreateRequest_v1;

/* "cuda/bindings/_nvml.pxd":49
 * ctypedef nvmlGpuFabricInfo_v2_t GpuFabricInfo_v2
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t SystemEventSetCreateRequest_v1
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t SystemEventSetFreeRequest_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemRegisterEventRequest_v1_t SystemRegisterEventRequest_v1
 * ctypedef nvmlVgpuInstanceUtilizationSample_t VgpuInstanceUtilizationSample
*/
typedef nvmlSystemEventSetFreeRequest_v1_t __pyx_t_4cuda_8bindings_5_nvml_SystemEventSetFreeRequest_v1;

/* "cuda/bindings/_nvml.pxd":50
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t SystemEventSetCreateRequest_v1
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t SystemEventSetFreeRequest_v1
 * ctypedef nvmlSystemRegisterEventRequest_v1_t SystemRegisterEventRequest_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuInstanceUtilizationSample_t VgpuInstanceUtilizationSample
 * ctypedef nvmlUUID_v1_t UUID_v1
*/
typedef nvmlSystemRegisterEventRequest_v1_t __pyx_t_4cuda_8bindings_5_nvml_SystemRegisterEventRequest_v1;

/* "cuda/bindings/_nvml.pxd":51
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t SystemEventSetFreeRequest_v1
 * ctypedef nvmlSystemRegisterEventRequest_v1_t SystemRegisterEventRequest_v1
 * ctypedef nvmlVgpuInstanceUtilizationSample_t VgpuInstanceUtilizationSample             # <<<<<<<<<<<<<<
 * ctypedef nvmlUUID_v1_t UUID_v1
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t SystemEventSetWaitRequest_v1
*/
typedef nvmlVgpuInstanceUtilizationSample_t __pyx_t_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationSample;

/* "cuda/bindings/_nvml.pxd":52
 * ctypedef nvmlSystemRegisterEventRequest_v1_t SystemRegisterEventRequest_v1
 * ctypedef nvmlVgpuInstanceUtilizationSample_t VgpuInstanceUtilizationSample
 * ctypedef nvmlUUID_v1_t UUID_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t SystemEventSetWaitRequest_v1
 * ctypedef nvmlGpmMetric_t GpmMetric
*/
typedef nvmlUUID_v1_t __pyx_t_4cuda_8bindings_5_nvml_UUID_v1;

/* "cuda/bindings/_nvml.pxd":53
 * ctypedef nvmlVgpuInstanceUtilizationSample_t VgpuInstanceUtilizationSample
 * ctypedef nvmlUUID_v1_t UUID_v1
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t SystemEventSetWaitRequest_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpmMetric_t GpmMetric
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t WorkloadPowerProfileInfo_v1
*/
typedef nvmlSystemEventSetWaitRequest_v1_t __pyx_t_4cuda_8bindings_5_nvml_SystemEventSetWaitRequest_v1;

/* "cuda/bindings/_nvml.pxd":54
 * ctypedef nvmlUUID_v1_t UUID_v1
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t SystemEventSetWaitRequest_v1
 * ctypedef nvmlGpmMetric_t GpmMetric             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t WorkloadPowerProfileInfo_v1
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t WorkloadPowerProfileCurrentProfiles_v1
*/
typedef nvmlGpmMetric_t __pyx_t_4cuda_8bindings_5_nvml_GpmMetric;

/* "cuda/bindings/_nvml.pxd":55
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t SystemEventSetWaitRequest_v1
 * ctypedef nvmlGpmMetric_t GpmMetric
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t WorkloadPowerProfileInfo_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t WorkloadPowerProfileCurrentProfiles_v1
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t WorkloadPowerProfileRequestedProfiles_v1
*/
typedef nvmlWorkloadPowerProfileInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_WorkloadPowerProfileInfo_v1;

/* "cuda/bindings/_nvml.pxd":56
 * ctypedef nvmlGpmMetric_t GpmMetric
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t WorkloadPowerProfileInfo_v1
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t WorkloadPowerProfileCurrentProfiles_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t WorkloadPowerProfileRequestedProfiles_v1
 * ctypedef nvmlPRMTLV_v1_t PRMTLV_v1
*/
typedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t __pyx_t_4cuda_8bindings_5_nvml_WorkloadPowerProfileCurrentProfiles_v1;

/* "cuda/bindings/_nvml.pxd":57
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t WorkloadPowerProfileInfo_v1
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t WorkloadPowerProfileCurrentProfiles_v1
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t WorkloadPowerProfileRequestedProfiles_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlPRMTLV_v1_t PRMTLV_v1
 * ctypedef nvmlVgpuSchedulerSetState_t VgpuSchedulerSetState
*/
typedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t __pyx_t_4cuda_8bindings_5_nvml_WorkloadPowerProfileRequestedProfiles_v1;

/* "cuda/bindings/_nvml.pxd":58
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t WorkloadPowerProfileCurrentProfiles_v1
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t WorkloadPowerProfileRequestedProfiles_v1
 * ctypedef nvmlPRMTLV_v1_t PRMTLV_v1             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerSetState_t VgpuSchedulerSetState
 * ctypedef nvmlGpmMetricsGet_t GpmMetricsGet
*/
typedef nvmlPRMTLV_v1_t __pyx_t_4cuda_8bindings_5_nvml_PRMTLV_v1;

/* "cuda/bindings/_nvml.pxd":59
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t WorkloadPowerProfileRequestedProfiles_v1
 * ctypedef nvmlPRMTLV_v1_t PRMTLV_v1
 * ctypedef nvmlVgpuSchedulerSetState_t VgpuSchedulerSetState             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpmMetricsGet_t GpmMetricsGet
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t WorkloadPowerProfileProfilesInfo_v1
*/
typedef nvmlVgpuSchedulerSetState_t __pyx_t_4cuda_8bindings_5_nvml_VgpuSchedulerSetState;

/* "cuda/bindings/_nvml.pxd":60
 * ctypedef nvmlPRMTLV_v1_t PRMTLV_v1
 * ctypedef nvmlVgpuSchedulerSetState_t VgpuSchedulerSetState
 * ctypedef nvmlGpmMetricsGet_t GpmMetricsGet             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t WorkloadPowerProfileProfilesInfo_v1
 * 
*/
typedef nvmlGpmMetricsGet_t __pyx_t_4cuda_8bindings_5_nvml_GpmMetricsGet;

/* "cuda/bindings/_nvml.pxd":61
 * ctypedef nvmlVgpuSchedulerSetState_t VgpuSchedulerSetState
 * ctypedef nvmlGpmMetricsGet_t GpmMetricsGet
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t WorkloadPowerProfileProfilesInfo_v1             # <<<<<<<<<<<<<<
 * 
 * 
*/
typedef nvmlWorkloadPowerProfileProfilesInfo_v1_t __pyx_t_4cuda_8bindings_5_nvml_WorkloadPowerProfileProfilesInfo_v1;

/* "cuda/bindings/_nvml.pxd":68
 * ###############################################################################
 * 
 * ctypedef nvmlBridgeChipType_t _BridgeChipType             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkUtilizationCountUnits_t _NvLinkUtilizationCountUnits
 * ctypedef nvmlNvLinkUtilizationCountPktTypes_t _NvLinkUtilizationCountPktTypes
*/
typedef nvmlBridgeChipType_t __pyx_t_4cuda_8bindings_5_nvml__BridgeChipType;

/* "cuda/bindings/_nvml.pxd":69
 * 
 * ctypedef nvmlBridgeChipType_t _BridgeChipType
 * ctypedef nvmlNvLinkUtilizationCountUnits_t _NvLinkUtilizationCountUnits             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkUtilizationCountPktTypes_t _NvLinkUtilizationCountPktTypes
 * ctypedef nvmlNvLinkCapability_t _NvLinkCapability
*/
typedef nvmlNvLinkUtilizationCountUnits_t __pyx_t_4cuda_8bindings_5_nvml__NvLinkUtilizationCountUnits;

/* "cuda/bindings/_nvml.pxd":70
 * ctypedef nvmlBridgeChipType_t _BridgeChipType
 * ctypedef nvmlNvLinkUtilizationCountUnits_t _NvLinkUtilizationCountUnits
 * ctypedef nvmlNvLinkUtilizationCountPktTypes_t _NvLinkUtilizationCountPktTypes             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkCapability_t _NvLinkCapability
 * ctypedef nvmlNvLinkErrorCounter_t _NvLinkErrorCounter
*/
typedef nvmlNvLinkUtilizationCountPktTypes_t __pyx_t_4cuda_8bindings_5_nvml__NvLinkUtilizationCountPktTypes;

/* "cuda/bindings/_nvml.pxd":71
 * ctypedef nvmlNvLinkUtilizationCountUnits_t _NvLinkUtilizationCountUnits
 * ctypedef nvmlNvLinkUtilizationCountPktTypes_t _NvLinkUtilizationCountPktTypes
 * ctypedef nvmlNvLinkCapability_t _NvLinkCapability             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvLinkErrorCounter_t _NvLinkErrorCounter
 * ctypedef nvmlIntNvLinkDeviceType_t _IntNvLinkDeviceType
*/
typedef nvmlNvLinkCapability_t __pyx_t_4cuda_8bindings_5_nvml__NvLinkCapability;

/* "cuda/bindings/_nvml.pxd":72
 * ctypedef nvmlNvLinkUtilizationCountPktTypes_t _NvLinkUtilizationCountPktTypes
 * ctypedef nvmlNvLinkCapability_t _NvLinkCapability
 * ctypedef nvmlNvLinkErrorCounter_t _NvLinkErrorCounter             # <<<<<<<<<<<<<<
 * ctypedef nvmlIntNvLinkDeviceType_t _IntNvLinkDeviceType
 * ctypedef nvmlGpuTopologyLevel_t _GpuTopologyLevel
*/
typedef nvmlNvLinkErrorCounter_t __pyx_t_4cuda_8bindings_5_nvml__NvLinkErrorCounter;

/* "cuda/bindings/_nvml.pxd":73
 * ctypedef nvmlNvLinkCapability_t _NvLinkCapability
 * ctypedef nvmlNvLinkErrorCounter_t _NvLinkErrorCounter
 * ctypedef nvmlIntNvLinkDeviceType_t _IntNvLinkDeviceType             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuTopologyLevel_t _GpuTopologyLevel
 * ctypedef nvmlGpuP2PStatus_t _GpuP2PStatus
*/
typedef nvmlIntNvLinkDeviceType_t __pyx_t_4cuda_8bindings_5_nvml__IntNvLinkDeviceType;

/* "cuda/bindings/_nvml.pxd":74
 * ctypedef nvmlNvLinkErrorCounter_t _NvLinkErrorCounter
 * ctypedef nvmlIntNvLinkDeviceType_t _IntNvLinkDeviceType
 * ctypedef nvmlGpuTopologyLevel_t _GpuTopologyLevel             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuP2PStatus_t _GpuP2PStatus
 * ctypedef nvmlGpuP2PCapsIndex_t _GpuP2PCapsIndex
*/
typedef nvmlGpuTopologyLevel_t __pyx_t_4cuda_8bindings_5_nvml__GpuTopologyLevel;

/* "cuda/bindings/_nvml.pxd":75
 * ctypedef nvmlIntNvLinkDeviceType_t _IntNvLinkDeviceType
 * ctypedef nvmlGpuTopologyLevel_t _GpuTopologyLevel
 * ctypedef nvmlGpuP2PStatus_t _GpuP2PStatus             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuP2PCapsIndex_t _GpuP2PCapsIndex
 * ctypedef nvmlSamplingType_t _SamplingType
*/
typedef nvmlGpuP2PStatus_t __pyx_t_4cuda_8bindings_5_nvml__GpuP2PStatus;

/* "cuda/bindings/_nvml.pxd":76
 * ctypedef nvmlGpuTopologyLevel_t _GpuTopologyLevel
 * ctypedef nvmlGpuP2PStatus_t _GpuP2PStatus
 * ctypedef nvmlGpuP2PCapsIndex_t _GpuP2PCapsIndex             # <<<<<<<<<<<<<<
 * ctypedef nvmlSamplingType_t _SamplingType
 * ctypedef nvmlPcieUtilCounter_t _PcieUtilCounter
*/
typedef nvmlGpuP2PCapsIndex_t __pyx_t_4cuda_8bindings_5_nvml__GpuP2PCapsIndex;

/* "cuda/bindings/_nvml.pxd":77
 * ctypedef nvmlGpuP2PStatus_t _GpuP2PStatus
 * ctypedef nvmlGpuP2PCapsIndex_t _GpuP2PCapsIndex
 * ctypedef nvmlSamplingType_t _SamplingType             # <<<<<<<<<<<<<<
 * ctypedef nvmlPcieUtilCounter_t _PcieUtilCounter
 * ctypedef nvmlValueType_t _ValueType
*/
typedef nvmlSamplingType_t __pyx_t_4cuda_8bindings_5_nvml__SamplingType;

/* "cuda/bindings/_nvml.pxd":78
 * ctypedef nvmlGpuP2PCapsIndex_t _GpuP2PCapsIndex
 * ctypedef nvmlSamplingType_t _SamplingType
 * ctypedef nvmlPcieUtilCounter_t _PcieUtilCounter             # <<<<<<<<<<<<<<
 * ctypedef nvmlValueType_t _ValueType
 * ctypedef nvmlPerfPolicyType_t _PerfPolicyType
*/
typedef nvmlPcieUtilCounter_t __pyx_t_4cuda_8bindings_5_nvml__PcieUtilCounter;

/* "cuda/bindings/_nvml.pxd":79
 * ctypedef nvmlSamplingType_t _SamplingType
 * ctypedef nvmlPcieUtilCounter_t _PcieUtilCounter
 * ctypedef nvmlValueType_t _ValueType             # <<<<<<<<<<<<<<
 * ctypedef nvmlPerfPolicyType_t _PerfPolicyType
 * ctypedef nvmlThermalTarget_t _ThermalTarget
*/
typedef nvmlValueType_t __pyx_t_4cuda_8bindings_5_nvml__ValueType;

/* "cuda/bindings/_nvml.pxd":80
 * ctypedef nvmlPcieUtilCounter_t _PcieUtilCounter
 * ctypedef nvmlValueType_t _ValueType
 * ctypedef nvmlPerfPolicyType_t _PerfPolicyType             # <<<<<<<<<<<<<<
 * ctypedef nvmlThermalTarget_t _ThermalTarget
 * ctypedef nvmlThermalController_t _ThermalController
*/
typedef nvmlPerfPolicyType_t __pyx_t_4cuda_8bindings_5_nvml__PerfPolicyType;

/* "cuda/bindings/_nvml.pxd":81
 * ctypedef nvmlValueType_t _ValueType
 * ctypedef nvmlPerfPolicyType_t _PerfPolicyType
 * ctypedef nvmlThermalTarget_t _ThermalTarget             # <<<<<<<<<<<<<<
 * ctypedef nvmlThermalController_t _ThermalController
 * ctypedef nvmlCoolerControl_t _CoolerControl
*/
typedef nvmlThermalTarget_t __pyx_t_4cuda_8bindings_5_nvml__ThermalTarget;

/* "cuda/bindings/_nvml.pxd":82
 * ctypedef nvmlPerfPolicyType_t _PerfPolicyType
 * ctypedef nvmlThermalTarget_t _ThermalTarget
 * ctypedef nvmlThermalController_t _ThermalController             # <<<<<<<<<<<<<<
 * ctypedef nvmlCoolerControl_t _CoolerControl
 * ctypedef nvmlCoolerTarget_t _CoolerTarget
*/
typedef nvmlThermalController_t __pyx_t_4cuda_8bindings_5_nvml__ThermalController;

/* "cuda/bindings/_nvml.pxd":83
 * ctypedef nvmlThermalTarget_t _ThermalTarget
 * ctypedef nvmlThermalController_t _ThermalController
 * ctypedef nvmlCoolerControl_t _CoolerControl             # <<<<<<<<<<<<<<
 * ctypedef nvmlCoolerTarget_t _CoolerTarget
 * ctypedef nvmlUUIDType_t _UUIDType
*/
typedef nvmlCoolerControl_t __pyx_t_4cuda_8bindings_5_nvml__CoolerControl;

/* "cuda/bindings/_nvml.pxd":84
 * ctypedef nvmlThermalController_t _ThermalController
 * ctypedef nvmlCoolerControl_t _CoolerControl
 * ctypedef nvmlCoolerTarget_t _CoolerTarget             # <<<<<<<<<<<<<<
 * ctypedef nvmlUUIDType_t _UUIDType
 * ctypedef nvmlEnableState_t _EnableState
*/
typedef nvmlCoolerTarget_t __pyx_t_4cuda_8bindings_5_nvml__CoolerTarget;

/* "cuda/bindings/_nvml.pxd":85
 * ctypedef nvmlCoolerControl_t _CoolerControl
 * ctypedef nvmlCoolerTarget_t _CoolerTarget
 * ctypedef nvmlUUIDType_t _UUIDType             # <<<<<<<<<<<<<<
 * ctypedef nvmlEnableState_t _EnableState
 * ctypedef nvmlBrandType_t _BrandType
*/
typedef nvmlUUIDType_t __pyx_t_4cuda_8bindings_5_nvml__UUIDType;

/* "cuda/bindings/_nvml.pxd":86
 * ctypedef nvmlCoolerTarget_t _CoolerTarget
 * ctypedef nvmlUUIDType_t _UUIDType
 * ctypedef nvmlEnableState_t _EnableState             # <<<<<<<<<<<<<<
 * ctypedef nvmlBrandType_t _BrandType
 * ctypedef nvmlTemperatureThresholds_t _TemperatureThresholds
*/
typedef nvmlEnableState_t __pyx_t_4cuda_8bindings_5_nvml__EnableState;

/* "cuda/bindings/_nvml.pxd":87
 * ctypedef nvmlUUIDType_t _UUIDType
 * ctypedef nvmlEnableState_t _EnableState
 * ctypedef nvmlBrandType_t _BrandType             # <<<<<<<<<<<<<<
 * ctypedef nvmlTemperatureThresholds_t _TemperatureThresholds
 * ctypedef nvmlTemperatureSensors_t _TemperatureSensors
*/
typedef nvmlBrandType_t __pyx_t_4cuda_8bindings_5_nvml__BrandType;

/* "cuda/bindings/_nvml.pxd":88
 * ctypedef nvmlEnableState_t _EnableState
 * ctypedef nvmlBrandType_t _BrandType
 * ctypedef nvmlTemperatureThresholds_t _TemperatureThresholds             # <<<<<<<<<<<<<<
 * ctypedef nvmlTemperatureSensors_t _TemperatureSensors
 * ctypedef nvmlComputeMode_t _ComputeMode
*/
typedef nvmlTemperatureThresholds_t __pyx_t_4cuda_8bindings_5_nvml__TemperatureThresholds;

/* "cuda/bindings/_nvml.pxd":89
 * ctypedef nvmlBrandType_t _BrandType
 * ctypedef nvmlTemperatureThresholds_t _TemperatureThresholds
 * ctypedef nvmlTemperatureSensors_t _TemperatureSensors             # <<<<<<<<<<<<<<
 * ctypedef nvmlComputeMode_t _ComputeMode
 * ctypedef nvmlMemoryErrorType_t _MemoryErrorType
*/
typedef nvmlTemperatureSensors_t __pyx_t_4cuda_8bindings_5_nvml__TemperatureSensors;

/* "cuda/bindings/_nvml.pxd":90
 * ctypedef nvmlTemperatureThresholds_t _TemperatureThresholds
 * ctypedef nvmlTemperatureSensors_t _TemperatureSensors
 * ctypedef nvmlComputeMode_t _ComputeMode             # <<<<<<<<<<<<<<
 * ctypedef nvmlMemoryErrorType_t _MemoryErrorType
 * ctypedef nvmlNvlinkVersion_t _NvlinkVersion
*/
typedef nvmlComputeMode_t __pyx_t_4cuda_8bindings_5_nvml__ComputeMode;

/* "cuda/bindings/_nvml.pxd":91
 * ctypedef nvmlTemperatureSensors_t _TemperatureSensors
 * ctypedef nvmlComputeMode_t _ComputeMode
 * ctypedef nvmlMemoryErrorType_t _MemoryErrorType             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkVersion_t _NvlinkVersion
 * ctypedef nvmlEccCounterType_t _EccCounterType
*/
typedef nvmlMemoryErrorType_t __pyx_t_4cuda_8bindings_5_nvml__MemoryErrorType;

/* "cuda/bindings/_nvml.pxd":92
 * ctypedef nvmlComputeMode_t _ComputeMode
 * ctypedef nvmlMemoryErrorType_t _MemoryErrorType
 * ctypedef nvmlNvlinkVersion_t _NvlinkVersion             # <<<<<<<<<<<<<<
 * ctypedef nvmlEccCounterType_t _EccCounterType
 * ctypedef nvmlClockType_t _ClockType
*/
typedef nvmlNvlinkVersion_t __pyx_t_4cuda_8bindings_5_nvml__NvlinkVersion;

/* "cuda/bindings/_nvml.pxd":93
 * ctypedef nvmlMemoryErrorType_t _MemoryErrorType
 * ctypedef nvmlNvlinkVersion_t _NvlinkVersion
 * ctypedef nvmlEccCounterType_t _EccCounterType             # <<<<<<<<<<<<<<
 * ctypedef nvmlClockType_t _ClockType
 * ctypedef nvmlClockId_t _ClockId
*/
typedef nvmlEccCounterType_t __pyx_t_4cuda_8bindings_5_nvml__EccCounterType;

/* "cuda/bindings/_nvml.pxd":94
 * ctypedef nvmlNvlinkVersion_t _NvlinkVersion
 * ctypedef nvmlEccCounterType_t _EccCounterType
 * ctypedef nvmlClockType_t _ClockType             # <<<<<<<<<<<<<<
 * ctypedef nvmlClockId_t _ClockId
 * ctypedef nvmlDriverModel_t _DriverModel
*/
typedef nvmlClockType_t __pyx_t_4cuda_8bindings_5_nvml__ClockType;

/* "cuda/bindings/_nvml.pxd":95
 * ctypedef nvmlEccCounterType_t _EccCounterType
 * ctypedef nvmlClockType_t _ClockType
 * ctypedef nvmlClockId_t _ClockId             # <<<<<<<<<<<<<<
 * ctypedef nvmlDriverModel_t _DriverModel
 * ctypedef nvmlPstates_t _Pstates
*/
typedef nvmlClockId_t __pyx_t_4cuda_8bindings_5_nvml__ClockId;

/* "cuda/bindings/_nvml.pxd":96
 * ctypedef nvmlClockType_t _ClockType
 * ctypedef nvmlClockId_t _ClockId
 * ctypedef nvmlDriverModel_t _DriverModel             # <<<<<<<<<<<<<<
 * ctypedef nvmlPstates_t _Pstates
 * ctypedef nvmlGpuOperationMode_t _GpuOperationMode
*/
typedef nvmlDriverModel_t __pyx_t_4cuda_8bindings_5_nvml__DriverModel;

/* "cuda/bindings/_nvml.pxd":97
 * ctypedef nvmlClockId_t _ClockId
 * ctypedef nvmlDriverModel_t _DriverModel
 * ctypedef nvmlPstates_t _Pstates             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuOperationMode_t _GpuOperationMode
 * ctypedef nvmlInforomObject_t _InforomObject
*/
typedef nvmlPstates_t __pyx_t_4cuda_8bindings_5_nvml__Pstates;

/* "cuda/bindings/_nvml.pxd":98
 * ctypedef nvmlDriverModel_t _DriverModel
 * ctypedef nvmlPstates_t _Pstates
 * ctypedef nvmlGpuOperationMode_t _GpuOperationMode             # <<<<<<<<<<<<<<
 * ctypedef nvmlInforomObject_t _InforomObject
 * ctypedef nvmlReturn_t _Return
*/
typedef nvmlGpuOperationMode_t __pyx_t_4cuda_8bindings_5_nvml__GpuOperationMode;

/* "cuda/bindings/_nvml.pxd":99
 * ctypedef nvmlPstates_t _Pstates
 * ctypedef nvmlGpuOperationMode_t _GpuOperationMode
 * ctypedef nvmlInforomObject_t _InforomObject             # <<<<<<<<<<<<<<
 * ctypedef nvmlReturn_t _Return
 * ctypedef nvmlMemoryLocation_t _MemoryLocation
*/
typedef nvmlInforomObject_t __pyx_t_4cuda_8bindings_5_nvml__InforomObject;

/* "cuda/bindings/_nvml.pxd":100
 * ctypedef nvmlGpuOperationMode_t _GpuOperationMode
 * ctypedef nvmlInforomObject_t _InforomObject
 * ctypedef nvmlReturn_t _Return             # <<<<<<<<<<<<<<
 * ctypedef nvmlMemoryLocation_t _MemoryLocation
 * ctypedef nvmlPageRetirementCause_t _PageRetirementCause
*/
typedef nvmlReturn_t __pyx_t_4cuda_8bindings_5_nvml__Return;

/* "cuda/bindings/_nvml.pxd":101
 * ctypedef nvmlInforomObject_t _InforomObject
 * ctypedef nvmlReturn_t _Return
 * ctypedef nvmlMemoryLocation_t _MemoryLocation             # <<<<<<<<<<<<<<
 * ctypedef nvmlPageRetirementCause_t _PageRetirementCause
 * ctypedef nvmlRestrictedAPI_t _RestrictedAPI
*/
typedef nvmlMemoryLocation_t __pyx_t_4cuda_8bindings_5_nvml__MemoryLocation;

/* "cuda/bindings/_nvml.pxd":102
 * ctypedef nvmlReturn_t _Return
 * ctypedef nvmlMemoryLocation_t _MemoryLocation
 * ctypedef nvmlPageRetirementCause_t _PageRetirementCause             # <<<<<<<<<<<<<<
 * ctypedef nvmlRestrictedAPI_t _RestrictedAPI
 * ctypedef nvmlGpuUtilizationDomainId_t _GpuUtilizationDomainId
*/
typedef nvmlPageRetirementCause_t __pyx_t_4cuda_8bindings_5_nvml__PageRetirementCause;

/* "cuda/bindings/_nvml.pxd":103
 * ctypedef nvmlMemoryLocation_t _MemoryLocation
 * ctypedef nvmlPageRetirementCause_t _PageRetirementCause
 * ctypedef nvmlRestrictedAPI_t _RestrictedAPI             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuUtilizationDomainId_t _GpuUtilizationDomainId
 * ctypedef nvmlGpuVirtualizationMode_t _GpuVirtualizationMode
*/
typedef nvmlRestrictedAPI_t __pyx_t_4cuda_8bindings_5_nvml__RestrictedAPI;

/* "cuda/bindings/_nvml.pxd":104
 * ctypedef nvmlPageRetirementCause_t _PageRetirementCause
 * ctypedef nvmlRestrictedAPI_t _RestrictedAPI
 * ctypedef nvmlGpuUtilizationDomainId_t _GpuUtilizationDomainId             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuVirtualizationMode_t _GpuVirtualizationMode
 * ctypedef nvmlHostVgpuMode_t _HostVgpuMode
*/
typedef nvmlGpuUtilizationDomainId_t __pyx_t_4cuda_8bindings_5_nvml__GpuUtilizationDomainId;

/* "cuda/bindings/_nvml.pxd":105
 * ctypedef nvmlRestrictedAPI_t _RestrictedAPI
 * ctypedef nvmlGpuUtilizationDomainId_t _GpuUtilizationDomainId
 * ctypedef nvmlGpuVirtualizationMode_t _GpuVirtualizationMode             # <<<<<<<<<<<<<<
 * ctypedef nvmlHostVgpuMode_t _HostVgpuMode
 * ctypedef nvmlVgpuVmIdType_t _VgpuVmIdType
*/
typedef nvmlGpuVirtualizationMode_t __pyx_t_4cuda_8bindings_5_nvml__GpuVirtualizationMode;

/* "cuda/bindings/_nvml.pxd":106
 * ctypedef nvmlGpuUtilizationDomainId_t _GpuUtilizationDomainId
 * ctypedef nvmlGpuVirtualizationMode_t _GpuVirtualizationMode
 * ctypedef nvmlHostVgpuMode_t _HostVgpuMode             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuVmIdType_t _VgpuVmIdType
 * ctypedef nvmlVgpuGuestInfoState_t _VgpuGuestInfoState
*/
typedef nvmlHostVgpuMode_t __pyx_t_4cuda_8bindings_5_nvml__HostVgpuMode;

/* "cuda/bindings/_nvml.pxd":107
 * ctypedef nvmlGpuVirtualizationMode_t _GpuVirtualizationMode
 * ctypedef nvmlHostVgpuMode_t _HostVgpuMode
 * ctypedef nvmlVgpuVmIdType_t _VgpuVmIdType             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuGuestInfoState_t _VgpuGuestInfoState
 * ctypedef nvmlGridLicenseFeatureCode_t _GridLicenseFeatureCode
*/
typedef nvmlVgpuVmIdType_t __pyx_t_4cuda_8bindings_5_nvml__VgpuVmIdType;

/* "cuda/bindings/_nvml.pxd":108
 * ctypedef nvmlHostVgpuMode_t _HostVgpuMode
 * ctypedef nvmlVgpuVmIdType_t _VgpuVmIdType
 * ctypedef nvmlVgpuGuestInfoState_t _VgpuGuestInfoState             # <<<<<<<<<<<<<<
 * ctypedef nvmlGridLicenseFeatureCode_t _GridLicenseFeatureCode
 * ctypedef nvmlVgpuCapability_t _VgpuCapability
*/
typedef nvmlVgpuGuestInfoState_t __pyx_t_4cuda_8bindings_5_nvml__VgpuGuestInfoState;

/* "cuda/bindings/_nvml.pxd":109
 * ctypedef nvmlVgpuVmIdType_t _VgpuVmIdType
 * ctypedef nvmlVgpuGuestInfoState_t _VgpuGuestInfoState
 * ctypedef nvmlGridLicenseFeatureCode_t _GridLicenseFeatureCode             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuCapability_t _VgpuCapability
 * ctypedef nvmlVgpuDriverCapability_t _VgpuDriverCapability
*/
typedef nvmlGridLicenseFeatureCode_t __pyx_t_4cuda_8bindings_5_nvml__GridLicenseFeatureCode;

/* "cuda/bindings/_nvml.pxd":110
 * ctypedef nvmlVgpuGuestInfoState_t _VgpuGuestInfoState
 * ctypedef nvmlGridLicenseFeatureCode_t _GridLicenseFeatureCode
 * ctypedef nvmlVgpuCapability_t _VgpuCapability             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuDriverCapability_t _VgpuDriverCapability
 * ctypedef nvmlDeviceVgpuCapability_t _DeviceVgpuCapability
*/
typedef nvmlVgpuCapability_t __pyx_t_4cuda_8bindings_5_nvml__VgpuCapability;

/* "cuda/bindings/_nvml.pxd":111
 * ctypedef nvmlGridLicenseFeatureCode_t _GridLicenseFeatureCode
 * ctypedef nvmlVgpuCapability_t _VgpuCapability
 * ctypedef nvmlVgpuDriverCapability_t _VgpuDriverCapability             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceVgpuCapability_t _DeviceVgpuCapability
 * ctypedef nvmlDeviceGpuRecoveryAction_t _DeviceGpuRecoveryAction
*/
typedef nvmlVgpuDriverCapability_t __pyx_t_4cuda_8bindings_5_nvml__VgpuDriverCapability;

/* "cuda/bindings/_nvml.pxd":112
 * ctypedef nvmlVgpuCapability_t _VgpuCapability
 * ctypedef nvmlVgpuDriverCapability_t _VgpuDriverCapability
 * ctypedef nvmlDeviceVgpuCapability_t _DeviceVgpuCapability             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceGpuRecoveryAction_t _DeviceGpuRecoveryAction
 * ctypedef nvmlFanState_t _FanState
*/
typedef nvmlDeviceVgpuCapability_t __pyx_t_4cuda_8bindings_5_nvml__DeviceVgpuCapability;

/* "cuda/bindings/_nvml.pxd":113
 * ctypedef nvmlVgpuDriverCapability_t _VgpuDriverCapability
 * ctypedef nvmlDeviceVgpuCapability_t _DeviceVgpuCapability
 * ctypedef nvmlDeviceGpuRecoveryAction_t _DeviceGpuRecoveryAction             # <<<<<<<<<<<<<<
 * ctypedef nvmlFanState_t _FanState
 * ctypedef nvmlLedColor_t _LedColor
*/
typedef nvmlDeviceGpuRecoveryAction_t __pyx_t_4cuda_8bindings_5_nvml__DeviceGpuRecoveryAction;

/* "cuda/bindings/_nvml.pxd":114
 * ctypedef nvmlDeviceVgpuCapability_t _DeviceVgpuCapability
 * ctypedef nvmlDeviceGpuRecoveryAction_t _DeviceGpuRecoveryAction
 * ctypedef nvmlFanState_t _FanState             # <<<<<<<<<<<<<<
 * ctypedef nvmlLedColor_t _LedColor
 * ctypedef nvmlEncoderType_t _EncoderType
*/
typedef nvmlFanState_t __pyx_t_4cuda_8bindings_5_nvml__FanState;

/* "cuda/bindings/_nvml.pxd":115
 * ctypedef nvmlDeviceGpuRecoveryAction_t _DeviceGpuRecoveryAction
 * ctypedef nvmlFanState_t _FanState
 * ctypedef nvmlLedColor_t _LedColor             # <<<<<<<<<<<<<<
 * ctypedef nvmlEncoderType_t _EncoderType
 * ctypedef nvmlFBCSessionType_t _FBCSessionType
*/
typedef nvmlLedColor_t __pyx_t_4cuda_8bindings_5_nvml__LedColor;

/* "cuda/bindings/_nvml.pxd":116
 * ctypedef nvmlFanState_t _FanState
 * ctypedef nvmlLedColor_t _LedColor
 * ctypedef nvmlEncoderType_t _EncoderType             # <<<<<<<<<<<<<<
 * ctypedef nvmlFBCSessionType_t _FBCSessionType
 * ctypedef nvmlDetachGpuState_t _DetachGpuState
*/
typedef nvmlEncoderType_t __pyx_t_4cuda_8bindings_5_nvml__EncoderType;

/* "cuda/bindings/_nvml.pxd":117
 * ctypedef nvmlLedColor_t _LedColor
 * ctypedef nvmlEncoderType_t _EncoderType
 * ctypedef nvmlFBCSessionType_t _FBCSessionType             # <<<<<<<<<<<<<<
 * ctypedef nvmlDetachGpuState_t _DetachGpuState
 * ctypedef nvmlPcieLinkState_t _PcieLinkState
*/
typedef nvmlFBCSessionType_t __pyx_t_4cuda_8bindings_5_nvml__FBCSessionType;

/* "cuda/bindings/_nvml.pxd":118
 * ctypedef nvmlEncoderType_t _EncoderType
 * ctypedef nvmlFBCSessionType_t _FBCSessionType
 * ctypedef nvmlDetachGpuState_t _DetachGpuState             # <<<<<<<<<<<<<<
 * ctypedef nvmlPcieLinkState_t _PcieLinkState
 * ctypedef nvmlClockLimitId_t _ClockLimitId
*/
typedef nvmlDetachGpuState_t __pyx_t_4cuda_8bindings_5_nvml__DetachGpuState;

/* "cuda/bindings/_nvml.pxd":119
 * ctypedef nvmlFBCSessionType_t _FBCSessionType
 * ctypedef nvmlDetachGpuState_t _DetachGpuState
 * ctypedef nvmlPcieLinkState_t _PcieLinkState             # <<<<<<<<<<<<<<
 * ctypedef nvmlClockLimitId_t _ClockLimitId
 * ctypedef nvmlVgpuVmCompatibility_t _VgpuVmCompatibility
*/
typedef nvmlPcieLinkState_t __pyx_t_4cuda_8bindings_5_nvml__PcieLinkState;

/* "cuda/bindings/_nvml.pxd":120
 * ctypedef nvmlDetachGpuState_t _DetachGpuState
 * ctypedef nvmlPcieLinkState_t _PcieLinkState
 * ctypedef nvmlClockLimitId_t _ClockLimitId             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuVmCompatibility_t _VgpuVmCompatibility
 * ctypedef nvmlVgpuPgpuCompatibilityLimitCode_t _VgpuPgpuCompatibilityLimitCode
*/
typedef nvmlClockLimitId_t __pyx_t_4cuda_8bindings_5_nvml__ClockLimitId;

/* "cuda/bindings/_nvml.pxd":121
 * ctypedef nvmlPcieLinkState_t _PcieLinkState
 * ctypedef nvmlClockLimitId_t _ClockLimitId
 * ctypedef nvmlVgpuVmCompatibility_t _VgpuVmCompatibility             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuPgpuCompatibilityLimitCode_t _VgpuPgpuCompatibilityLimitCode
 * ctypedef nvmlGpmMetricId_t _GpmMetricId
*/
typedef nvmlVgpuVmCompatibility_t __pyx_t_4cuda_8bindings_5_nvml__VgpuVmCompatibility;

/* "cuda/bindings/_nvml.pxd":122
 * ctypedef nvmlClockLimitId_t _ClockLimitId
 * ctypedef nvmlVgpuVmCompatibility_t _VgpuVmCompatibility
 * ctypedef nvmlVgpuPgpuCompatibilityLimitCode_t _VgpuPgpuCompatibilityLimitCode             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpmMetricId_t _GpmMetricId
 * ctypedef nvmlPowerProfileType_t _PowerProfileType
*/
typedef nvmlVgpuPgpuCompatibilityLimitCode_t __pyx_t_4cuda_8bindings_5_nvml__VgpuPgpuCompatibilityLimitCode;

/* "cuda/bindings/_nvml.pxd":123
 * ctypedef nvmlVgpuVmCompatibility_t _VgpuVmCompatibility
 * ctypedef nvmlVgpuPgpuCompatibilityLimitCode_t _VgpuPgpuCompatibilityLimitCode
 * ctypedef nvmlGpmMetricId_t _GpmMetricId             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerProfileType_t _PowerProfileType
 * ctypedef nvmlDeviceAddressingModeType_t _DeviceAddressingModeType
*/
typedef nvmlGpmMetricId_t __pyx_t_4cuda_8bindings_5_nvml__GpmMetricId;

/* "cuda/bindings/_nvml.pxd":124
 * ctypedef nvmlVgpuPgpuCompatibilityLimitCode_t _VgpuPgpuCompatibilityLimitCode
 * ctypedef nvmlGpmMetricId_t _GpmMetricId
 * ctypedef nvmlPowerProfileType_t _PowerProfileType             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceAddressingModeType_t _DeviceAddressingModeType
 * 
*/
typedef nvmlPowerProfileType_t __pyx_t_4cuda_8bindings_5_nvml__PowerProfileType;

/* "cuda/bindings/_nvml.pxd":125
 * ctypedef nvmlGpmMetricId_t _GpmMetricId
 * ctypedef nvmlPowerProfileType_t _PowerProfileType
 * ctypedef nvmlDeviceAddressingModeType_t _DeviceAddressingModeType             # <<<<<<<<<<<<<<
 * 
 * 
*/
typedef nvmlDeviceAddressingModeType_t __pyx_t_4cuda_8bindings_5_nvml__DeviceAddressingModeType;

/* "cuda/bindings/_nvml.pyx":1354
 * pci_info_ext_v1_dtype = _get_pci_info_ext_v1_dtype_offsets()
 * 
 * cdef class PciInfoExt_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPciInfoExt_v1_t`.
 * 
*/
/* Instance layout for the `PciInfoExt_v1` cdef class: a thin wrapper around an
 * `nvmlPciInfoExt_v1_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlPciInfoExt_v1_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":1572
 * pci_info_dtype = _get_pci_info_dtype_offsets()
 * 
 * cdef class PciInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPciInfo_t`.
 * 
*/
/* Instance layout for the `PciInfo` cdef class: wraps an `nvmlPciInfo_t`
 * (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlPciInfo_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":1767
 * utilization_dtype = _get_utilization_dtype_offsets()
 * 
 * cdef class Utilization:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlUtilization_t`.
 * 
*/
/* Instance layout for the `Utilization` cdef class: wraps an `nvmlUtilization_t`
 * (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Utilization *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlUtilization_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":1900
 * memory_dtype = _get_memory_dtype_offsets()
 * 
 * cdef class Memory:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlMemory_t`.
 * 
*/
/* Instance layout for the `Memory` cdef class: wraps an `nvmlMemory_t`
 * (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_Memory {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlMemory_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":2046
 * memory_v2_dtype = _get_memory_v2_dtype_offsets()
 * 
 * cdef class Memory_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlMemory_v2_t`.
 * 
*/
/* Instance layout for the `Memory_v2` cdef class: wraps an `nvmlMemory_v2_t`
 * (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlMemory_v2_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":2201
 * ba_r1memory_dtype = _get_ba_r1memory_dtype_offsets()
 * 
 * cdef class BAR1Memory:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlBAR1Memory_t`.
 * 
*/
/* Instance layout for the `BAR1Memory` cdef class: wraps an `nvmlBAR1Memory_t`
 * (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlBAR1Memory_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":2346
 * process_info_dtype = _get_process_info_dtype_offsets()
 * 
 * cdef class ProcessInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessInfo_t`.
 * 
*/
/* Instance layout for the `ProcessInfo` cdef class: wraps an array of
 * `nvmlProcessInfo_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_vtab; /* Cython vtable for cdef methods */
  PyObject *_data; /* NOTE(review): presumably the Python buffer backing the array -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":2517
 * process_detail_v1_dtype = _get_process_detail_v1_dtype_offsets()
 * 
 * cdef class ProcessDetail_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessDetail_v1_t`.
 * 
*/
/* Instance layout for the `ProcessDetail_v1` cdef class: wraps an array of
 * `nvmlProcessDetail_v1_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_vtab; /* Cython vtable for cdef methods */
  PyObject *_data; /* NOTE(review): presumably the Python buffer backing the array -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":2703
 * device_attributes_dtype = _get_device_attributes_dtype_offsets()
 * 
 * cdef class DeviceAttributes:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceAttributes_t`.
 * 
*/
/* Instance layout for the `DeviceAttributes` cdef class: wraps an
 * `nvmlDeviceAttributes_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlDeviceAttributes_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":2911
 * c2c_mode_info_v1_dtype = _get_c2c_mode_info_v1_dtype_offsets()
 * 
 * cdef class C2cModeInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlC2cModeInfo_v1_t`.
 * 
*/
/* Instance layout for the `C2cModeInfo_v1` cdef class: wraps an
 * `nvmlC2cModeInfo_v1_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlC2cModeInfo_v1_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3035
 * row_remapper_histogram_values_dtype = _get_row_remapper_histogram_values_dtype_offsets()
 * 
 * cdef class RowRemapperHistogramValues:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlRowRemapperHistogramValues_t`.
 * 
*/
/* Instance layout for the `RowRemapperHistogramValues` cdef class: wraps an
 * `nvmlRowRemapperHistogramValues_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlRowRemapperHistogramValues_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3200
 * bridge_chip_info_dtype = _get_bridge_chip_info_dtype_offsets()
 * 
 * cdef class BridgeChipInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlBridgeChipInfo_t`.
 * 
*/
/* Instance layout for the `BridgeChipInfo` cdef class: wraps an array of
 * `nvmlBridgeChipInfo_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_vtab; /* Cython vtable for cdef methods */
  PyObject *_data; /* NOTE(review): presumably the Python buffer backing the array -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3346
 * 
 * 
 * cdef class Value:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlValue_t`.
 * 
*/
/* Instance layout for the `Value` cdef class: wraps an `nvmlValue_t`
 * (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_Value {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Value *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlValue_t *_ptr; /* the wrapped NVML union/struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3536
 * _py_anon_pod0_dtype = _get__py_anon_pod0_dtype_offsets()
 * 
 * cdef class _py_anon_pod0:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod0`.
 * 
*/
/* Instance layout for the `_py_anon_pod0` cdef class: wraps an `_anon_pod0`
 * (a generated name for an anonymous POD, per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_vtab; /* Cython vtable for cdef methods */
  _anon_pod0 *_ptr; /* the wrapped anonymous POD struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3703
 * cooler_info_v1_dtype = _get_cooler_info_v1_dtype_offsets()
 * 
 * cdef class CoolerInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlCoolerInfo_v1_t`.
 * 
*/
/* Instance layout for the `CoolerInfo_v1` cdef class: wraps an
 * `nvmlCoolerInfo_v1_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlCoolerInfo_v1_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3857
 * margin_temperature_v1_dtype = _get_margin_temperature_v1_dtype_offsets()
 * 
 * cdef class MarginTemperature_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlMarginTemperature_v1_t`.
 * 
*/
/* Instance layout for the `MarginTemperature_v1` cdef class: wraps an
 * `nvmlMarginTemperature_v1_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlMarginTemperature_v1_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":3989
 * clk_mon_fault_info_dtype = _get_clk_mon_fault_info_dtype_offsets()
 * 
 * cdef class ClkMonFaultInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlClkMonFaultInfo_t`.
 * 
*/
/* Instance layout for the `ClkMonFaultInfo` cdef class: wraps an array of
 * `nvmlClkMonFaultInfo_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_vtab; /* Cython vtable for cdef methods */
  PyObject *_data; /* NOTE(review): presumably the Python buffer backing the array -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":4139
 * clock_offset_v1_dtype = _get_clock_offset_v1_dtype_offsets()
 * 
 * cdef class ClockOffset_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlClockOffset_v1_t`.
 * 
*/
/* Instance layout for the `ClockOffset_v1` cdef class: wraps an
 * `nvmlClockOffset_v1_t` (per the .pyx docstring above). Generated by Cython. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 {
  PyObject_HEAD /* standard CPython object header */
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_vtab; /* Cython vtable for cdef methods */
  nvmlClockOffset_v1_t *_ptr; /* the wrapped NVML struct */
  PyObject *_owner; /* NOTE(review): presumably the Python object keeping *_ptr alive -- confirm in _nvml.pyx */
  int _owned; /* NOTE(review): likely "this instance owns/frees _ptr" flag -- confirm in _nvml.pyx */
  int _readonly; /* NOTE(review): likely write-protection flag -- confirm in _nvml.pyx */
};


/* "cuda/bindings/_nvml.pyx":4316
 * fan_speed_info_v1_dtype = _get_fan_speed_info_v1_dtype_offsets()
 * 
 * cdef class FanSpeedInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlFanSpeedInfo_v1_t`.
 * 
*/
/* Generated layout for extension type `FanSpeedInfo_v1`: pointer view over a
 * `nvmlFanSpeedInfo_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_vtab;
  nvmlFanSpeedInfo_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":4459
 * device_perf_modes_v1_dtype = _get_device_perf_modes_v1_dtype_offsets()
 * 
 * cdef class DevicePerfModes_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDevicePerfModes_v1_t`.
 * 
*/
/* Generated layout for extension type `DevicePerfModes_v1`: pointer view over
 * a `nvmlDevicePerfModes_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_vtab;
  nvmlDevicePerfModes_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":4595
 * device_current_clock_freqs_v1_dtype = _get_device_current_clock_freqs_v1_dtype_offsets()
 * 
 * cdef class DeviceCurrentClockFreqs_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceCurrentClockFreqs_v1_t`.
 * 
*/
/* Generated layout for extension type `DeviceCurrentClockFreqs_v1`: pointer
 * view over a `nvmlDeviceCurrentClockFreqs_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_vtab;
  nvmlDeviceCurrentClockFreqs_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":4735
 * process_utilization_sample_dtype = _get_process_utilization_sample_dtype_offsets()
 * 
 * cdef class ProcessUtilizationSample:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessUtilizationSample_t`.
 * 
*/
/* Generated layout for extension type `ProcessUtilizationSample`: array
 * wrapper for `nvmlProcessUtilizationSample_t`; `_data` presumably holds the
 * backing Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":4931
 * process_utilization_info_v1_dtype = _get_process_utilization_info_v1_dtype_offsets()
 * 
 * cdef class ProcessUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessUtilizationInfo_v1_t`.
 * 
*/
/* Generated layout for extension type `ProcessUtilizationInfo_v1`: array
 * wrapper for `nvmlProcessUtilizationInfo_v1_t`; `_data` presumably holds the
 * backing Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":5154
 * ecc_sram_error_status_v1_dtype = _get_ecc_sram_error_status_v1_dtype_offsets()
 * 
 * cdef class EccSramErrorStatus_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlEccSramErrorStatus_v1_t`.
 * 
*/
/* Generated layout for extension type `EccSramErrorStatus_v1`: pointer view
 * over a `nvmlEccSramErrorStatus_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_vtab;
  nvmlEccSramErrorStatus_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":5413
 * platform_info_v2_dtype = _get_platform_info_v2_dtype_offsets()
 * 
 * cdef class PlatformInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPlatformInfo_v2_t`.
 * 
*/
/* Generated layout for extension type `PlatformInfo_v2`: pointer view over a
 * `nvmlPlatformInfo_v2_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_vtab;
  nvmlPlatformInfo_v2_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":5621
 * _py_anon_pod1_dtype = _get__py_anon_pod1_dtype_offsets()
 * 
 * cdef class _py_anon_pod1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod1`.
 * 
*/
/* Generated layout for extension type `_py_anon_pod1`: pointer view over the
 * anonymous POD struct `_anon_pod1` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_vtab;
  _anon_pod1 *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":5775
 * vgpu_heterogeneous_mode_v1_dtype = _get_vgpu_heterogeneous_mode_v1_dtype_offsets()
 * 
 * cdef class VgpuHeterogeneousMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuHeterogeneousMode_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuHeterogeneousMode_v1`: pointer
 * view over a `nvmlVgpuHeterogeneousMode_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_vtab;
  nvmlVgpuHeterogeneousMode_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":5907
 * vgpu_placement_id_v1_dtype = _get_vgpu_placement_id_v1_dtype_offsets()
 * 
 * cdef class VgpuPlacementId_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPlacementId_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuPlacementId_v1`: pointer view over
 * a `nvmlVgpuPlacementId_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_vtab;
  nvmlVgpuPlacementId_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":6042
 * vgpu_placement_list_v2_dtype = _get_vgpu_placement_list_v2_dtype_offsets()
 * 
 * cdef class VgpuPlacementList_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPlacementList_v2_t`.
 * 
*/
/* Generated layout for extension type `VgpuPlacementList_v2`: pointer view
 * over a `nvmlVgpuPlacementList_v2_t`; the extra `_refs` slot presumably
 * retains Python objects backing nested pointers (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_vtab;
  nvmlVgpuPlacementList_v2_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
  PyObject *_refs;
};


/* "cuda/bindings/_nvml.pyx":6207
 * vgpu_type_bar1info_v1_dtype = _get_vgpu_type_bar1info_v1_dtype_offsets()
 * 
 * cdef class VgpuTypeBar1Info_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuTypeBar1Info_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuTypeBar1Info_v1`: pointer view
 * over a `nvmlVgpuTypeBar1Info_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_vtab;
  nvmlVgpuTypeBar1Info_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":6347
 * vgpu_process_utilization_info_v1_dtype = _get_vgpu_process_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuProcessUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlVgpuProcessUtilizationInfo_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuProcessUtilizationInfo_v1`: array
 * wrapper for `nvmlVgpuProcessUtilizationInfo_v1_t`; `_data` presumably holds
 * the backing Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":6579
 * vgpu_runtime_state_v1_dtype = _get_vgpu_runtime_state_v1_dtype_offsets()
 * 
 * cdef class VgpuRuntimeState_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuRuntimeState_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuRuntimeState_v1`: pointer view
 * over a `nvmlVgpuRuntimeState_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_vtab;
  nvmlVgpuRuntimeState_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":6711
 * _py_anon_pod2_dtype = _get__py_anon_pod2_dtype_offsets()
 * 
 * cdef class _py_anon_pod2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod2`.
 * 
*/
/* Generated layout for extension type `_py_anon_pod2`: pointer view over the
 * anonymous POD struct `_anon_pod2` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_vtab;
  _anon_pod2 *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":6842
 * _py_anon_pod3_dtype = _get__py_anon_pod3_dtype_offsets()
 * 
 * cdef class _py_anon_pod3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod3`.
 * 
*/
/* Generated layout for extension type `_py_anon_pod3`: pointer view over the
 * anonymous POD struct `_anon_pod3` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_vtab;
  _anon_pod3 *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":6967
 * vgpu_scheduler_log_entry_dtype = _get_vgpu_scheduler_log_entry_dtype_offsets()
 * 
 * cdef class VgpuSchedulerLogEntry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlVgpuSchedulerLogEntry_t`.
 * 
*/
/* Generated layout for extension type `VgpuSchedulerLogEntry`: array wrapper
 * for `nvmlVgpuSchedulerLogEntry_t`; `_data` presumably holds the backing
 * Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":7157
 * _py_anon_pod4_dtype = _get__py_anon_pod4_dtype_offsets()
 * 
 * cdef class _py_anon_pod4:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod4`.
 * 
*/
/* Generated layout for extension type `_py_anon_pod4`: pointer view over the
 * anonymous POD struct `_anon_pod4` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_vtab;
  _anon_pod4 *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":7288
 * _py_anon_pod5_dtype = _get__py_anon_pod5_dtype_offsets()
 * 
 * cdef class _py_anon_pod5:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod5`.
 * 
*/
/* Generated layout for extension type `_py_anon_pod5`: pointer view over the
 * anonymous POD struct `_anon_pod5` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_vtab;
  _anon_pod5 *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":7415
 * vgpu_scheduler_capabilities_dtype = _get_vgpu_scheduler_capabilities_dtype_offsets()
 * 
 * cdef class VgpuSchedulerCapabilities:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerCapabilities_t`.
 * 
*/
/* Generated layout for extension type `VgpuSchedulerCapabilities`: pointer
 * view over a `nvmlVgpuSchedulerCapabilities_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_vtab;
  nvmlVgpuSchedulerCapabilities_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":7622
 * vgpu_license_expiry_dtype = _get_vgpu_license_expiry_dtype_offsets()
 * 
 * cdef class VgpuLicenseExpiry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuLicenseExpiry_t`.
 * 
*/
/* Generated layout for extension type `VgpuLicenseExpiry`: pointer view over
 * a `nvmlVgpuLicenseExpiry_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_vtab;
  nvmlVgpuLicenseExpiry_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":7814
 * grid_license_expiry_dtype = _get_grid_license_expiry_dtype_offsets()
 * 
 * cdef class GridLicenseExpiry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGridLicenseExpiry_t`.
 * 
*/
/* Generated layout for extension type `GridLicenseExpiry`: pointer view over
 * a `nvmlGridLicenseExpiry_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_vtab;
  nvmlGridLicenseExpiry_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":8002
 * vgpu_type_id_info_v1_dtype = _get_vgpu_type_id_info_v1_dtype_offsets()
 * 
 * cdef class VgpuTypeIdInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuTypeIdInfo_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuTypeIdInfo_v1`: pointer view over
 * a `nvmlVgpuTypeIdInfo_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_vtab;
  nvmlVgpuTypeIdInfo_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":8135
 * vgpu_type_max_instance_v1_dtype = _get_vgpu_type_max_instance_v1_dtype_offsets()
 * 
 * cdef class VgpuTypeMaxInstance_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuTypeMaxInstance_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuTypeMaxInstance_v1`: pointer view
 * over a `nvmlVgpuTypeMaxInstance_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_vtab;
  nvmlVgpuTypeMaxInstance_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":8279
 * active_vgpu_instance_info_v1_dtype = _get_active_vgpu_instance_info_v1_dtype_offsets()
 * 
 * cdef class ActiveVgpuInstanceInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlActiveVgpuInstanceInfo_v1_t`.
 * 
*/
/* Generated layout for extension type `ActiveVgpuInstanceInfo_v1`: pointer
 * view over a `nvmlActiveVgpuInstanceInfo_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_vtab;
  nvmlActiveVgpuInstanceInfo_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":8414
 * vgpu_creatable_placement_info_v1_dtype = _get_vgpu_creatable_placement_info_v1_dtype_offsets()
 * 
 * cdef class VgpuCreatablePlacementInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuCreatablePlacementInfo_v1_t`.
 * 
*/
/* Generated layout for extension type `VgpuCreatablePlacementInfo_v1`:
 * pointer view over a `nvmlVgpuCreatablePlacementInfo_v1_t`; the extra
 * `_refs` slot presumably retains Python objects backing nested pointers
 * (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_vtab;
  nvmlVgpuCreatablePlacementInfo_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
  PyObject *_refs;
};


/* "cuda/bindings/_nvml.pyx":8579
 * hwbc_entry_dtype = _get_hwbc_entry_dtype_offsets()
 * 
 * cdef class HwbcEntry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlHwbcEntry_t`.
 * 
*/
/* Generated layout for extension type `HwbcEntry`: array wrapper for
 * `nvmlHwbcEntry_t`; `_data` presumably holds the backing Python buffer
 * object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":8723
 * led_state_dtype = _get_led_state_dtype_offsets()
 * 
 * cdef class LedState:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlLedState_t`.
 * 
*/
/* Generated layout for extension type `LedState`: pointer view over a
 * `nvmlLedState_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_LedState {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_LedState *__pyx_vtab;
  nvmlLedState_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":8861
 * unit_info_dtype = _get_unit_info_dtype_offsets()
 * 
 * cdef class UnitInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlUnitInfo_t`.
 * 
*/
/* Generated layout for extension type `UnitInfo`: pointer view over a
 * `nvmlUnitInfo_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitInfo *__pyx_vtab;
  nvmlUnitInfo_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":9033
 * psu_info_dtype = _get_psu_info_dtype_offsets()
 * 
 * cdef class PSUInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPSUInfo_t`.
 * 
*/
/* Generated layout for extension type `PSUInfo`: pointer view over a
 * `nvmlPSUInfo_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PSUInfo *__pyx_vtab;
  nvmlPSUInfo_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":9191
 * unit_fan_info_dtype = _get_unit_fan_info_dtype_offsets()
 * 
 * cdef class UnitFanInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlUnitFanInfo_t`.
 * 
*/
/* Generated layout for extension type `UnitFanInfo`: array wrapper for
 * `nvmlUnitFanInfo_t`; `_data` presumably holds the backing Python buffer
 * object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":9340
 * event_data_dtype = _get_event_data_dtype_offsets()
 * 
 * cdef class EventData:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlEventData_t`.
 * 
*/
/* Generated layout for extension type `EventData`: pointer view over a
 * `nvmlEventData_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_EventData {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EventData *__pyx_vtab;
  nvmlEventData_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":9510
 * accounting_stats_dtype = _get_accounting_stats_dtype_offsets()
 * 
 * cdef class AccountingStats:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlAccountingStats_t`.
 * 
*/
/* Generated layout for extension type `AccountingStats`: pointer view over a
 * `nvmlAccountingStats_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_AccountingStats *__pyx_vtab;
  nvmlAccountingStats_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":9692
 * encoder_session_info_dtype = _get_encoder_session_info_dtype_offsets()
 * 
 * cdef class EncoderSessionInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlEncoderSessionInfo_t`.
 * 
*/
/* Generated layout for extension type `EncoderSessionInfo`: array wrapper for
 * `nvmlEncoderSessionInfo_t`; `_data` presumably holds the backing Python
 * buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":9905
 * fbc_stats_dtype = _get_fbc_stats_dtype_offsets()
 * 
 * cdef class FBCStats:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlFBCStats_t`.
 * 
*/
/* Generated layout for extension type `FBCStats`: pointer view over a
 * `nvmlFBCStats_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCStats *__pyx_vtab;
  nvmlFBCStats_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":10058
 * fbc_session_info_dtype = _get_fbc_session_info_dtype_offsets()
 * 
 * cdef class FBCSessionInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlFBCSessionInfo_t`.
 * 
*/
/* Generated layout for extension type `FBCSessionInfo`: array wrapper for
 * `nvmlFBCSessionInfo_t`; `_data` presumably holds the backing Python buffer
 * object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":10314
 * conf_compute_system_caps_dtype = _get_conf_compute_system_caps_dtype_offsets()
 * 
 * cdef class ConfComputeSystemCaps:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeSystemCaps_t`.
 * 
*/
/* Generated layout for extension type `ConfComputeSystemCaps`: pointer view
 * over a `nvmlConfComputeSystemCaps_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_vtab;
  nvmlConfComputeSystemCaps_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":10447
 * conf_compute_system_state_dtype = _get_conf_compute_system_state_dtype_offsets()
 * 
 * cdef class ConfComputeSystemState:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeSystemState_t`.
 * 
*/
/* Generated layout for extension type `ConfComputeSystemState`: pointer view
 * over a `nvmlConfComputeSystemState_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_vtab;
  nvmlConfComputeSystemState_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":10593
 * system_conf_compute_settings_v1_dtype = _get_system_conf_compute_settings_v1_dtype_offsets()
 * 
 * cdef class SystemConfComputeSettings_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlSystemConfComputeSettings_v1_t`.
 * 
*/
/* Generated layout for extension type `SystemConfComputeSettings_v1`: pointer
 * view over a `nvmlSystemConfComputeSettings_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_vtab;
  nvmlSystemConfComputeSettings_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":10758
 * conf_compute_mem_size_info_dtype = _get_conf_compute_mem_size_info_dtype_offsets()
 * 
 * cdef class ConfComputeMemSizeInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeMemSizeInfo_t`.
 * 
*/
/* Generated layout for extension type `ConfComputeMemSizeInfo`: pointer view
 * over a `nvmlConfComputeMemSizeInfo_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_vtab;
  nvmlConfComputeMemSizeInfo_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":10892
 * conf_compute_gpu_certificate_dtype = _get_conf_compute_gpu_certificate_dtype_offsets()
 * 
 * cdef class ConfComputeGpuCertificate:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeGpuCertificate_t`.
 * 
*/
/* Generated layout for extension type `ConfComputeGpuCertificate`: pointer
 * view over a `nvmlConfComputeGpuCertificate_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_vtab;
  nvmlConfComputeGpuCertificate_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11058
 * conf_compute_gpu_attestation_report_dtype = _get_conf_compute_gpu_attestation_report_dtype_offsets()
 * 
 * cdef class ConfComputeGpuAttestationReport:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeGpuAttestationReport_t`.
 * 
*/
/* Generated layout for extension type `ConfComputeGpuAttestationReport`:
 * pointer view over a `nvmlConfComputeGpuAttestationReport_t` (semantics in
 * _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_vtab;
  nvmlConfComputeGpuAttestationReport_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11246
 * conf_compute_get_key_rotation_threshold_info_v1_dtype = _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets()
 * 
 * cdef class ConfComputeGetKeyRotationThresholdInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`.
 * 
*/
/* Generated layout for extension type
 * `ConfComputeGetKeyRotationThresholdInfo_v1`: pointer view over a
 * `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_vtab;
  nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11379
 * nvlink_supported_bw_modes_v1_dtype = _get_nvlink_supported_bw_modes_v1_dtype_offsets()
 * 
 * cdef class NvlinkSupportedBwModes_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkSupportedBwModes_v1_t`.
 * 
*/
/* Generated layout for extension type `NvlinkSupportedBwModes_v1`: pointer
 * view over a `nvmlNvlinkSupportedBwModes_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_vtab;
  nvmlNvlinkSupportedBwModes_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11527
 * nvlink_get_bw_mode_v1_dtype = _get_nvlink_get_bw_mode_v1_dtype_offsets()
 * 
 * cdef class NvlinkGetBwMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkGetBwMode_v1_t`.
 * 
*/
/* Generated layout for extension type `NvlinkGetBwMode_v1`: pointer view over
 * a `nvmlNvlinkGetBwMode_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_vtab;
  nvmlNvlinkGetBwMode_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11671
 * nvlink_set_bw_mode_v1_dtype = _get_nvlink_set_bw_mode_v1_dtype_offsets()
 * 
 * cdef class NvlinkSetBwMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkSetBwMode_v1_t`.
 * 
*/
/* Generated layout for extension type `NvlinkSetBwMode_v1`: pointer view over
 * a `nvmlNvlinkSetBwMode_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_vtab;
  nvmlNvlinkSetBwMode_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11814
 * vgpu_version_dtype = _get_vgpu_version_dtype_offsets()
 * 
 * cdef class VgpuVersion:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuVersion_t`.
 * 
*/
/* Generated layout for extension type `VgpuVersion`: pointer view over a
 * `nvmlVgpuVersion_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_vtab;
  nvmlVgpuVersion_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":11954
 * vgpu_metadata_dtype = _get_vgpu_metadata_dtype_offsets()
 * 
 * cdef class VgpuMetadata:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuMetadata_t`.
 * 
*/
/* Generated layout for extension type `VgpuMetadata`: pointer view over a
 * `nvmlVgpuMetadata_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_vtab;
  nvmlVgpuMetadata_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":12175
 * vgpu_pgpu_compatibility_dtype = _get_vgpu_pgpu_compatibility_dtype_offsets()
 * 
 * cdef class VgpuPgpuCompatibility:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPgpuCompatibility_t`.
 * 
*/
/* Generated layout for extension type `VgpuPgpuCompatibility`: pointer view
 * over a `nvmlVgpuPgpuCompatibility_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_vtab;
  nvmlVgpuPgpuCompatibility_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":12307
 * gpu_instance_placement_dtype = _get_gpu_instance_placement_dtype_offsets()
 * 
 * cdef class GpuInstancePlacement:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlGpuInstancePlacement_t`.
 * 
*/
/* Generated layout for extension type `GpuInstancePlacement`: array wrapper
 * for `nvmlGpuInstancePlacement_t`; `_data` presumably holds the backing
 * Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":12464
 * gpu_instance_profile_info_v2_dtype = _get_gpu_instance_profile_info_v2_dtype_offsets()
 * 
 * cdef class GpuInstanceProfileInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuInstanceProfileInfo_v2_t`.
 * 
*/
/* Generated layout for extension type `GpuInstanceProfileInfo_v2`: pointer
 * view over a `nvmlGpuInstanceProfileInfo_v2_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_vtab;
  nvmlGpuInstanceProfileInfo_v2_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":12732
 * gpu_instance_profile_info_v3_dtype = _get_gpu_instance_profile_info_v3_dtype_offsets()
 * 
 * cdef class GpuInstanceProfileInfo_v3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuInstanceProfileInfo_v3_t`.
 * 
*/
/* Generated layout for extension type `GpuInstanceProfileInfo_v3`: pointer
 * view over a `nvmlGpuInstanceProfileInfo_v3_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_vtab;
  nvmlGpuInstanceProfileInfo_v3_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":12989
 * compute_instance_placement_dtype = _get_compute_instance_placement_dtype_offsets()
 * 
 * cdef class ComputeInstancePlacement:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlComputeInstancePlacement_t`.
 * 
*/
/* Generated layout for extension type `ComputeInstancePlacement`: array
 * wrapper for `nvmlComputeInstancePlacement_t`; `_data` presumably holds the
 * backing Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":13144
 * compute_instance_profile_info_v2_dtype = _get_compute_instance_profile_info_v2_dtype_offsets()
 * 
 * cdef class ComputeInstanceProfileInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v2_t`.
 * 
*/
/* Generated layout for extension type `ComputeInstanceProfileInfo_v2`:
 * pointer view over a `nvmlComputeInstanceProfileInfo_v2_t` (semantics in
 * _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_vtab;
  nvmlComputeInstanceProfileInfo_v2_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":13389
 * compute_instance_profile_info_v3_dtype = _get_compute_instance_profile_info_v3_dtype_offsets()
 * 
 * cdef class ComputeInstanceProfileInfo_v3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v3_t`.
 * 
*/
/* Generated layout for extension type `ComputeInstanceProfileInfo_v3`:
 * pointer view over a `nvmlComputeInstanceProfileInfo_v3_t` (semantics in
 * _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_vtab;
  nvmlComputeInstanceProfileInfo_v3_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":13635
 * gpm_support_dtype = _get_gpm_support_dtype_offsets()
 * 
 * cdef class GpmSupport:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpmSupport_t`.
 * 
*/
/* Generated layout for extension type `GpmSupport`: pointer view over a
 * `nvmlGpmSupport_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpmSupport *__pyx_vtab;
  nvmlGpmSupport_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":13767
 * device_capabilities_v1_dtype = _get_device_capabilities_v1_dtype_offsets()
 * 
 * cdef class DeviceCapabilities_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceCapabilities_v1_t`.
 * 
*/
/* Generated layout for extension type `DeviceCapabilities_v1`: pointer view
 * over a `nvmlDeviceCapabilities_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_vtab;
  nvmlDeviceCapabilities_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":13899
 * device_addressing_mode_v1_dtype = _get_device_addressing_mode_v1_dtype_offsets()
 * 
 * cdef class DeviceAddressingMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceAddressingMode_v1_t`.
 * 
*/
/* Generated layout for extension type `DeviceAddressingMode_v1`: pointer view
 * over a `nvmlDeviceAddressingMode_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_vtab;
  nvmlDeviceAddressingMode_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":14032
 * repair_status_v1_dtype = _get_repair_status_v1_dtype_offsets()
 * 
 * cdef class RepairStatus_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlRepairStatus_v1_t`.
 * 
*/
/* Generated layout for extension type `RepairStatus_v1`: pointer view over a
 * `nvmlRepairStatus_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_vtab;
  nvmlRepairStatus_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":14175
 * pdi_v1_dtype = _get_pdi_v1_dtype_offsets()
 * 
 * cdef class Pdi_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPdi_v1_t`.
 * 
*/
/* Generated layout for extension type `Pdi_v1`: pointer view over a
 * `nvmlPdi_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_vtab;
  nvmlPdi_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":14308
 * device_power_mizer_modes_v1_dtype = _get_device_power_mizer_modes_v1_dtype_offsets()
 * 
 * cdef class DevicePowerMizerModes_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDevicePowerMizerModes_v1_t`.
 * 
*/
/* Generated layout for extension type `DevicePowerMizerModes_v1`: pointer
 * view over a `nvmlDevicePowerMizerModes_v1_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_vtab;
  nvmlDevicePowerMizerModes_v1_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":14456
 * ecc_sram_unique_uncorrected_error_entry_v1_dtype = _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets()
 * 
 * cdef class EccSramUniqueUncorrectedErrorEntry_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`.
 * 
*/
/* Generated layout for extension type `EccSramUniqueUncorrectedErrorEntry_v1`:
 * array wrapper for `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`; `_data`
 * presumably holds the backing Python buffer object (see _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_vtab;
  PyObject *_data;
};


/* "cuda/bindings/_nvml.pyx":14662
 * gpu_fabric_info_v3_dtype = _get_gpu_fabric_info_v3_dtype_offsets()
 * 
 * cdef class GpuFabricInfo_v3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuFabricInfo_v3_t`.
 * 
*/
/* Generated layout for extension type `GpuFabricInfo_v3`: pointer view over a
 * `nvmlGpuFabricInfo_v3_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_vtab;
  nvmlGpuFabricInfo_v3_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":14855
 * nvlink_firmware_version_dtype = _get_nvlink_firmware_version_dtype_offsets()
 * 
 * cdef class NvlinkFirmwareVersion:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkFirmwareVersion_t`.
 * 
*/
/* Generated layout for extension type `NvlinkFirmwareVersion`: pointer view
 * over a `nvmlNvlinkFirmwareVersion_t` (semantics in _nvml.pyx). */
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_vtab;
  nvmlNvlinkFirmwareVersion_t *_ptr;
  PyObject *_owner;
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":15009
 * excluded_device_info_dtype = _get_excluded_device_info_dtype_offsets()
 * 
 * cdef class ExcludedDeviceInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlExcludedDeviceInfo_t`.
 * 
*/
/* Instance layout for the Cython extension type `ExcludedDeviceInfo`
 * (_nvml.pyx:15009). Wraps an `nvmlExcludedDeviceInfo_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_vtab;  /* cdef-method vtable */
  nvmlExcludedDeviceInfo_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":15148
 * process_detail_list_v1_dtype = _get_process_detail_list_v1_dtype_offsets()
 * 
 * cdef class ProcessDetailList_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlProcessDetailList_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `ProcessDetailList_v1`
 * (_nvml.pyx:15148). Wraps an `nvmlProcessDetailList_v1_t` via `_ptr`;
 * `_refs` additionally holds Python references for nested/pointed-to
 * data (semantics in _nvml.pyx). ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlProcessDetailList_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                  /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
  PyObject *_refs;                   /* extra Python refs held by this wrapper */
};


/* "cuda/bindings/_nvml.pyx":15299
 * bridge_chip_hierarchy_dtype = _get_bridge_chip_hierarchy_dtype_offsets()
 * 
 * cdef class BridgeChipHierarchy:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlBridgeChipHierarchy_t`.
 * 
*/
/* Instance layout for the Cython extension type `BridgeChipHierarchy`
 * (_nvml.pyx:15299). Wraps an `nvmlBridgeChipHierarchy_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_vtab;  /* cdef-method vtable */
  nvmlBridgeChipHierarchy_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                 /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":15434
 * sample_dtype = _get_sample_dtype_offsets()
 * 
 * cdef class Sample:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlSample_t`.
 * 
*/
/* Instance layout for the Cython extension type `Sample` (_nvml.pyx:15434).
 * Array-backed wrapper for `nvmlSample_t`: storage is held via the Python
 * object `_data`. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_Sample {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Sample *__pyx_vtab;  /* cdef-method vtable */
  PyObject *_data;  /* Python object owning the element buffer */
};


/* "cuda/bindings/_nvml.pyx":15584
 * vgpu_instance_utilization_info_v1_dtype = _get_vgpu_instance_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuInstanceUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlVgpuInstanceUtilizationInfo_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuInstanceUtilizationInfo_v1`
 * (_nvml.pyx:15584). Array-backed wrapper for
 * `nvmlVgpuInstanceUtilizationInfo_v1_t`; storage held via `_data`. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_vtab;  /* cdef-method vtable */
  PyObject *_data;  /* Python object owning the element buffer */
};


/* "cuda/bindings/_nvml.pyx":15789
 * field_value_dtype = _get_field_value_dtype_offsets()
 * 
 * cdef class FieldValue:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlFieldValue_t`.
 * 
*/
/* Instance layout for the Cython extension type `FieldValue` (_nvml.pyx:15789).
 * Array-backed wrapper for `nvmlFieldValue_t`; storage held via `_data`. */
struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FieldValue *__pyx_vtab;  /* cdef-method vtable */
  PyObject *_data;  /* Python object owning the element buffer */
};


/* "cuda/bindings/_nvml.pyx":15988
 * gpu_thermal_settings_dtype = _get_gpu_thermal_settings_dtype_offsets()
 * 
 * cdef class GpuThermalSettings:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuThermalSettings_t`.
 * 
*/
/* Instance layout for the Cython extension type `GpuThermalSettings`
 * (_nvml.pyx:15988). Wraps an `nvmlGpuThermalSettings_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_vtab;  /* cdef-method vtable */
  nvmlGpuThermalSettings_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":16124
 * clk_mon_status_dtype = _get_clk_mon_status_dtype_offsets()
 * 
 * cdef class ClkMonStatus:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlClkMonStatus_t`.
 * 
*/
/* Instance layout for the Cython extension type `ClkMonStatus`
 * (_nvml.pyx:16124). Wraps an `nvmlClkMonStatus_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_vtab;  /* cdef-method vtable */
  nvmlClkMonStatus_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;          /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":16272
 * processes_utilization_info_v1_dtype = _get_processes_utilization_info_v1_dtype_offsets()
 * 
 * cdef class ProcessesUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlProcessesUtilizationInfo_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `ProcessesUtilizationInfo_v1`
 * (_nvml.pyx:16272). Wraps an `nvmlProcessesUtilizationInfo_v1_t` via `_ptr`;
 * `_refs` holds extra Python references for pointed-to data. ABI-fixed. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlProcessesUtilizationInfo_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                         /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
  PyObject *_refs;                          /* extra Python refs held by this wrapper */
};


/* "cuda/bindings/_nvml.pyx":16423
 * gpu_dynamic_pstates_info_dtype = _get_gpu_dynamic_pstates_info_dtype_offsets()
 * 
 * cdef class GpuDynamicPstatesInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuDynamicPstatesInfo_t`.
 * 
*/
/* Instance layout for the Cython extension type `GpuDynamicPstatesInfo`
 * (_nvml.pyx:16423). Wraps an `nvmlGpuDynamicPstatesInfo_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_vtab;  /* cdef-method vtable */
  nvmlGpuDynamicPstatesInfo_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                   /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":16560
 * vgpu_processes_utilization_info_v1_dtype = _get_vgpu_processes_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuProcessesUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuProcessesUtilizationInfo_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuProcessesUtilizationInfo_v1`
 * (_nvml.pyx:16560). Wraps an `nvmlVgpuProcessesUtilizationInfo_v1_t` via
 * `_ptr`; `_refs` holds extra Python references for pointed-to data. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuProcessesUtilizationInfo_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                             /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
  PyObject *_refs;                              /* extra Python refs held by this wrapper */
};


/* "cuda/bindings/_nvml.pyx":16706
 * 
 * 
 * cdef class VgpuSchedulerParams:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerParams_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerParams`
 * (_nvml.pyx:16706). Wraps an `nvmlVgpuSchedulerParams_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerParams_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                 /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":16835
 * 
 * 
 * cdef class VgpuSchedulerSetParams:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerSetParams_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerSetParams`
 * (_nvml.pyx:16835). Wraps an `nvmlVgpuSchedulerSetParams_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerSetParams_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                    /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":16970
 * vgpu_license_info_dtype = _get_vgpu_license_info_dtype_offsets()
 * 
 * cdef class VgpuLicenseInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuLicenseInfo_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuLicenseInfo`
 * (_nvml.pyx:16970). Wraps an `nvmlVgpuLicenseInfo_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuLicenseInfo_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;             /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":17118
 * grid_licensable_feature_dtype = _get_grid_licensable_feature_dtype_offsets()
 * 
 * cdef class GridLicensableFeature:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlGridLicensableFeature_t`.
 * 
*/
/* Instance layout for the Cython extension type `GridLicensableFeature`
 * (_nvml.pyx:17118). Array-backed wrapper for `nvmlGridLicensableFeature_t`;
 * storage held via `_data`. */
struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_vtab;  /* cdef-method vtable */
  PyObject *_data;  /* Python object owning the element buffer */
};


/* "cuda/bindings/_nvml.pyx":17302
 * unit_fan_speeds_dtype = _get_unit_fan_speeds_dtype_offsets()
 * 
 * cdef class UnitFanSpeeds:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlUnitFanSpeeds_t`.
 * 
*/
/* Instance layout for the Cython extension type `UnitFanSpeeds`
 * (_nvml.pyx:17302). Wraps an `nvmlUnitFanSpeeds_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_vtab;  /* cdef-method vtable */
  nvmlUnitFanSpeeds_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;           /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":17443
 * vgpu_pgpu_metadata_dtype = _get_vgpu_pgpu_metadata_dtype_offsets()
 * 
 * cdef class VgpuPgpuMetadata:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPgpuMetadata_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuPgpuMetadata`
 * (_nvml.pyx:17443). Wraps an `nvmlVgpuPgpuMetadata_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuPgpuMetadata_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;              /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":17641
 * gpu_instance_info_dtype = _get_gpu_instance_info_dtype_offsets()
 * 
 * cdef class GpuInstanceInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuInstanceInfo_t`.
 * 
*/
/* Instance layout for the Cython extension type `GpuInstanceInfo`
 * (_nvml.pyx:17641). Wraps an `nvmlGpuInstanceInfo_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_vtab;  /* cdef-method vtable */
  nvmlGpuInstanceInfo_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;             /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":17799
 * compute_instance_info_dtype = _get_compute_instance_info_dtype_offsets()
 * 
 * cdef class ComputeInstanceInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlComputeInstanceInfo_t`.
 * 
*/
/* Instance layout for the Cython extension type `ComputeInstanceInfo`
 * (_nvml.pyx:17799). Wraps an `nvmlComputeInstanceInfo_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_vtab;  /* cdef-method vtable */
  nvmlComputeInstanceInfo_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                 /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":17966
 * ecc_sram_unique_uncorrected_error_counts_v1_dtype = _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets()
 * 
 * cdef class EccSramUniqueUncorrectedErrorCounts_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `EccSramUniqueUncorrectedErrorCounts_v1`
 * (_nvml.pyx:17966). Wraps an `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`
 * via `_ptr`; `_refs` holds extra Python references for pointed-to data. */
struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                                    /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
  PyObject *_refs;                                     /* extra Python refs held by this wrapper */
};


/* "cuda/bindings/_nvml.pyx":18106
 * nvlink_firmware_info_dtype = _get_nvlink_firmware_info_dtype_offsets()
 * 
 * cdef class NvlinkFirmwareInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkFirmwareInfo_t`.
 * 
*/
/* Instance layout for the Cython extension type `NvlinkFirmwareInfo`
 * (_nvml.pyx:18106). Wraps an `nvmlNvlinkFirmwareInfo_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_vtab;  /* cdef-method vtable */
  nvmlNvlinkFirmwareInfo_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":18244
 * vgpu_instances_utilization_info_v1_dtype = _get_vgpu_instances_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuInstancesUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuInstancesUtilizationInfo_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuInstancesUtilizationInfo_v1`
 * (_nvml.pyx:18244). Wraps an `nvmlVgpuInstancesUtilizationInfo_v1_t` via
 * `_ptr`; `_refs` holds extra Python references for pointed-to data. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuInstancesUtilizationInfo_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                             /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
  PyObject *_refs;                              /* extra Python refs held by this wrapper */
};


/* "cuda/bindings/_nvml.pyx":18410
 * vgpu_scheduler_log_dtype = _get_vgpu_scheduler_log_dtype_offsets()
 * 
 * cdef class VgpuSchedulerLog:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerLog_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerLog`
 * (_nvml.pyx:18410). Wraps an `nvmlVgpuSchedulerLog_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerLog_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;              /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":18591
 * vgpu_scheduler_get_state_dtype = _get_vgpu_scheduler_get_state_dtype_offsets()
 * 
 * cdef class VgpuSchedulerGetState:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerGetState_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerGetState`
 * (_nvml.pyx:18591). Wraps an `nvmlVgpuSchedulerGetState_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerGetState_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                   /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":18738
 * vgpu_scheduler_state_info_v1_dtype = _get_vgpu_scheduler_state_info_v1_dtype_offsets()
 * 
 * cdef class VgpuSchedulerStateInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerStateInfo_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerStateInfo_v1`
 * (_nvml.pyx:18738). Wraps an `nvmlVgpuSchedulerStateInfo_v1_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerStateInfo_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                       /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":18909
 * vgpu_scheduler_log_info_v1_dtype = _get_vgpu_scheduler_log_info_v1_dtype_offsets()
 * 
 * cdef class VgpuSchedulerLogInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerLogInfo_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerLogInfo_v1`
 * (_nvml.pyx:18909). Wraps an `nvmlVgpuSchedulerLogInfo_v1_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerLogInfo_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                     /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":19103
 * vgpu_scheduler_state_v1_dtype = _get_vgpu_scheduler_state_v1_dtype_offsets()
 * 
 * cdef class VgpuSchedulerState_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerState_v1_t`.
 * 
*/
/* Instance layout for the Cython extension type `VgpuSchedulerState_v1`
 * (_nvml.pyx:19103). Wraps an `nvmlVgpuSchedulerState_v1_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_vtab;  /* cdef-method vtable */
  nvmlVgpuSchedulerState_v1_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                   /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":19270
 * grid_licensable_features_dtype = _get_grid_licensable_features_dtype_offsets()
 * 
 * cdef class GridLicensableFeatures:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGridLicensableFeatures_t`.
 * 
*/
/* Instance layout for the Cython extension type `GridLicensableFeatures`
 * (_nvml.pyx:19270). Wraps an `nvmlGridLicensableFeatures_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_vtab;  /* cdef-method vtable */
  nvmlGridLicensableFeatures_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;                    /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "cuda/bindings/_nvml.pyx":19417
 * nv_link_info_v2_dtype = _get_nv_link_info_v2_dtype_offsets()
 * 
 * cdef class NvLinkInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvLinkInfo_v2_t`.
 * 
*/
/* Instance layout for the Cython extension type `NvLinkInfo_v2`
 * (_nvml.pyx:19417). Wraps an `nvmlNvLinkInfo_v2_t` via `_ptr`;
 * ownership flags defined in _nvml.pyx. ABI-fixed layout. */
struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 {
  PyObject_HEAD
  struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_vtab;  /* cdef-method vtable */
  nvmlNvLinkInfo_v2_t *_ptr;  /* wrapped NVML struct */
  PyObject *_owner;           /* presumably keeps _ptr's storage alive */
  int _owned;
  int _readonly;
};


/* "View.MemoryView":110
 * 
 * 
 * @cython.collection_type("sequence")             # <<<<<<<<<<<<<<
 * @cname("__pyx_array")
 * cdef class array:
*/
/* Instance layout for Cython's internal `array` type (View.MemoryView:110):
 * an owned, C-allocated n-dimensional buffer used as memoryview backing
 * storage. Layout is fixed by the Cython runtime; do not alter. */
struct __pyx_array_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_array *__pyx_vtab;  /* cdef-method vtable */
  char *data;                 /* raw element storage */
  Py_ssize_t len;             /* total buffer length in bytes */
  char *format;               /* struct-style format string for elements */
  int ndim;                   /* number of dimensions */
  Py_ssize_t *_shape;         /* per-dimension extents */
  Py_ssize_t *_strides;       /* per-dimension strides */
  Py_ssize_t itemsize;        /* size of one element */
  PyObject *mode;             /* layout mode object (e.g. "c"/"fortran") */
  PyObject *_format;          /* Python-owned copy of the format string */
  void (*callback_free_data)(void *);  /* optional custom deallocator for data */
  int free_data;              /* whether data must be freed on dealloc */
  int dtype_is_object;        /* elements hold Python object pointers */
};


/* "View.MemoryView":299
 * 
 * 
 * @cname('__pyx_MemviewEnum')             # <<<<<<<<<<<<<<
 * cdef class Enum(object):
 *     cdef object name
*/
/* Instance layout for Cython's internal `Enum` sentinel type
 * (View.MemoryView:299): a named singleton used to tag memoryview
 * access modes. Holds only its display name. */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;  /* repr/display name of the sentinel */
};


/* "View.MemoryView":334
 * 
 * 
 * @cname('__pyx_memoryview')             # <<<<<<<<<<<<<<
 * cdef class memoryview:
 * 
*/
/* Instance layout for Cython's internal `memoryview` type
 * (View.MemoryView:334). Wraps a Py_buffer view over an exporting
 * object with its own lock/refcount machinery. Runtime-fixed layout. */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab;  /* cdef-method vtable */
  PyObject *obj;                      /* object exporting the buffer */
  PyObject *_size;                    /* cached total size (Python int) */
  void *_unused;                      /* padding slot kept for ABI stability */
  PyThread_type_lock lock;            /* guards acquisition_count */
  __pyx_atomic_int_type acquisition_count;  /* outstanding slice acquisitions */
  Py_buffer view;                     /* the underlying buffer view */
  int flags;                          /* buffer-request flags */
  int dtype_is_object;                /* elements hold Python object pointers */
  __Pyx_TypeInfo const *typeinfo;     /* element type descriptor */
};


/* "View.MemoryView":951
 * 
 * 
 * @cython.collection_type("sequence")             # <<<<<<<<<<<<<<
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):
*/
/* Instance layout for Cython's internal `_memoryviewslice` type
 * (View.MemoryView:951): a memoryview subclass representing a typed
 * slice, with conversion hooks for element <-> Python object. */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;  /* base memoryview (must be first) */
  __Pyx_memviewslice from_slice;           /* the C-level slice descriptor */
  PyObject *from_object;                   /* originating object kept alive */
  PyObject *(*to_object_func)(char *);     /* element -> Python object */
  int (*to_dtype_func)(char *, PyObject *);/* Python object -> element */
};



/* "cuda/bindings/_nvml.pyx":1354
 * pci_info_ext_v1_dtype = _get_pci_info_ext_v1_dtype_offsets()
 * 
 * cdef class PciInfoExt_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPciInfoExt_v1_t`.
 * 
*/

/* Vtable for extension type `PciInfoExt_v1` (_nvml.pyx:1354): exposes the
 * C-level `_get_ptr` method (returns an intptr_t). The static pointer below
 * is the module's handle to the installed vtable. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfoExt_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfoExt_v1;


/* "cuda/bindings/_nvml.pyx":1572
 * pci_info_dtype = _get_pci_info_dtype_offsets()
 * 
 * cdef class PciInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPciInfo_t`.
 * 
*/

/* Vtable for extension type `PciInfo` (_nvml.pyx:1572): C-level `_get_ptr`
 * method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfo;


/* "cuda/bindings/_nvml.pyx":1767
 * utilization_dtype = _get_utilization_dtype_offsets()
 * 
 * cdef class Utilization:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlUtilization_t`.
 * 
*/

/* Vtable for extension type `Utilization` (_nvml.pyx:1767): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Utilization {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Utilization *__pyx_vtabptr_4cuda_8bindings_5_nvml_Utilization;


/* "cuda/bindings/_nvml.pyx":1900
 * memory_dtype = _get_memory_dtype_offsets()
 * 
 * cdef class Memory:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlMemory_t`.
 * 
*/

/* Vtable for extension type `Memory` (_nvml.pyx:1900): C-level `_get_ptr`
 * method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory *__pyx_vtabptr_4cuda_8bindings_5_nvml_Memory;


/* "cuda/bindings/_nvml.pyx":2046
 * memory_v2_dtype = _get_memory_v2_dtype_offsets()
 * 
 * cdef class Memory_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlMemory_v2_t`.
 * 
*/

/* Vtable for extension type `Memory_v2` (_nvml.pyx:2046): C-level `_get_ptr`
 * method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory_v2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_vtabptr_4cuda_8bindings_5_nvml_Memory_v2;


/* "cuda/bindings/_nvml.pyx":2201
 * ba_r1memory_dtype = _get_ba_r1memory_dtype_offsets()
 * 
 * cdef class BAR1Memory:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlBAR1Memory_t`.
 * 
*/

/* Vtable for extension type `BAR1Memory` (_nvml.pyx:2201): C-level `_get_ptr`
 * method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BAR1Memory {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_vtabptr_4cuda_8bindings_5_nvml_BAR1Memory;


/* "cuda/bindings/_nvml.pyx":2346
 * process_info_dtype = _get_process_info_dtype_offsets()
 * 
 * cdef class ProcessInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessInfo_t`.
 * 
*/

/* Vtable for extension type `ProcessInfo` (_nvml.pyx:2346): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessInfo;


/* "cuda/bindings/_nvml.pyx":2517
 * process_detail_v1_dtype = _get_process_detail_v1_dtype_offsets()
 * 
 * cdef class ProcessDetail_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessDetail_v1_t`.
 * 
*/

/* Vtable for extension type `ProcessDetail_v1` (_nvml.pyx:2517): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetail_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetail_v1;


/* "cuda/bindings/_nvml.pyx":2703
 * device_attributes_dtype = _get_device_attributes_dtype_offsets()
 * 
 * cdef class DeviceAttributes:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceAttributes_t`.
 * 
*/

/* Vtable for extension type `DeviceAttributes` (_nvml.pyx:2703): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAttributes {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAttributes;


/* "cuda/bindings/_nvml.pyx":2911
 * c2c_mode_info_v1_dtype = _get_c2c_mode_info_v1_dtype_offsets()
 * 
 * cdef class C2cModeInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlC2cModeInfo_v1_t`.
 * 
*/

/* Vtable for extension type `C2cModeInfo_v1` (_nvml.pyx:2911): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_C2cModeInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_C2cModeInfo_v1;


/* "cuda/bindings/_nvml.pyx":3035
 * row_remapper_histogram_values_dtype = _get_row_remapper_histogram_values_dtype_offsets()
 * 
 * cdef class RowRemapperHistogramValues:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlRowRemapperHistogramValues_t`.
 * 
*/

/* Vtable for extension type `RowRemapperHistogramValues` (_nvml.pyx:3035):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RowRemapperHistogramValues {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_vtabptr_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;


/* "cuda/bindings/_nvml.pyx":3200
 * bridge_chip_info_dtype = _get_bridge_chip_info_dtype_offsets()
 * 
 * cdef class BridgeChipInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlBridgeChipInfo_t`.
 * 
*/

/* Vtable for extension type `BridgeChipInfo` (_nvml.pyx:3200): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipInfo;


/* "cuda/bindings/_nvml.pyx":3346
 * 
 * 
 * cdef class Value:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlValue_t`.
 * 
*/

/* Vtable for extension type `Value` (_nvml.pyx:3346): C-level `_get_ptr`
 * method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Value {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Value *__pyx_vtabptr_4cuda_8bindings_5_nvml_Value;


/* "cuda/bindings/_nvml.pyx":3536
 * _py_anon_pod0_dtype = _get__py_anon_pod0_dtype_offsets()
 * 
 * cdef class _py_anon_pod0:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod0`.
 * 
*/

/* Vtable for extension type `_py_anon_pod0` (_nvml.pyx:3536, wraps the
 * anonymous POD `_anon_pod0`): C-level `_get_ptr` method (returns intptr_t),
 * plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod0 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod0;


/* "cuda/bindings/_nvml.pyx":3703
 * cooler_info_v1_dtype = _get_cooler_info_v1_dtype_offsets()
 * 
 * cdef class CoolerInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlCoolerInfo_v1_t`.
 * 
*/

/* Vtable for extension type `CoolerInfo_v1` (_nvml.pyx:3703): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_CoolerInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_CoolerInfo_v1;


/* "cuda/bindings/_nvml.pyx":3857
 * margin_temperature_v1_dtype = _get_margin_temperature_v1_dtype_offsets()
 * 
 * cdef class MarginTemperature_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlMarginTemperature_v1_t`.
 * 
*/

/* Vtable for extension type `MarginTemperature_v1` (_nvml.pyx:3857): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_MarginTemperature_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_MarginTemperature_v1;


/* "cuda/bindings/_nvml.pyx":3989
 * clk_mon_fault_info_dtype = _get_clk_mon_fault_info_dtype_offsets()
 * 
 * cdef class ClkMonFaultInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlClkMonFaultInfo_t`.
 * 
*/

/* Vtable for extension type `ClkMonFaultInfo` (_nvml.pyx:3989): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonFaultInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonFaultInfo;


/* "cuda/bindings/_nvml.pyx":4139
 * clock_offset_v1_dtype = _get_clock_offset_v1_dtype_offsets()
 * 
 * cdef class ClockOffset_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlClockOffset_v1_t`.
 * 
*/

/* Vtable for extension type `ClockOffset_v1` (_nvml.pyx:4139): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClockOffset_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ClockOffset_v1;


/* "cuda/bindings/_nvml.pyx":4316
 * fan_speed_info_v1_dtype = _get_fan_speed_info_v1_dtype_offsets()
 * 
 * cdef class FanSpeedInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlFanSpeedInfo_v1_t`.
 * 
*/

/* Vtable for extension type `FanSpeedInfo_v1` (_nvml.pyx:4316): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;


/* "cuda/bindings/_nvml.pyx":4459
 * device_perf_modes_v1_dtype = _get_device_perf_modes_v1_dtype_offsets()
 * 
 * cdef class DevicePerfModes_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDevicePerfModes_v1_t`.
 * 
*/

/* Vtable for extension type `DevicePerfModes_v1` (_nvml.pyx:4459): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePerfModes_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePerfModes_v1;


/* "cuda/bindings/_nvml.pyx":4595
 * device_current_clock_freqs_v1_dtype = _get_device_current_clock_freqs_v1_dtype_offsets()
 * 
 * cdef class DeviceCurrentClockFreqs_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceCurrentClockFreqs_v1_t`.
 * 
*/

/* Vtable for extension type `DeviceCurrentClockFreqs_v1` (_nvml.pyx:4595):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;


/* "cuda/bindings/_nvml.pyx":4735
 * process_utilization_sample_dtype = _get_process_utilization_sample_dtype_offsets()
 * 
 * cdef class ProcessUtilizationSample:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessUtilizationSample_t`.
 * 
*/

/* Vtable for extension type `ProcessUtilizationSample` (_nvml.pyx:4735):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationSample {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationSample;


/* "cuda/bindings/_nvml.pyx":4931
 * process_utilization_info_v1_dtype = _get_process_utilization_info_v1_dtype_offsets()
 * 
 * cdef class ProcessUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlProcessUtilizationInfo_v1_t`.
 * 
*/

/* Vtable for extension type `ProcessUtilizationInfo_v1` (_nvml.pyx:4931):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;


/* "cuda/bindings/_nvml.pyx":5154
 * ecc_sram_error_status_v1_dtype = _get_ecc_sram_error_status_v1_dtype_offsets()
 * 
 * cdef class EccSramErrorStatus_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlEccSramErrorStatus_v1_t`.
 * 
*/

/* Vtable for extension type `EccSramErrorStatus_v1` (_nvml.pyx:5154):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;


/* "cuda/bindings/_nvml.pyx":5413
 * platform_info_v2_dtype = _get_platform_info_v2_dtype_offsets()
 * 
 * cdef class PlatformInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPlatformInfo_v2_t`.
 * 
*/

/* Vtable for extension type `PlatformInfo_v2` (_nvml.pyx:5413): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PlatformInfo_v2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_vtabptr_4cuda_8bindings_5_nvml_PlatformInfo_v2;


/* "cuda/bindings/_nvml.pyx":5621
 * _py_anon_pod1_dtype = _get__py_anon_pod1_dtype_offsets()
 * 
 * cdef class _py_anon_pod1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod1`.
 * 
*/

/* Vtable for extension type `_py_anon_pod1` (_nvml.pyx:5621, wraps the
 * anonymous POD `_anon_pod1`): C-level `_get_ptr` method (returns intptr_t),
 * plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod1;


/* "cuda/bindings/_nvml.pyx":5775
 * vgpu_heterogeneous_mode_v1_dtype = _get_vgpu_heterogeneous_mode_v1_dtype_offsets()
 * 
 * cdef class VgpuHeterogeneousMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuHeterogeneousMode_v1_t`.
 * 
*/

/* Vtable for extension type `VgpuHeterogeneousMode_v1` (_nvml.pyx:5775):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;


/* "cuda/bindings/_nvml.pyx":5907
 * vgpu_placement_id_v1_dtype = _get_vgpu_placement_id_v1_dtype_offsets()
 * 
 * cdef class VgpuPlacementId_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPlacementId_v1_t`.
 * 
*/

/* Vtable for extension type `VgpuPlacementId_v1` (_nvml.pyx:5907): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;


/* "cuda/bindings/_nvml.pyx":6042
 * vgpu_placement_list_v2_dtype = _get_vgpu_placement_list_v2_dtype_offsets()
 * 
 * cdef class VgpuPlacementList_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPlacementList_v2_t`.
 * 
*/

/* Vtable for extension type `VgpuPlacementList_v2` (_nvml.pyx:6042): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;


/* "cuda/bindings/_nvml.pyx":6207
 * vgpu_type_bar1info_v1_dtype = _get_vgpu_type_bar1info_v1_dtype_offsets()
 * 
 * cdef class VgpuTypeBar1Info_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuTypeBar1Info_v1_t`.
 * 
*/

/* Vtable for extension type `VgpuTypeBar1Info_v1` (_nvml.pyx:6207): C-level
 * `_get_ptr` method (returns intptr_t), plus the module-level vtable pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;


/* "cuda/bindings/_nvml.pyx":6347
 * vgpu_process_utilization_info_v1_dtype = _get_vgpu_process_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuProcessUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlVgpuProcessUtilizationInfo_v1_t`.
 * 
*/

/* Vtable for extension type `VgpuProcessUtilizationInfo_v1` (_nvml.pyx:6347):
 * C-level `_get_ptr` method (returns intptr_t), plus the module-level pointer. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *);
};
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;


/* "cuda/bindings/_nvml.pyx":6579
 * vgpu_runtime_state_v1_dtype = _get_vgpu_runtime_state_v1_dtype_offsets()
 * 
 * cdef class VgpuRuntimeState_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuRuntimeState_v1_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuRuntimeState_v1 (wraps nvmlVgpuRuntimeState_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;


/* "cuda/bindings/_nvml.pyx":6711
 * _py_anon_pod2_dtype = _get__py_anon_pod2_dtype_offsets()
 * 
 * cdef class _py_anon_pod2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod2`.
 * 
*/

/* Vtable for Cython cdef class _py_anon_pod2 (wraps the anonymous POD `_anon_pod2` — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod2;


/* "cuda/bindings/_nvml.pyx":6842
 * _py_anon_pod3_dtype = _get__py_anon_pod3_dtype_offsets()
 * 
 * cdef class _py_anon_pod3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod3`.
 * 
*/

/* Vtable for Cython cdef class _py_anon_pod3 (wraps the anonymous POD `_anon_pod3` — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod3 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod3;


/* "cuda/bindings/_nvml.pyx":6967
 * vgpu_scheduler_log_entry_dtype = _get_vgpu_scheduler_log_entry_dtype_offsets()
 * 
 * cdef class VgpuSchedulerLogEntry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlVgpuSchedulerLogEntry_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuSchedulerLogEntry (wraps an array of nvmlVgpuSchedulerLogEntry_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;


/* "cuda/bindings/_nvml.pyx":7157
 * _py_anon_pod4_dtype = _get__py_anon_pod4_dtype_offsets()
 * 
 * cdef class _py_anon_pod4:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod4`.
 * 
*/

/* Vtable for Cython cdef class _py_anon_pod4 (wraps the anonymous POD `_anon_pod4` — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod4 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod4;


/* "cuda/bindings/_nvml.pyx":7288
 * _py_anon_pod5_dtype = _get__py_anon_pod5_dtype_offsets()
 * 
 * cdef class _py_anon_pod5:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `_anon_pod5`.
 * 
*/

/* Vtable for Cython cdef class _py_anon_pod5 (wraps the anonymous POD `_anon_pod5` — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod5 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod5;


/* "cuda/bindings/_nvml.pyx":7415
 * vgpu_scheduler_capabilities_dtype = _get_vgpu_scheduler_capabilities_dtype_offsets()
 * 
 * cdef class VgpuSchedulerCapabilities:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerCapabilities_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuSchedulerCapabilities (wraps nvmlVgpuSchedulerCapabilities_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;


/* "cuda/bindings/_nvml.pyx":7622
 * vgpu_license_expiry_dtype = _get_vgpu_license_expiry_dtype_offsets()
 * 
 * cdef class VgpuLicenseExpiry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuLicenseExpiry_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuLicenseExpiry (wraps nvmlVgpuLicenseExpiry_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseExpiry {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;


/* "cuda/bindings/_nvml.pyx":7814
 * grid_license_expiry_dtype = _get_grid_license_expiry_dtype_offsets()
 * 
 * cdef class GridLicenseExpiry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGridLicenseExpiry_t`.
 * 
*/

/* Vtable for Cython cdef class GridLicenseExpiry (wraps nvmlGridLicenseExpiry_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicenseExpiry {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicenseExpiry;


/* "cuda/bindings/_nvml.pyx":8002
 * vgpu_type_id_info_v1_dtype = _get_vgpu_type_id_info_v1_dtype_offsets()
 * 
 * cdef class VgpuTypeIdInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuTypeIdInfo_v1_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuTypeIdInfo_v1 (wraps nvmlVgpuTypeIdInfo_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;


/* "cuda/bindings/_nvml.pyx":8135
 * vgpu_type_max_instance_v1_dtype = _get_vgpu_type_max_instance_v1_dtype_offsets()
 * 
 * cdef class VgpuTypeMaxInstance_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuTypeMaxInstance_v1_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuTypeMaxInstance_v1 (wraps nvmlVgpuTypeMaxInstance_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;


/* "cuda/bindings/_nvml.pyx":8279
 * active_vgpu_instance_info_v1_dtype = _get_active_vgpu_instance_info_v1_dtype_offsets()
 * 
 * cdef class ActiveVgpuInstanceInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlActiveVgpuInstanceInfo_v1_t`.
 * 
*/

/* Vtable for Cython cdef class ActiveVgpuInstanceInfo_v1 (wraps nvmlActiveVgpuInstanceInfo_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;


/* "cuda/bindings/_nvml.pyx":8414
 * vgpu_creatable_placement_info_v1_dtype = _get_vgpu_creatable_placement_info_v1_dtype_offsets()
 * 
 * cdef class VgpuCreatablePlacementInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuCreatablePlacementInfo_v1_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuCreatablePlacementInfo_v1 (wraps nvmlVgpuCreatablePlacementInfo_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;


/* "cuda/bindings/_nvml.pyx":8579
 * hwbc_entry_dtype = _get_hwbc_entry_dtype_offsets()
 * 
 * cdef class HwbcEntry:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlHwbcEntry_t`.
 * 
*/

/* Vtable for Cython cdef class HwbcEntry (wraps an array of nvmlHwbcEntry_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_HwbcEntry {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_vtabptr_4cuda_8bindings_5_nvml_HwbcEntry;


/* "cuda/bindings/_nvml.pyx":8723
 * led_state_dtype = _get_led_state_dtype_offsets()
 * 
 * cdef class LedState:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlLedState_t`.
 * 
*/

/* Vtable for Cython cdef class LedState (wraps nvmlLedState_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_LedState {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_LedState *__pyx_vtabptr_4cuda_8bindings_5_nvml_LedState;


/* "cuda/bindings/_nvml.pyx":8861
 * unit_info_dtype = _get_unit_info_dtype_offsets()
 * 
 * cdef class UnitInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlUnitInfo_t`.
 * 
*/

/* Vtable for Cython cdef class UnitInfo (wraps nvmlUnitInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_UnitInfo;


/* "cuda/bindings/_nvml.pyx":9033
 * psu_info_dtype = _get_psu_info_dtype_offsets()
 * 
 * cdef class PSUInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPSUInfo_t`.
 * 
*/

/* Vtable for Cython cdef class PSUInfo (wraps nvmlPSUInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PSUInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PSUInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_PSUInfo;


/* "cuda/bindings/_nvml.pyx":9191
 * unit_fan_info_dtype = _get_unit_fan_info_dtype_offsets()
 * 
 * cdef class UnitFanInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlUnitFanInfo_t`.
 * 
*/

/* Vtable for Cython cdef class UnitFanInfo (wraps an array of nvmlUnitFanInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanInfo;


/* "cuda/bindings/_nvml.pyx":9340
 * event_data_dtype = _get_event_data_dtype_offsets()
 * 
 * cdef class EventData:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlEventData_t`.
 * 
*/

/* Vtable for Cython cdef class EventData (wraps nvmlEventData_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EventData {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EventData *__pyx_vtabptr_4cuda_8bindings_5_nvml_EventData;


/* "cuda/bindings/_nvml.pyx":9510
 * accounting_stats_dtype = _get_accounting_stats_dtype_offsets()
 * 
 * cdef class AccountingStats:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlAccountingStats_t`.
 * 
*/

/* Vtable for Cython cdef class AccountingStats (wraps nvmlAccountingStats_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_AccountingStats {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_AccountingStats *__pyx_vtabptr_4cuda_8bindings_5_nvml_AccountingStats;


/* "cuda/bindings/_nvml.pyx":9692
 * encoder_session_info_dtype = _get_encoder_session_info_dtype_offsets()
 * 
 * cdef class EncoderSessionInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlEncoderSessionInfo_t`.
 * 
*/

/* Vtable for Cython cdef class EncoderSessionInfo (wraps an array of nvmlEncoderSessionInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EncoderSessionInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_EncoderSessionInfo;


/* "cuda/bindings/_nvml.pyx":9905
 * fbc_stats_dtype = _get_fbc_stats_dtype_offsets()
 * 
 * cdef class FBCStats:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlFBCStats_t`.
 * 
*/

/* Vtable for Cython cdef class FBCStats (wraps nvmlFBCStats_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCStats {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCStats *__pyx_vtabptr_4cuda_8bindings_5_nvml_FBCStats;


/* "cuda/bindings/_nvml.pyx":10058
 * fbc_session_info_dtype = _get_fbc_session_info_dtype_offsets()
 * 
 * cdef class FBCSessionInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlFBCSessionInfo_t`.
 * 
*/

/* Vtable for Cython cdef class FBCSessionInfo (wraps an array of nvmlFBCSessionInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCSessionInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_FBCSessionInfo;


/* "cuda/bindings/_nvml.pyx":10314
 * conf_compute_system_caps_dtype = _get_conf_compute_system_caps_dtype_offsets()
 * 
 * cdef class ConfComputeSystemCaps:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeSystemCaps_t`.
 * 
*/

/* Vtable for Cython cdef class ConfComputeSystemCaps (wraps nvmlConfComputeSystemCaps_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemCaps {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;


/* "cuda/bindings/_nvml.pyx":10447
 * conf_compute_system_state_dtype = _get_conf_compute_system_state_dtype_offsets()
 * 
 * cdef class ConfComputeSystemState:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeSystemState_t`.
 * 
*/

/* Vtable for Cython cdef class ConfComputeSystemState (wraps nvmlConfComputeSystemState_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemState {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemState;


/* "cuda/bindings/_nvml.pyx":10593
 * system_conf_compute_settings_v1_dtype = _get_system_conf_compute_settings_v1_dtype_offsets()
 * 
 * cdef class SystemConfComputeSettings_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlSystemConfComputeSettings_v1_t`.
 * 
*/

/* Vtable for Cython cdef class SystemConfComputeSettings_v1 (wraps nvmlSystemConfComputeSettings_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;


/* "cuda/bindings/_nvml.pyx":10758
 * conf_compute_mem_size_info_dtype = _get_conf_compute_mem_size_info_dtype_offsets()
 * 
 * cdef class ConfComputeMemSizeInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeMemSizeInfo_t`.
 * 
*/

/* Vtable for Cython cdef class ConfComputeMemSizeInfo (wraps nvmlConfComputeMemSizeInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;


/* "cuda/bindings/_nvml.pyx":10892
 * conf_compute_gpu_certificate_dtype = _get_conf_compute_gpu_certificate_dtype_offsets()
 * 
 * cdef class ConfComputeGpuCertificate:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeGpuCertificate_t`.
 * 
*/

/* Vtable for Cython cdef class ConfComputeGpuCertificate (wraps nvmlConfComputeGpuCertificate_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;


/* "cuda/bindings/_nvml.pyx":11058
 * conf_compute_gpu_attestation_report_dtype = _get_conf_compute_gpu_attestation_report_dtype_offsets()
 * 
 * cdef class ConfComputeGpuAttestationReport:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeGpuAttestationReport_t`.
 * 
*/

/* Vtable for Cython cdef class ConfComputeGpuAttestationReport (wraps nvmlConfComputeGpuAttestationReport_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;


/* "cuda/bindings/_nvml.pyx":11246
 * conf_compute_get_key_rotation_threshold_info_v1_dtype = _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets()
 * 
 * cdef class ConfComputeGetKeyRotationThresholdInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`.
 * 
*/

/* Vtable for Cython cdef class ConfComputeGetKeyRotationThresholdInfo_v1 (wraps nvmlConfComputeGetKeyRotationThresholdInfo_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;


/* "cuda/bindings/_nvml.pyx":11379
 * nvlink_supported_bw_modes_v1_dtype = _get_nvlink_supported_bw_modes_v1_dtype_offsets()
 * 
 * cdef class NvlinkSupportedBwModes_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkSupportedBwModes_v1_t`.
 * 
*/

/* Vtable for Cython cdef class NvlinkSupportedBwModes_v1 (wraps nvmlNvlinkSupportedBwModes_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;


/* "cuda/bindings/_nvml.pyx":11527
 * nvlink_get_bw_mode_v1_dtype = _get_nvlink_get_bw_mode_v1_dtype_offsets()
 * 
 * cdef class NvlinkGetBwMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkGetBwMode_v1_t`.
 * 
*/

/* Vtable for Cython cdef class NvlinkGetBwMode_v1 (wraps nvmlNvlinkGetBwMode_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;


/* "cuda/bindings/_nvml.pyx":11671
 * nvlink_set_bw_mode_v1_dtype = _get_nvlink_set_bw_mode_v1_dtype_offsets()
 * 
 * cdef class NvlinkSetBwMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkSetBwMode_v1_t`.
 * 
*/

/* Vtable for Cython cdef class NvlinkSetBwMode_v1 (wraps nvmlNvlinkSetBwMode_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;


/* "cuda/bindings/_nvml.pyx":11814
 * vgpu_version_dtype = _get_vgpu_version_dtype_offsets()
 * 
 * cdef class VgpuVersion:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuVersion_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuVersion (wraps nvmlVgpuVersion_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuVersion;


/* "cuda/bindings/_nvml.pyx":11954
 * vgpu_metadata_dtype = _get_vgpu_metadata_dtype_offsets()
 * 
 * cdef class VgpuMetadata:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuMetadata_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuMetadata (wraps nvmlVgpuMetadata_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuMetadata {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuMetadata;


/* "cuda/bindings/_nvml.pyx":12175
 * vgpu_pgpu_compatibility_dtype = _get_vgpu_pgpu_compatibility_dtype_offsets()
 * 
 * cdef class VgpuPgpuCompatibility:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPgpuCompatibility_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuPgpuCompatibility (wraps nvmlVgpuPgpuCompatibility_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;


/* "cuda/bindings/_nvml.pyx":12307
 * gpu_instance_placement_dtype = _get_gpu_instance_placement_dtype_offsets()
 * 
 * cdef class GpuInstancePlacement:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlGpuInstancePlacement_t`.
 * 
*/

/* Vtable for Cython cdef class GpuInstancePlacement (wraps an array of nvmlGpuInstancePlacement_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstancePlacement {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstancePlacement;


/* "cuda/bindings/_nvml.pyx":12464
 * gpu_instance_profile_info_v2_dtype = _get_gpu_instance_profile_info_v2_dtype_offsets()
 * 
 * cdef class GpuInstanceProfileInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuInstanceProfileInfo_v2_t`.
 * 
*/

/* Vtable for Cython cdef class GpuInstanceProfileInfo_v2 (wraps nvmlGpuInstanceProfileInfo_v2_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;


/* "cuda/bindings/_nvml.pyx":12732
 * gpu_instance_profile_info_v3_dtype = _get_gpu_instance_profile_info_v3_dtype_offsets()
 * 
 * cdef class GpuInstanceProfileInfo_v3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuInstanceProfileInfo_v3_t`.
 * 
*/

/* Vtable for Cython cdef class GpuInstanceProfileInfo_v3 (wraps nvmlGpuInstanceProfileInfo_v3_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;


/* "cuda/bindings/_nvml.pyx":12989
 * compute_instance_placement_dtype = _get_compute_instance_placement_dtype_offsets()
 * 
 * cdef class ComputeInstancePlacement:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlComputeInstancePlacement_t`.
 * 
*/

/* Vtable for Cython cdef class ComputeInstancePlacement (wraps an array of nvmlComputeInstancePlacement_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstancePlacement {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstancePlacement;


/* "cuda/bindings/_nvml.pyx":13144
 * compute_instance_profile_info_v2_dtype = _get_compute_instance_profile_info_v2_dtype_offsets()
 * 
 * cdef class ComputeInstanceProfileInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v2_t`.
 * 
*/

/* Vtable for Cython cdef class ComputeInstanceProfileInfo_v2 (wraps nvmlComputeInstanceProfileInfo_v2_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;


/* "cuda/bindings/_nvml.pyx":13389
 * compute_instance_profile_info_v3_dtype = _get_compute_instance_profile_info_v3_dtype_offsets()
 * 
 * cdef class ComputeInstanceProfileInfo_v3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v3_t`.
 * 
*/

/* Vtable for Cython cdef class ComputeInstanceProfileInfo_v3 (wraps nvmlComputeInstanceProfileInfo_v3_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;


/* "cuda/bindings/_nvml.pyx":13635
 * gpm_support_dtype = _get_gpm_support_dtype_offsets()
 * 
 * cdef class GpmSupport:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpmSupport_t`.
 * 
*/

/* Vtable for Cython cdef class GpmSupport (wraps nvmlGpmSupport_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpmSupport {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpmSupport *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpmSupport;


/* "cuda/bindings/_nvml.pyx":13767
 * device_capabilities_v1_dtype = _get_device_capabilities_v1_dtype_offsets()
 * 
 * cdef class DeviceCapabilities_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceCapabilities_v1_t`.
 * 
*/

/* Vtable for Cython cdef class DeviceCapabilities_v1 (wraps nvmlDeviceCapabilities_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;


/* "cuda/bindings/_nvml.pyx":13899
 * device_addressing_mode_v1_dtype = _get_device_addressing_mode_v1_dtype_offsets()
 * 
 * cdef class DeviceAddressingMode_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDeviceAddressingMode_v1_t`.
 * 
*/

/* Vtable for Cython cdef class DeviceAddressingMode_v1 (wraps nvmlDeviceAddressingMode_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;


/* "cuda/bindings/_nvml.pyx":14032
 * repair_status_v1_dtype = _get_repair_status_v1_dtype_offsets()
 * 
 * cdef class RepairStatus_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlRepairStatus_v1_t`.
 * 
*/

/* Vtable for Cython cdef class RepairStatus_v1 (wraps nvmlRepairStatus_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RepairStatus_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_RepairStatus_v1;


/* "cuda/bindings/_nvml.pyx":14175
 * pdi_v1_dtype = _get_pdi_v1_dtype_offsets()
 * 
 * cdef class Pdi_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlPdi_v1_t`.
 * 
*/

/* Vtable for Cython cdef class Pdi_v1 (wraps nvmlPdi_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Pdi_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_Pdi_v1;


/* "cuda/bindings/_nvml.pyx":14308
 * device_power_mizer_modes_v1_dtype = _get_device_power_mizer_modes_v1_dtype_offsets()
 * 
 * cdef class DevicePowerMizerModes_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlDevicePowerMizerModes_v1_t`.
 * 
*/

/* Vtable for Cython cdef class DevicePowerMizerModes_v1 (wraps nvmlDevicePowerMizerModes_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;


/* "cuda/bindings/_nvml.pyx":14456
 * ecc_sram_unique_uncorrected_error_entry_v1_dtype = _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets()
 * 
 * cdef class EccSramUniqueUncorrectedErrorEntry_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`.
 * 
*/

/* Vtable for Cython cdef class EccSramUniqueUncorrectedErrorEntry_v1 (wraps an array of nvmlEccSramUniqueUncorrectedErrorEntry_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;


/* "cuda/bindings/_nvml.pyx":14662
 * gpu_fabric_info_v3_dtype = _get_gpu_fabric_info_v3_dtype_offsets()
 * 
 * cdef class GpuFabricInfo_v3:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuFabricInfo_v3_t`.
 * 
*/

/* Vtable for Cython cdef class GpuFabricInfo_v3 (wraps nvmlGpuFabricInfo_v3_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;


/* "cuda/bindings/_nvml.pyx":14855
 * nvlink_firmware_version_dtype = _get_nvlink_firmware_version_dtype_offsets()
 * 
 * cdef class NvlinkFirmwareVersion:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkFirmwareVersion_t`.
 * 
*/

/* Vtable for Cython cdef class NvlinkFirmwareVersion (wraps nvmlNvlinkFirmwareVersion_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;


/* "cuda/bindings/_nvml.pyx":15009
 * excluded_device_info_dtype = _get_excluded_device_info_dtype_offsets()
 * 
 * cdef class ExcludedDeviceInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlExcludedDeviceInfo_t`.
 * 
*/

/* Vtable for Cython cdef class ExcludedDeviceInfo (wraps nvmlExcludedDeviceInfo_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ExcludedDeviceInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;


/* "cuda/bindings/_nvml.pyx":15148
 * process_detail_list_v1_dtype = _get_process_detail_list_v1_dtype_offsets()
 * 
 * cdef class ProcessDetailList_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlProcessDetailList_v1_t`.
 * 
*/

/* Vtable for Cython cdef class ProcessDetailList_v1 (wraps nvmlProcessDetailList_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetailList_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetailList_v1;


/* "cuda/bindings/_nvml.pyx":15299
 * bridge_chip_hierarchy_dtype = _get_bridge_chip_hierarchy_dtype_offsets()
 * 
 * cdef class BridgeChipHierarchy:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlBridgeChipHierarchy_t`.
 * 
*/

/* Vtable for Cython cdef class BridgeChipHierarchy (wraps nvmlBridgeChipHierarchy_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipHierarchy {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipHierarchy;


/* "cuda/bindings/_nvml.pyx":15434
 * sample_dtype = _get_sample_dtype_offsets()
 * 
 * cdef class Sample:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlSample_t`.
 * 
*/

/* Vtable for Cython cdef class Sample (wraps an array of nvmlSample_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Sample {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Sample *__pyx_vtabptr_4cuda_8bindings_5_nvml_Sample;


/* "cuda/bindings/_nvml.pyx":15584
 * vgpu_instance_utilization_info_v1_dtype = _get_vgpu_instance_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuInstanceUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlVgpuInstanceUtilizationInfo_v1_t`.
 * 
*/

/* Vtable for Cython cdef class VgpuInstanceUtilizationInfo_v1 (wraps an array of nvmlVgpuInstanceUtilizationInfo_v1_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;


/* "cuda/bindings/_nvml.pyx":15789
 * field_value_dtype = _get_field_value_dtype_offsets()
 * 
 * cdef class FieldValue:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlFieldValue_t`.
 * 
*/

/* Vtable for Cython cdef class FieldValue (wraps an array of nvmlFieldValue_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped storage's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FieldValue {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FieldValue *__pyx_vtabptr_4cuda_8bindings_5_nvml_FieldValue;


/* "cuda/bindings/_nvml.pyx":15988
 * gpu_thermal_settings_dtype = _get_gpu_thermal_settings_dtype_offsets()
 * 
 * cdef class GpuThermalSettings:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuThermalSettings_t`.
 * 
*/

/* Vtable for Cython cdef class GpuThermalSettings (wraps nvmlGpuThermalSettings_t — see _nvml.pyx).
   Sole slot `_get_ptr` returns an intptr_t, presumably the wrapped struct's address. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuThermalSettings {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *);
};
/* Module-scope vtable pointer (Cython-generated; do not hand-edit). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuThermalSettings;


/* "cuda/bindings/_nvml.pyx":16124
 * clk_mon_status_dtype = _get_clk_mon_status_dtype_offsets()
 * 
 * cdef class ClkMonStatus:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlClkMonStatus_t`.
 * 
*/

/* C-level vtable for cdef class `ClkMonStatus`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonStatus {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonStatus;


/* "cuda/bindings/_nvml.pyx":16272
 * processes_utilization_info_v1_dtype = _get_processes_utilization_info_v1_dtype_offsets()
 * 
 * cdef class ProcessesUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlProcessesUtilizationInfo_v1_t`.
 * 
*/

/* C-level vtable for cdef class `ProcessesUtilizationInfo_v1`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;


/* "cuda/bindings/_nvml.pyx":16423
 * gpu_dynamic_pstates_info_dtype = _get_gpu_dynamic_pstates_info_dtype_offsets()
 * 
 * cdef class GpuDynamicPstatesInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuDynamicPstatesInfo_t`.
 * 
*/

/* C-level vtable for cdef class `GpuDynamicPstatesInfo`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;


/* "cuda/bindings/_nvml.pyx":16560
 * vgpu_processes_utilization_info_v1_dtype = _get_vgpu_processes_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuProcessesUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuProcessesUtilizationInfo_v1_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuProcessesUtilizationInfo_v1`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;


/* "cuda/bindings/_nvml.pyx":16706
 * 
 * 
 * cdef class VgpuSchedulerParams:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerParams_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerParams`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerParams;


/* "cuda/bindings/_nvml.pyx":16835
 * 
 * 
 * cdef class VgpuSchedulerSetParams:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerSetParams_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerSetParams`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;


/* "cuda/bindings/_nvml.pyx":16970
 * vgpu_license_info_dtype = _get_vgpu_license_info_dtype_offsets()
 * 
 * cdef class VgpuLicenseInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuLicenseInfo_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuLicenseInfo`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseInfo;


/* "cuda/bindings/_nvml.pyx":17118
 * grid_licensable_feature_dtype = _get_grid_licensable_feature_dtype_offsets()
 * 
 * cdef class GridLicensableFeature:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an array of `nvmlGridLicensableFeature_t`.
 * 
*/

/* C-level vtable for cdef class `GridLicensableFeature`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeature {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeature;


/* "cuda/bindings/_nvml.pyx":17302
 * unit_fan_speeds_dtype = _get_unit_fan_speeds_dtype_offsets()
 * 
 * cdef class UnitFanSpeeds:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlUnitFanSpeeds_t`.
 * 
*/

/* C-level vtable for cdef class `UnitFanSpeeds`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanSpeeds {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanSpeeds;


/* "cuda/bindings/_nvml.pyx":17443
 * vgpu_pgpu_metadata_dtype = _get_vgpu_pgpu_metadata_dtype_offsets()
 * 
 * cdef class VgpuPgpuMetadata:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuPgpuMetadata_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuPgpuMetadata`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuMetadata {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;


/* "cuda/bindings/_nvml.pyx":17641
 * gpu_instance_info_dtype = _get_gpu_instance_info_dtype_offsets()
 * 
 * cdef class GpuInstanceInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGpuInstanceInfo_t`.
 * 
*/

/* C-level vtable for cdef class `GpuInstanceInfo`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceInfo;


/* "cuda/bindings/_nvml.pyx":17799
 * compute_instance_info_dtype = _get_compute_instance_info_dtype_offsets()
 * 
 * cdef class ComputeInstanceInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlComputeInstanceInfo_t`.
 * 
*/

/* C-level vtable for cdef class `ComputeInstanceInfo`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceInfo;


/* "cuda/bindings/_nvml.pyx":17966
 * ecc_sram_unique_uncorrected_error_counts_v1_dtype = _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets()
 * 
 * cdef class EccSramUniqueUncorrectedErrorCounts_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`.
 * 
*/

/* C-level vtable for cdef class `EccSramUniqueUncorrectedErrorCounts_v1`:
   one method, `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;


/* "cuda/bindings/_nvml.pyx":18106
 * nvlink_firmware_info_dtype = _get_nvlink_firmware_info_dtype_offsets()
 * 
 * cdef class NvlinkFirmwareInfo:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvlinkFirmwareInfo_t`.
 * 
*/

/* C-level vtable for cdef class `NvlinkFirmwareInfo`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;


/* "cuda/bindings/_nvml.pyx":18244
 * vgpu_instances_utilization_info_v1_dtype = _get_vgpu_instances_utilization_info_v1_dtype_offsets()
 * 
 * cdef class VgpuInstancesUtilizationInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuInstancesUtilizationInfo_v1_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuInstancesUtilizationInfo_v1`: one
   method, `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;


/* "cuda/bindings/_nvml.pyx":18410
 * vgpu_scheduler_log_dtype = _get_vgpu_scheduler_log_dtype_offsets()
 * 
 * cdef class VgpuSchedulerLog:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerLog_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerLog`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLog {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLog;


/* "cuda/bindings/_nvml.pyx":18591
 * vgpu_scheduler_get_state_dtype = _get_vgpu_scheduler_get_state_dtype_offsets()
 * 
 * cdef class VgpuSchedulerGetState:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerGetState_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerGetState`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerGetState {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;


/* "cuda/bindings/_nvml.pyx":18738
 * vgpu_scheduler_state_info_v1_dtype = _get_vgpu_scheduler_state_info_v1_dtype_offsets()
 * 
 * cdef class VgpuSchedulerStateInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerStateInfo_v1_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerStateInfo_v1`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;


/* "cuda/bindings/_nvml.pyx":18909
 * vgpu_scheduler_log_info_v1_dtype = _get_vgpu_scheduler_log_info_v1_dtype_offsets()
 * 
 * cdef class VgpuSchedulerLogInfo_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerLogInfo_v1_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerLogInfo_v1`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;


/* "cuda/bindings/_nvml.pyx":19103
 * vgpu_scheduler_state_v1_dtype = _get_vgpu_scheduler_state_v1_dtype_offsets()
 * 
 * cdef class VgpuSchedulerState_v1:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlVgpuSchedulerState_v1_t`.
 * 
*/

/* C-level vtable for cdef class `VgpuSchedulerState_v1`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;


/* "cuda/bindings/_nvml.pyx":19270
 * grid_licensable_features_dtype = _get_grid_licensable_features_dtype_offsets()
 * 
 * cdef class GridLicensableFeatures:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlGridLicensableFeatures_t`.
 * 
*/

/* C-level vtable for cdef class `GridLicensableFeatures`: one method,
   `_get_ptr`, taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeatures {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeatures;


/* "cuda/bindings/_nvml.pyx":19417
 * nv_link_info_v2_dtype = _get_nv_link_info_v2_dtype_offsets()
 * 
 * cdef class NvLinkInfo_v2:             # <<<<<<<<<<<<<<
 *     """Empty-initialize an instance of `nvmlNvLinkInfo_v2_t`.
 * 
*/

/* C-level vtable for cdef class `NvLinkInfo_v2`: one method, `_get_ptr`,
   taking the instance and returning an intptr_t. */
struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvLinkInfo_v2 {
  intptr_t (*_get_ptr)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_vtabptr_4cuda_8bindings_5_nvml_NvLinkInfo_v2;


/* "View.MemoryView":110
 * 
 * 
 * @cython.collection_type("sequence")             # <<<<<<<<<<<<<<
 * @cname("__pyx_array")
 * cdef class array:
*/

/* C-level vtable for Cython's internal `array` type (View.MemoryView):
   `get_memview` takes the array object and returns a new PyObject*. */
struct __pyx_vtabstruct_array {
  PyObject *(*get_memview)(struct __pyx_array_obj *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;


/* "View.MemoryView":334
 * 
 * 
 * @cname('__pyx_memoryview')             # <<<<<<<<<<<<<<
 * cdef class memoryview:
 * 
*/

/* C-level vtable for Cython's internal `memoryview` type (View.MemoryView).
   Entries cover element addressing (`get_item_pointer`), slice handling
   (`is_slice`, the two `setitem_slice_*` variants), indexed assignment
   (`setitem_indexed`), raw-byte <-> Python-object conversion
   (`convert_item_to_object` / `assign_item_from_object`), and access to the
   owning object (`_get_base`). */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
  PyObject *(*_get_base)(struct __pyx_memoryview_obj *);
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;


/* "View.MemoryView":951
 * 
 * 
 * @cython.collection_type("sequence")             # <<<<<<<<<<<<<<
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):
*/

/* C-level vtable for `_memoryviewslice` (subclass of `memoryview`): adds no
   methods of its own, embedding the base vtable as its first member so the
   layouts stay compatible. */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
/* Shared vtable pointer; presumably set during module init (not shown here). */
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* #### Code section: utility_code_proto ### */

/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
  #define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
  typedef struct {
    void (*INCREF)(void*, PyObject*, Py_ssize_t);
    void (*DECREF)(void*, PyObject*, Py_ssize_t);
    void (*GOTREF)(void*, PyObject*, Py_ssize_t);
    void (*GIVEREF)(void*, PyObject*, Py_ssize_t);
    void* (*SetupContext)(const char*, Py_ssize_t, const char*);
    void (*FinishContext)(void**);
  } __Pyx_RefNannyAPIStruct;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          if (acquire_gil) {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
              PyGILState_Release(__pyx_gilstate_save);\
          } else {\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
          }
  /* Finish a refnanny context from nogil code: re-acquire the GIL around the
     teardown call, then release it again.
     NOTE: the generator emitted this definition twice, byte-identical; an
     identical macro redefinition is legal C (C11 6.10.3p2) but redundant, so
     the duplicate has been removed. */
  #define __Pyx_RefNannyFinishContextNogil() {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __Pyx_RefNannyFinishContext();\
              PyGILState_Release(__pyx_gilstate_save);\
          }
  #define __Pyx_RefNannyFinishContext()\
          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_XINCREF(r)  do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0)
  #define __Pyx_XDECREF(r)  do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0)
  #define __Pyx_XGOTREF(r)  do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0)
  #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0)
#else
  #define __Pyx_RefNannyDeclarations
  #define __Pyx_RefNannySetupContext(name, acquire_gil)
  #define __Pyx_RefNannyFinishContextNogil()
  #define __Pyx_RefNannyFinishContext()
  #define __Pyx_INCREF(r) Py_INCREF(r)
  #define __Pyx_DECREF(r) Py_DECREF(r)
  #define __Pyx_GOTREF(r)
  #define __Pyx_GIVEREF(r)
  #define __Pyx_XINCREF(r) Py_XINCREF(r)
  #define __Pyx_XDECREF(r) Py_XDECREF(r)
  #define __Pyx_XGOTREF(r)
  #define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_Py_XDECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; Py_XDECREF(tmp);\
    } while (0)
#define __Pyx_XDECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_XDECREF(tmp);\
    } while (0)
#define __Pyx_DECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_DECREF(tmp);\
    } while (0)
#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)

/* PyErrExceptionMatches.proto (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err)  PyErr_ExceptionMatches(err)
#endif

/* PyThreadStateGet.proto (used by PyErrFetchRestore) */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare  PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign  __pyx_tstate = __Pyx_PyThreadState_Current;
#if PY_VERSION_HEX >= 0x030C00A6
#define __Pyx_PyErr_Occurred()  (__pyx_tstate->current_exception != NULL)
#define __Pyx_PyErr_CurrentExceptionType()  (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL)
#else
#define __Pyx_PyErr_Occurred()  (__pyx_tstate->curexc_type != NULL)
#define __Pyx_PyErr_CurrentExceptionType()  (__pyx_tstate->curexc_type)
#endif
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred()  (PyErr_Occurred() != NULL)
#define __Pyx_PyErr_CurrentExceptionType()  PyErr_Occurred()
#endif

/* PyErrFetchRestore.proto (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb)  __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)    __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)    __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)
#endif

/* PyObjectGetAttrStr.proto (used by PyObjectGetAttrStrNoError) */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif

/* PyObjectGetAttrStrNoError.proto (used by GetBuiltinName) */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);

/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);

/* TupleAndListFromArray.proto (used by fastcall) */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n);
#endif
#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_METH_FASTCALL
static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n);
#endif

/* IncludeStringH.proto (used by BytesEquals) */
#include <string.h>

/* BytesEquals.proto (used by UnicodeEquals) */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);

/* UnicodeEquals.proto (used by fastcall) */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);

/* fastcall.proto */
#if CYTHON_AVOID_BORROWED_REFS
    #define __Pyx_ArgRef_VARARGS(args, i) __Pyx_PySequence_ITEM(args, i)
#elif CYTHON_ASSUME_SAFE_MACROS
    #define __Pyx_ArgRef_VARARGS(args, i) __Pyx_NewRef(__Pyx_PyTuple_GET_ITEM(args, i))
#else
    #define __Pyx_ArgRef_VARARGS(args, i) __Pyx_XNewRef(PyTuple_GetItem(args, i))
#endif
#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds)
#define __Pyx_KwValues_VARARGS(args, nargs) NULL
#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s)
#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw)
#if CYTHON_METH_FASTCALL
    #define __Pyx_ArgRef_FASTCALL(args, i) __Pyx_NewRef(args[i])
    #define __Pyx_NumKwargs_FASTCALL(kwds) __Pyx_PyTuple_GET_SIZE(kwds)
    #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs))
    static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 || CYTHON_COMPILING_IN_LIMITED_API
    CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues);
  #else
    #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw)
  #endif
#else
    #define __Pyx_ArgRef_FASTCALL __Pyx_ArgRef_VARARGS
    #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS
    #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS
    #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS
    #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS
#endif
#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop)
#if CYTHON_METH_FASTCALL || (CYTHON_COMPILING_IN_CPYTHON && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(args + start, stop - start)
#else
#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop)
#endif

/* py_dict_items.proto (used by OwnedDictNext) */
static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d);

/* CallCFunction.proto (used by CallUnboundCMethod0) */
#define __Pyx_CallCFunction(cfunc, self, args)\
    ((PyCFunction)(void(*)(void))(cfunc)->func)(self, args)
#define __Pyx_CallCFunctionWithKeywords(cfunc, self, args, kwargs)\
    ((PyCFunctionWithKeywords)(void(*)(void))(cfunc)->func)(self, args, kwargs)
#define __Pyx_CallCFunctionFast(cfunc, self, args, nargs)\
    ((__Pyx_PyCFunctionFast)(void(*)(void))(PyCFunction)(cfunc)->func)(self, args, nargs)
#define __Pyx_CallCFunctionFastWithKeywords(cfunc, self, args, nargs, kwnames)\
    ((__Pyx_PyCFunctionFastWithKeywords)(void(*)(void))(PyCFunction)(cfunc)->func)(self, args, nargs, kwnames)

/* PyObjectCall.proto (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif

/* PyObjectCallMethO.proto (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif

/* PyObjectFastCall.proto (used by PyObjectCallOneArg) */
#define __Pyx_PyObject_FastCall(func, args, nargs)  __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL)
static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject * const*args, size_t nargs, PyObject *kwargs);

/* PyObjectCallOneArg.proto (used by CallUnboundCMethod0) */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);

/* UnpackUnboundCMethod.proto (used by CallUnboundCMethod0) */
/* Cache record for an unbound C method resolved once and reused.
   `type` + `method_name` identify the method to look up; `func`, `method`
   and `flag` hold the resolved callable; `initialized` (free-threaded
   CPython builds with atomics only) tracks one-time-init state — see
   __Pyx_CachedCFunction_GetAndSetInitializing below. */
typedef struct {
    PyObject *type;
    PyObject **method_name;
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && CYTHON_ATOMICS
    __pyx_atomic_int_type initialized;
#endif
    PyCFunction func;
    PyObject *method;
    int flag;
} __Pyx_CachedCFunction;
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
/* Atomically claim the right to initialize `cfunc` on free-threaded builds.
   Returns 0 when this thread won the 0 -> 1 transition (caller must perform
   the initialization), otherwise the state it observed instead — assuming
   __pyx_atomic_int_cmp_exchange (defined elsewhere) mirrors C11
   atomic_compare_exchange semantics.  Per the non-freethreading macro below
   and SetFinishedInitializing, 2 appears to mean "already initialized".
   Without atomics, always reports 1 (caller proceeds unguarded). */
static CYTHON_INLINE int __Pyx_CachedCFunction_GetAndSetInitializing(__Pyx_CachedCFunction *cfunc) {
#if !CYTHON_ATOMICS
    return 1;
#else
    __pyx_nonatomic_int_type expected = 0;
    if (__pyx_atomic_int_cmp_exchange(&cfunc->initialized, &expected, 1)) {
        return 0;
    }
    return expected;
#endif
}
/* Publish that one-time initialization of `cfunc` is complete by storing
   state 2; a no-op when atomics are unavailable. */
static CYTHON_INLINE void __Pyx_CachedCFunction_SetFinishedInitializing(__Pyx_CachedCFunction *cfunc) {
#if CYTHON_ATOMICS
    __pyx_atomic_store(&cfunc->initialized, 2);
#endif
}
#else
#define __Pyx_CachedCFunction_GetAndSetInitializing(cfunc) 2
#define __Pyx_CachedCFunction_SetFinishedInitializing(cfunc)
#endif

/* CallUnboundCMethod0.proto */
CYTHON_UNUSED
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self);
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self);
#else
#define __Pyx_CallUnboundCMethod0(cfunc, self)  __Pyx__CallUnboundCMethod0(cfunc, self)
#endif

/* py_dict_values.proto (used by OwnedDictNext) */
static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d);

/* OwnedDictNext.proto (used by ParseKeywordsImpl) */
#if CYTHON_AVOID_BORROWED_REFS
static int __Pyx_PyDict_NextRef(PyObject *p, PyObject **ppos, PyObject **pkey, PyObject **pvalue);
#else
CYTHON_INLINE
static int __Pyx_PyDict_NextRef(PyObject *p, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue);
#endif

/* RaiseDoubleKeywords.proto (used by ParseKeywordsImpl) */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);

/* ParseKeywordsImpl.export */
static int __Pyx_ParseKeywordsTuple(
    PyObject *kwds,
    PyObject * const *kwvalues,
    PyObject ** const argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    Py_ssize_t num_kwargs,
    const char* function_name,
    int ignore_unknown_kwargs
);
static int __Pyx_ParseKeywordDictToDict(
    PyObject *kwds,
    PyObject ** const argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name
);
static int __Pyx_ParseKeywordDict(
    PyObject *kwds,
    PyObject ** const argnames[],
    PyObject *values[],
    Py_ssize_t num_pos_args,
    Py_ssize_t num_kwargs,
    const char* function_name,
    int ignore_unknown_kwargs
);

/* CallUnboundCMethod2.proto */
CYTHON_UNUSED
static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2);
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2);
#else
#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2)  __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2)
#endif

/* ParseKeywords.proto */
static CYTHON_INLINE int __Pyx_ParseKeywords(
    PyObject *kwds, PyObject *const *kwvalues, PyObject ** const argnames[],
    PyObject *kwds2, PyObject *values[],
    Py_ssize_t num_pos_args, Py_ssize_t num_kwargs,
    const char* function_name,
    int ignore_unknown_kwargs
);

/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);

/* ArgTypeTestFunc.export */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);

/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
    ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 1 :\
        __Pyx__ArgTypeTest(obj, type, name, exact))

/* PyValueError_Check.proto */
#define __Pyx_PyExc_ValueError_Check(obj)  __Pyx_TypeCheck(obj, PyExc_ValueError)

/* RaiseException.export */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);

/* PyObjectFastCallMethod.proto */
#if CYTHON_VECTORCALL && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyObject_FastCallMethod(name, args, nargsf) PyObject_VectorcallMethod(name, args, nargsf, NULL)
#else
static PyObject *__Pyx_PyObject_FastCallMethod(PyObject *name, PyObject *const *args, size_t nargsf);
#endif

/* RaiseUnexpectedTypeError.proto */
static int __Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj);

/* PyMemoryError_Check.proto */
#define __Pyx_PyExc_MemoryError_Check(obj)  __Pyx_TypeCheck(obj, PyExc_MemoryError)

/* BuildPyUnicode.proto (used by COrdinalToPyUnicode) */
static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, const char* chars, int clength,
                                                int prepend_sign, char padding_char);

/* COrdinalToPyUnicode.proto (used by CIntToPyUnicode) */
static CYTHON_INLINE int __Pyx_CheckUnicodeValue(int value);
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromOrdinal_Padded(int value, Py_ssize_t width, char padding_char);

/* GCCDiagnostics.proto (used by CIntToPyUnicode) */
#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif

/* IncludeStdlibH.proto (used by CIntToPyUnicode) */
#include <stdlib.h>

/* CIntToPyUnicode.proto */
#define __Pyx_PyUnicode_From_int(value, width, padding_char, format_char) (\
    ((format_char) == ('c')) ?\
        __Pyx_uchar___Pyx_PyUnicode_From_int(value, width, padding_char) :\
        __Pyx____Pyx_PyUnicode_From_int(value, width, padding_char, format_char)\
    )
static CYTHON_INLINE PyObject* __Pyx_uchar___Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char);
static CYTHON_INLINE PyObject* __Pyx____Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char);

/* CIntToPyUnicode.proto */
#define __Pyx_PyUnicode_From_Py_ssize_t(value, width, padding_char, format_char) (\
    ((format_char) == ('c')) ?\
        __Pyx_uchar___Pyx_PyUnicode_From_Py_ssize_t(value, width, padding_char) :\
        __Pyx____Pyx_PyUnicode_From_Py_ssize_t(value, width, padding_char, format_char)\
    )
static CYTHON_INLINE PyObject* __Pyx_uchar___Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char);
static CYTHON_INLINE PyObject* __Pyx____Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char);

/* JoinPyUnicode.export */
static PyObject* __Pyx_PyUnicode_Join(PyObject** values, Py_ssize_t value_count, Py_ssize_t result_ulength,
                                      Py_UCS4 max_char);

/* PyObjectFormatSimple.proto */
#if CYTHON_COMPILING_IN_PYPY
    #define __Pyx_PyObject_FormatSimple(s, f) (\
        likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
        PyObject_Format(s, f))
#elif CYTHON_USE_TYPE_SLOTS
    #define __Pyx_PyObject_FormatSimple(s, f) (\
        likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
        likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) :\
        likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) :\
        PyObject_Format(s, f))
#else
    #define __Pyx_PyObject_FormatSimple(s, f) (\
        likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
        PyObject_Format(s, f))
#endif

CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);

/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck, unsafe_shared) :\
    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
               __Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck, unsafe_shared) :\
    (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck, int unsafe_shared);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck, unsafe_shared) :\
    (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck, int unsafe_shared);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                     int is_list, int wraparound, int boundscheck, int unsafe_shared);

/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key);
#else
#define __Pyx_PyObject_GetItem(obj, key)  PyObject_GetItem(obj, key)
#endif

/* RejectKeywords.export */
static void __Pyx_RejectKeywords(const char* function_name, PyObject *kwds);

/* PyTypeError_Check.proto */
#define __Pyx_PyExc_TypeError_Check(obj)  __Pyx_TypeCheck(obj, PyExc_TypeError)

/* DivInt[Py_ssize_t].proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t, int b_is_constant);

/* UnaryNegOverflows.proto */
#define __Pyx_UNARY_NEG_WOULD_OVERFLOW(x)\
        (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))

/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);

/* PyDictVersioning.proto (used by GetModuleGlobalName) */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT  ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict)  (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
    (version_var) = __PYX_GET_DICT_VERSION(dict);\
    (cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
        (VAR) = __Pyx_XNewRef(__pyx_dict_cached_value);\
    } else {\
        (VAR) = __pyx_dict_cached_value = (LOOKUP);\
        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
    }\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict)  (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP)  (VAR) = (LOOKUP);
#endif

/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name)  do {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_mstate_global->__pyx_d))) ?\
        (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
        __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
} while(0)
#define __Pyx_GetModuleGlobalNameUncached(var, name)  do {\
    PY_UINT64_T __pyx_dict_version;\
    PyObject *__pyx_dict_cached_value;\
    (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
} while(0)
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif

/* AssertionsEnabled.proto */
#if CYTHON_COMPILING_IN_LIMITED_API  ||  PY_VERSION_HEX >= 0x030C0000
  /* Py_OptimizeFlag cannot be used here (not in the Limited API, and
     deprecated in CPython 3.12+), so the truth value of builtins.__debug__
     is looked up once at module init and cached in a C flag. */
  static int __pyx_assertions_enabled_flag;
  #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag)
  #if __clang__ || __GNUC__
  /* The flag is written once during init and only read afterwards;
     suppress ThreadSanitizer reports on the unsynchronized reads. */
  __attribute__((no_sanitize("thread")))
  #endif
  /* Cache builtins.__debug__ into __pyx_assertions_enabled_flag.
     Returns 0 on success, -1 on failure (with a Python exception set by
     the failing call). */
  static int __Pyx_init_assertions_enabled(void) {
    PyObject *builtins, *debug, *debug_str;
    int flag;
    builtins = PyEval_GetBuiltins();  /* borrowed reference (never decref'd) */
    if (!builtins) goto bad;
    debug_str = PyUnicode_FromStringAndSize("__debug__", 9);
    if (!debug_str) goto bad;
    debug = PyObject_GetItem(builtins, debug_str);
    Py_DECREF(debug_str);
    if (!debug) goto bad;
    flag = PyObject_IsTrue(debug);
    Py_DECREF(debug);
    if (flag == -1) goto bad;
    __pyx_assertions_enabled_flag = flag;
    return 0;
  bad:
    /* Fail safe: if the lookup fails, behave as if assertions are enabled. */
    __pyx_assertions_enabled_flag = 1;
    return -1;
  }
#else
  #define __Pyx_init_assertions_enabled()  (0)
  #define __pyx_assertions_enabled()  (!Py_OptimizeFlag)
#endif

/* PyAssertionError_Check.proto */
#define __Pyx_PyExc_AssertionError_Check(obj)  __Pyx_TypeCheck(obj, PyExc_AssertionError)

/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);

/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);

/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);

/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);

/* GetTopmostException.proto (used by SaveResetException) */
#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif

/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb)  __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb)  __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb)   PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb)  PyErr_SetExcInfo(type, value, tb)
#endif

/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb)  __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif

/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb)  __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif

/* HasAttr.proto (used by ImportImpl) */
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
#define __Pyx_HasAttr(o, n)  PyObject_HasAttrWithError(o, n)
#else
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
#endif

/* ImportImpl.export */
static PyObject *__Pyx__Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, PyObject *moddict, int level);

/* Import.proto */
static CYTHON_INLINE PyObject *__Pyx_Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, int level);

/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
/* On CPython, route type checks through Cython's own subtype helpers
   (defined elsewhere in this file) instead of PyObject_TypeCheck. */
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
/* Portable fallback: delegate to the public C API equivalents. */
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2))
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
/* Match `err` against either exception type, short-circuiting on the first
   hit (two separate API calls; no combined fast path off CPython). */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2) {
    return PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2);
}
#endif
#define __Pyx_PyErr_ExceptionMatches2(err1, err2)  __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2)
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#ifdef PyExceptionInstance_Check
  #define __Pyx_PyBaseException_Check(obj) PyExceptionInstance_Check(obj)
#else
  #define __Pyx_PyBaseException_Check(obj) __Pyx_TypeCheck(obj, PyExc_BaseException)
#endif

CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Fast append used in list comprehensions: when the list has spare
   capacity, store the item straight into its internal array instead of
   calling PyList_Append. Returns 0 on success, -1 on error (only the
   PyList_Append fallback path can fail). */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);  /* the list takes ownership of a new reference */
        #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000
        /* NOTE(review): on 3.13+ the slot is written directly rather than
           via PyList_SET_ITEM — presumably to sidestep changed macro
           semantics there; confirm against the upstream Cython utility. */
        L->ob_item[len] = x;
        #else
        PyList_SET_ITEM(list, len, x);
        #endif
        __Pyx_SET_SIZE(list, len + 1);
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif

/* PySequenceMultiply.proto */
#define __Pyx_PySequence_Multiply_Left(mul, seq)  __Pyx_PySequence_Multiply(seq, mul)
#if !CYTHON_USE_TYPE_SLOTS
#define  __Pyx_PySequence_Multiply PySequence_Repeat
#else
static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul);
#endif

/* PyObjectFormatAndDecref.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f);
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f);

/* PyObjectFormat.proto */
#if CYTHON_USE_UNICODE_WRITER
static PyObject* __Pyx_PyObject_Format(PyObject* s, PyObject* f);
#else
#define __Pyx_PyObject_Format(s, f) PyObject_Format(s, f)
#endif

/* SetItemInt.proto */
#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck, unsafe_shared) :\
    (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
               __Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
                                               int is_list, int wraparound, int boundscheck, int unsafe_shared);

/* RaiseUnboundLocalError.proto */
static void __Pyx_RaiseUnboundLocalError(const char *varname);

/* PyIndexError_Check.proto */
#define __Pyx_PyExc_IndexError_Check(obj)  __Pyx_TypeCheck(obj, PyExc_IndexError)

/* DivInt[long].proto */
static CYTHON_INLINE long __Pyx_div_long(long, long, int b_is_constant);

/* PyLongCompare.proto */
static CYTHON_INLINE int __Pyx_PyLong_BoolNeObjC(PyObject *op1, PyObject *op2, long intval, long inplace);

/* PyObjectDelAttr.proto (used by PyObjectSetAttrStr) */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030d0000
#define __Pyx_PyObject_DelAttr(o, n) PyObject_SetAttr(o, n, NULL)
#else
#define __Pyx_PyObject_DelAttr(o, n) PyObject_DelAttr(o, n)
#endif

/* PyObjectSetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL)
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);
#else
#define __Pyx_PyObject_DelAttrStr(o,n)   __Pyx_PyObject_DelAttr(o,n)
#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
#endif

/* PyUnicode_Unicode.proto */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj);

/* PyNumber_Hex.proto */
#define __Pyx_PyNumber_Hex(obj) PyNumber_ToBase((obj), 16)

/* PyLongCompare.proto */
static CYTHON_INLINE int __Pyx_PyLong_BoolEqObjC(PyObject *op1, PyObject *op2, long intval, long inplace);

/* PyObjectVectorCallKwBuilder.proto */
CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n);
#if CYTHON_VECTORCALL
#if PY_VERSION_HEX >= 0x03090000
#define __Pyx_Object_Vectorcall_CallFromBuilder PyObject_Vectorcall
#else
#define __Pyx_Object_Vectorcall_CallFromBuilder _PyObject_Vectorcall
#endif
#define __Pyx_MakeVectorcallBuilderKwds(n) PyTuple_New(n)
static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n);
static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n);
#else
#define __Pyx_Object_Vectorcall_CallFromBuilder __Pyx_PyObject_FastCallDict
#define __Pyx_MakeVectorcallBuilderKwds(n) __Pyx_PyDict_NewPresized(n)
#define __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n) PyDict_SetItem(builder, key, value)
#define __Pyx_VectorcallBuilder_AddArgStr(key, value, builder, args, n) PyDict_SetItemString(builder, key, value)
#endif

/* CIntToPyUnicode.proto */
#define __Pyx_PyUnicode_From_size_t(value, width, padding_char, format_char) (\
    ((format_char) == ('c')) ?\
        __Pyx_uchar___Pyx_PyUnicode_From_size_t(value, width, padding_char) :\
        __Pyx____Pyx_PyUnicode_From_size_t(value, width, padding_char, format_char)\
    )
static CYTHON_INLINE PyObject* __Pyx_uchar___Pyx_PyUnicode_From_size_t(size_t value, Py_ssize_t width, char padding_char);
static CYTHON_INLINE PyObject* __Pyx____Pyx_PyUnicode_From_size_t(size_t value, Py_ssize_t width, char padding_char, char format_char);

/* SliceObject.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
        PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
        int has_cstart, int has_cstop, int wraparound);

/* SliceObject.proto */
#define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)\
    __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)
static CYTHON_INLINE int __Pyx_PyObject_SetSlice(
        PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop,
        PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
        int has_cstart, int has_cstop, int wraparound);

/* ErrOccurredWithGIL.proto */
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void);

/* decode_c_string_utf16.proto (used by decode_c_bytes) */
/* Thin wrappers around PyUnicode_DecodeUTF16 that pin the byte order up
   front: 0 = auto-detect (BOM / native), -1 = little endian, 1 = big
   endian. Each returns a new unicode object or NULL on error. */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
    int bo = 0;  /* auto-detect byte order */
    return PyUnicode_DecodeUTF16(s, size, errors, &bo);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
    int bo = -1;  /* force little endian */
    return PyUnicode_DecodeUTF16(s, size, errors, &bo);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
    int bo = 1;  /* force big endian */
    return PyUnicode_DecodeUTF16(s, size, errors, &bo);
}

/* decode_c_bytes.proto (used by decode_bytes) */
static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes(
         const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));

/* decode_bytes.proto */
/* Decode the [start, stop) slice of a bytes object via decode_func.
   Returns a new unicode object, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_decode_bytes(
         PyObject* string, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    char* data;
    Py_ssize_t data_len;
#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
    /* Fast path: the caller guarantees `string` really is a bytes object,
       so the unchecked accessor macros are safe. */
    data = PyBytes_AS_STRING(string);
    data_len = PyBytes_GET_SIZE(string);
#else
    /* Checked path: fails (returning NULL) if `string` is not bytes-like. */
    if (PyBytes_AsStringAndSize(string, &data, &data_len) < 0)
        return NULL;
#endif
    return __Pyx_decode_c_bytes(data, data_len, start, stop,
                                encoding, errors, decode_func);
}

/* PyLongBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static CYTHON_INLINE PyObject* __Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyLong_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
    (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif

/* AllocateExtensionType.proto */
static PyObject *__Pyx_AllocateExtensionType(PyTypeObject *t, int is_final);

/* CallTypeTraverse.proto */
#if !CYTHON_USE_TYPE_SPECS || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x03090000)
#define __Pyx_call_type_traverse(o, always_call, visit, arg) 0
#else
static int __Pyx_call_type_traverse(PyObject *o, int always_call, visitproc visit, void *arg);
#endif

/* DefaultPlacementNew.proto */
#include <new>
/* Default-construct an object of type U in already-allocated, raw storage
   at `where` (placement new; no allocation is performed). */
template<typename U>
void __Pyx_default_placement_construct(U* where) {
    new (static_cast<void*>(where)) U();
}

/* FunctionExport.proto */
static int __Pyx_ExportFunction(PyObject *api_dict, const char *name, void (*f)(void), const char *sig);

/* GetApiDict.proto */
static PyObject *__Pyx_ApiExport_GetApiDict(void);

/* LimitedApiGetTypeDict.proto (used by SetItemOnTypeDict) */
#if CYTHON_COMPILING_IN_LIMITED_API
static PyObject *__Pyx_GetTypeDict(PyTypeObject *tp);
#endif

/* SetItemOnTypeDict.proto (used by FixUpExtensionType) */
static int __Pyx__SetItemOnTypeDict(PyTypeObject *tp, PyObject *k, PyObject *v);
#define __Pyx_SetItemOnTypeDict(tp, k, v) __Pyx__SetItemOnTypeDict((PyTypeObject*)tp, k, v)

/* FixUpExtensionType.proto */
static CYTHON_INLINE int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type);

/* PyObjectCallNoArg.proto (used by PyObjectCallMethod0) */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);

/* PyObjectGetMethod.proto (used by PyObjectCallMethod0) */
#if !(CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000)))
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
#endif

/* PyObjectCallMethod0.proto (used by PyType_Ready) */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);

/* ValidateBasesTuple.proto (used by PyType_Ready) */
#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases);
#endif

/* PyType_Ready.proto */
CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t);

/* SetVTable.proto */
static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable);

/* GetVTable.proto (used by MergeVTables) */
static void* __Pyx_GetVtable(PyTypeObject *type);

/* MergeVTables.proto */
static int __Pyx_MergeVtables(PyTypeObject *type);

/* DelItemOnTypeDict.proto (used by SetupReduce) */
static int __Pyx__DelItemOnTypeDict(PyTypeObject *tp, PyObject *k);
#define __Pyx_DelItemOnTypeDict(tp, k) __Pyx__DelItemOnTypeDict((PyTypeObject*)tp, k)

/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);

/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto_3_2_2
#define __PYX_HAVE_RT_ImportType_proto_3_2_2
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
#include <stdalign.h>
#endif
#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || __cplusplus >= 201103L
#define __PYX_GET_STRUCT_ALIGNMENT_3_2_2(s) alignof(s)
#else
#define __PYX_GET_STRUCT_ALIGNMENT_3_2_2(s) sizeof(void*)
#endif
enum __Pyx_ImportType_CheckSize_3_2_2 {
   __Pyx_ImportType_CheckSize_Error_3_2_2 = 0,
   __Pyx_ImportType_CheckSize_Warn_3_2_2 = 1,
   __Pyx_ImportType_CheckSize_Ignore_3_2_2 = 2
};
static PyTypeObject *__Pyx_ImportType_3_2_2(PyObject* module, const char *module_name, const char *class_name, size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_3_2_2 check_size);
#endif

/* FunctionImport.proto */
static int __Pyx_ImportFunction_3_2_2(PyObject *module, const char *funcname, void (**f)(void), const char *sig);

/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);

/* Py3UpdateBases.proto */
static PyObject* __Pyx_PEP560_update_bases(PyObject *bases);

/* CalculateMetaclass.proto */
static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases);

/* SetNameInClass.proto */
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000
#define __Pyx_SetNameInClass(ns, name, value)\
    (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value))
#elif CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_SetNameInClass(ns, name, value)\
    (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value))
#else
#define __Pyx_SetNameInClass(ns, name, value)  PyObject_SetItem(ns, name, value)
#endif

/* PyObjectCall2Args.proto (used by Py3ClassCreate) */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);

/* PyObjectLookupSpecial.proto (used by Py3ClassCreate) */
#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
#define __Pyx_PyObject_LookupSpecialNoError(obj, attr_name)  __Pyx__PyObject_LookupSpecial(obj, attr_name, 0)
#define __Pyx_PyObject_LookupSpecial(obj, attr_name)  __Pyx__PyObject_LookupSpecial(obj, attr_name, 1)
static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error);
#else
#define __Pyx_PyObject_LookupSpecialNoError(o,n) __Pyx_PyObject_GetAttrStrNoError(o,n)
#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n)
#endif

/* Py3ClassCreate.proto */
static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname,
                                           PyObject *mkw, PyObject *modname, PyObject *doc);
static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict,
                                      PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass);

/* dict_setdefault.proto (used by FetchCommonType) */
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value);

/* AddModuleRef.proto (used by FetchSharedCythonModule) */
#if ((CYTHON_COMPILING_IN_CPYTHON_FREETHREADING ) ||\
     __PYX_LIMITED_VERSION_HEX < 0x030d0000)
  static PyObject *__Pyx_PyImport_AddModuleRef(const char *name);
#else
  #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name)
#endif

/* FetchSharedCythonModule.proto (used by FetchCommonType) */
static PyObject *__Pyx_FetchSharedCythonABIModule(void);

/* FetchCommonType.proto (used by CommonTypesMetaclass) */
static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases);

/* CommonTypesMetaclass.proto (used by CythonFunctionShared) */
static int __pyx_CommonTypesMetaclass_init(PyObject *module);
#define __Pyx_CommonTypesMetaclass_USED

/* PyMethodNew.proto (used by CythonFunctionShared) */
static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ);

/* PyVectorcallFastCallDict.proto (used by CythonFunctionShared) */
#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL
static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw);
#endif

/* CythonFunctionShared.proto (used by CythonFunction) */
#define __Pyx_CyFunction_USED
#define __Pyx_CYFUNCTION_STATICMETHOD  0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD   0x02
#define __Pyx_CYFUNCTION_CCLASS        0x04
#define __Pyx_CYFUNCTION_COROUTINE     0x08
#define __Pyx_CyFunction_GetClosure(f)\
    (((__pyx_CyFunctionObject *) (f))->func_closure)
#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_CyFunction_GetClassObj(f)\
      (((__pyx_CyFunctionObject *) (f))->func_classobj)
#else
  #define __Pyx_CyFunction_GetClassObj(f)\
      ((PyObject*) ((PyCMethodObject *) (f))->mm_class)
#endif
#define __Pyx_CyFunction_SetClassObj(f, classobj)\
    __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj))
#define __Pyx_CyFunction_Defaults(type, f)\
    ((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
    ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
typedef struct {
#if CYTHON_COMPILING_IN_LIMITED_API
    PyObject_HEAD
    PyObject *func;
#elif PY_VERSION_HEX < 0x030900B1
    PyCFunctionObject func;
#else
    PyCMethodObject func;
#endif
#if CYTHON_COMPILING_IN_LIMITED_API && CYTHON_METH_FASTCALL
    __pyx_vectorcallfunc func_vectorcall;
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
    PyObject *func_weakreflist;
#endif
#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API
    PyObject *func_dict;
#endif
    PyObject *func_name;
    PyObject *func_qualname;
    PyObject *func_doc;
    PyObject *func_globals;
    PyObject *func_code;
    PyObject *func_closure;
#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
    PyObject *func_classobj;
#endif
    PyObject *defaults;
    int flags;
    PyObject *defaults_tuple;
    PyObject *defaults_kwdict;
    PyObject *(*defaults_getter)(PyObject *);
    PyObject *func_annotations;
    PyObject *func_is_coroutine;
} __pyx_CyFunctionObject;
#undef __Pyx_CyOrPyCFunction_Check
#define __Pyx_CyFunction_Check(obj)  __Pyx_TypeCheck(obj, __pyx_mstate_global->__pyx_CyFunctionType)
#define __Pyx_CyOrPyCFunction_Check(obj)  __Pyx_TypeCheck2(obj, __pyx_mstate_global->__pyx_CyFunctionType, &PyCFunction_Type)
#define __Pyx_CyFunction_CheckExact(obj)  __Pyx_IS_TYPE(obj, __pyx_mstate_global->__pyx_CyFunctionType)
static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void (*cfunc)(void));
#undef __Pyx_IsSameCFunction
#define __Pyx_IsSameCFunction(func, cfunc)   __Pyx__IsSameCyOrCFunction(func, cfunc)
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
                                      int flags, PyObject* qualname,
                                      PyObject *closure,
                                      PyObject *module, PyObject *globals,
                                      PyObject* code);
static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj);
static CYTHON_INLINE PyObject *__Pyx_CyFunction_InitDefaults(PyObject *func,
                                                         PyTypeObject *defaults_type);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
                                                            PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
                                                             PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
                                                              PyObject *dict);
static int __pyx_CyFunction_init(PyObject *module);
#if CYTHON_METH_FASTCALL
static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
#if CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall)
#else
#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall)
#endif
#endif

/* CythonFunction.proto */
static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml,
                                      int flags, PyObject* qualname,
                                      PyObject *closure,
                                      PyObject *module, PyObject *globals,
                                      PyObject* code);

/* GetNameInClass.proto */
#define __Pyx_GetNameInClass(var, nmspace, name)  (var) = __Pyx__GetNameInClass(nmspace, name)
static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name);

/* CLineInTraceback.proto (used by AddTraceback) */
#if CYTHON_CLINE_IN_TRACEBACK && CYTHON_CLINE_IN_TRACEBACK_RUNTIME
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#else
#define __Pyx_CLineForTraceback(tstate, c_line)  (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#endif

/* CodeObjectCache.proto (used by AddTraceback) */
#if CYTHON_COMPILING_IN_LIMITED_API
typedef PyObject __Pyx_CachedCodeObjectType;
#else
typedef PyCodeObject __Pyx_CachedCodeObjectType;
#endif
typedef struct {
    __Pyx_CachedCodeObjectType* code_object;
    int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
    int count;
    int max_count;
    __Pyx_CodeObjectCacheEntry* entries;
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_atomic_int_type accessor_count;
  #endif
};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static __Pyx_CachedCodeObjectType *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, __Pyx_CachedCodeObjectType* code_object);

/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename);

/* BufferStructDeclare.proto */
/* Per-dimension metadata for one axis of a buffer view. */
typedef struct {
  Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
/* Reference-counted wrapper around a filled-in Py_buffer view. */
typedef struct {
  size_t refcount;
  Py_buffer pybuffer;
} __Pyx_Buffer;
/* A local buffer variable: the shared refcounted view, a raw data pointer,
 * and per-dimension info for up to 8 dimensions. */
typedef struct {
  __Pyx_Buffer *rcbuffer;
  char *data;
  __Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;

/* MemviewRefcount.proto */
/* Lock-protected increment/decrement of a memoryview's acquisition count. */
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (&memview->acquisition_count)
/* The call-site macros forward __LINE__ to the helpers — presumably for
 * diagnostics; the implementations are outside this view. */
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XCLEAR_MEMVIEW(slice, have_gil) __Pyx_XCLEAR_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *, int, int);

/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);

/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize);

/* MemviewSliceInit.proto */
static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);

/* CheckUnpickleChecksum.proto */
static CYTHON_INLINE int __Pyx_CheckUnpickleChecksum(long checksum, long checksum1, long checksum2, long checksum3, const char *members);

static PyObject* __pyx_convert__to_py_nvmlDramEncryptionInfo_v1_t(nvmlDramEncryptionInfo_v1_t s);
static PyObject* __pyx_convert__to_py_nvmlValue_t(nvmlValue_t s);
static PyObject* __pyx_convert__to_py_nvmlVgpuInstanceUtilizationSample_t(nvmlVgpuInstanceUtilizationSample_t s);
static PyObject* __pyx_convert__to_py_nvmlVgpuProcessUtilizationSample_t(nvmlVgpuProcessUtilizationSample_t s);
/* MemviewSliceCopy.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlBridgeChipType_t(nvmlBridgeChipType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(nvmlNvLinkUtilizationCountUnits_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(nvmlNvLinkUtilizationCountPktTypes_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkCapability_t(nvmlNvLinkCapability_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(nvmlNvLinkErrorCounter_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlIntNvLinkDeviceType_t(nvmlIntNvLinkDeviceType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(nvmlGpuTopologyLevel_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuP2PStatus_t(nvmlGpuP2PStatus_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(nvmlGpuP2PCapsIndex_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlSamplingType_t(nvmlSamplingType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPcieUtilCounter_t(nvmlPcieUtilCounter_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlValueType_t(nvmlValueType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPerfPolicyType_t(nvmlPerfPolicyType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlThermalTarget_t(nvmlThermalTarget_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlThermalController_t(nvmlThermalController_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlCoolerControl_t(nvmlCoolerControl_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlCoolerTarget_t(nvmlCoolerTarget_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlUUIDType_t(nvmlUUIDType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlEnableState_t(nvmlEnableState_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlBrandType_t(nvmlBrandType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlTemperatureThresholds_t(nvmlTemperatureThresholds_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlTemperatureSensors_t(nvmlTemperatureSensors_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlComputeMode_t(nvmlComputeMode_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlMemoryErrorType_t(nvmlMemoryErrorType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvlinkVersion_t(nvmlNvlinkVersion_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlEccCounterType_t(nvmlEccCounterType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlClockType_t(nvmlClockType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlClockId_t(nvmlClockId_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDriverModel_t(nvmlDriverModel_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPstates_t(nvmlPstates_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuOperationMode_t(nvmlGpuOperationMode_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlInforomObject_t(nvmlInforomObject_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlReturn_t(nvmlReturn_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlMemoryLocation_t(nvmlMemoryLocation_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPageRetirementCause_t(nvmlPageRetirementCause_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlRestrictedAPI_t(nvmlRestrictedAPI_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuUtilizationDomainId_t(nvmlGpuUtilizationDomainId_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(nvmlGpuVirtualizationMode_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlHostVgpuMode_t(nvmlHostVgpuMode_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuVmIdType_t(nvmlVgpuVmIdType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuGuestInfoState_t(nvmlVgpuGuestInfoState_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(nvmlGridLicenseFeatureCode_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuCapability_t(nvmlVgpuCapability_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuDriverCapability_t(nvmlVgpuDriverCapability_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(nvmlDeviceVgpuCapability_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(nvmlDeviceGpuRecoveryAction_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlFanState_t(nvmlFanState_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlLedColor_t(nvmlLedColor_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlEncoderType_t(nvmlEncoderType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlFBCSessionType_t(nvmlFBCSessionType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDetachGpuState_t(nvmlDetachGpuState_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPcieLinkState_t(nvmlPcieLinkState_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlClockLimitId_t(nvmlClockLimitId_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(nvmlVgpuVmCompatibility_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(nvmlVgpuPgpuCompatibilityLimitCode_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpmMetricId_t(nvmlGpmMetricId_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPowerProfileType_t(nvmlPowerProfileType_t value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDeviceAddressingModeType_t(nvmlDeviceAddressingModeType_t value);

/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyLong_As_int(PyObject *);

/* CIntFromPy.proto */
static CYTHON_INLINE size_t __Pyx_PyLong_As_size_t(PyObject *);

/* CIntFromPy.proto */
static CYTHON_INLINE unsigned int __Pyx_PyLong_As_unsigned_int(PyObject *);

/* CIntFromPy.proto */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyLong_As_unsigned_PY_LONG_LONG(PyObject *);

/* CIntFromPy.proto */
static CYTHON_INLINE nvmlTemperatureSensors_t __Pyx_PyLong_As_nvmlTemperatureSensors_t(PyObject *);

/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyLong_As_long(PyObject *);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_int(int value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_int(unsigned int value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_long(long value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From___pyx_anon_enum(int value);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_long(unsigned long value);

/* CIntFromPy.proto */
static CYTHON_INLINE unsigned long __Pyx_PyLong_As_unsigned_long(PyObject *);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_PY_LONG_LONG(PY_LONG_LONG value);

/* CIntFromPy.proto */
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyLong_As_PY_LONG_LONG(PyObject *);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_short(unsigned short value);

/* CIntFromPy.proto */
static CYTHON_INLINE unsigned short __Pyx_PyLong_As_unsigned_short(PyObject *);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_char(unsigned char value);

/* CIntFromPy.proto */
static CYTHON_INLINE unsigned char __Pyx_PyLong_As_unsigned_char(PyObject *);

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_char(char value);

/* PyObjectCallMethod1.proto */
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg);

/* UpdateUnpickledDict.proto */
static int __Pyx_UpdateUnpickledDict(PyObject *obj, PyObject *state, Py_ssize_t index);

/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyLong_As_char(PyObject *);

/* FormatTypeName.proto */
/* Portable way to obtain a type's name for error-message formatting.
 * Limited API: the name is a new PyObject* (formatted with "%U") that must
 * be released via __Pyx_DECREF_TypeName; on 3.13+ the public
 * PyType_GetFullyQualifiedName is used directly, older targets fall back
 * to a local helper.
 * Full API: tp_name is used as a borrowed C string ("%.200s") and the
 * DECREF macro is a no-op. */
#if CYTHON_COMPILING_IN_LIMITED_API
typedef PyObject *__Pyx_TypeName;
#define __Pyx_FMT_TYPENAME "%U"
#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj)
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
#define __Pyx_PyType_GetFullyQualifiedName PyType_GetFullyQualifiedName
#else
static __Pyx_TypeName __Pyx_PyType_GetFullyQualifiedName(PyTypeObject* tp);
#endif
#else  // !LIMITED_API
typedef const char *__Pyx_TypeName;
#define __Pyx_FMT_TYPENAME "%.200s"
#define __Pyx_PyType_GetFullyQualifiedName(tp) ((tp)->tp_name)
#define __Pyx_DECREF_TypeName(obj)
#endif

/* GetRuntimeVersion.proto */
/* Query the Python runtime version. When targeting Limited API < 3.11 the
 * value is computed once by an init helper and cached in a static; on newer
 * targets no init step is needed and the macro expands to nothing. */
#if __PYX_LIMITED_VERSION_HEX < 0x030b0000
static unsigned long __Pyx_cached_runtime_version = 0;
static void __Pyx_init_runtime_version(void);
#else
#define __Pyx_init_runtime_version()
#endif
static unsigned long __Pyx_get_runtime_version(void);

/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer);

/* DecompressString.proto */
static PyObject *__Pyx_DecompressString(const char *s, Py_ssize_t length, int algo);

/* MultiPhaseInitModuleState.proto */
/* Module-state registry. With PEP 489 multi-phase init AND module state,
 * custom find/add/remove helpers are declared; with module state alone the
 * names alias CPython's PyState_* API directly. */
#if CYTHON_PEP489_MULTI_PHASE_INIT && CYTHON_USE_MODULE_STATE
static PyObject *__Pyx_State_FindModule(void*);
static int __Pyx_State_AddModule(PyObject* module, void*);
static int __Pyx_State_RemoveModule(void*);
#elif CYTHON_USE_MODULE_STATE
#define __Pyx_State_FindModule PyState_FindModule
#define __Pyx_State_AddModule PyState_AddModule
#define __Pyx_State_RemoveModule PyState_RemoveModule
#endif

/* #### Code section: module_declarations ### */
/* CythonABIVersion.proto */
/* Build the CYTHON_ABI string by appending feature-dependent suffixes, so
 * extension modules with incompatible internal ABIs never share the hidden
 * "_cython_<abi>" module namespace. Note: __PYX_AM_SEND_ABI_SUFFIX is
 * referenced before its #define below — legal, since object-like macros
 * expand where they are USED (in CYTHON_ABI), not where defined. */
#if CYTHON_COMPILING_IN_LIMITED_API
    #if CYTHON_METH_FASTCALL
        #define __PYX_FASTCALL_ABI_SUFFIX  "_fastcall"
    #else
        #define __PYX_FASTCALL_ABI_SUFFIX
    #endif
    #define __PYX_LIMITED_ABI_SUFFIX "limited" __PYX_FASTCALL_ABI_SUFFIX __PYX_AM_SEND_ABI_SUFFIX
#else
    #define __PYX_LIMITED_ABI_SUFFIX
#endif
/* am_send (PEP 525/asyncio "send" slot) support level: native, backported,
 * or absent. */
#if __PYX_HAS_PY_AM_SEND == 1
    #define __PYX_AM_SEND_ABI_SUFFIX
#elif __PYX_HAS_PY_AM_SEND == 2
    #define __PYX_AM_SEND_ABI_SUFFIX "amsendbackport"
#else
    #define __PYX_AM_SEND_ABI_SUFFIX "noamsend"
#endif
#ifndef __PYX_MONITORING_ABI_SUFFIX
    #define __PYX_MONITORING_ABI_SUFFIX
#endif
#if CYTHON_USE_TP_FINALIZE
    #define __PYX_TP_FINALIZE_ABI_SUFFIX
#else
    #define __PYX_TP_FINALIZE_ABI_SUFFIX "nofinalize"
#endif
#if CYTHON_USE_FREELISTS || !defined(__Pyx_AsyncGen_USED)
    #define __PYX_FREELISTS_ABI_SUFFIX
#else
    #define __PYX_FREELISTS_ABI_SUFFIX "nofreelists"
#endif
/* NOTE(review): under the Limited API the am-send suffix is contributed both
 * via __PYX_LIMITED_ABI_SUFFIX and again directly at the end of CYTHON_ABI,
 * so it can appear twice in the string. Harmless for matching (both sides
 * build it the same way), but verify against Cython upstream before relying
 * on the exact string. */
#define CYTHON_ABI  __PYX_ABI_VERSION __PYX_LIMITED_ABI_SUFFIX __PYX_MONITORING_ABI_SUFFIX __PYX_TP_FINALIZE_ABI_SUFFIX __PYX_FREELISTS_ABI_SUFFIX __PYX_AM_SEND_ABI_SUFFIX
#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI
#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "."

static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto*/
#if !CYTHON_COMPILING_IN_LIMITED_API
static CYTHON_INLINE double __pyx_f_7cpython_7complex_7complex_4real_real(PyComplexObject *__pyx_v_self); /* proto*/
#endif
#if !CYTHON_COMPILING_IN_LIMITED_API
static CYTHON_INLINE double __pyx_f_7cpython_7complex_7complex_4imag_imag(PyComplexObject *__pyx_v_self); /* proto*/
#endif
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13PciInfoExt_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_7PciInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11Utilization__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_6Memory__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_9Memory_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_10BAR1Memory__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11ProcessInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16ProcessDetail_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16DeviceAttributes__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14C2cModeInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14BridgeChipInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_5Value__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod0__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13CoolerInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20MarginTemperature_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15ClkMonFaultInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14ClockOffset_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18DevicePerfModes_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24ProcessUtilizationSample__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15PlatformInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod4__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod5__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_17GridLicenseExpiry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_9HwbcEntry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_8LedState__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_8UnitInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_7PSUInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11UnitFanInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_9EventData__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15AccountingStats__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18EncoderSessionInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_8FBCStats__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14FBCSessionInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22ConfComputeSystemState__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11VgpuVersion__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_12VgpuMetadata__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20GpuInstancePlacement__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24ComputeInstancePlacement__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_10GpmSupport__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15RepairStatus_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_6Pdi_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20ProcessDetailList_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19BridgeChipHierarchy__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_6Sample__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_10FieldValue__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18GpuThermalSettings__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_12ClkMonStatus__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19VgpuSchedulerParams__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15VgpuLicenseInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21GridLicensableFeature__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13UnitFanSpeeds__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15GpuInstanceInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19ComputeInstanceInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16VgpuSchedulerLog__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22GridLicensableFeatures__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13NvLinkInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto*/

/* Module declarations from "libc.stdint" */

/* Module declarations from "cuda.bindings.cy_nvml" */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlInit_v2)(void); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlInitWithFlags)(unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlShutdown)(void); /*proto*/
static char const *(*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlErrorString)(nvmlReturn_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverVersion)(char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNVMLVersion)(char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion)(int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion_v2)(int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetProcessName)(unsigned int, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetHicVersion)(unsigned int *, nvmlHwbcEntry_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetTopologyGpuSet)(unsigned int, unsigned int *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverBranch)(nvmlSystemDriverBranchInfo_t *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetCount)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetHandleByIndex)(unsigned int, nvmlUnit_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetUnitInfo)(nvmlUnit_t, nvmlUnitInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetLedState)(nvmlUnit_t, nvmlLedState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetPsuInfo)(nvmlUnit_t, nvmlPSUInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetTemperature)(nvmlUnit_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetFanSpeedInfo)(nvmlUnit_t, nvmlUnitFanSpeeds_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetDevices)(nvmlUnit_t, unsigned int *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCount_v2)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAttributes_v2)(nvmlDevice_t, nvmlDeviceAttributes_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByIndex_v2)(unsigned int, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleBySerial)(char const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUID)(char const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUIDV)(nvmlUUID_t const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByPciBusId_v2)(char const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetName)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBrand)(nvmlDevice_t, nvmlBrandType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIndex)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSerial)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetModuleId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetC2cModeInfoV)(nvmlDevice_t, nvmlC2cModeInfo_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryAffinity)(nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinityWithinScope)(nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinity)(nvmlDevice_t, unsigned int, unsigned long *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetCpuAffinity)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearCpuAffinity)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumaNodeId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyCommonAncestor)(nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyNearestGpus)(nvmlDevice_t, nvmlGpuTopologyLevel_t, unsigned int *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetP2PStatus)(nvmlDevice_t, nvmlDevice_t, nvmlGpuP2PCapsIndex_t, nvmlGpuP2PStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUUID)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinorNumber)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardPartNumber)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomVersion)(nvmlDevice_t, nvmlInforomObject_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomImageVersion)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomConfigurationChecksum)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceValidateInforom)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetLastBBXFlushTime)(nvmlDevice_t, unsigned PY_LONG_LONG *, unsigned long *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayActive)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPersistenceMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfoExt)(nvmlDevice_t, nvmlPciInfoExt_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfo_v3)(nvmlDevice_t, nvmlPciInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkGeneration)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuMaxPcieLinkGeneration)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkWidth)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkGeneration)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkWidth)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieThroughput)(nvmlDevice_t, nvmlPcieUtilCounter_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieReplayCounter)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockInfo)(nvmlDevice_t, nvmlClockType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxClockInfo)(nvmlDevice_t, nvmlClockType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkVfOffset)(nvmlDevice_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClock)(nvmlDevice_t, nvmlClockType_t, nvmlClockId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxCustomerBoostClock)(nvmlDevice_t, nvmlClockType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedMemoryClocks)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedGraphicsClocks)(nvmlDevice_t, unsigned int, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAutoBoostedClocksEnabled)(nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed_v2)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeedRPM)(nvmlDevice_t, nvmlFanSpeedInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTargetFanSpeed)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxFanSpeed)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanControlPolicy_v2)(nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumFans)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCoolerInfo)(nvmlDevice_t, nvmlCoolerInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureV)(nvmlDevice_t, nvmlTemperature_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureThreshold)(nvmlDevice_t, nvmlTemperatureThresholds_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMarginTemperature)(nvmlDevice_t, nvmlMarginTemperature_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetThermalSettings)(nvmlDevice_t, unsigned int, nvmlGpuThermalSettings_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceState)(nvmlDevice_t, nvmlPstates_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClocksEventReasons)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedClocksEventReasons)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerState)(nvmlDevice_t, nvmlPstates_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDynamicPstatesInfo)(nvmlDevice_t, nvmlGpuDynamicPstatesInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkVfOffset)(nvmlDevice_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxClockOfPState)(nvmlDevice_t, nvmlClockType_t, nvmlPstates_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedPerformanceStates)(nvmlDevice_t, nvmlPstates_t *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkMinMaxVfOffset)(nvmlDevice_t, int *, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkMinMaxVfOffset)(nvmlDevice_t, int *, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockOffsets)(nvmlDevice_t, nvmlClockOffset_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetClockOffsets)(nvmlDevice_t, nvmlClockOffset_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceModes)(nvmlDevice_t, nvmlDevicePerfModes_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClockFreqs)(nvmlDevice_t, nvmlDeviceCurrentClockFreqs_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimit)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimitConstraints)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementDefaultLimit)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerUsage)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEnergyConsumption)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEnforcedPowerLimit)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuOperationMode)(nvmlDevice_t, nvmlGpuOperationMode_t *, nvmlGpuOperationMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryInfo_v2)(nvmlDevice_t, nvmlMemory_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeMode)(nvmlDevice_t, nvmlComputeMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCudaComputeCapability)(nvmlDevice_t, int *, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDramEncryptionMode)(nvmlDevice_t, nvmlDramEncryptionInfo_t *, nvmlDramEncryptionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDramEncryptionMode)(nvmlDevice_t, nvmlDramEncryptionInfo_t const *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEccMode)(nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDefaultEccMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMultiGpuBoard)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEccErrors)(nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryErrorCounter)(nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, nvmlMemoryLocation_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUtilizationRates)(nvmlDevice_t, nvmlUtilization_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderCapacity)(nvmlDevice_t, nvmlEncoderType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderStats)(nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderSessions)(nvmlDevice_t, unsigned int *, nvmlEncoderSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDecoderUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetJpgUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetOfaUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCStats)(nvmlDevice_t, nvmlFBCStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCSessions)(nvmlDevice_t, unsigned int *, nvmlFBCSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDriverModel_v2)(nvmlDevice_t, nvmlDriverModel_t *, nvmlDriverModel_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVbiosVersion)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBridgeChipInfo)(nvmlDevice_t, nvmlBridgeChipHierarchy_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeRunningProcesses_v3)(nvmlDevice_t, unsigned int *, nvmlProcessInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMPSComputeRunningProcesses_v3)(nvmlDevice_t, unsigned int *, nvmlProcessInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRunningProcessDetailList)(nvmlDevice_t, nvmlProcessDetailList_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceOnSameBoard)(nvmlDevice_t, nvmlDevice_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAPIRestriction)(nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSamples)(nvmlDevice_t, nvmlSamplingType_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlSample_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBAR1MemoryInfo)(nvmlDevice_t, nvmlBAR1Memory_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIrqNum)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumGpuCores)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerSource)(nvmlDevice_t, nvmlPowerSource_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryBusWidth)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieLinkMaxSpeed)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieSpeed)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAdaptiveClockInfoStatus)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBusType)(nvmlDevice_t, nvmlBusType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuFabricInfoV)(nvmlDevice_t, nvmlGpuFabricInfoV_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeCapabilities)(nvmlConfComputeSystemCaps_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeState)(nvmlConfComputeSystemState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeMemSizeInfo)(nvmlDevice_t, nvmlConfComputeMemSizeInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeGpusReadyState)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeProtectedMemoryUsage)(nvmlDevice_t, nvmlMemory_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuCertificate)(nvmlDevice_t, nvmlConfComputeGpuCertificate_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuAttestationReport)(nvmlDevice_t, nvmlConfComputeGpuAttestationReport_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeKeyRotationThresholdInfo)(nvmlConfComputeGetKeyRotationThresholdInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetConfComputeUnprotectedMemSize)(nvmlDevice_t, unsigned PY_LONG_LONG); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeGpusReadyState)(unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeKeyRotationThresholdInfo)(nvmlConfComputeSetKeyRotationThresholdInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeSettings)(nvmlSystemConfComputeSettings_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareVersion)(nvmlDevice_t, char *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareMode)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramEccErrorStatus)(nvmlDevice_t, nvmlEccSramErrorStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingStats)(nvmlDevice_t, unsigned int, nvmlAccountingStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingPids)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingBufferSize)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages)(nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages_v2)(nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPagesPendingStatus)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRemappedRows)(nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRowRemapperHistogram)(nvmlDevice_t, nvmlRowRemapperHistogramValues_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetArchitecture)(nvmlDevice_t, nvmlDeviceArchitecture_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClkMonStatus)(nvmlDevice_t, nvmlClkMonStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessUtilization)(nvmlDevice_t, nvmlProcessUtilizationSample_t *, unsigned int *, unsigned PY_LONG_LONG); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessesUtilizationInfo)(nvmlDevice_t, nvmlProcessesUtilizationInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPlatformInfo)(nvmlDevice_t, nvmlPlatformInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitSetLedState)(nvmlUnit_t, nvmlLedColor_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPersistenceMode)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetComputeMode)(nvmlDevice_t, nvmlComputeMode_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetEccMode)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearEccErrorCounts)(nvmlDevice_t, nvmlEccCounterType_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDriverModel)(nvmlDevice_t, nvmlDriverModel_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuLockedClocks)(nvmlDevice_t, unsigned int, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetGpuLockedClocks)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMemoryLockedClocks)(nvmlDevice_t, unsigned int, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetMemoryLockedClocks)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAutoBoostedClocksEnabled)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultAutoBoostedClocksEnabled)(nvmlDevice_t, nvmlEnableState_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultFanSpeed_v2)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanControlPolicy)(nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetTemperatureThreshold)(nvmlDevice_t, nvmlTemperatureThresholds_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuOperationMode)(nvmlDevice_t, nvmlGpuOperationMode_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAPIRestriction)(nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanSpeed_v2)(nvmlDevice_t, unsigned int, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAccountingMode)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearAccountingPids)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit_v2)(nvmlDevice_t, nvmlPowerValue_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkState)(nvmlDevice_t, unsigned int, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkVersion)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkCapability)(nvmlDevice_t, unsigned int, nvmlNvLinkCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemotePciInfo_v2)(nvmlDevice_t, unsigned int, nvmlPciInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkErrorCounter)(nvmlDevice_t, unsigned int, nvmlNvLinkErrorCounter_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetNvLinkErrorCounters)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemoteDeviceType)(nvmlDevice_t, unsigned int, nvmlIntNvLinkDeviceType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvLinkDeviceLowPowerThreshold)(nvmlDevice_t, nvmlNvLinkPowerThres_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetNvlinkBwMode)(unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNvlinkBwMode)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkSupportedBwModes)(nvmlDevice_t, nvmlNvlinkSupportedBwModes_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkBwMode)(nvmlDevice_t, nvmlNvlinkGetBwMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvlinkBwMode)(nvmlDevice_t, nvmlNvlinkSetBwMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetCreate)(nvmlEventSet_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRegisterEvents)(nvmlDevice_t, unsigned PY_LONG_LONG, nvmlEventSet_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedEventTypes)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetWait_v2)(nvmlEventSet_t, nvmlEventData_t *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetFree)(nvmlEventSet_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetCreate)(nvmlSystemEventSetCreateRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetFree)(nvmlSystemEventSetFreeRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemRegisterEvents)(nvmlSystemRegisterEventRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetWait)(nvmlSystemEventSetWaitRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceModifyDrainState)(nvmlPciInfo_t *, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceQueryDrainState)(nvmlPciInfo_t *, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRemoveGpu_v2)(nvmlPciInfo_t *, nvmlDetachGpuState_t, nvmlPcieLinkState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceDiscoverGpus)(nvmlPciInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFieldValues)(nvmlDevice_t, int, nvmlFieldValue_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearFieldValues)(nvmlDevice_t, int, nvmlFieldValue_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVirtualizationMode)(nvmlDevice_t, nvmlGpuVirtualizationMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostVgpuMode)(nvmlDevice_t, nvmlHostVgpuMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVirtualizationMode)(nvmlDevice_t, nvmlGpuVirtualizationMode_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuHeterogeneousMode)(nvmlDevice_t, nvmlVgpuHeterogeneousMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuHeterogeneousMode)(nvmlDevice_t, nvmlVgpuHeterogeneousMode_t const *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetPlacementId)(nvmlVgpuInstance_t, nvmlVgpuPlacementId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeSupportedPlacements)(nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeCreatablePlacements)(nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGspHeapSize)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFbReservation)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetRuntimeStateSize)(nvmlVgpuInstance_t, nvmlVgpuRuntimeState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuCapabilities)(nvmlDevice_t, nvmlDeviceVgpuCapability_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGridLicensableFeatures_v4)(nvmlDevice_t, nvmlGridLicensableFeatures_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuDriverCapabilities)(nvmlVgpuDriverCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuCapabilities)(nvmlDevice_t, nvmlDeviceVgpuCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedVgpus)(nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCreatableVgpus)(nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetClass)(nvmlVgpuTypeId_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetName)(nvmlVgpuTypeId_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGpuInstanceProfileId)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetDeviceID)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFramebufferSize)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetNumDisplayHeads)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetResolution)(nvmlVgpuTypeId_t, unsigned int, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetLicense)(nvmlVgpuTypeId_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFrameRateLimit)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstances)(nvmlDevice_t, nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerVm)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetBAR1Info)(nvmlVgpuTypeId_t, nvmlVgpuTypeBar1Info_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetActiveVgpus)(nvmlDevice_t, unsigned int *, nvmlVgpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmID)(nvmlVgpuInstance_t, char *, unsigned int, nvmlVgpuVmIdType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetUUID)(nvmlVgpuInstance_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmDriverVersion)(nvmlVgpuInstance_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFbUsage)(nvmlVgpuInstance_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseStatus)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetType)(nvmlVgpuInstance_t, nvmlVgpuTypeId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFrameRateLimit)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEccMode)(nvmlVgpuInstance_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderCapacity)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceSetEncoderCapacity)(nvmlVgpuInstance_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderStats)(nvmlVgpuInstance_t, unsigned int *, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderSessions)(nvmlVgpuInstance_t, unsigned int *, nvmlEncoderSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCStats)(nvmlVgpuInstance_t, nvmlFBCStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCSessions)(nvmlVgpuInstance_t, unsigned int *, nvmlFBCSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuInstanceId)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuPciId)(nvmlVgpuInstance_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetCapabilities)(nvmlVgpuTypeId_t, nvmlVgpuCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMdevUUID)(nvmlVgpuInstance_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetCreatableVgpus)(nvmlGpuInstance_t, nvmlVgpuTypeIdInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerGpuInstance)(nvmlVgpuTypeMaxInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetActiveVgpus)(nvmlGpuInstance_t, nvmlActiveVgpuInstanceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuSchedulerState)(nvmlGpuInstance_t, nvmlVgpuSchedulerState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerState)(nvmlGpuInstance_t, nvmlVgpuSchedulerStateInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerLog)(nvmlGpuInstance_t, nvmlVgpuSchedulerLogInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuTypeCreatablePlacements)(nvmlGpuInstance_t, nvmlVgpuCreatablePlacementInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuHeterogeneousMode)(nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuHeterogeneousMode)(nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t const *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMetadata)(nvmlVgpuInstance_t, nvmlVgpuMetadata_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuMetadata)(nvmlDevice_t, nvmlVgpuPgpuMetadata_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuCompatibility)(nvmlVgpuMetadata_t *, nvmlVgpuPgpuMetadata_t *, nvmlVgpuPgpuCompatibility_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPgpuMetadataString)(nvmlDevice_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerLog)(nvmlDevice_t, nvmlVgpuSchedulerLog_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerState)(nvmlDevice_t, nvmlVgpuSchedulerGetState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerCapabilities)(nvmlDevice_t, nvmlVgpuSchedulerCapabilities_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuSchedulerState)(nvmlDevice_t, nvmlVgpuSchedulerSetState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuVersion)(nvmlVgpuVersion_t *, nvmlVgpuVersion_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSetVgpuVersion)(nvmlVgpuVersion_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuUtilization)(nvmlDevice_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlVgpuInstanceUtilizationSample_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuInstancesUtilizationInfo)(nvmlDevice_t, nvmlVgpuInstancesUtilizationInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessUtilization)(nvmlDevice_t, unsigned PY_LONG_LONG, unsigned int *, nvmlVgpuProcessUtilizationSample_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessesUtilizationInfo)(nvmlDevice_t, nvmlVgpuProcessesUtilizationInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingMode)(nvmlVgpuInstance_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingPids)(nvmlVgpuInstance_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingStats)(nvmlVgpuInstance_t, unsigned int, nvmlAccountingStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceClearAccountingPids)(nvmlVgpuInstance_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseInfo_v2)(nvmlVgpuInstance_t, nvmlVgpuLicenseInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceCount)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceInfoByIndex)(unsigned int, nvmlExcludedDeviceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMigMode)(nvmlDevice_t, unsigned int, nvmlReturn_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigMode)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoV)(nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstancePossiblePlacements_v2)(nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceRemainingCapacity)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstance)(nvmlDevice_t, unsigned int, nvmlGpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstanceWithPlacement)(nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t const *, nvmlGpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceDestroy)(nvmlGpuInstance_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstances)(nvmlDevice_t, unsigned int, nvmlGpuInstance_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceById)(nvmlDevice_t, unsigned int, nvmlGpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetInfo)(nvmlGpuInstance_t, nvmlGpuInstanceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceProfileInfoV)(nvmlGpuInstance_t, unsigned int, unsigned int, nvmlComputeInstanceProfileInfo_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceRemainingCapacity)(nvmlGpuInstance_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstancePossiblePlacements)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstance)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstanceWithPlacement)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t const *, nvmlComputeInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceDestroy)(nvmlComputeInstance_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstances)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceById)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceGetInfo_v2)(nvmlComputeInstance_t, nvmlComputeInstanceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceIsMigDeviceHandle)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeInstanceId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxMigDeviceCount)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigDeviceHandleByIndex)(nvmlDevice_t, unsigned int, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDeviceHandleFromMigDeviceHandle)(nvmlDevice_t, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSampleGet)(nvmlDevice_t, nvmlGpmSample_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmMigSampleGet)(nvmlDevice_t, unsigned int, nvmlGpmSample_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryDeviceSupport)(nvmlDevice_t, nvmlGpmSupport_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryIfStreamingEnabled)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSetStreamingEnabled)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCapabilities)(nvmlDevice_t, nvmlDeviceCapabilities_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceWorkloadPowerProfileClearRequestedProfiles)(nvmlDevice_t, nvmlWorkloadPowerProfileRequestedProfiles_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingActivatePresetProfile)(nvmlDevice_t, nvmlPowerSmoothingProfile_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingUpdatePresetProfileParam)(nvmlDevice_t, nvmlPowerSmoothingProfile_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingSetState)(nvmlDevice_t, nvmlPowerSmoothingState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAddressingMode)(nvmlDevice_t, nvmlDeviceAddressingMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRepairStatus)(nvmlDevice_t, nvmlRepairStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerMizerMode_v1)(nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerMizerMode_v1)(nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPdi)(nvmlDevice_t, nvmlPdi_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetHostname_v1)(nvmlDevice_t, nvmlHostname_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostname_v1)(nvmlDevice_t, nvmlHostname_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkInfo)(nvmlDevice_t, nvmlNvLinkInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceReadWritePRM_v1)(nvmlDevice_t, nvmlPRMTLV_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoByIdV)(nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts)(nvmlDevice_t, nvmlEccSramUniqueUncorrectedErrorCounts_t *); /*proto*/

/* Module declarations from "cython.view" */

/* Module declarations from "cython.dataclasses" */

/* Module declarations from "cython" */

/* Module declarations from "cpython.version" */

/* Module declarations from "__builtin__" */

/* Module declarations from "cpython.type" */

/* Module declarations from "libc.string" */

/* Module declarations from "libc.stdio" */

/* Module declarations from "cpython.object" */

/* Module declarations from "cpython.ref" */

/* Module declarations from "cpython.exc" */

/* Module declarations from "cpython.module" */

/* Module declarations from "cpython.mem" */

/* Module declarations from "cpython.tuple" */

/* Module declarations from "cpython.list" */

/* Module declarations from "cpython.sequence" */

/* Module declarations from "cpython.mapping" */

/* Module declarations from "cpython.iterator" */

/* Module declarations from "cpython.number" */

/* Module declarations from "__builtin__" */

/* Module declarations from "cpython.bool" */

/* Module declarations from "cpython.long" */

/* Module declarations from "cpython.float" */

/* Module declarations from "__builtin__" */

/* Module declarations from "cpython.complex" */

/* Module declarations from "libc.stddef" */

/* Module declarations from "cpython.unicode" */

/* Module declarations from "cpython.pyport" */

/* Module declarations from "cpython.dict" */

/* Module declarations from "cpython.instance" */

/* Module declarations from "cpython.function" */

/* Module declarations from "cpython.method" */

/* Module declarations from "cpython.weakref" */

/* Module declarations from "cpython.getargs" */

/* Module declarations from "cpython.pythread" */

/* Module declarations from "cpython.pystate" */

/* Module declarations from "cpython.set" */

/* Module declarations from "cpython.buffer" */

/* Module declarations from "cpython.bytes" */

/* Module declarations from "cpython.pycapsule" */

/* Module declarations from "cpython.contextvars" */

/* Module declarations from "cpython.memoryview" */

/* Module declarations from "cpython" */

/* Module declarations from "libcpp.vector" */

/* Module declarations from "libcpp" */

/* Module declarations from "libcpp.memory" */

/* Module declarations from "cuda.bindings._internal.utils" */
static void *(*__pyx_f_4cuda_8bindings_9_internal_5utils_get_buffer_pointer)(PyObject *, Py_ssize_t, struct __pyx_opt_args_4cuda_8bindings_9_internal_5utils_get_buffer_pointer *__pyx_optional_args); /*proto*/
static int (*__pyx_fuse_0__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr)(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<int>  &, PyObject *, int *); /*proto*/
static int (*__pyx_fuse_1__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr)(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<int32_t>  &, PyObject *, int32_t *); /*proto*/
static int (*__pyx_fuse_2__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr)(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<int64_t>  &, PyObject *, int64_t *); /*proto*/
static int (*__pyx_fuse_3__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr)(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<char>  &, PyObject *, char *); /*proto*/
static int (*__pyx_fuse_4__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr)(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<float>  &, PyObject *, float *); /*proto*/
static int (*__pyx_fuse_5__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr)(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<double>  &, PyObject *, double *); /*proto*/

/* Module declarations from "libc.stdlib" */

/* Module declarations from "cuda.bindings._nvml" */
static PyObject *__pyx_collections_abc_Sequence = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_init_v2(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_init_with_flags(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_shutdown(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_error_string(int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_driver_version(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_nvml_version(int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version(int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version_v2(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_process_name(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_hic_version(int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_unit_get_count(int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_unit_get_handle_by_index(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_unit_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_led_state(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_psu_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_unit_get_temperature(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_fan_speed_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_count_v2(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_attributes_v2(intptr_t, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_index_v2(unsigned int, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_serial(PyObject *, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuid(PyObject *, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuidv(intptr_t, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_pci_bus_id_v2(PyObject *, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_name(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_brand(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_index(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_serial(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_module_id(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_c2c_mode_info_v(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_affinity(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity_within_scope(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_cpu_affinity(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_cpu_affinity(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_numa_node_id(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_topology_common_ancestor(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_p2p_status(intptr_t, intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_uuid(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_minor_number(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_board_part_number(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_version(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_image_version(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_configuration_checksum(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_validate_inforom(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned long __pyx_f_4cuda_8bindings_5_nvml_device_get_last_bbx_flush_time(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_display_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_display_active(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_persistence_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_ext(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_v3(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_generation(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_max_pcie_link_generation(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_width(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_generation(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_width(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_throughput(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_replay_counter(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_clock_info(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_clock_info(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_vf_offset(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_clock(intptr_t, int, int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_customer_boost_clock(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_memory_clocks(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_graphics_clocks(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_auto_boosted_clocks_enabled(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_v2(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_rpm(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_target_fan_speed(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_fan_speed(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_control_policy_v2(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_num_fans(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cooler_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_threshold(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_margin_temperature(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_thermal_settings(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_performance_state(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_current_clocks_event_reasons(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_clocks_event_reasons(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_state(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_dynamic_pstates_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_vf_offset(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_clock_of_p_state(intptr_t, int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_min_max_vf_offset(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_min_max_vf_offset(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_clock_offsets(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_clock_offsets(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_performance_modes(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_current_clock_freqs(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit_constraints(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_default_limit(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_usage(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_total_energy_consumption(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_enforced_power_limit(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_operation_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_info_v2(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cuda_compute_capability(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_dram_encryption_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_dram_encryption_mode(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_ecc_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_default_ecc_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_board_id(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_multi_gpu_board(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_total_ecc_errors(intptr_t, int, int, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_error_counter(intptr_t, int, int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_utilization_rates(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_utilization(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_capacity(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_stats(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_sessions(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_decoder_utilization(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_jpg_utilization(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_ofa_utilization(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_stats(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_sessions(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_driver_model_v2(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vbios_version(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_bridge_chip_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_compute_running_processes_v3(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_mps_compute_running_processes_v3(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_on_same_board(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_api_restriction(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_bar1_memory_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_irq_num(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_num_gpu_cores(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_source(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_bus_width(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_link_max_speed(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_speed(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_adaptive_clock_info_status(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_bus_type(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_fabric_info_v(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_capabilities(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_state(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_mem_size_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_gpus_ready_state(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_protected_memory_usage(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_certificate(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_attestation_report(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_key_rotation_threshold_info(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_conf_compute_unprotected_mem_size(intptr_t, unsigned PY_LONG_LONG, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_gpus_ready_state(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_key_rotation_threshold_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_settings(int __pyx_skip_dispatch); /*proto*/
static char __pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_version(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_sram_ecc_error_status(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_stats(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_pids(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_buffer_size(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_pending_status(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_remapped_rows(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_row_remapper_histogram(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_architecture(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_clk_mon_status(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_process_utilization(intptr_t, unsigned PY_LONG_LONG, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_platform_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_set_led_state(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_persistence_mode(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_compute_mode(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_ecc_mode(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_ecc_error_counts(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_driver_model(intptr_t, int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_locked_clocks(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_reset_gpu_locked_clocks(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_memory_locked_clocks(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_reset_memory_locked_clocks(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_auto_boosted_clocks_enabled(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_default_auto_boosted_clocks_enabled(intptr_t, int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_default_fan_speed_v2(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_fan_control_policy(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_temperature_threshold(intptr_t, int, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_operation_mode(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_api_restriction(intptr_t, int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_fan_speed_v2(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_accounting_mode(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_accounting_pids(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit_v2(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_state(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_version(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_capability(intptr_t, unsigned int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_pci_info_v2(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_error_counter(intptr_t, unsigned int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_reset_nvlink_error_counters(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_device_type(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_device_low_power_threshold(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_set_nvlink_bw_mode(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_system_get_nvlink_bw_mode(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_supported_bw_modes(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_bw_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_bw_mode(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_event_set_create(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_register_events(intptr_t, unsigned PY_LONG_LONG, intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_event_types(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_event_set_wait_v2(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_event_set_free(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_event_set_create(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_event_set_free(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_register_events(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_event_set_wait(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_modify_drain_state(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_query_drain_state(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_remove_gpu_v2(intptr_t, int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_discover_gpus(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_virtualization_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_host_vgpu_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_virtualization_mode(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_heterogeneous_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_heterogeneous_mode(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_placement_id(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_supported_placements(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gsp_heap_size(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_fb_reservation(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_runtime_state_size(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_capabilities(intptr_t, int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_grid_licensable_features_v4(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_get_vgpu_driver_capabilities(int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_capabilities(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_class(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_name(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gpu_instance_profile_id(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_device_id(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_framebuffer_size(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_num_display_heads(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_resolution(unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_license(unsigned int, intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_frame_rate_limit(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_vm(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_bar1_info(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_uuid(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_driver_version(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fb_usage(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_status(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_type(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_frame_rate_limit(unsigned int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_ecc_mode(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_capacity(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_set_encoder_capacity(unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_stats(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_sessions(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_stats(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_sessions(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_instance_id(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_pci_id(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_capabilities(unsigned int, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_mdev_uuid(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_gpu_instance(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_scheduler_state(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_state(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_log(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_heterogeneous_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_heterogeneous_mode(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pgpu_metadata_string(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_log(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_state(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_capabilities(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_scheduler_state(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_set_vgpu_version(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_utilization(intptr_t, unsigned PY_LONG_LONG, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_process_utilization(intptr_t, unsigned PY_LONG_LONG, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_mode(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_pids(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_stats(unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_clear_accounting_pids(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_info_v2(unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_count(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_info_by_index(unsigned int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_device_set_mig_mode(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_mig_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_v(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_possible_placements_v2(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_remaining_capacity(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance_with_placement(intptr_t, unsigned int, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_destroy(intptr_t, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_by_id(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_profile_info_v(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_remaining_capacity(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_possible_placements(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance_with_placement(intptr_t, unsigned int, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_compute_instance_destroy(intptr_t, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_by_id(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_compute_instance_get_info_v2(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_is_mig_device_handle(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_id(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_instance_id(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_mig_device_count(intptr_t, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_mig_device_handle_by_index(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_device_handle_from_mig_device_handle(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_sample_get(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_mig_sample_get(intptr_t, unsigned int, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_query_device_support(intptr_t, int __pyx_skip_dispatch); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_gpm_query_if_streaming_enabled(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_set_streaming_enabled(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_capabilities(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_workload_power_profile_clear_requested_profiles(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_activate_preset_profile(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_update_preset_profile_param(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_set_state(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_addressing_mode(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_repair_status(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_power_mizer_mode_v1(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_power_mizer_mode_v1(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pdi(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_read_write_prm_v1(intptr_t, intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_by_id_v(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___from_data(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__nvml_error_factory(int); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_check_status(int, int __pyx_skip_dispatch); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_check_status_size(int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_pci_info_ext_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_pci_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_utilization_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_memory_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_memory_v2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ba_r1memory_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_detail_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_attributes_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_c2c_mode_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_row_remapper_histogram_values_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_bridge_chip_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod0_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_cooler_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_margin_temperature_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_clk_mon_fault_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_clock_offset_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_fan_speed_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_perf_modes_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_current_clock_freqs_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_utilization_sample_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_utilization_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_error_status_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_platform_info_v2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_heterogeneous_mode_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_placement_id_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_placement_list_v2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_bar1info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_process_utilization_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_runtime_state_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod3_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_entry_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod4_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod5_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_capabilities_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_license_expiry_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_grid_license_expiry_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_id_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_max_instance_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_active_vgpu_instance_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_creatable_placement_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_hwbc_entry_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_led_state_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_unit_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_psu_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_unit_fan_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_event_data_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_accounting_stats_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_encoder_session_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_fbc_stats_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_fbc_session_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_system_caps_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_system_state_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_system_conf_compute_settings_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_mem_size_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_gpu_certificate_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_gpu_attestation_report_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_supported_bw_modes_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_get_bw_mode_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_set_bw_mode_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_version_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_metadata_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_pgpu_compatibility_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_placement_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_profile_info_v2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_profile_info_v3_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_placement_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_profile_info_v2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_profile_info_v3_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpm_support_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_capabilities_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_addressing_mode_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_repair_status_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_pdi_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_power_mizer_modes_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_fabric_info_v3_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_firmware_version_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_excluded_device_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_detail_list_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_bridge_chip_hierarchy_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_sample_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_instance_utilization_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_field_value_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_thermal_settings_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_clk_mon_status_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_processes_utilization_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_dynamic_pstates_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_processes_utilization_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_license_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_grid_licensable_feature_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_unit_fan_speeds_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_pgpu_metadata_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_firmware_info_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_instances_utilization_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_get_state_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_state_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_info_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_state_v1_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_grid_licensable_features_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nv_link_info_v2_dtype_offsets(void); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_topology_gpu_set(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_driver_branch(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_devices(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_topology_nearest_gpus(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_v(intptr_t, nvmlTemperatureSensors_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_performance_states(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_running_process_detail_list(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_samples(intptr_t, int, unsigned PY_LONG_LONG, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_v2(intptr_t, int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_processes_utilization_info(intptr_t, unsigned PY_LONG_LONG, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_hostname_v1(intptr_t, PyObject *, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_hostname_v1(intptr_t, int __pyx_skip_dispatch); /*proto*/
static struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_f_4cuda_8bindings_5_nvml__cast_field_values(PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_field_values(intptr_t, PyObject *, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_field_values(intptr_t, PyObject *, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_vgpus(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_creatable_vgpus(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_active_vgpus(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_id(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_creatable_vgpus(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_active_vgpus(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_type_creatable_placements(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_creatable_placements(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_metadata(unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_metadata(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_get_vgpu_compatibility(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *, struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_get_vgpu_version(int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_instances_utilization_info(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_processes_utilization_info(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instances(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instances(intptr_t, unsigned int, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessDetail_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_BridgeChipInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ClkMonFaultInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationSample__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationInfo_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuSchedulerLogEntry__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_HwbcEntry__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_UnitFanInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EncoderSessionInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FBCSessionInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GpuInstancePlacement__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ComputeInstancePlacement__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_Sample__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FieldValue__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *, PyObject *); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GridLicensableFeature__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *, PyObject *); /*proto*/
static int __pyx_array_allocate_buffer(struct __pyx_array_obj *); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char const *, char *); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo const *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static int assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, PyObject *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, PyObject *); /*proto*/
static int __pyx_memoryview_err_no_memory(void); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
/* #### Code section: typeinfo ### */
/* #### Code section: before_global_var ### */
#define __Pyx_MODULE_NAME "cuda.bindings._nvml"
extern int __pyx_module_is_main_cuda__bindings___nvml;
int __pyx_module_is_main_cuda__bindings___nvml = 0;

/* Implementation of "cuda.bindings._nvml" */
/* #### Code section: global_var ### */
static PyObject *__pyx_builtin_staticmethod;
static PyObject *__pyx_builtin_super;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin___import__;
static PyObject *__pyx_builtin_Ellipsis;
/* #### Code section: string_decls ### */
static const char __pyx_k_c[] = "c";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_data_2[] = "_data";
static const char __pyx_k_fortran[] = "fortran";
/* #### Code section: decls ### */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
/*
 * NOTE(review): Cython-GENERATED forward declarations — do not hand-edit.
 * Each `__pyx_pf_*` symbol is the implementation ("py-function") half of a
 * Python-visible method; the definitions appear later in this file. The name
 * encoding is Cython's mangling: module path segments prefixed by their
 * length (e.g. `4cuda_8bindings_5_nvml` = cuda.bindings._nvml), then the
 * class and method. Grouping comments below are orientation only.
 */

/* --- View.MemoryView internal helpers (memoryview / memoryviewslice) --- */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */

/* --- NvmlError (plain Python class methods compiled as module-level funcs) --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9NvmlError___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_status); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9NvmlError_2__reduce__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */

/* --- module-level status-check helpers --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_check_status(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_status); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_2check_status_size(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_status); /* proto */

/* --- PciInfoExt_v1 extension type: lifecycle, dunders, field accessors,
 *     and from_data/from_ptr constructors --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
/* property getter/setter pairs (version, domain, bus, device_, pci_device_id,
 * pci_sub_system_id, base_class, sub_class, bus_id) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- PciInfo extension type (same shape as PciInfoExt_v1, plus
 *     bus_id_legacy, minus version/base_class/sub_class) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6domain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6domain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3bus___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3bus_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6bus_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- Utilization extension type (gpu / memory fields) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3gpu___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3gpu_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- Memory extension type (total / free / used fields) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_6Memory_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_5total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_5total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_4free___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4free_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_4used___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4used_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- Memory_v2 extension type (Memory fields plus a version field) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_5total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_5total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4free___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4free_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4used___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4used_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- BAR1Memory extension type (bar1total / bar1free / bar1_used) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- ProcessInfo extension type: sized container variant (__init__ takes a
 *     size, has __len__/__getitem__, from_ptr takes a size_t, plus a _data
 *     property and picklable reduce/setstate) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */

/* --- ProcessDetail_v1 extension type: same container shape as ProcessInfo,
 *     with an extra used_gpu_cc_protected_memory field --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */

/* --- DeviceAttributes extension type (engine/slice counts + memory size) --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* --- C2cModeInfo_v1 extension type (is_c2c_enabled); declaration group
 *     continues beyond this chunk --- */
static int __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_5Value_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_5d_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_5d_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6si_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6si_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6ui_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ui_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6ul_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ul_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_7ull_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_7ull_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_7sll_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_7sll_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6us_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6us_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Cython-generated prototypes for the FanSpeedInfo_v1 extension type's
 * Python-level implementations (__pyx_pf_*): lifecycle (__init__/__dealloc__),
 * protocol slots (__repr__/__int__/__eq__/__setitem__), the ptr/version/fan/speed
 * property get/set pairs, the from_data/from_ptr alternate constructors, and
 * the pickle helpers (__reduce_cython__/__setstate_cython__).
 * NOTE(review): presumably wraps an NVML fan-speed-info struct — generated
 * from cuda/bindings/_nvml.pyx; definitions appear later in this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the DevicePerfModes_v1 extension type: same generated layout
 * as FanSpeedInfo_v1 above (lifecycle, protocol slots, property accessors,
 * from_data/from_ptr, pickle helpers); exposes `version` and `str` properties. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the DeviceCurrentClockFreqs_v1 extension type: identical
 * generated shape to DevicePerfModes_v1 (`version` + `str` properties,
 * from_data/from_ptr constructors, pickle helpers). */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the ProcessUtilizationSample extension type. Unlike the
 * *_v1 struct wrappers above, this type is array-like: __init__ takes a
 * `size`, it implements __len__/__getitem__/__setitem__, from_ptr takes a
 * size_t element count, and it exposes a `_data` property plus stateful
 * pickle helpers. Per-element fields: pid, time_stamp, sm/mem/enc/dec_util. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the ProcessUtilizationInfo_v1 extension type: the same
 * array-like generated shape as ProcessUtilizationSample, extended with
 * jpg_util and ofa_util per-element fields. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the EccSramErrorStatus_v1 extension type (single-struct
 * wrapper shape, like FanSpeedInfo_v1). Properties: version, the
 * aggregate_* / volatile_* ECC error counters, the aggregate_unc_bucket_*
 * breakdown counters, and the b_threshold_exceeded flag. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the PlatformInfo_v2 extension type (single-struct wrapper
 * shape). Properties: version, ib_guid, chassis_serial_number, slot_number,
 * tray_ind_ex, host_id, peer_type, module_id. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for _py_anon_pod1, a wrapper Cython generates for an anonymous
 * nested struct in the .pyx declarations (hence the synthetic name).
 * Properties: b_is_present, percentage, inc_threshold, dec_threshold. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `_py_anon_pod4` extension type (a Cython wrapper for an
 * anonymous POD struct): lifecycle (`__init__`/`__dealloc__`), introspection
 * (`__repr__`, `ptr` getter, `__int__`), comparison (`__eq__`), item
 * assignment, the `avg_factor`/`frequency` field accessors, the
 * `from_data`/`from_ptr` constructors, and the pickle helpers. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `_py_anon_pod5` extension type (anonymous POD wrapper):
 * same method set as `_py_anon_pod4`, but exposing a single `timeslice`
 * field accessor pair. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `VgpuSchedulerCapabilities` extension type: standard
 * lifecycle/special methods plus getter/setter pairs for the scheduler
 * capability fields (supported_schedulers, max/min_timeslice,
 * is_arr_mode_supported, max/min_frequency_for_arr,
 * max/min_avg_factor_for_arr). */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `VgpuLicenseExpiry` extension type: lifecycle/special
 * methods plus accessors for the expiry timestamp fields (year, month, day,
 * hour, min, sec) and status. Note `min__`/`min_` name mangling: Cython
 * escapes the field name to avoid clashing with reserved identifiers. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `GridLicenseExpiry` extension type — structurally
 * identical to `VgpuLicenseExpiry` above (year/month/day/hour/min/sec/status
 * accessors plus the standard lifecycle, conversion, and pickle methods). */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `VgpuTypeIdInfo_v1` extension type: lifecycle/special
 * methods plus accessors for `version` and `vgpu_type_ids`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `VgpuTypeMaxInstance_v1` extension type: lifecycle/
 * special methods plus accessors for `version`, `vgpu_type_id`, and
 * `max_instance_per_gi`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `ActiveVgpuInstanceInfo_v1` extension type: lifecycle/
 * special methods plus accessors for `version` and `vgpu_instances`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `VgpuCreatablePlacementInfo_v1` extension type:
 * lifecycle/special methods plus accessors for `version`, `vgpu_type_id`,
 * `count`, and `placement_ids`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
/* Prototypes for the `HwbcEntry` extension type. Unlike the types above, its
 * `__init__` takes a `size` argument, it supports `__len__` and
 * `__getitem__`, and its `from_ptr` takes a `size_t size` instead of an
 * `owner` object — presumably a sized/array-like wrapper; confirm against
 * the .pyx source. Field accessors: `hwbc_id`, `firmware_version`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_8LedState_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_5cause___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5cause_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_5color___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5color_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6serial___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6serial_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7current___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7current_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7voltage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5power___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5power_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_9EventData_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4time___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4time_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10start_time___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10is_running___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_11average_fps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_15average_latency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_5value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_5value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_6Sample_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_12sample_value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12sample_value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
/*
 * NOTE(review): This translation unit is machine-generated by Cython 3.2.2
 * from cuda/bindings/_nvml.pyx (see file header metadata). Do not hand-edit
 * these declarations; regenerate from the .pyx source instead.
 *
 * The lines below are forward prototypes for the Python-level implementation
 * functions ("__pyx_pf_*") backing each extension type's special methods
 * (__init__, __repr__, __eq__, __getitem__, __setitem__, __reduce_cython__,
 * ...) and property getter/setter pairs. The mangled names encode
 * module path, class name, and method. Section comments group them by type.
 */

/* VgpuInstanceUtilizationInfo_v1 — tail of this type's prototypes; the
 * group starts earlier in the file, before this section. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */

/* FieldValue — special methods plus field_id / scope_id / timestamp /
 * latency_usec / value_type / nvml_return / value property accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8field_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8field_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8scope_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_9timestamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10value_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10value_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */

/* GpuThermalSettings — special methods plus sensor / count accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* ClkMonStatus — special methods plus clk_mon_list / b_global_status /
 * clk_mon_list_size accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* ProcessesUtilizationInfo_v1 — special methods plus version /
 * last_seen_time_stamp / proc_util_array accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* GpuDynamicPstatesInfo — special methods plus utilization / flags_
 * accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* VgpuProcessesUtilizationInfo_v1 — special methods plus version /
 * last_seen_time_stamp / vgpu_proc_util_array accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* VgpuSchedulerParams — special methods plus vgpu_sched_data_with_arr /
 * vgpu_sched_data accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* VgpuSchedulerSetParams — same method/accessor layout as
 * VgpuSchedulerParams above. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* VgpuLicenseInfo — special methods plus license_expiry / is_licensed /
 * current_state accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */

/* GridLicensableFeature — special methods plus feature_code /
 * feature_state / license_info / product_name / feature_enabled /
 * license_expiry accessors. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */

/* UnitFanSpeeds — head of this type's prototypes; the group continues
 * later in the file, past this section. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static void __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_12from_data(PyObject *__pyx_v_data); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_4init_v2(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6init_with_flags(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8shutdown(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10error_string(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_result); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12system_get_driver_version(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14system_get_nvml_version(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16system_get_cuda_driver_version(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18system_get_cuda_driver_version_v2(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20system_get_process_name(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_pid); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22system_get_hic_version(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24unit_get_count(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26unit_get_handle_by_index(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_ind_ex); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28unit_get_unit_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30unit_get_led_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_32unit_get_psu_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_34unit_get_temperature(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit, unsigned int __pyx_v_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_36unit_get_fan_speed_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38device_get_count_v2(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_40device_get_attributes_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_42device_get_handle_by_index_v2(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_ind_ex); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_44device_get_handle_by_serial(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_serial); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_46device_get_handle_by_uuid(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_uuid); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_48device_get_handle_by_uuidv(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_uuid); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_50device_get_handle_by_pci_bus_id_v2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_pci_bus_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_52device_get_name(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_54device_get_brand(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_56device_get_index(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_58device_get_serial(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_60device_get_module_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_62device_get_c2c_mode_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_64device_get_memory_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_node_set_size, unsigned int __pyx_v_scope); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_66device_get_cpu_affinity_within_scope(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_cpu_set_size, unsigned int __pyx_v_scope); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_68device_get_cpu_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_cpu_set_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_70device_set_cpu_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_72device_clear_cpu_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_74device_get_numa_node_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_76device_get_topology_common_ancestor(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device1, intptr_t __pyx_v_device2); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_78device_get_p2p_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device1, intptr_t __pyx_v_device2, int __pyx_v_p2p_ind_ex); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_80device_get_uuid(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_82device_get_minor_number(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_84device_get_board_part_number(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_86device_get_inforom_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_object); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_88device_get_inforom_image_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_90device_get_inforom_configuration_checksum(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_92device_validate_inforom(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_94device_get_last_bbx_flush_time(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_timestamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_96device_get_display_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_98device_get_display_active(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_100device_get_persistence_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_102device_get_pci_info_ext(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_104device_get_pci_info_v3(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_106device_get_max_pcie_link_generation(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_108device_get_gpu_max_pcie_link_generation(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_110device_get_max_pcie_link_width(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_112device_get_curr_pcie_link_generation(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_114device_get_curr_pcie_link_width(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_116device_get_pcie_throughput(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_counter); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_118device_get_pcie_replay_counter(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_120device_get_clock_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_122device_get_max_clock_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_124device_get_gpc_clk_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_126device_get_clock(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_clock_type, int __pyx_v_clock_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_128device_get_max_customer_boost_clock(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_clock_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_130device_get_supported_memory_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_132device_get_supported_graphics_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_memory_clock_m_hz); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_134device_get_auto_boosted_clocks_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/*
 * NOTE(review): Cython-GENERATED forward declarations — do not hand-edit.
 * Each line declares the implementation function (`__pyx_pf_*`) behind a
 * Python-visible wrapper in `cuda.bindings._nvml` (generated from
 * cuda/bindings/_nvml.pyx). Definitions appear later in this file.
 * Conventions visible here:
 *   - `intptr_t` parameters carry opaque NVML handles / struct pointers
 *     (device, unit, gpu_instance, event set, out-param buffers).
 *   - `unsigned int` parameters are indices, IDs, or small scalar settings.
 *   - `unsigned PY_LONG_LONG` parameters are 64-bit values (timestamps,
 *     sizes, event-type bitmasks).
 *   - `int` parameters are NVML enum values (modes, types, states).
 *   - The numeric suffix steps by 2 because Cython interleaves each `pf`
 *     (impl) function with its `pw` (wrapper) counterpart.
 */
/* --- Fan / cooler / temperature queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_136device_get_fan_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_138device_get_fan_speed_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_140device_get_fan_speed_rpm(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_142device_get_target_fan_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_144device_get_min_max_fan_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_146device_get_fan_control_policy_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_148device_get_num_fans(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_150device_get_cooler_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_152device_get_temperature_threshold(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_threshold_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_154device_get_margin_temperature(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_156device_get_thermal_settings(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_sensor_ind_ex); /* proto */
/* --- Performance state / clocks --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_158device_get_performance_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_160device_get_current_clocks_event_reasons(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_162device_get_supported_clocks_event_reasons(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_164device_get_power_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_166device_get_dynamic_pstates_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_168device_get_mem_clk_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_170device_get_min_max_clock_of_p_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type, int __pyx_v_pstate); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_172device_get_gpc_clk_min_max_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_174device_get_mem_clk_min_max_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_176device_get_clock_offsets(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_178device_set_clock_offsets(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_info); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_180device_get_performance_modes(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_182device_get_current_clock_freqs(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/* --- Power management queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_184device_get_power_management_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_186device_get_power_management_limit_constraints(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_188device_get_power_management_default_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_190device_get_power_usage(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_192device_get_total_energy_consumption(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_194device_get_enforced_power_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_196device_get_gpu_operation_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/* --- Memory / compute / ECC queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_198device_get_memory_info_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_200device_get_compute_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_202device_get_cuda_compute_capability(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_204device_get_dram_encryption_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_206device_set_dram_encryption_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_dram_encryption); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_208device_get_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_210device_get_default_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_212device_get_board_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_214device_get_multi_gpu_board(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_216device_get_total_ecc_errors(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_error_type, int __pyx_v_counter_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_218device_get_memory_error_counter(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_error_type, int __pyx_v_counter_type, int __pyx_v_location_type); /* proto */
/* --- Utilization / encoder / decoder / FBC queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_220device_get_utilization_rates(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_222device_get_encoder_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_224device_get_encoder_capacity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_encoder_query_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_226device_get_encoder_stats(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_228device_get_encoder_sessions(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_230device_get_decoder_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_232device_get_jpg_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_234device_get_ofa_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_236device_get_fbc_stats(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_238device_get_fbc_sessions(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/* --- Driver / VBIOS / running-process queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_240device_get_driver_model_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_242device_get_vbios_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_244device_get_bridge_chip_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_246device_get_compute_running_processes_v3(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_248device_get_mps_compute_running_processes_v3(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/* --- Board topology / PCIe / bus queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_250device_on_same_board(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device1, intptr_t __pyx_v_device2); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_252device_get_api_restriction(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_api_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_254device_get_bar1_memory_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_256device_get_irq_num(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_258device_get_num_gpu_cores(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_260device_get_power_source(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_262device_get_memory_bus_width(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_264device_get_pcie_link_max_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_266device_get_pcie_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_268device_get_adaptive_clock_info_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_270device_get_bus_type(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_272device_get_gpu_fabric_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/* --- Confidential computing --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_274system_get_conf_compute_capabilities(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_276system_get_conf_compute_state(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_278device_get_conf_compute_mem_size_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_280system_get_conf_compute_gpus_ready_state(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_282device_get_conf_compute_protected_memory_usage(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_284device_get_conf_compute_gpu_certificate(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_286device_get_conf_compute_gpu_attestation_report(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_288system_get_conf_compute_key_rotation_threshold_info(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_290device_set_conf_compute_unprotected_mem_size(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_size_ki_b); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_292system_set_conf_compute_gpus_ready_state(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_is_accepting_work); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_294system_set_conf_compute_key_rotation_threshold_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_p_key_rotation_thr_info); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_296system_get_conf_compute_settings(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
/* --- GSP firmware / accounting / retired pages / misc queries --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_298device_get_gsp_firmware_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_300device_get_gsp_firmware_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_302device_get_sram_ecc_error_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_304device_get_accounting_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_306device_get_accounting_stats(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_pid); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_308device_get_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_310device_get_accounting_buffer_size(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_312device_get_retired_pages(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_cause); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_314device_get_retired_pages_pending_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_316device_get_remapped_rows(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_318device_get_row_remapper_histogram(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_320device_get_architecture(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_322device_get_clk_mon_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_324device_get_process_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_326device_get_platform_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
/* --- Setters (unit / device configuration) --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_328unit_set_led_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit, int __pyx_v_color); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_330device_set_persistence_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_332device_set_compute_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_334device_set_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_ecc); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_336device_clear_ecc_error_counts(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_counter_type); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_338device_set_driver_model(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_driver_model, unsigned int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_340device_set_gpu_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_min_gpu_clock_m_hz, unsigned int __pyx_v_max_gpu_clock_m_hz); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_342device_reset_gpu_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_344device_set_memory_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_min_mem_clock_m_hz, unsigned int __pyx_v_max_mem_clock_m_hz); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_346device_reset_memory_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_348device_set_auto_boosted_clocks_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_enabled); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_350device_set_default_auto_boosted_clocks_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_enabled, unsigned int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_352device_set_default_fan_speed_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_354device_set_fan_control_policy(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_policy); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_356device_set_temperature_threshold(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_threshold_type, intptr_t __pyx_v_temp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_358device_set_power_management_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_limit); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_360device_set_gpu_operation_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_362device_set_api_restriction(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_api_type, int __pyx_v_is_restricted); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_364device_set_fan_speed_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_speed); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_366device_set_accounting_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_368device_clear_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_370device_set_power_management_limit_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_power_value); /* proto */
/* --- NVLink --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_372device_get_nvlink_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_374device_get_nvlink_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_376device_get_nvlink_capability(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link, int __pyx_v_capability); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_378device_get_nvlink_remote_pci_info_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_380device_get_nvlink_error_counter(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link, int __pyx_v_counter); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_382device_reset_nvlink_error_counters(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_384device_get_nvlink_remote_device_type(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_386device_set_nvlink_device_low_power_threshold(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_info); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_388system_set_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_nvlink_bw_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_390system_get_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_392device_get_nvlink_supported_bw_modes(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_394device_get_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_396device_set_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_set_bw_mode); /* proto */
/* --- Event sets / drain state / GPU discovery --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_398event_set_create(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_400device_register_events(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_event_types, intptr_t __pyx_v_set); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_402device_get_supported_event_types(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_404event_set_wait_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_set, unsigned int __pyx_v_timeoutms); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_406event_set_free(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_set); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_408system_event_set_create(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_410system_event_set_free(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_412system_register_events(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_414system_event_set_wait(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_416device_modify_drain_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info, int __pyx_v_new_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_418device_query_drain_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_420device_remove_gpu_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info, int __pyx_v_gpu_state, int __pyx_v_link_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_422device_discover_gpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info); /* proto */
/* --- Virtualization / vGPU --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_424device_get_virtualization_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_426device_get_host_vgpu_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_428device_set_virtualization_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_virtual_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_430device_get_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_432device_set_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_p_heterogeneous_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_434vgpu_instance_get_placement_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_436device_get_vgpu_type_supported_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_438vgpu_type_get_gsp_heap_size(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_440vgpu_type_get_fb_reservation(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_442vgpu_instance_get_runtime_state_size(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_444device_set_vgpu_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_capability, int __pyx_v_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_446device_get_grid_licensable_features_v4(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_448get_vgpu_driver_capabilities(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_capability); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_450device_get_vgpu_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_capability); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_452vgpu_type_get_class(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_454vgpu_type_get_name(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_456vgpu_type_get_gpu_instance_profile_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_458vgpu_type_get_device_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_460vgpu_type_get_framebuffer_size(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_462vgpu_type_get_num_display_heads(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_464vgpu_type_get_resolution(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id, unsigned int __pyx_v_display_ind_ex); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_466vgpu_type_get_license(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id, intptr_t __pyx_v_vgpu_type_license_string, unsigned int __pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_468vgpu_type_get_frame_rate_limit(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_470vgpu_type_get_max_instances(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_472vgpu_type_get_max_instances_per_vm(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_474vgpu_type_get_bar1_info(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_476vgpu_instance_get_uuid(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_478vgpu_instance_get_vm_driver_version(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_480vgpu_instance_get_fb_usage(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_482vgpu_instance_get_license_status(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_484vgpu_instance_get_type(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_486vgpu_instance_get_frame_rate_limit(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_488vgpu_instance_get_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_490vgpu_instance_get_encoder_capacity(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_492vgpu_instance_set_encoder_capacity(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance, unsigned int __pyx_v_encoder_capacity); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_494vgpu_instance_get_encoder_stats(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_496vgpu_instance_get_encoder_sessions(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_498vgpu_instance_get_fbc_stats(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_500vgpu_instance_get_fbc_sessions(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_502vgpu_instance_get_gpu_instance_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_504vgpu_instance_get_gpu_pci_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_506vgpu_type_get_capabilities(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id, int __pyx_v_capability); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_508vgpu_instance_get_mdev_uuid(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_510vgpu_type_get_max_instances_per_gpu_instance(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
/* --- vGPU scheduler (GPU-instance and device scope) --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_512gpu_instance_set_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, intptr_t __pyx_v_p_scheduler); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_514gpu_instance_get_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_516gpu_instance_get_vgpu_scheduler_log(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_518gpu_instance_get_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_520gpu_instance_set_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, intptr_t __pyx_v_p_heterogeneous_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_522device_get_pgpu_metadata_string(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_524device_get_vgpu_scheduler_log(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_526device_get_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_528device_get_vgpu_scheduler_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_530device_set_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_p_scheduler_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_532set_vgpu_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_vgpu_version); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_534device_get_vgpu_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_536device_get_vgpu_process_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_538vgpu_instance_get_accounting_mode(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_540vgpu_instance_get_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_542vgpu_instance_get_accounting_stats(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance, unsigned int __pyx_v_pid); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_544vgpu_instance_clear_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_546vgpu_instance_get_license_info_v2(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
/* --- Excluded devices / MIG --- */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_548get_excluded_device_count(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_550get_excluded_device_info_by_index(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_ind_ex); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_552device_set_mig_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_554device_get_mig_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_556device_get_gpu_instance_profile_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_558device_get_gpu_instance_possible_placements_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_560device_get_gpu_instance_remaining_capacity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_562device_create_gpu_instance(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_564device_create_gpu_instance_with_placement(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, intptr_t __pyx_v_placement); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_566gpu_instance_destroy(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_568device_get_gpu_instance_by_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_570gpu_instance_get_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_572gpu_instance_get_compute_instance_profile_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile, unsigned int __pyx_v_eng_profile); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_574gpu_instance_get_compute_instance_remaining_capacity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_576gpu_instance_get_compute_instance_possible_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_578gpu_instance_create_compute_instance(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_580gpu_instance_create_compute_instance_with_placement(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, intptr_t __pyx_v_placement); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_582compute_instance_destroy(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_compute_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_584gpu_instance_get_compute_instance_by_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_586compute_instance_get_info_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_compute_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_588device_is_mig_device_handle(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_590device_get_gpu_instance_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_592device_get_compute_instance_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_594device_get_max_mig_device_count(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_596device_get_mig_device_handle_by_index(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_ind_ex); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_598device_get_device_handle_from_mig_device_handle(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_mig_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_600gpm_sample_get(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_gpm_sample); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_602gpm_mig_sample_get(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_gpu_instance_id, intptr_t __pyx_v_gpm_sample); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_604gpm_query_device_support(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_606gpm_query_if_streaming_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_608gpm_set_streaming_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_610device_get_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_612device_workload_power_profile_clear_requested_profiles(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_requested_profiles); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_614device_power_smoothing_activate_preset_profile(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_profile); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_616device_power_smoothing_update_preset_profile_param(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_profile); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_618device_power_smoothing_set_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_620device_get_addressing_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_622device_get_repair_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_624device_get_power_mizer_mode_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_626device_set_power_mizer_mode_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_power_mizer_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_628device_get_pdi(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_630device_get_nvlink_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_632device_read_write_prm_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_buffer); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_634device_get_gpu_instance_profile_info_by_id_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_636system_get_topology_gpu_set(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_cpuNumber); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_638system_get_driver_branch(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_640unit_get_devices(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_642device_get_topology_nearest_gpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_level); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_644device_get_temperature_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, nvmlTemperatureSensors_t __pyx_v_sensorType); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_646device_get_supported_performance_states(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_size); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_648device_get_running_process_detail_list(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_650device_get_samples(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_652device_get_retired_pages_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_cause); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_654device_get_processes_utilization_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_656device_set_hostname_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, PyObject *__pyx_v_hostname); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_658device_get_hostname_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_660device_get_field_values(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, PyObject *__pyx_v_values); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_662device_clear_field_values(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, PyObject *__pyx_v_values); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_664device_get_supported_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_666device_get_creatable_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_668device_get_active_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_670vgpu_instance_get_vm_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_672gpu_instance_get_creatable_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_674gpu_instance_get_active_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_676gpu_instance_get_vgpu_type_creatable_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_vgpu_type_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_678device_get_vgpu_type_creatable_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id, unsigned int __pyx_v_mode); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_680vgpu_instance_get_metadata(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_682device_get_vgpu_metadata(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_684get_vgpu_compatibility(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_vgpu_metadata, struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_pgpu_metadata); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_686get_vgpu_version(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_688device_get_vgpu_instances_utilization_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_690device_get_vgpu_processes_utilization_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_last_seen_time_stamp); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_692device_get_gpu_instances(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_694gpu_instance_get_compute_instances(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_696device_get_sram_unique_uncorrected_ecc_error_counts(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_698__pyx_unpickle_ProcessInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_700__pyx_unpickle_ProcessDetail_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_702__pyx_unpickle_BridgeChipInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_704__pyx_unpickle_ClkMonFaultInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_706__pyx_unpickle_ProcessUtilizationSample(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_708__pyx_unpickle_ProcessUtilizationInfo_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_710__pyx_unpickle_VgpuProcessUtilizationInfo_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_712__pyx_unpickle_VgpuSchedulerLogEntry(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_714__pyx_unpickle_HwbcEntry(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_716__pyx_unpickle_UnitFanInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_718__pyx_unpickle_EncoderSessionInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_720__pyx_unpickle_FBCSessionInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_722__pyx_unpickle_GpuInstancePlacement(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_724__pyx_unpickle_ComputeInstancePlacement(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_726__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_728__pyx_unpickle_Sample(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_730__pyx_unpickle_VgpuInstanceUtilizationInfo_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_732__pyx_unpickle_FieldValue(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_734__pyx_unpickle_GridLicensableFeature(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Utilization(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Memory(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Memory_v2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_BAR1Memory(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAttributes(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Value(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod0(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ClockOffset_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod3(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod4(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod5(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_HwbcEntry(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_LedState(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_UnitInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PSUInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EventData(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_AccountingStats(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FBCStats(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FBCSessionInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuVersion(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuMetadata(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpmSupport(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_RepairStatus_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Pdi_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Sample(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FieldValue(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuThermalSettings(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonStatus(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeature(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
/* #### Code section: late_includes ### */
/* #### Code section: module_state ### */
/* SmallCodeConfig */
/* CYTHON_SMALL_CODE is applied to cold, run-once code (module init and
 * similar) so the compiler can optimize it for size and move it away from
 * hot paths.  GCC >= 4.3 gets __attribute__((cold)); clang is checked
 * first and deliberately gets an empty definition (clang defines
 * __GNUC__ too, so the order of these branches matters); any other
 * compiler also gets an empty definition. */
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
    #define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
    #define CYTHON_SMALL_CODE __attribute__((cold))
#else
    #define CYTHON_SMALL_CODE
#endif
#endif

/* Per-module state for the generated extension module "cuda.bindings._nvml".
 * One instance exists per module object when CYTHON_USE_MODULE_STATE is
 * enabled, otherwise a single static instance is used (see the
 * __pyx_mstate_global definitions below).  Field order and layout are
 * fixed by the code generator — do not reorder by hand. */
typedef struct {
  /* Core cached PyObject references and shared empty singletons
   * (names follow Cython's internal convention; their exact referents
   * are set up in generated init code outside this chunk). */
  PyObject *__pyx_d;
  PyObject *__pyx_b;
  PyObject *__pyx_cython_runtime;
  PyObject *__pyx_empty_tuple;
  PyObject *__pyx_empty_bytes;
  PyObject *__pyx_empty_unicode;
  /* Cached builtin/cpython type objects. */
  PyTypeObject *__pyx_ptype_7cpython_4type_type;
  PyTypeObject *__pyx_ptype_7cpython_4bool_bool;
  PyTypeObject *__pyx_ptype_7cpython_7complex_complex;
  /* Heap type objects for every extension type defined by this module,
   * held as owned PyObject* handles, in definition order.  Each entry
   * here has a matching PyTypeObject* mirror in the __pyx_ptype_... block
   * further down. */
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_PciInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_Utilization;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_Memory;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_Memory_v2;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_BAR1Memory;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ProcessInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_Value;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_HwbcEntry;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_LedState;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_UnitInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_PSUInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_EventData;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_AccountingStats;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_FBCStats;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuVersion;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpmSupport;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_Pdi_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_Sample;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_FieldValue;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures;
  PyObject *__pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2;
  /* Memoryview utility types (cython.view machinery). */
  PyObject *__pyx_type___pyx_array;
  PyObject *__pyx_type___pyx_MemviewEnum;
  PyObject *__pyx_type___pyx_memoryview;
  PyObject *__pyx_type___pyx_memoryviewslice;
  /* The same extension types again as PyTypeObject* pointers, one per
   * PyObject* handle above and in the same order, used for direct type
   * checks/instantiation without a cast at each use site. */
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_Utilization;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_Memory;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_Value;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_LedState;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_EventData;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_Sample;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures;
  PyTypeObject *__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2;
  PyTypeObject *__pyx_array_type;
  PyTypeObject *__pyx_MemviewEnum_type;
  PyTypeObject *__pyx_memoryview_type;
  PyTypeObject *__pyx_memoryviewslice_type;
  /* Cached unbound dict methods (items/pop/values fast paths). */
  __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_items;
  __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_pop;
  __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_values;
  /* Constant tables; array sizes are fixed by the code generator and
   * indexed by the __pyx_kp_u_* / __pyx_n_* defines emitted below. */
  PyObject *__pyx_slice[2];
  PyObject *__pyx_tuple[13];
  PyObject *__pyx_codeobj_tab[814];
  PyObject *__pyx_string_tab[3377];
  PyObject *__pyx_number_tab[281];
/* #### Code section: module_state_contents ### */
/* CommonTypesMetaclass.module_state_decls */
PyTypeObject *__pyx_CommonTypesMetaclassType;

/* CachedMethodType.module_state_decls */
#if CYTHON_COMPILING_IN_LIMITED_API
PyObject *__Pyx_CachedMethodType;
#endif

/* CythonFunctionShared.module_state_decls */
PyTypeObject *__pyx_CyFunctionType;

/* CodeObjectCache.module_state_decls */
struct __Pyx_CodeObjectCache __pyx_code_cache;

/* #### Code section: module_state_end ### */
} __pyx_mstatetype;

/* Module-state access.  With CYTHON_USE_MODULE_STATE enabled, the state
 * struct lives inside the module object and is looked up on each access
 * via the module-def registry; otherwise a single static instance is
 * shared and __pyx_mstate_global is a constant pointer to it. */
#if CYTHON_USE_MODULE_STATE
#ifdef __cplusplus
/* In C++, internal linkage for the forward-declared moduledef is obtained
 * with an anonymous namespace (the `extern` refers to the definition that
 * appears later in this file); in C, plain `static` suffices. */
namespace {
extern struct PyModuleDef __pyx_moduledef;
} /* anonymous namespace */
#else
static struct PyModuleDef __pyx_moduledef;
#endif

#define __pyx_mstate_global (__Pyx_PyModule_GetState(__Pyx_State_FindModule(&__pyx_moduledef)))

#define __pyx_m (__Pyx_State_FindModule(&__pyx_moduledef))
#endif
#else
/* Static fallback: {} (C++) and {0} (C) both zero-initialize the whole
 * aggregate, matching each language's initializer rules. */
static __pyx_mstatetype __pyx_mstate_global_static =
#ifdef __cplusplus
    {};
#else
    {0};
#endif
static __pyx_mstatetype * const __pyx_mstate_global = &__pyx_mstate_global_static;
#endif
/* #### Code section: constant_name_defines ### */
#define __pyx_kp_u_ __pyx_string_tab[0]
#define __pyx_kp_u_AccountingStats_object_at __pyx_string_tab[1]
#define __pyx_kp_u_ActiveVgpuInstanceInfo_v1_objec __pyx_string_tab[2]
#define __pyx_kp_u_All_dimensions_preceding_dimensi __pyx_string_tab[3]
#define __pyx_kp_u_BAR1Memory_object_at __pyx_string_tab[4]
#define __pyx_kp_u_BridgeChipHierarchy_object_at __pyx_string_tab[5]
#define __pyx_kp_u_BridgeChipInfo_Array __pyx_string_tab[6]
#define __pyx_kp_u_BridgeChipInfo_object_at __pyx_string_tab[7]
#define __pyx_kp_u_Buffer_view_does_not_expose_stri __pyx_string_tab[8]
#define __pyx_kp_u_C2cModeInfo_v1_object_at __pyx_string_tab[9]
#define __pyx_kp_u_Can_only_create_a_buffer_that_is __pyx_string_tab[10]
#define __pyx_kp_u_Cannot_assign_to_read_only_memor __pyx_string_tab[11]
#define __pyx_kp_u_Cannot_create_writable_memory_vi __pyx_string_tab[12]
#define __pyx_kp_u_Cannot_index_with_type __pyx_string_tab[13]
#define __pyx_kp_u_Cannot_transpose_memoryview_with __pyx_string_tab[14]
#define __pyx_kp_u_ClkMonFaultInfo_Array __pyx_string_tab[15]
#define __pyx_kp_u_ClkMonFaultInfo_object_at __pyx_string_tab[16]
#define __pyx_kp_u_ClkMonStatus_object_at __pyx_string_tab[17]
#define __pyx_kp_u_ClockOffset_v1_object_at __pyx_string_tab[18]
#define __pyx_kp_u_ComputeInstanceInfo_object_at __pyx_string_tab[19]
#define __pyx_kp_u_ComputeInstancePlacement_Array __pyx_string_tab[20]
#define __pyx_kp_u_ComputeInstancePlacement_object __pyx_string_tab[21]
#define __pyx_kp_u_ComputeInstanceProfileInfo_v2_o __pyx_string_tab[22]
#define __pyx_kp_u_ComputeInstanceProfileInfo_v3_o __pyx_string_tab[23]
#define __pyx_kp_u_ConfComputeGetKeyRotationThresh __pyx_string_tab[24]
#define __pyx_kp_u_ConfComputeGpuAttestationReport __pyx_string_tab[25]
#define __pyx_kp_u_ConfComputeGpuCertificate_objec __pyx_string_tab[26]
#define __pyx_kp_u_ConfComputeMemSizeInfo_object_a __pyx_string_tab[27]
#define __pyx_kp_u_ConfComputeSystemCaps_object_at __pyx_string_tab[28]
#define __pyx_kp_u_ConfComputeSystemState_object_a __pyx_string_tab[29]
#define __pyx_kp_u_CoolerInfo_v1_object_at __pyx_string_tab[30]
#define __pyx_kp_u_DeviceAddressingMode_v1_object __pyx_string_tab[31]
#define __pyx_kp_u_DeviceAttributes_object_at __pyx_string_tab[32]
#define __pyx_kp_u_DeviceCapabilities_v1_object_at __pyx_string_tab[33]
#define __pyx_kp_u_DeviceCurrentClockFreqs_v1_obje __pyx_string_tab[34]
#define __pyx_kp_u_DevicePerfModes_v1_object_at __pyx_string_tab[35]
#define __pyx_kp_u_DevicePowerMizerModes_v1_object __pyx_string_tab[36]
#define __pyx_kp_u_Dimension_d_is_not_direct __pyx_string_tab[37]
#define __pyx_kp_u_EccSramErrorStatus_v1_object_at __pyx_string_tab[38]
#define __pyx_kp_u_EccSramUniqueUncorrectedErrorCo __pyx_string_tab[39]
#define __pyx_kp_u_EccSramUniqueUncorrectedErrorEn __pyx_string_tab[40]
#define __pyx_kp_u_EccSramUniqueUncorrectedErrorEn_2 __pyx_string_tab[41]
#define __pyx_kp_u_Empty_shape_tuple_for_cython_arr __pyx_string_tab[42]
#define __pyx_kp_u_EncoderSessionInfo_Array __pyx_string_tab[43]
#define __pyx_kp_u_EncoderSessionInfo_object_at __pyx_string_tab[44]
#define __pyx_kp_u_Error_allocating_AccountingStats __pyx_string_tab[45]
#define __pyx_kp_u_Error_allocating_ActiveVgpuInsta __pyx_string_tab[46]
#define __pyx_kp_u_Error_allocating_BAR1Memory __pyx_string_tab[47]
#define __pyx_kp_u_Error_allocating_BridgeChipHiera __pyx_string_tab[48]
#define __pyx_kp_u_Error_allocating_C2cModeInfo_v1 __pyx_string_tab[49]
#define __pyx_kp_u_Error_allocating_ClkMonStatus __pyx_string_tab[50]
#define __pyx_kp_u_Error_allocating_ClockOffset_v1 __pyx_string_tab[51]
#define __pyx_kp_u_Error_allocating_ComputeInstance __pyx_string_tab[52]
#define __pyx_kp_u_Error_allocating_ComputeInstance_2 __pyx_string_tab[53]
#define __pyx_kp_u_Error_allocating_ComputeInstance_3 __pyx_string_tab[54]
#define __pyx_kp_u_Error_allocating_ConfComputeGetK __pyx_string_tab[55]
#define __pyx_kp_u_Error_allocating_ConfComputeGpuA __pyx_string_tab[56]
#define __pyx_kp_u_Error_allocating_ConfComputeGpuC __pyx_string_tab[57]
#define __pyx_kp_u_Error_allocating_ConfComputeMemS __pyx_string_tab[58]
#define __pyx_kp_u_Error_allocating_ConfComputeSyst __pyx_string_tab[59]
#define __pyx_kp_u_Error_allocating_ConfComputeSyst_2 __pyx_string_tab[60]
#define __pyx_kp_u_Error_allocating_CoolerInfo_v1 __pyx_string_tab[61]
#define __pyx_kp_u_Error_allocating_DeviceAddressin __pyx_string_tab[62]
#define __pyx_kp_u_Error_allocating_DeviceAttribute __pyx_string_tab[63]
#define __pyx_kp_u_Error_allocating_DeviceCapabilit __pyx_string_tab[64]
#define __pyx_kp_u_Error_allocating_DeviceCurrentCl __pyx_string_tab[65]
#define __pyx_kp_u_Error_allocating_DevicePerfModes __pyx_string_tab[66]
#define __pyx_kp_u_Error_allocating_DevicePowerMize __pyx_string_tab[67]
#define __pyx_kp_u_Error_allocating_EccSramErrorSta __pyx_string_tab[68]
#define __pyx_kp_u_Error_allocating_EccSramUniqueUn __pyx_string_tab[69]
#define __pyx_kp_u_Error_allocating_EventData __pyx_string_tab[70]
#define __pyx_kp_u_Error_allocating_ExcludedDeviceI __pyx_string_tab[71]
#define __pyx_kp_u_Error_allocating_FBCStats __pyx_string_tab[72]
#define __pyx_kp_u_Error_allocating_FanSpeedInfo_v1 __pyx_string_tab[73]
#define __pyx_kp_u_Error_allocating_GpmSupport __pyx_string_tab[74]
#define __pyx_kp_u_Error_allocating_GpuDynamicPstat __pyx_string_tab[75]
#define __pyx_kp_u_Error_allocating_GpuFabricInfo_v __pyx_string_tab[76]
#define __pyx_kp_u_Error_allocating_GpuInstanceInfo __pyx_string_tab[77]
#define __pyx_kp_u_Error_allocating_GpuInstanceProf __pyx_string_tab[78]
#define __pyx_kp_u_Error_allocating_GpuInstanceProf_2 __pyx_string_tab[79]
#define __pyx_kp_u_Error_allocating_GpuThermalSetti __pyx_string_tab[80]
#define __pyx_kp_u_Error_allocating_GridLicensableF __pyx_string_tab[81]
#define __pyx_kp_u_Error_allocating_GridLicenseExpi __pyx_string_tab[82]
#define __pyx_kp_u_Error_allocating_LedState __pyx_string_tab[83]
#define __pyx_kp_u_Error_allocating_MarginTemperatu __pyx_string_tab[84]
#define __pyx_kp_u_Error_allocating_Memory __pyx_string_tab[85]
#define __pyx_kp_u_Error_allocating_Memory_v2 __pyx_string_tab[86]
#define __pyx_kp_u_Error_allocating_NvLinkInfo_v2 __pyx_string_tab[87]
#define __pyx_kp_u_Error_allocating_NvlinkFirmwareI __pyx_string_tab[88]
#define __pyx_kp_u_Error_allocating_NvlinkFirmwareV __pyx_string_tab[89]
#define __pyx_kp_u_Error_allocating_NvlinkGetBwMode __pyx_string_tab[90]
#define __pyx_kp_u_Error_allocating_NvlinkSetBwMode __pyx_string_tab[91]
#define __pyx_kp_u_Error_allocating_NvlinkSupported __pyx_string_tab[92]
#define __pyx_kp_u_Error_allocating_PSUInfo __pyx_string_tab[93]
#define __pyx_kp_u_Error_allocating_PciInfo __pyx_string_tab[94]
#define __pyx_kp_u_Error_allocating_PciInfoExt_v1 __pyx_string_tab[95]
#define __pyx_kp_u_Error_allocating_Pdi_v1 __pyx_string_tab[96]
#define __pyx_kp_u_Error_allocating_PlatformInfo_v2 __pyx_string_tab[97]
#define __pyx_kp_u_Error_allocating_ProcessDetailLi __pyx_string_tab[98]
#define __pyx_kp_u_Error_allocating_ProcessesUtiliz __pyx_string_tab[99]
#define __pyx_kp_u_Error_allocating_RepairStatus_v1 __pyx_string_tab[100]
#define __pyx_kp_u_Error_allocating_RowRemapperHist __pyx_string_tab[101]
#define __pyx_kp_u_Error_allocating_SystemConfCompu __pyx_string_tab[102]
#define __pyx_kp_u_Error_allocating_UnitFanSpeeds __pyx_string_tab[103]
#define __pyx_kp_u_Error_allocating_UnitInfo __pyx_string_tab[104]
#define __pyx_kp_u_Error_allocating_Utilization __pyx_string_tab[105]
#define __pyx_kp_u_Error_allocating_Value __pyx_string_tab[106]
#define __pyx_kp_u_Error_allocating_VgpuCreatablePl __pyx_string_tab[107]
#define __pyx_kp_u_Error_allocating_VgpuHeterogeneo __pyx_string_tab[108]
#define __pyx_kp_u_Error_allocating_VgpuInstancesUt __pyx_string_tab[109]
#define __pyx_kp_u_Error_allocating_VgpuLicenseExpi __pyx_string_tab[110]
#define __pyx_kp_u_Error_allocating_VgpuLicenseInfo __pyx_string_tab[111]
#define __pyx_kp_u_Error_allocating_VgpuMetadata __pyx_string_tab[112]
#define __pyx_kp_u_Error_allocating_VgpuPgpuCompati __pyx_string_tab[113]
#define __pyx_kp_u_Error_allocating_VgpuPgpuMetadat __pyx_string_tab[114]
#define __pyx_kp_u_Error_allocating_VgpuPlacementId __pyx_string_tab[115]
#define __pyx_kp_u_Error_allocating_VgpuPlacementLi __pyx_string_tab[116]
#define __pyx_kp_u_Error_allocating_VgpuProcessesUt __pyx_string_tab[117]
#define __pyx_kp_u_Error_allocating_VgpuRuntimeStat __pyx_string_tab[118]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerCa __pyx_string_tab[119]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerGe __pyx_string_tab[120]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerLo __pyx_string_tab[121]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerLo_2 __pyx_string_tab[122]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerPa __pyx_string_tab[123]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerSe __pyx_string_tab[124]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerSt __pyx_string_tab[125]
#define __pyx_kp_u_Error_allocating_VgpuSchedulerSt_2 __pyx_string_tab[126]
#define __pyx_kp_u_Error_allocating_VgpuTypeBar1Inf __pyx_string_tab[127]
#define __pyx_kp_u_Error_allocating_VgpuTypeIdInfo __pyx_string_tab[128]
#define __pyx_kp_u_Error_allocating_VgpuTypeMaxInst __pyx_string_tab[129]
#define __pyx_kp_u_Error_allocating_VgpuVersion __pyx_string_tab[130]
#define __pyx_kp_u_Error_allocating__py_anon_pod0 __pyx_string_tab[131]
#define __pyx_kp_u_Error_allocating__py_anon_pod1 __pyx_string_tab[132]
#define __pyx_kp_u_Error_allocating__py_anon_pod2 __pyx_string_tab[133]
#define __pyx_kp_u_Error_allocating__py_anon_pod3 __pyx_string_tab[134]
#define __pyx_kp_u_Error_allocating__py_anon_pod4 __pyx_string_tab[135]
#define __pyx_kp_u_Error_allocating__py_anon_pod5 __pyx_string_tab[136]
#define __pyx_kp_u_EventData_object_at __pyx_string_tab[137]
#define __pyx_kp_u_ExcludedDeviceInfo_object_at __pyx_string_tab[138]
#define __pyx_kp_u_Expected_length_100_for_field_fi __pyx_string_tab[139]
#define __pyx_kp_u_Expected_length_128_for_field_br __pyx_string_tab[140]
#define __pyx_kp_u_Expected_length_200_for_field_lo __pyx_string_tab[141]
#define __pyx_kp_u_Expected_length_24_for_field_fan __pyx_string_tab[142]
#define __pyx_kp_u_Expected_length_32_for_field_clk __pyx_string_tab[143]
#define __pyx_kp_u_Expected_length_3_for_field_grid __pyx_string_tab[144]
#define __pyx_kp_u_Expected_length_3_for_field_sens __pyx_string_tab[145]
#define __pyx_kp_u_Expected_length_8_for_field_util __pyx_string_tab[146]
#define __pyx_kp_u_FBCSessionInfo_Array __pyx_string_tab[147]
#define __pyx_kp_u_FBCSessionInfo_object_at __pyx_string_tab[148]
#define __pyx_kp_u_FBCStats_object_at __pyx_string_tab[149]
#define __pyx_kp_u_FanSpeedInfo_v1_object_at __pyx_string_tab[150]
#define __pyx_kp_u_FieldValue_Array __pyx_string_tab[151]
#define __pyx_kp_u_FieldValue_object_at __pyx_string_tab[152]
#define __pyx_kp_u_GpmSupport_object_at __pyx_string_tab[153]
#define __pyx_kp_u_GpuDynamicPstatesInfo_object_at __pyx_string_tab[154]
#define __pyx_kp_u_GpuFabricInfo_v3_object_at __pyx_string_tab[155]
#define __pyx_kp_u_GpuInstanceInfo_object_at __pyx_string_tab[156]
#define __pyx_kp_u_GpuInstancePlacement_Array __pyx_string_tab[157]
#define __pyx_kp_u_GpuInstancePlacement_object_at __pyx_string_tab[158]
#define __pyx_kp_u_GpuInstanceProfileInfo_v2_objec __pyx_string_tab[159]
#define __pyx_kp_u_GpuInstanceProfileInfo_v3_objec __pyx_string_tab[160]
#define __pyx_kp_u_GpuThermalSettings_object_at __pyx_string_tab[161]
#define __pyx_kp_u_GridLicensableFeature_Array __pyx_string_tab[162]
#define __pyx_kp_u_GridLicensableFeature_object_at __pyx_string_tab[163]
/*
 * Cython-generated string-constant aliases.
 *
 * Each macro below aliases a symbolic name to a fixed slot in the module's
 * __pyx_string_tab[] array (populated elsewhere in this generated file).
 * The indices are assigned by the Cython code generator; do NOT edit,
 * reorder, or renumber these by hand -- regenerate from _nvml.pyx instead,
 * or the aliases will desynchronize from the table initializer.
 *
 * NOTE(review): by Cython's internal naming convention, __pyx_kp_u_* appear
 * to be general unicode string literals ("keyed pointers") and __pyx_n_u_*
 * interned identifier names; the suffix is a mangled, truncated form of the
 * string's content (with _2/_3/... disambiguating collisions after
 * truncation). Convention inferred from the generator, not from these lines
 * alone -- confirm against the Cython 3.2 code generator if it matters.
 */
#define __pyx_kp_u_GridLicensableFeatures_object_a __pyx_string_tab[164]
#define __pyx_kp_u_GridLicenseExpiry_object_at __pyx_string_tab[165]
#define __pyx_kp_u_HwbcEntry_Array __pyx_string_tab[166]
#define __pyx_kp_u_HwbcEntry_object_at __pyx_string_tab[167]
#define __pyx_kp_u_Index_out_of_bounds_axis_d __pyx_string_tab[168]
#define __pyx_kp_u_Indirect_dimensions_not_supporte __pyx_string_tab[169]
#define __pyx_kp_u_Invalid_mode_expected_c_or_fortr __pyx_string_tab[170]
#define __pyx_kp_u_Invalid_shape_in_axis __pyx_string_tab[171]
#define __pyx_kp_u_LedState_object_at __pyx_string_tab[172]
#define __pyx_kp_u_MarginTemperature_v1_object_at __pyx_string_tab[173]
#define __pyx_kp_u_MemoryView_of __pyx_string_tab[174]
#define __pyx_kp_u_Memory_object_at __pyx_string_tab[175]
#define __pyx_kp_u_Memory_v2_object_at __pyx_string_tab[176]
#define __pyx_kp_u_None __pyx_string_tab[177]
#define __pyx_kp_u_Note_that_Cython_is_deliberately __pyx_string_tab[178]
#define __pyx_kp_u_NvLinkInfo_v2_object_at __pyx_string_tab[179]
#define __pyx_kp_u_NvlinkFirmwareInfo_object_at __pyx_string_tab[180]
#define __pyx_kp_u_NvlinkFirmwareVersion_object_at __pyx_string_tab[181]
#define __pyx_kp_u_NvlinkGetBwMode_v1_object_at __pyx_string_tab[182]
#define __pyx_kp_u_NvlinkSetBwMode_v1_object_at __pyx_string_tab[183]
#define __pyx_kp_u_NvlinkSupportedBwModes_v1_objec __pyx_string_tab[184]
#define __pyx_kp_u_Out_of_bounds_on_buffer_access_a __pyx_string_tab[185]
#define __pyx_kp_u_PSUInfo_object_at __pyx_string_tab[186]
#define __pyx_kp_u_PciInfoExt_v1_object_at __pyx_string_tab[187]
#define __pyx_kp_u_PciInfo_object_at __pyx_string_tab[188]
#define __pyx_kp_u_Pdi_v1_object_at __pyx_string_tab[189]
#define __pyx_kp_u_PlatformInfo_v2_object_at __pyx_string_tab[190]
#define __pyx_kp_u_ProcessDetailList_v1_object_at __pyx_string_tab[191]
#define __pyx_kp_u_ProcessDetail_v1_Array __pyx_string_tab[192]
#define __pyx_kp_u_ProcessDetail_v1_object_at __pyx_string_tab[193]
#define __pyx_kp_u_ProcessInfo_Array __pyx_string_tab[194]
#define __pyx_kp_u_ProcessInfo_object_at __pyx_string_tab[195]
#define __pyx_kp_u_ProcessUtilizationInfo_v1_Array __pyx_string_tab[196]
#define __pyx_kp_u_ProcessUtilizationInfo_v1_objec __pyx_string_tab[197]
#define __pyx_kp_u_ProcessUtilizationSample_Array __pyx_string_tab[198]
#define __pyx_kp_u_ProcessUtilizationSample_object __pyx_string_tab[199]
#define __pyx_kp_u_ProcessesUtilizationInfo_v1_obj __pyx_string_tab[200]
#define __pyx_kp_u_RepairStatus_v1_object_at __pyx_string_tab[201]
#define __pyx_kp_u_RowRemapperHistogramValues_obje __pyx_string_tab[202]
#define __pyx_kp_u_Sample_Array __pyx_string_tab[203]
#define __pyx_kp_u_Sample_object_at __pyx_string_tab[204]
#define __pyx_kp_u_See_nvmlBrandType_t __pyx_string_tab[205]
#define __pyx_kp_u_See_nvmlBridgeChipType_t __pyx_string_tab[206]
#define __pyx_kp_u_See_nvmlClockId_t __pyx_string_tab[207]
#define __pyx_kp_u_See_nvmlClockLimitId_t __pyx_string_tab[208]
#define __pyx_kp_u_See_nvmlClockType_t __pyx_string_tab[209]
#define __pyx_kp_u_See_nvmlComputeMode_t __pyx_string_tab[210]
#define __pyx_kp_u_See_nvmlCoolerControl_t __pyx_string_tab[211]
#define __pyx_kp_u_See_nvmlCoolerTarget_t __pyx_string_tab[212]
#define __pyx_kp_u_See_nvmlDetachGpuState_t __pyx_string_tab[213]
#define __pyx_kp_u_See_nvmlDeviceAddressingModeType __pyx_string_tab[214]
#define __pyx_kp_u_See_nvmlDeviceGpuRecoveryAction __pyx_string_tab[215]
#define __pyx_kp_u_See_nvmlDeviceVgpuCapability_t __pyx_string_tab[216]
#define __pyx_kp_u_See_nvmlDriverModel_t __pyx_string_tab[217]
#define __pyx_kp_u_See_nvmlEccCounterType_t __pyx_string_tab[218]
#define __pyx_kp_u_See_nvmlEnableState_t __pyx_string_tab[219]
#define __pyx_kp_u_See_nvmlEncoderType_t __pyx_string_tab[220]
#define __pyx_kp_u_See_nvmlFBCSessionType_t __pyx_string_tab[221]
#define __pyx_kp_u_See_nvmlFanState_t __pyx_string_tab[222]
#define __pyx_kp_u_See_nvmlGpmMetricId_t __pyx_string_tab[223]
#define __pyx_kp_u_See_nvmlGpuOperationMode_t __pyx_string_tab[224]
#define __pyx_kp_u_See_nvmlGpuP2PCapsIndex_t __pyx_string_tab[225]
#define __pyx_kp_u_See_nvmlGpuP2PStatus_t __pyx_string_tab[226]
#define __pyx_kp_u_See_nvmlGpuTopologyLevel_t __pyx_string_tab[227]
#define __pyx_kp_u_See_nvmlGpuUtilizationDomainId_t __pyx_string_tab[228]
#define __pyx_kp_u_See_nvmlGpuVirtualizationMode_t __pyx_string_tab[229]
#define __pyx_kp_u_See_nvmlGridLicenseFeatureCode_t __pyx_string_tab[230]
#define __pyx_kp_u_See_nvmlHostVgpuMode_t __pyx_string_tab[231]
#define __pyx_kp_u_See_nvmlInforomObject_t __pyx_string_tab[232]
#define __pyx_kp_u_See_nvmlIntNvLinkDeviceType_t __pyx_string_tab[233]
#define __pyx_kp_u_See_nvmlLedColor_t __pyx_string_tab[234]
#define __pyx_kp_u_See_nvmlMemoryErrorType_t __pyx_string_tab[235]
#define __pyx_kp_u_See_nvmlMemoryLocation_t __pyx_string_tab[236]
#define __pyx_kp_u_See_nvmlNvLinkCapability_t __pyx_string_tab[237]
#define __pyx_kp_u_See_nvmlNvLinkErrorCounter_t __pyx_string_tab[238]
#define __pyx_kp_u_See_nvmlNvLinkUtilizationCountPk __pyx_string_tab[239]
#define __pyx_kp_u_See_nvmlNvLinkUtilizationCountUn __pyx_string_tab[240]
#define __pyx_kp_u_See_nvmlNvlinkVersion_t __pyx_string_tab[241]
#define __pyx_kp_u_See_nvmlPageRetirementCause_t __pyx_string_tab[242]
#define __pyx_kp_u_See_nvmlPcieLinkState_t __pyx_string_tab[243]
#define __pyx_kp_u_See_nvmlPcieUtilCounter_t __pyx_string_tab[244]
#define __pyx_kp_u_See_nvmlPerfPolicyType_t __pyx_string_tab[245]
#define __pyx_kp_u_See_nvmlPowerProfileType_t __pyx_string_tab[246]
#define __pyx_kp_u_See_nvmlPstates_t __pyx_string_tab[247]
#define __pyx_kp_u_See_nvmlRestrictedAPI_t __pyx_string_tab[248]
#define __pyx_kp_u_See_nvmlReturn_t __pyx_string_tab[249]
#define __pyx_kp_u_See_nvmlSamplingType_t __pyx_string_tab[250]
#define __pyx_kp_u_See_nvmlTemperatureSensors_t __pyx_string_tab[251]
#define __pyx_kp_u_See_nvmlTemperatureThresholds_t __pyx_string_tab[252]
#define __pyx_kp_u_See_nvmlThermalController_t __pyx_string_tab[253]
#define __pyx_kp_u_See_nvmlThermalTarget_t __pyx_string_tab[254]
#define __pyx_kp_u_See_nvmlUUIDType_t __pyx_string_tab[255]
#define __pyx_kp_u_See_nvmlValueType_t __pyx_string_tab[256]
#define __pyx_kp_u_See_nvmlVgpuCapability_t __pyx_string_tab[257]
#define __pyx_kp_u_See_nvmlVgpuDriverCapability_t __pyx_string_tab[258]
#define __pyx_kp_u_See_nvmlVgpuGuestInfoState_t __pyx_string_tab[259]
#define __pyx_kp_u_See_nvmlVgpuPgpuCompatibilityLim __pyx_string_tab[260]
#define __pyx_kp_u_See_nvmlVgpuVmCompatibility_t __pyx_string_tab[261]
#define __pyx_kp_u_See_nvmlVgpuVmIdType_t __pyx_string_tab[262]
#define __pyx_kp_u_Step_may_not_be_zero_axis_d __pyx_string_tab[263]
#define __pyx_kp_u_String_too_long_for_field_bus_id __pyx_string_tab[264]
#define __pyx_kp_u_String_too_long_for_field_bus_id_2 __pyx_string_tab[265]
#define __pyx_kp_u_String_too_long_for_field_cause __pyx_string_tab[266]
#define __pyx_kp_u_String_too_long_for_field_firmwa __pyx_string_tab[267]
#define __pyx_kp_u_String_too_long_for_field_guest __pyx_string_tab[268]
#define __pyx_kp_u_String_too_long_for_field_host_d __pyx_string_tab[269]
#define __pyx_kp_u_String_too_long_for_field_id_max __pyx_string_tab[270]
#define __pyx_kp_u_String_too_long_for_field_name_m __pyx_string_tab[271]
#define __pyx_kp_u_String_too_long_for_field_opaque __pyx_string_tab[272]
#define __pyx_kp_u_String_too_long_for_field_serial __pyx_string_tab[273]
#define __pyx_kp_u_String_too_long_for_field_state __pyx_string_tab[274]
#define __pyx_kp_u_String_too_long_for_field_str_ma __pyx_string_tab[275]
#define __pyx_kp_u_String_too_long_for_field_uuid_m __pyx_string_tab[276]
#define __pyx_kp_u_SystemConfComputeSettings_v1_ob __pyx_string_tab[277]
#define __pyx_kp_u_This_AccountingStats_instance_is __pyx_string_tab[278]
#define __pyx_kp_u_This_ActiveVgpuInstanceInfo_v1_i __pyx_string_tab[279]
#define __pyx_kp_u_This_BAR1Memory_instance_is_read __pyx_string_tab[280]
#define __pyx_kp_u_This_BridgeChipHierarchy_instanc __pyx_string_tab[281]
#define __pyx_kp_u_This_C2cModeInfo_v1_instance_is __pyx_string_tab[282]
#define __pyx_kp_u_This_ClkMonStatus_instance_is_re __pyx_string_tab[283]
#define __pyx_kp_u_This_ClockOffset_v1_instance_is __pyx_string_tab[284]
#define __pyx_kp_u_This_ComputeInstanceInfo_instanc __pyx_string_tab[285]
#define __pyx_kp_u_This_ComputeInstanceProfileInfo __pyx_string_tab[286]
#define __pyx_kp_u_This_ComputeInstanceProfileInfo_2 __pyx_string_tab[287]
#define __pyx_kp_u_This_ConfComputeGetKeyRotationTh __pyx_string_tab[288]
#define __pyx_kp_u_This_ConfComputeGpuAttestationRe __pyx_string_tab[289]
#define __pyx_kp_u_This_ConfComputeGpuCertificate_i __pyx_string_tab[290]
#define __pyx_kp_u_This_ConfComputeMemSizeInfo_inst __pyx_string_tab[291]
#define __pyx_kp_u_This_ConfComputeSystemCaps_insta __pyx_string_tab[292]
#define __pyx_kp_u_This_ConfComputeSystemState_inst __pyx_string_tab[293]
#define __pyx_kp_u_This_CoolerInfo_v1_instance_is_r __pyx_string_tab[294]
#define __pyx_kp_u_This_DeviceAddressingMode_v1_ins __pyx_string_tab[295]
#define __pyx_kp_u_This_DeviceAttributes_instance_i __pyx_string_tab[296]
#define __pyx_kp_u_This_DeviceCapabilities_v1_insta __pyx_string_tab[297]
#define __pyx_kp_u_This_DeviceCurrentClockFreqs_v1 __pyx_string_tab[298]
#define __pyx_kp_u_This_DevicePerfModes_v1_instance __pyx_string_tab[299]
#define __pyx_kp_u_This_DevicePowerMizerModes_v1_in __pyx_string_tab[300]
#define __pyx_kp_u_This_EccSramErrorStatus_v1_insta __pyx_string_tab[301]
#define __pyx_kp_u_This_EccSramUniqueUncorrectedErr __pyx_string_tab[302]
#define __pyx_kp_u_This_EventData_instance_is_read __pyx_string_tab[303]
#define __pyx_kp_u_This_ExcludedDeviceInfo_instance __pyx_string_tab[304]
#define __pyx_kp_u_This_FBCStats_instance_is_read_o __pyx_string_tab[305]
#define __pyx_kp_u_This_FanSpeedInfo_v1_instance_is __pyx_string_tab[306]
#define __pyx_kp_u_This_GpmSupport_instance_is_read __pyx_string_tab[307]
#define __pyx_kp_u_This_GpuDynamicPstatesInfo_insta __pyx_string_tab[308]
#define __pyx_kp_u_This_GpuFabricInfo_v3_instance_i __pyx_string_tab[309]
#define __pyx_kp_u_This_GpuInstanceInfo_instance_is __pyx_string_tab[310]
#define __pyx_kp_u_This_GpuInstanceProfileInfo_v2_i __pyx_string_tab[311]
#define __pyx_kp_u_This_GpuInstanceProfileInfo_v3_i __pyx_string_tab[312]
#define __pyx_kp_u_This_GpuThermalSettings_instance __pyx_string_tab[313]
#define __pyx_kp_u_This_GridLicensableFeatures_inst __pyx_string_tab[314]
#define __pyx_kp_u_This_GridLicenseExpiry_instance __pyx_string_tab[315]
#define __pyx_kp_u_This_LedState_instance_is_read_o __pyx_string_tab[316]
#define __pyx_kp_u_This_MarginTemperature_v1_instan __pyx_string_tab[317]
#define __pyx_kp_u_This_Memory_instance_is_read_onl __pyx_string_tab[318]
#define __pyx_kp_u_This_Memory_v2_instance_is_read __pyx_string_tab[319]
#define __pyx_kp_u_This_NvLinkInfo_v2_instance_is_r __pyx_string_tab[320]
#define __pyx_kp_u_This_NvlinkFirmwareInfo_instance __pyx_string_tab[321]
#define __pyx_kp_u_This_NvlinkFirmwareVersion_insta __pyx_string_tab[322]
#define __pyx_kp_u_This_NvlinkGetBwMode_v1_instance __pyx_string_tab[323]
#define __pyx_kp_u_This_NvlinkSetBwMode_v1_instance __pyx_string_tab[324]
#define __pyx_kp_u_This_NvlinkSupportedBwModes_v1_i __pyx_string_tab[325]
#define __pyx_kp_u_This_PSUInfo_instance_is_read_on __pyx_string_tab[326]
#define __pyx_kp_u_This_PciInfoExt_v1_instance_is_r __pyx_string_tab[327]
#define __pyx_kp_u_This_PciInfo_instance_is_read_on __pyx_string_tab[328]
#define __pyx_kp_u_This_Pdi_v1_instance_is_read_onl __pyx_string_tab[329]
#define __pyx_kp_u_This_PlatformInfo_v2_instance_is __pyx_string_tab[330]
#define __pyx_kp_u_This_ProcessDetailList_v1_instan __pyx_string_tab[331]
#define __pyx_kp_u_This_ProcessesUtilizationInfo_v1 __pyx_string_tab[332]
#define __pyx_kp_u_This_RepairStatus_v1_instance_is __pyx_string_tab[333]
#define __pyx_kp_u_This_RowRemapperHistogramValues __pyx_string_tab[334]
#define __pyx_kp_u_This_SystemConfComputeSettings_v __pyx_string_tab[335]
#define __pyx_kp_u_This_UnitFanSpeeds_instance_is_r __pyx_string_tab[336]
#define __pyx_kp_u_This_UnitInfo_instance_is_read_o __pyx_string_tab[337]
#define __pyx_kp_u_This_Utilization_instance_is_rea __pyx_string_tab[338]
#define __pyx_kp_u_This_Value_instance_is_read_only __pyx_string_tab[339]
#define __pyx_kp_u_This_VgpuCreatablePlacementInfo __pyx_string_tab[340]
#define __pyx_kp_u_This_VgpuHeterogeneousMode_v1_in __pyx_string_tab[341]
#define __pyx_kp_u_This_VgpuInstancesUtilizationInf __pyx_string_tab[342]
#define __pyx_kp_u_This_VgpuLicenseExpiry_instance __pyx_string_tab[343]
#define __pyx_kp_u_This_VgpuLicenseInfo_instance_is __pyx_string_tab[344]
#define __pyx_kp_u_This_VgpuMetadata_instance_is_re __pyx_string_tab[345]
#define __pyx_kp_u_This_VgpuPgpuCompatibility_insta __pyx_string_tab[346]
#define __pyx_kp_u_This_VgpuPgpuMetadata_instance_i __pyx_string_tab[347]
#define __pyx_kp_u_This_VgpuPlacementId_v1_instance __pyx_string_tab[348]
#define __pyx_kp_u_This_VgpuPlacementList_v2_instan __pyx_string_tab[349]
#define __pyx_kp_u_This_VgpuProcessesUtilizationInf __pyx_string_tab[350]
#define __pyx_kp_u_This_VgpuRuntimeState_v1_instanc __pyx_string_tab[351]
#define __pyx_kp_u_This_VgpuSchedulerCapabilities_i __pyx_string_tab[352]
#define __pyx_kp_u_This_VgpuSchedulerGetState_insta __pyx_string_tab[353]
#define __pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins __pyx_string_tab[354]
#define __pyx_kp_u_This_VgpuSchedulerLog_instance_i __pyx_string_tab[355]
#define __pyx_kp_u_This_VgpuSchedulerParams_instanc __pyx_string_tab[356]
#define __pyx_kp_u_This_VgpuSchedulerSetParams_inst __pyx_string_tab[357]
#define __pyx_kp_u_This_VgpuSchedulerStateInfo_v1_i __pyx_string_tab[358]
#define __pyx_kp_u_This_VgpuSchedulerState_v1_insta __pyx_string_tab[359]
#define __pyx_kp_u_This_VgpuTypeBar1Info_v1_instanc __pyx_string_tab[360]
#define __pyx_kp_u_This_VgpuTypeIdInfo_v1_instance __pyx_string_tab[361]
#define __pyx_kp_u_This_VgpuTypeMaxInstance_v1_inst __pyx_string_tab[362]
#define __pyx_kp_u_This_VgpuVersion_instance_is_rea __pyx_string_tab[363]
#define __pyx_kp_u_This__py_anon_pod0_instance_is_r __pyx_string_tab[364]
#define __pyx_kp_u_This__py_anon_pod1_instance_is_r __pyx_string_tab[365]
#define __pyx_kp_u_This__py_anon_pod2_instance_is_r __pyx_string_tab[366]
#define __pyx_kp_u_This__py_anon_pod3_instance_is_r __pyx_string_tab[367]
#define __pyx_kp_u_This__py_anon_pod4_instance_is_r __pyx_string_tab[368]
#define __pyx_kp_u_This__py_anon_pod5_instance_is_r __pyx_string_tab[369]
#define __pyx_kp_u_Unable_to_convert_item_to_object __pyx_string_tab[370]
#define __pyx_kp_u_UnitFanInfo_Array __pyx_string_tab[371]
#define __pyx_kp_u_UnitFanInfo_object_at __pyx_string_tab[372]
#define __pyx_kp_u_UnitFanSpeeds_object_at __pyx_string_tab[373]
#define __pyx_kp_u_UnitInfo_object_at __pyx_string_tab[374]
#define __pyx_kp_u_Utilization_object_at __pyx_string_tab[375]
#define __pyx_kp_u_Value_object_at __pyx_string_tab[376]
#define __pyx_kp_u_VgpuCreatablePlacementInfo_v1_o __pyx_string_tab[377]
#define __pyx_kp_u_VgpuHeterogeneousMode_v1_object __pyx_string_tab[378]
#define __pyx_kp_u_VgpuInstanceUtilizationInfo_v1 __pyx_string_tab[379]
#define __pyx_kp_u_VgpuInstanceUtilizationInfo_v1_2 __pyx_string_tab[380]
#define __pyx_kp_u_VgpuInstancesUtilizationInfo_v1 __pyx_string_tab[381]
#define __pyx_kp_u_VgpuLicenseExpiry_object_at __pyx_string_tab[382]
#define __pyx_kp_u_VgpuLicenseInfo_object_at __pyx_string_tab[383]
#define __pyx_kp_u_VgpuMetadata_object_at __pyx_string_tab[384]
#define __pyx_kp_u_VgpuPgpuCompatibility_object_at __pyx_string_tab[385]
#define __pyx_kp_u_VgpuPgpuMetadata_object_at __pyx_string_tab[386]
#define __pyx_kp_u_VgpuPlacementId_v1_object_at __pyx_string_tab[387]
#define __pyx_kp_u_VgpuPlacementList_v2_object_at __pyx_string_tab[388]
#define __pyx_kp_u_VgpuProcessUtilizationInfo_v1_A __pyx_string_tab[389]
#define __pyx_kp_u_VgpuProcessUtilizationInfo_v1_o __pyx_string_tab[390]
#define __pyx_kp_u_VgpuProcessesUtilizationInfo_v1 __pyx_string_tab[391]
#define __pyx_kp_u_VgpuRuntimeState_v1_object_at __pyx_string_tab[392]
#define __pyx_kp_u_VgpuSchedulerCapabilities_objec __pyx_string_tab[393]
#define __pyx_kp_u_VgpuSchedulerGetState_object_at __pyx_string_tab[394]
#define __pyx_kp_u_VgpuSchedulerLogEntry_Array __pyx_string_tab[395]
#define __pyx_kp_u_VgpuSchedulerLogEntry_object_at __pyx_string_tab[396]
#define __pyx_kp_u_VgpuSchedulerLogInfo_v1_object __pyx_string_tab[397]
#define __pyx_kp_u_VgpuSchedulerLog_object_at __pyx_string_tab[398]
#define __pyx_kp_u_VgpuSchedulerParams_object_at __pyx_string_tab[399]
#define __pyx_kp_u_VgpuSchedulerSetParams_object_a __pyx_string_tab[400]
#define __pyx_kp_u_VgpuSchedulerStateInfo_v1_objec __pyx_string_tab[401]
#define __pyx_kp_u_VgpuSchedulerState_v1_object_at __pyx_string_tab[402]
#define __pyx_kp_u_VgpuTypeBar1Info_v1_object_at __pyx_string_tab[403]
#define __pyx_kp_u_VgpuTypeIdInfo_v1_object_at __pyx_string_tab[404]
#define __pyx_kp_u_VgpuTypeMaxInstance_v1_object_a __pyx_string_tab[405]
#define __pyx_kp_u_VgpuVersion_object_at __pyx_string_tab[406]
#define __pyx_kp_u__2 __pyx_string_tab[407]
#define __pyx_kp_u__3 __pyx_string_tab[408]
#define __pyx_kp_u__4 __pyx_string_tab[409]
#define __pyx_kp_u__5 __pyx_string_tab[410]
#define __pyx_kp_u__6 __pyx_string_tab[411]
#define __pyx_kp_u__7 __pyx_string_tab[412]
#define __pyx_kp_u__8 __pyx_string_tab[413]
#define __pyx_kp_u_add_note __pyx_string_tab[414]
#define __pyx_kp_u_and __pyx_string_tab[415]
#define __pyx_kp_u_at_0x __pyx_string_tab[416]
#define __pyx_kp_u_collections_abc __pyx_string_tab[417]
#define __pyx_kp_u_contiguous_and_direct __pyx_string_tab[418]
#define __pyx_kp_u_contiguous_and_indirect __pyx_string_tab[419]
#define __pyx_kp_u_cuda_bindings__nvml_pyx __pyx_string_tab[420]
#define __pyx_kp_u_data_argument_must_be_a_NumPy_nd __pyx_string_tab[421]
#define __pyx_kp_u_data_array_must_be_1D __pyx_string_tab[422]
#define __pyx_kp_u_data_array_must_be_of_dtype __pyx_string_tab[423]
#define __pyx_kp_u_data_array_must_be_of_dtype_brid __pyx_string_tab[424]
#define __pyx_kp_u_data_array_must_be_of_dtype_clk __pyx_string_tab[425]
#define __pyx_kp_u_data_array_must_be_of_dtype_comp __pyx_string_tab[426]
#define __pyx_kp_u_data_array_must_be_of_dtype_ecc __pyx_string_tab[427]
#define __pyx_kp_u_data_array_must_be_of_dtype_enco __pyx_string_tab[428]
#define __pyx_kp_u_data_array_must_be_of_dtype_fbc __pyx_string_tab[429]
#define __pyx_kp_u_data_array_must_be_of_dtype_fiel __pyx_string_tab[430]
#define __pyx_kp_u_data_array_must_be_of_dtype_gpu __pyx_string_tab[431]
#define __pyx_kp_u_data_array_must_be_of_dtype_grid __pyx_string_tab[432]
#define __pyx_kp_u_data_array_must_be_of_dtype_hwbc __pyx_string_tab[433]
#define __pyx_kp_u_data_array_must_be_of_dtype_proc __pyx_string_tab[434]
#define __pyx_kp_u_data_array_must_be_of_dtype_proc_2 __pyx_string_tab[435]
#define __pyx_kp_u_data_array_must_be_of_dtype_proc_3 __pyx_string_tab[436]
#define __pyx_kp_u_data_array_must_be_of_dtype_proc_4 __pyx_string_tab[437]
#define __pyx_kp_u_data_array_must_be_of_dtype_samp __pyx_string_tab[438]
#define __pyx_kp_u_data_array_must_be_of_dtype_unit __pyx_string_tab[439]
#define __pyx_kp_u_data_array_must_be_of_dtype_vgpu __pyx_string_tab[440]
#define __pyx_kp_u_data_array_must_be_of_dtype_vgpu_2 __pyx_string_tab[441]
#define __pyx_kp_u_data_array_must_be_of_dtype_vgpu_3 __pyx_string_tab[442]
#define __pyx_kp_u_data_array_must_have_a_size_of_1 __pyx_string_tab[443]
#define __pyx_kp_u_disable __pyx_string_tab[444]
#define __pyx_kp_u_enable __pyx_string_tab[445]
#define __pyx_kp_u_gc __pyx_string_tab[446]
#define __pyx_kp_u_got __pyx_string_tab[447]
#define __pyx_kp_u_got_differing_extents_in_dimensi __pyx_string_tab[448]
#define __pyx_kp_u_hostname_must_64_characters_or_l __pyx_string_tab[449]
#define __pyx_kp_u_index_is_out_of_bounds __pyx_string_tab[450]
#define __pyx_kp_u_int_argument_must_be_a_bytes_lik __pyx_string_tab[451]
#define __pyx_kp_u_isenabled __pyx_string_tab[452]
#define __pyx_kp_u_itemsize_0_for_cython_array __pyx_string_tab[453]
#define __pyx_kp_u_itemsize_2 __pyx_string_tab[454]
#define __pyx_kp_u_mismatches_struct_size __pyx_string_tab[455]
#define __pyx_kp_u_no_default___reduce___due_to_non __pyx_string_tab[456]
#define __pyx_kp_u_object __pyx_string_tab[457]
#define __pyx_kp_u_object_at __pyx_string_tab[458]
#define __pyx_kp_u_pci_bus_id_must_be_a_Python_str __pyx_string_tab[459]
#define __pyx_kp_u_ptr_must_not_be_null_0 __pyx_string_tab[460]
#define __pyx_kp_u_py_anon_pod0_object_at __pyx_string_tab[461]
#define __pyx_kp_u_py_anon_pod1_object_at __pyx_string_tab[462]
#define __pyx_kp_u_py_anon_pod2_object_at __pyx_string_tab[463]
#define __pyx_kp_u_py_anon_pod3_object_at __pyx_string_tab[464]
#define __pyx_kp_u_py_anon_pod4_object_at __pyx_string_tab[465]
#define __pyx_kp_u_py_anon_pod5_object_at __pyx_string_tab[466]
#define __pyx_kp_u_self__data_is_not_None __pyx_string_tab[467]
#define __pyx_kp_u_self__ptr_cannot_be_converted_to __pyx_string_tab[468]
#define __pyx_kp_u_self_name_is_not_None __pyx_string_tab[469]
#define __pyx_kp_u_serial_must_be_a_Python_str __pyx_string_tab[470]
#define __pyx_kp_u_strided_and_direct __pyx_string_tab[471]
#define __pyx_kp_u_strided_and_direct_or_indirect __pyx_string_tab[472]
#define __pyx_kp_u_strided_and_indirect __pyx_string_tab[473]
#define __pyx_kp_u_stringsource __pyx_string_tab[474]
#define __pyx_kp_u_unable_to_allocate_array_data __pyx_string_tab[475]
#define __pyx_kp_u_unable_to_allocate_shape_and_str __pyx_string_tab[476]
#define __pyx_kp_u_uuid_must_be_a_Python_str __pyx_string_tab[477]
/* From here on, __pyx_n_u_* aliases: slots used for identifier-like
 * strings (enum member names, class/method names, attribute names).
 * Same caveat as above -- generated, index-coupled, do not hand-edit. */
#define __pyx_n_u_ADM1032 __pyx_string_tab[478]
#define __pyx_n_u_ADT7461 __pyx_string_tab[479]
#define __pyx_n_u_ADT7473 __pyx_string_tab[480]
#define __pyx_n_u_ADT7473S __pyx_string_tab[481]
#define __pyx_n_u_AGGREGATE_ECC __pyx_string_tab[482]
#define __pyx_n_u_ALL __pyx_string_tab[483]
#define __pyx_n_u_AMBER __pyx_string_tab[484]
#define __pyx_n_u_APP_CLOCK_DEFAULT __pyx_string_tab[485]
#define __pyx_n_u_APP_CLOCK_TARGET __pyx_string_tab[486]
#define __pyx_n_u_ASCII __pyx_string_tab[487]
#define __pyx_n_u_AccountingStats __pyx_string_tab[488]
#define __pyx_n_u_AccountingStats___reduce_cython __pyx_string_tab[489]
#define __pyx_n_u_AccountingStats___setstate_cytho __pyx_string_tab[490]
#define __pyx_n_u_AccountingStats_from_data __pyx_string_tab[491]
#define __pyx_n_u_AccountingStats_from_ptr __pyx_string_tab[492]
#define __pyx_n_u_ActiveVgpuInstanceInfo_v1 __pyx_string_tab[493]
#define __pyx_n_u_ActiveVgpuInstanceInfo_v1___redu __pyx_string_tab[494]
#define __pyx_n_u_ActiveVgpuInstanceInfo_v1___sets __pyx_string_tab[495]
#define __pyx_n_u_ActiveVgpuInstanceInfo_v1_from_d __pyx_string_tab[496]
#define __pyx_n_u_ActiveVgpuInstanceInfo_v1_from_p __pyx_string_tab[497]
#define __pyx_n_u_AffinityScope __pyx_string_tab[498]
#define __pyx_n_u_AlreadyInitializedError __pyx_string_tab[499]
#define __pyx_n_u_ArgumentVersionMismatchError __pyx_string_tab[500]
#define __pyx_n_u_B __pyx_string_tab[501]
#define __pyx_n_u_BAR1Memory __pyx_string_tab[502]
#define __pyx_n_u_BAR1Memory___reduce_cython __pyx_string_tab[503]
#define __pyx_n_u_BAR1Memory___setstate_cython __pyx_string_tab[504]
#define __pyx_n_u_BAR1Memory_from_data __pyx_string_tab[505]
#define __pyx_n_u_BAR1Memory_from_ptr __pyx_string_tab[506]
#define __pyx_n_u_BINARY __pyx_string_tab[507]
#define __pyx_n_u_BOARD __pyx_string_tab[508]
#define __pyx_n_u_BRAND_COUNT __pyx_string_tab[509]
#define __pyx_n_u_BRAND_GEFORCE __pyx_string_tab[510]
#define __pyx_n_u_BRAND_GEFORCE_RTX __pyx_string_tab[511]
#define __pyx_n_u_BRAND_GRID __pyx_string_tab[512]
#define __pyx_n_u_BRAND_NVIDIA __pyx_string_tab[513]
#define __pyx_n_u_BRAND_NVIDIA_CLOUD_GAMING __pyx_string_tab[514]
#define __pyx_n_u_BRAND_NVIDIA_RTX __pyx_string_tab[515]
#define __pyx_n_u_BRAND_NVIDIA_VAPPS __pyx_string_tab[516]
#define __pyx_n_u_BRAND_NVIDIA_VCS __pyx_string_tab[517]
#define __pyx_n_u_BRAND_NVIDIA_VGAMING __pyx_string_tab[518]
#define __pyx_n_u_BRAND_NVIDIA_VPC __pyx_string_tab[519]
#define __pyx_n_u_BRAND_NVIDIA_VWS __pyx_string_tab[520]
#define __pyx_n_u_BRAND_NVS __pyx_string_tab[521]
#define __pyx_n_u_BRAND_QUADRO __pyx_string_tab[522]
#define __pyx_n_u_BRAND_QUADRO_RTX __pyx_string_tab[523]
#define __pyx_n_u_BRAND_TESLA __pyx_string_tab[524]
#define __pyx_n_u_BRAND_TITAN __pyx_string_tab[525]
#define __pyx_n_u_BRAND_TITAN_RTX __pyx_string_tab[526]
#define __pyx_n_u_BRAND_UNKNOWN __pyx_string_tab[527]
#define __pyx_n_u_BRIDGE_CHIP_BRO4 __pyx_string_tab[528]
#define __pyx_n_u_BRIDGE_CHIP_PLX __pyx_string_tab[529]
#define __pyx_n_u_BrandType __pyx_string_tab[530]
#define __pyx_n_u_BridgeChipHierarchy __pyx_string_tab[531]
#define __pyx_n_u_BridgeChipHierarchy___reduce_cyt __pyx_string_tab[532]
#define __pyx_n_u_BridgeChipHierarchy___setstate_c __pyx_string_tab[533]
#define __pyx_n_u_BridgeChipHierarchy_from_data __pyx_string_tab[534]
#define __pyx_n_u_BridgeChipHierarchy_from_ptr __pyx_string_tab[535]
#define __pyx_n_u_BridgeChipInfo __pyx_string_tab[536]
#define __pyx_n_u_BridgeChipInfo___reduce_cython __pyx_string_tab[537]
#define __pyx_n_u_BridgeChipInfo___setstate_cython __pyx_string_tab[538]
#define __pyx_n_u_BridgeChipInfo_from_data __pyx_string_tab[539]
#define __pyx_n_u_BridgeChipInfo_from_ptr __pyx_string_tab[540]
#define __pyx_n_u_BridgeChipType __pyx_string_tab[541]
#define __pyx_n_u_C2cModeInfo_v1 __pyx_string_tab[542]
#define __pyx_n_u_C2cModeInfo_v1___reduce_cython __pyx_string_tab[543]
#define __pyx_n_u_C2cModeInfo_v1___setstate_cython __pyx_string_tab[544]
#define __pyx_n_u_C2cModeInfo_v1_from_data __pyx_string_tab[545]
#define __pyx_n_u_C2cModeInfo_v1_from_ptr __pyx_string_tab[546]
#define __pyx_n_u_CBU __pyx_string_tab[547]
#define __pyx_n_u_CLOCK_COUNT __pyx_string_tab[548]
#define __pyx_n_u_CLOCK_GRAPHICS __pyx_string_tab[549]
#define __pyx_n_u_CLOCK_MEM __pyx_string_tab[550]
#define __pyx_n_u_CLOCK_SM __pyx_string_tab[551]
#define __pyx_n_u_CLOCK_VIDEO __pyx_string_tab[552]
#define __pyx_n_u_COLD __pyx_string_tab[553]
#define __pyx_n_u_COMPUTE __pyx_string_tab[554]
#define __pyx_n_u_COMPUTEMODE_COUNT __pyx_string_tab[555]
#define __pyx_n_u_COMPUTEMODE_DEFAULT __pyx_string_tab[556]
#define __pyx_n_u_COMPUTEMODE_EXCLUSIVE_PROCESS __pyx_string_tab[557]
#define __pyx_n_u_COMPUTEMODE_EXCLUSIVE_THREAD __pyx_string_tab[558]
#define __pyx_n_u_COMPUTEMODE_PROHIBITED __pyx_string_tab[559]
#define __pyx_n_u_CORRECTED __pyx_string_tab[560]
#define __pyx_n_u_COUNT __pyx_string_tab[561]
#define __pyx_n_u_CUDA __pyx_string_tab[562]
#define __pyx_n_u_CURRENT __pyx_string_tab[563]
#define __pyx_n_u_CUSTOMER_BOOST_MAX __pyx_string_tab[564]
#define __pyx_n_u_ClkMonFaultInfo __pyx_string_tab[565]
#define __pyx_n_u_ClkMonFaultInfo___reduce_cython __pyx_string_tab[566]
#define __pyx_n_u_ClkMonFaultInfo___setstate_cytho __pyx_string_tab[567]
#define __pyx_n_u_ClkMonFaultInfo_from_data __pyx_string_tab[568]
#define __pyx_n_u_ClkMonFaultInfo_from_ptr __pyx_string_tab[569]
#define __pyx_n_u_ClkMonStatus __pyx_string_tab[570]
#define __pyx_n_u_ClkMonStatus___reduce_cython __pyx_string_tab[571]
#define __pyx_n_u_ClkMonStatus___setstate_cython __pyx_string_tab[572]
#define __pyx_n_u_ClkMonStatus_from_data __pyx_string_tab[573]
#define __pyx_n_u_ClkMonStatus_from_ptr __pyx_string_tab[574]
#define __pyx_n_u_ClockId __pyx_string_tab[575]
#define __pyx_n_u_ClockLimitId __pyx_string_tab[576]
#define __pyx_n_u_ClockOffset_v1 __pyx_string_tab[577]
#define __pyx_n_u_ClockOffset_v1___reduce_cython __pyx_string_tab[578]
#define __pyx_n_u_ClockOffset_v1___setstate_cython __pyx_string_tab[579]
#define __pyx_n_u_ClockOffset_v1_from_data __pyx_string_tab[580]
#define __pyx_n_u_ClockOffset_v1_from_ptr __pyx_string_tab[581]
#define __pyx_n_u_ClockType __pyx_string_tab[582]
#define __pyx_n_u_ComputeInstanceInfo __pyx_string_tab[583]
#define __pyx_n_u_ComputeInstanceInfo___reduce_cyt __pyx_string_tab[584]
#define __pyx_n_u_ComputeInstanceInfo___setstate_c __pyx_string_tab[585]
#define __pyx_n_u_ComputeInstanceInfo_from_data __pyx_string_tab[586]
#define __pyx_n_u_ComputeInstanceInfo_from_ptr __pyx_string_tab[587]
#define __pyx_n_u_ComputeInstancePlacement __pyx_string_tab[588]
#define __pyx_n_u_ComputeInstancePlacement___reduc __pyx_string_tab[589]
#define __pyx_n_u_ComputeInstancePlacement___setst __pyx_string_tab[590]
#define __pyx_n_u_ComputeInstancePlacement_from_da __pyx_string_tab[591]
#define __pyx_n_u_ComputeInstancePlacement_from_pt __pyx_string_tab[592]
#define __pyx_n_u_ComputeInstanceProfileInfo_v2 __pyx_string_tab[593]
#define __pyx_n_u_ComputeInstanceProfileInfo_v2_2 __pyx_string_tab[594]
#define __pyx_n_u_ComputeInstanceProfileInfo_v2_3 __pyx_string_tab[595]
#define __pyx_n_u_ComputeInstanceProfileInfo_v2_fr __pyx_string_tab[596]
#define __pyx_n_u_ComputeInstanceProfileInfo_v2_fr_2 __pyx_string_tab[597]
#define __pyx_n_u_ComputeInstanceProfileInfo_v3 __pyx_string_tab[598]
#define __pyx_n_u_ComputeInstanceProfileInfo_v3_2 __pyx_string_tab[599]
#define __pyx_n_u_ComputeInstanceProfileInfo_v3_3 __pyx_string_tab[600]
#define __pyx_n_u_ComputeInstanceProfileInfo_v3_fr __pyx_string_tab[601]
#define __pyx_n_u_ComputeInstanceProfileInfo_v3_fr_2 __pyx_string_tab[602]
#define __pyx_n_u_ComputeMode __pyx_string_tab[603]
#define __pyx_n_u_ConfComputeGetKeyRotationThresho __pyx_string_tab[604]
#define __pyx_n_u_ConfComputeGetKeyRotationThresho_2 __pyx_string_tab[605]
#define __pyx_n_u_ConfComputeGetKeyRotationThresho_3 __pyx_string_tab[606]
#define __pyx_n_u_ConfComputeGetKeyRotationThresho_4 __pyx_string_tab[607]
#define __pyx_n_u_ConfComputeGetKeyRotationThresho_5 __pyx_string_tab[608]
#define __pyx_n_u_ConfComputeGpuAttestationReport_2 __pyx_string_tab[609]
#define __pyx_n_u_ConfComputeGpuAttestationReport_3 __pyx_string_tab[610]
#define __pyx_n_u_ConfComputeGpuAttestationReport_4 __pyx_string_tab[611]
#define __pyx_n_u_ConfComputeGpuAttestationReport_5 __pyx_string_tab[612]
#define __pyx_n_u_ConfComputeGpuAttestationReport_6 __pyx_string_tab[613]
#define __pyx_n_u_ConfComputeGpuCertificate __pyx_string_tab[614]
#define __pyx_n_u_ConfComputeGpuCertificate___redu __pyx_string_tab[615]
#define __pyx_n_u_ConfComputeGpuCertificate___sets __pyx_string_tab[616]
#define __pyx_n_u_ConfComputeGpuCertificate_from_d __pyx_string_tab[617]
#define __pyx_n_u_ConfComputeGpuCertificate_from_p __pyx_string_tab[618]
#define __pyx_n_u_ConfComputeMemSizeInfo __pyx_string_tab[619]
#define __pyx_n_u_ConfComputeMemSizeInfo___reduce __pyx_string_tab[620]
#define __pyx_n_u_ConfComputeMemSizeInfo___setstat __pyx_string_tab[621]
#define __pyx_n_u_ConfComputeMemSizeInfo_from_data __pyx_string_tab[622]
#define __pyx_n_u_ConfComputeMemSizeInfo_from_ptr __pyx_string_tab[623]
#define __pyx_n_u_ConfComputeSystemCaps __pyx_string_tab[624]
#define __pyx_n_u_ConfComputeSystemCaps___reduce_c __pyx_string_tab[625]
#define __pyx_n_u_ConfComputeSystemCaps___setstate __pyx_string_tab[626]
#define __pyx_n_u_ConfComputeSystemCaps_from_data __pyx_string_tab[627]
#define __pyx_n_u_ConfComputeSystemCaps_from_ptr __pyx_string_tab[628]
#define __pyx_n_u_ConfComputeSystemState __pyx_string_tab[629]
#define __pyx_n_u_ConfComputeSystemState___reduce __pyx_string_tab[630]
#define __pyx_n_u_ConfComputeSystemState___setstat __pyx_string_tab[631]
#define __pyx_n_u_ConfComputeSystemState_from_data __pyx_string_tab[632]
#define __pyx_n_u_ConfComputeSystemState_from_ptr __pyx_string_tab[633]
#define __pyx_n_u_CoolerControl __pyx_string_tab[634]
#define __pyx_n_u_CoolerInfo_v1 __pyx_string_tab[635]
#define __pyx_n_u_CoolerInfo_v1___reduce_cython __pyx_string_tab[636]
#define __pyx_n_u_CoolerInfo_v1___setstate_cython __pyx_string_tab[637]
#define __pyx_n_u_CoolerInfo_v1_from_data __pyx_string_tab[638]
#define __pyx_n_u_CoolerInfo_v1_from_ptr __pyx_string_tab[639]
#define __pyx_n_u_CoolerTarget __pyx_string_tab[640]
#define __pyx_n_u_CorruptedInforomError __pyx_string_tab[641]
#define __pyx_n_u_DEC_UTILIZATION_SAMPLES __pyx_string_tab[642]
#define __pyx_n_u_DETACH_GPU_KEEP __pyx_string_tab[643]
#define __pyx_n_u_DETACH_GPU_REMOVE __pyx_string_tab[644]
#define __pyx_n_u_DEVICE_ADDRESSING_MODE_ATS __pyx_string_tab[645]
#define __pyx_n_u_DEVICE_ADDRESSING_MODE_HMM __pyx_string_tab[646]
#define __pyx_n_u_DEVICE_ADDRESSING_MODE_NONE __pyx_string_tab[647]
#define __pyx_n_u_DEVICE_MEMORY __pyx_string_tab[648]
#define __pyx_n_u_DEVICE_VGPU_CAP_COMPUTE_MEDIA_EN __pyx_string_tab[649]
#define __pyx_n_u_DEVICE_VGPU_CAP_COUNT __pyx_string_tab[650]
#define __pyx_n_u_DEVICE_VGPU_CAP_DEVICE_STREAMING __pyx_string_tab[651]
#define __pyx_n_u_DEVICE_VGPU_CAP_FRACTIONAL_MULTI __pyx_string_tab[652]
#define __pyx_n_u_DEVICE_VGPU_CAP_HETEROGENEOUS_TI __pyx_string_tab[653]
#define __pyx_n_u_DEVICE_VGPU_CAP_HETEROGENEOUS_TI_2 __pyx_string_tab[654]
#define __pyx_n_u_DEVICE_VGPU_CAP_HOMOGENEOUS_PLAC __pyx_string_tab[655]
#define __pyx_n_u_DEVICE_VGPU_CAP_MIG_TIMESLICING __pyx_string_tab[656]
#define __pyx_n_u_DEVICE_VGPU_CAP_MIG_TIMESLICING_2 __pyx_string_tab[657]
#define __pyx_n_u_DEVICE_VGPU_CAP_MINI_QUARTER_GPU __pyx_string_tab[658]
#define __pyx_n_u_DEVICE_VGPU_CAP_READ_DEVICE_BUFF __pyx_string_tab[659]
#define __pyx_n_u_DEVICE_VGPU_CAP_WARM_UPDATE __pyx_string_tab[660]
#define __pyx_n_u_DEVICE_VGPU_CAP_WRITE_DEVICE_BUF __pyx_string_tab[661]
#define __pyx_n_u_DEV_C2C_LINK_COUNT __pyx_string_tab[662]
#define __pyx_n_u_DEV_C2C_LINK_ERROR_INTR __pyx_string_tab[663]
#define __pyx_n_u_DEV_C2C_LINK_ERROR_REPLAY __pyx_string_tab[664]
#define __pyx_n_u_DEV_C2C_LINK_ERROR_REPLAY_B2B __pyx_string_tab[665]
#define __pyx_n_u_DEV_C2C_LINK_GET_MAX_BW __pyx_string_tab[666]
#define __pyx_n_u_DEV_C2C_LINK_GET_STATUS __pyx_string_tab[667]
#define __pyx_n_u_DEV_C2C_LINK_POWER_STATE __pyx_string_tab[668]
#define __pyx_n_u_DEV_CLOCKS_EVENT_REASON_HW_POWER __pyx_string_tab[669]
#define __pyx_n_u_DEV_CLOCKS_EVENT_REASON_HW_THERM __pyx_string_tab[670]
#define __pyx_n_u_DEV_CLOCKS_EVENT_REASON_SW_POWER __pyx_string_tab[671]
#define __pyx_n_u_DEV_CLOCKS_EVENT_REASON_SW_THERM __pyx_string_tab[672]
#define __pyx_n_u_DEV_CLOCKS_EVENT_REASON_SYNC_BOO __pyx_string_tab[673]
#define __pyx_n_u_DEV_DRAIN_AND_RESET_STATUS __pyx_string_tab[674]
#define __pyx_n_u_DEV_ECC_CURRENT __pyx_string_tab[675]
#define __pyx_n_u_DEV_ECC_DBE_AGG_CBU __pyx_string_tab[676]
#define __pyx_n_u_DEV_ECC_DBE_AGG_DEV __pyx_string_tab[677]
#define __pyx_n_u_DEV_ECC_DBE_AGG_L1 __pyx_string_tab[678]
#define __pyx_n_u_DEV_ECC_DBE_AGG_L2 __pyx_string_tab[679]
#define __pyx_n_u_DEV_ECC_DBE_AGG_REG __pyx_string_tab[680]
#define __pyx_n_u_DEV_ECC_DBE_AGG_TEX __pyx_string_tab[681]
#define __pyx_n_u_DEV_ECC_DBE_AGG_TOTAL __pyx_string_tab[682]
#define __pyx_n_u_DEV_ECC_DBE_VOL_CBU __pyx_string_tab[683]
#define __pyx_n_u_DEV_ECC_DBE_VOL_DEV __pyx_string_tab[684]
#define __pyx_n_u_DEV_ECC_DBE_VOL_L1 __pyx_string_tab[685]
#define __pyx_n_u_DEV_ECC_DBE_VOL_L2 __pyx_string_tab[686]
#define __pyx_n_u_DEV_ECC_DBE_VOL_REG __pyx_string_tab[687]
#define __pyx_n_u_DEV_ECC_DBE_VOL_TEX __pyx_string_tab[688]
#define __pyx_n_u_DEV_ECC_DBE_VOL_TOTAL __pyx_string_tab[689]
#define __pyx_n_u_DEV_ECC_PENDING __pyx_string_tab[690]
#define __pyx_n_u_DEV_ECC_SBE_AGG_DEV __pyx_string_tab[691]
#define __pyx_n_u_DEV_ECC_SBE_AGG_L1 __pyx_string_tab[692]
#define __pyx_n_u_DEV_ECC_SBE_AGG_L2 __pyx_string_tab[693]
#define __pyx_n_u_DEV_ECC_SBE_AGG_REG __pyx_string_tab[694]
#define __pyx_n_u_DEV_ECC_SBE_AGG_TEX __pyx_string_tab[695]
#define __pyx_n_u_DEV_ECC_SBE_AGG_TOTAL __pyx_string_tab[696]
#define __pyx_n_u_DEV_ECC_SBE_VOL_DEV __pyx_string_tab[697]
#define __pyx_n_u_DEV_ECC_SBE_VOL_L1 __pyx_string_tab[698]
#define __pyx_n_u_DEV_ECC_SBE_VOL_L2 __pyx_string_tab[699]
#define __pyx_n_u_DEV_ECC_SBE_VOL_REG __pyx_string_tab[700]
#define __pyx_n_u_DEV_ECC_SBE_VOL_TEX __pyx_string_tab[701]
#define __pyx_n_u_DEV_ECC_SBE_VOL_TOTAL __pyx_string_tab[702]
#define __pyx_n_u_DEV_ENERGY __pyx_string_tab[703]
#define __pyx_n_u_DEV_GET_GPU_RECOVERY_ACTION __pyx_string_tab[704]
#define __pyx_n_u_DEV_IS_MIG_MODE_INDEPENDENT_MIG __pyx_string_tab[705]
#define __pyx_n_u_DEV_IS_RESETLESS_MIG_SUPPORTED __pyx_string_tab[706]
#define __pyx_n_u_DEV_MEMORY_TEMP __pyx_string_tab[707]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L0 __pyx_string_tab[708]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L1 __pyx_string_tab[709]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L10 __pyx_string_tab[710]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L11 __pyx_string_tab[711]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L2 __pyx_string_tab[712]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L3 __pyx_string_tab[713]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L4 __pyx_string_tab[714]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L5 __pyx_string_tab[715]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L6 __pyx_string_tab[716]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L7 __pyx_string_tab[717]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L8 __pyx_string_tab[718]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L9 __pyx_string_tab[719]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_TOTAL __pyx_string_tab[720]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L0 __pyx_string_tab[721]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L1 __pyx_string_tab[722]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L10 __pyx_string_tab[723]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L11 __pyx_string_tab[724]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L2 __pyx_string_tab[725]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L3 __pyx_string_tab[726]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L4 __pyx_string_tab[727]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L5 __pyx_string_tab[728]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L6 __pyx_string_tab[729]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L7 __pyx_string_tab[730]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L8 __pyx_string_tab[731]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L9 __pyx_string_tab[732]
#define __pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_TOTAL __pyx_string_tab[733]
#define __pyx_n_u_DEV_NVLINK_COUNT_BUFFER_OVERRUN __pyx_string_tab[734]
#define __pyx_n_u_DEV_NVLINK_COUNT_EFFECTIVE_BER __pyx_string_tab[735]
#define __pyx_n_u_DEV_NVLINK_COUNT_EFFECTIVE_ERROR __pyx_string_tab[736]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_0 __pyx_string_tab[737]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_1 __pyx_string_tab[738]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_10 __pyx_string_tab[739]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_11 __pyx_string_tab[740]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_12 __pyx_string_tab[741]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_13 __pyx_string_tab[742]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_14 __pyx_string_tab[743]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_15 __pyx_string_tab[744]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_2 __pyx_string_tab[745]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_3 __pyx_string_tab[746]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_4 __pyx_string_tab[747]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_5 __pyx_string_tab[748]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_6 __pyx_string_tab[749]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_7 __pyx_string_tab[750]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_8 __pyx_string_tab[751]
#define __pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_9 __pyx_string_tab[752]
#define __pyx_n_u_DEV_NVLINK_COUNT_LINK_RECOVERY_E __pyx_string_tab[753]
#define __pyx_n_u_DEV_NVLINK_COUNT_LINK_RECOVERY_F __pyx_string_tab[754]
#define __pyx_n_u_DEV_NVLINK_COUNT_LINK_RECOVERY_S __pyx_string_tab[755]
#define __pyx_n_u_DEV_NVLINK_COUNT_LOCAL_LINK_INTE __pyx_string_tab[756]
#define __pyx_n_u_DEV_NVLINK_COUNT_MALFORMED_PACKE __pyx_string_tab[757]
#define __pyx_n_u_DEV_NVLINK_COUNT_RAW_BER __pyx_string_tab[758]
#define __pyx_n_u_DEV_NVLINK_COUNT_RAW_BER_LANE0 __pyx_string_tab[759]
#define __pyx_n_u_DEV_NVLINK_COUNT_RAW_BER_LANE1 __pyx_string_tab[760]
#define __pyx_n_u_DEV_NVLINK_COUNT_RCV_BYTES __pyx_string_tab[761]
#define __pyx_n_u_DEV_NVLINK_COUNT_RCV_ERRORS __pyx_string_tab[762]
#define __pyx_n_u_DEV_NVLINK_COUNT_RCV_GENERAL_ERR __pyx_string_tab[763]
#define __pyx_n_u_DEV_NVLINK_COUNT_RCV_PACKETS __pyx_string_tab[764]
#define __pyx_n_u_DEV_NVLINK_COUNT_RCV_REMOTE_ERRO __pyx_string_tab[765]
#define __pyx_n_u_DEV_NVLINK_COUNT_SYMBOL_BER __pyx_string_tab[766]
#define __pyx_n_u_DEV_NVLINK_COUNT_SYMBOL_ERRORS __pyx_string_tab[767]
#define __pyx_n_u_DEV_NVLINK_COUNT_VL15_DROPPED __pyx_string_tab[768]
#define __pyx_n_u_DEV_NVLINK_COUNT_XMIT_BYTES __pyx_string_tab[769]
#define __pyx_n_u_DEV_NVLINK_COUNT_XMIT_DISCARDS __pyx_string_tab[770]
#define __pyx_n_u_DEV_NVLINK_COUNT_XMIT_PACKETS __pyx_string_tab[771]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT __pyx_string_tab[772]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_10 __pyx_string_tab[773]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_11 __pyx_string_tab[774]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_12 __pyx_string_tab[775]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_13 __pyx_string_tab[776]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_2 __pyx_string_tab[777]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_3 __pyx_string_tab[778]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_4 __pyx_string_tab[779]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_5 __pyx_string_tab[780]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_6 __pyx_string_tab[781]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_7 __pyx_string_tab[782]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_8 __pyx_string_tab[783]
#define __pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_9 __pyx_string_tab[784]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT __pyx_string_tab[785]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_10 __pyx_string_tab[786]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_11 __pyx_string_tab[787]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_12 __pyx_string_tab[788]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_13 __pyx_string_tab[789]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_2 __pyx_string_tab[790]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_3 __pyx_string_tab[791]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_4 __pyx_string_tab[792]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_5 __pyx_string_tab[793]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_6 __pyx_string_tab[794]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_7 __pyx_string_tab[795]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_8 __pyx_string_tab[796]
#define __pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_9 __pyx_string_tab[797]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT __pyx_string_tab[798]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_10 __pyx_string_tab[799]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_11 __pyx_string_tab[800]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_12 __pyx_string_tab[801]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_13 __pyx_string_tab[802]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_2 __pyx_string_tab[803]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_3 __pyx_string_tab[804]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_4 __pyx_string_tab[805]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_5 __pyx_string_tab[806]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_6 __pyx_string_tab[807]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_7 __pyx_string_tab[808]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_8 __pyx_string_tab[809]
#define __pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_9 __pyx_string_tab[810]
#define __pyx_n_u_DEV_NVLINK_ERROR_DL_CRC __pyx_string_tab[811]
#define __pyx_n_u_DEV_NVLINK_ERROR_DL_RECOVERY __pyx_string_tab[812]
#define __pyx_n_u_DEV_NVLINK_ERROR_DL_REPLAY __pyx_string_tab[813]
#define __pyx_n_u_DEV_NVLINK_GET_POWER_STATE __pyx_string_tab[814]
#define __pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD __pyx_string_tab[815]
#define __pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_M __pyx_string_tab[816]
#define __pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_M_2 __pyx_string_tab[817]
#define __pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_S __pyx_string_tab[818]
#define __pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_U __pyx_string_tab[819]
#define __pyx_n_u_DEV_NVLINK_GET_SPEED __pyx_string_tab[820]
#define __pyx_n_u_DEV_NVLINK_GET_STATE __pyx_string_tab[821]
#define __pyx_n_u_DEV_NVLINK_GET_VERSION __pyx_string_tab[822]
#define __pyx_n_u_DEV_NVLINK_LINK_COUNT __pyx_string_tab[823]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT __pyx_string_tab[824]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_10 __pyx_string_tab[825]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_11 __pyx_string_tab[826]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_12 __pyx_string_tab[827]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_13 __pyx_string_tab[828]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_2 __pyx_string_tab[829]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_3 __pyx_string_tab[830]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_4 __pyx_string_tab[831]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_5 __pyx_string_tab[832]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_6 __pyx_string_tab[833]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_7 __pyx_string_tab[834]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_8 __pyx_string_tab[835]
#define __pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_9 __pyx_string_tab[836]
#define __pyx_n_u_DEV_NVLINK_REMOTE_NVLINK_ID __pyx_string_tab[837]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 __pyx_string_tab[838]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 __pyx_string_tab[839]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L1_2 __pyx_string_tab[840]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L1_3 __pyx_string_tab[841]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 __pyx_string_tab[842]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 __pyx_string_tab[843]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 __pyx_string_tab[844]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 __pyx_string_tab[845]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 __pyx_string_tab[846]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 __pyx_string_tab[847]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 __pyx_string_tab[848]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 __pyx_string_tab[849]
#define __pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_TO __pyx_string_tab[850]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_COMMON __pyx_string_tab[851]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L0 __pyx_string_tab[852]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L1 __pyx_string_tab[853]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L10 __pyx_string_tab[854]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L11 __pyx_string_tab[855]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L2 __pyx_string_tab[856]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L3 __pyx_string_tab[857]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L4 __pyx_string_tab[858]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L5 __pyx_string_tab[859]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L6 __pyx_string_tab[860]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L7 __pyx_string_tab[861]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L8 __pyx_string_tab[862]
#define __pyx_n_u_DEV_NVLINK_SPEED_MBPS_L9 __pyx_string_tab[863]
#define __pyx_n_u_DEV_NVLINK_THROUGHPUT_DATA_RX __pyx_string_tab[864]
#define __pyx_n_u_DEV_NVLINK_THROUGHPUT_DATA_TX __pyx_string_tab[865]
#define __pyx_n_u_DEV_NVLINK_THROUGHPUT_RAW_RX __pyx_string_tab[866]
#define __pyx_n_u_DEV_NVLINK_THROUGHPUT_RAW_TX __pyx_string_tab[867]
#define __pyx_n_u_DEV_NVSWITCH_CONNECTED_LINK_COUN __pyx_string_tab[868]
#define __pyx_n_u_DEV_PCIE_COUNT_BAD_DLLP __pyx_string_tab[869]
#define __pyx_n_u_DEV_PCIE_COUNT_BAD_TLP __pyx_string_tab[870]
#define __pyx_n_u_DEV_PCIE_COUNT_CORRECTABLE_ERROR __pyx_string_tab[871]
#define __pyx_n_u_DEV_PCIE_COUNT_FATAL_ERROR __pyx_string_tab[872]
#define __pyx_n_u_DEV_PCIE_COUNT_LANE_ERROR __pyx_string_tab[873]
#define __pyx_n_u_DEV_PCIE_COUNT_LCRC_ERROR __pyx_string_tab[874]
#define __pyx_n_u_DEV_PCIE_COUNT_NAKS_RECEIVED __pyx_string_tab[875]
#define __pyx_n_u_DEV_PCIE_COUNT_NAKS_SENT __pyx_string_tab[876]
#define __pyx_n_u_DEV_PCIE_COUNT_NON_FATAL_ERROR __pyx_string_tab[877]
#define __pyx_n_u_DEV_PCIE_COUNT_RECEIVER_ERROR __pyx_string_tab[878]
#define __pyx_n_u_DEV_PCIE_COUNT_RX_BYTES __pyx_string_tab[879]
#define __pyx_n_u_DEV_PCIE_COUNT_TX_BYTES __pyx_string_tab[880]
#define __pyx_n_u_DEV_PCIE_COUNT_UNSUPPORTED_REQ __pyx_string_tab[881]
#define __pyx_n_u_DEV_PCIE_INBOUND_ATOMICS_MASK __pyx_string_tab[882]
#define __pyx_n_u_DEV_PCIE_L0_TO_RECOVERY_COUNTER __pyx_string_tab[883]
#define __pyx_n_u_DEV_PCIE_OUTBOUND_ATOMICS_MASK __pyx_string_tab[884]
#define __pyx_n_u_DEV_PCIE_REPLAY_COUNTER __pyx_string_tab[885]
#define __pyx_n_u_DEV_PCIE_REPLAY_ROLLOVER_COUNTER __pyx_string_tab[886]
#define __pyx_n_u_DEV_PERF_POLICY_BOARD_LIMIT __pyx_string_tab[887]
#define __pyx_n_u_DEV_PERF_POLICY_LOW_UTILIZATION __pyx_string_tab[888]
#define __pyx_n_u_DEV_PERF_POLICY_POWER __pyx_string_tab[889]
#define __pyx_n_u_DEV_PERF_POLICY_RELIABILITY __pyx_string_tab[890]
#define __pyx_n_u_DEV_PERF_POLICY_SYNC_BOOST __pyx_string_tab[891]
#define __pyx_n_u_DEV_PERF_POLICY_THERMAL __pyx_string_tab[892]
#define __pyx_n_u_DEV_PERF_POLICY_TOTAL_APP_CLOCKS __pyx_string_tab[893]
#define __pyx_n_u_DEV_PERF_POLICY_TOTAL_BASE_CLOCK __pyx_string_tab[894]
#define __pyx_n_u_DEV_POWER_AVERAGE __pyx_string_tab[895]
#define __pyx_n_u_DEV_POWER_CURRENT_LIMIT __pyx_string_tab[896]
#define __pyx_n_u_DEV_POWER_DEFAULT_LIMIT __pyx_string_tab[897]
#define __pyx_n_u_DEV_POWER_INSTANT __pyx_string_tab[898]
#define __pyx_n_u_DEV_POWER_MAX_LIMIT __pyx_string_tab[899]
#define __pyx_n_u_DEV_POWER_MIN_LIMIT __pyx_string_tab[900]
#define __pyx_n_u_DEV_POWER_REQUESTED_LIMIT __pyx_string_tab[901]
#define __pyx_n_u_DEV_POWER_SYNC_BALANCING_AF __pyx_string_tab[902]
#define __pyx_n_u_DEV_POWER_SYNC_BALANCING_FREQ __pyx_string_tab[903]
#define __pyx_n_u_DEV_REMAPPED_COR __pyx_string_tab[904]
#define __pyx_n_u_DEV_REMAPPED_FAILURE __pyx_string_tab[905]
#define __pyx_n_u_DEV_REMAPPED_PENDING __pyx_string_tab[906]
#define __pyx_n_u_DEV_REMAPPED_UNC __pyx_string_tab[907]
#define __pyx_n_u_DEV_RESET_STATUS __pyx_string_tab[908]
#define __pyx_n_u_DEV_RETIRED_DBE __pyx_string_tab[909]
#define __pyx_n_u_DEV_RETIRED_PENDING __pyx_string_tab[910]
#define __pyx_n_u_DEV_RETIRED_PENDING_DBE __pyx_string_tab[911]
#define __pyx_n_u_DEV_RETIRED_PENDING_SBE __pyx_string_tab[912]
#define __pyx_n_u_DEV_RETIRED_SBE __pyx_string_tab[913]
#define __pyx_n_u_DEV_TEMPERATURE_GPU_MAX_TLIMIT __pyx_string_tab[914]
#define __pyx_n_u_DEV_TEMPERATURE_MEM_MAX_TLIMIT __pyx_string_tab[915]
#define __pyx_n_u_DEV_TEMPERATURE_SHUTDOWN_TLIMIT __pyx_string_tab[916]
#define __pyx_n_u_DEV_TEMPERATURE_SLOWDOWN_TLIMIT __pyx_string_tab[917]
#define __pyx_n_u_DEV_TOTAL_ENERGY_CONSUMPTION __pyx_string_tab[918]
#define __pyx_n_u_DOUBLE __pyx_string_tab[919]
#define __pyx_n_u_DOUBLE_BIT_ECC_ERROR __pyx_string_tab[920]
#define __pyx_n_u_DRAM __pyx_string_tab[921]
#define __pyx_n_u_DRIVER_MCDM __pyx_string_tab[922]
#define __pyx_n_u_DRIVER_WDDM __pyx_string_tab[923]
#define __pyx_n_u_DRIVER_WDM __pyx_string_tab[924]
#define __pyx_n_u_DeprecatedError __pyx_string_tab[925]
#define __pyx_n_u_DetachGpuState __pyx_string_tab[926]
#define __pyx_n_u_DeviceAddressingModeType __pyx_string_tab[927]
#define __pyx_n_u_DeviceAddressingMode_v1 __pyx_string_tab[928]
#define __pyx_n_u_DeviceAddressingMode_v1___reduce __pyx_string_tab[929]
#define __pyx_n_u_DeviceAddressingMode_v1___setsta __pyx_string_tab[930]
#define __pyx_n_u_DeviceAddressingMode_v1_from_dat __pyx_string_tab[931]
#define __pyx_n_u_DeviceAddressingMode_v1_from_ptr __pyx_string_tab[932]
#define __pyx_n_u_DeviceAttributes __pyx_string_tab[933]
#define __pyx_n_u_DeviceAttributes___reduce_cython __pyx_string_tab[934]
#define __pyx_n_u_DeviceAttributes___setstate_cyth __pyx_string_tab[935]
#define __pyx_n_u_DeviceAttributes_from_data __pyx_string_tab[936]
#define __pyx_n_u_DeviceAttributes_from_ptr __pyx_string_tab[937]
#define __pyx_n_u_DeviceCapabilities_v1 __pyx_string_tab[938]
#define __pyx_n_u_DeviceCapabilities_v1___reduce_c __pyx_string_tab[939]
#define __pyx_n_u_DeviceCapabilities_v1___setstate __pyx_string_tab[940]
#define __pyx_n_u_DeviceCapabilities_v1_from_data __pyx_string_tab[941]
#define __pyx_n_u_DeviceCapabilities_v1_from_ptr __pyx_string_tab[942]
#define __pyx_n_u_DeviceCurrentClockFreqs_v1 __pyx_string_tab[943]
#define __pyx_n_u_DeviceCurrentClockFreqs_v1___red __pyx_string_tab[944]
#define __pyx_n_u_DeviceCurrentClockFreqs_v1___set __pyx_string_tab[945]
#define __pyx_n_u_DeviceCurrentClockFreqs_v1_from __pyx_string_tab[946]
#define __pyx_n_u_DeviceCurrentClockFreqs_v1_from_2 __pyx_string_tab[947]
#define __pyx_n_u_DeviceGpuRecoveryAction __pyx_string_tab[948]
#define __pyx_n_u_DevicePerfModes_v1 __pyx_string_tab[949]
#define __pyx_n_u_DevicePerfModes_v1___reduce_cyth __pyx_string_tab[950]
#define __pyx_n_u_DevicePerfModes_v1___setstate_cy __pyx_string_tab[951]
#define __pyx_n_u_DevicePerfModes_v1_from_data __pyx_string_tab[952]
#define __pyx_n_u_DevicePerfModes_v1_from_ptr __pyx_string_tab[953]
#define __pyx_n_u_DevicePowerMizerModes_v1 __pyx_string_tab[954]
#define __pyx_n_u_DevicePowerMizerModes_v1___reduc __pyx_string_tab[955]
#define __pyx_n_u_DevicePowerMizerModes_v1___setst __pyx_string_tab[956]
#define __pyx_n_u_DevicePowerMizerModes_v1_from_da __pyx_string_tab[957]
#define __pyx_n_u_DevicePowerMizerModes_v1_from_pt __pyx_string_tab[958]
#define __pyx_n_u_DeviceVgpuCapability __pyx_string_tab[959]
#define __pyx_n_u_DriverModel __pyx_string_tab[960]
#define __pyx_n_u_DriverNotLoadedError __pyx_string_tab[961]
#define __pyx_n_u_ENCODER_QUERY_AV1 __pyx_string_tab[962]
#define __pyx_n_u_ENCODER_QUERY_H264 __pyx_string_tab[963]
#define __pyx_n_u_ENCODER_QUERY_HEVC __pyx_string_tab[964]
#define __pyx_n_u_ENCODER_QUERY_UNKNOWN __pyx_string_tab[965]
#define __pyx_n_u_ENC_UTILIZATION_SAMPLES __pyx_string_tab[966]
#define __pyx_n_u_ERROR_ALREADY_INITIALIZED __pyx_string_tab[967]
#define __pyx_n_u_ERROR_ARGUMENT_VERSION_MISMATCH __pyx_string_tab[968]
#define __pyx_n_u_ERROR_CORRUPTED_INFOROM __pyx_string_tab[969]
#define __pyx_n_u_ERROR_DEPRECATED __pyx_string_tab[970]
#define __pyx_n_u_ERROR_DRIVER_NOT_LOADED __pyx_string_tab[971]
#define __pyx_n_u_ERROR_FREQ_NOT_SUPPORTED __pyx_string_tab[972]
#define __pyx_n_u_ERROR_FUNCTION_NOT_FOUND __pyx_string_tab[973]
#define __pyx_n_u_ERROR_GPU_IS_LOST __pyx_string_tab[974]
#define __pyx_n_u_ERROR_GPU_NOT_FOUND __pyx_string_tab[975]
#define __pyx_n_u_ERROR_INSUFFICIENT_POWER __pyx_string_tab[976]
#define __pyx_n_u_ERROR_INSUFFICIENT_RESOURCES __pyx_string_tab[977]
#define __pyx_n_u_ERROR_INSUFFICIENT_SIZE __pyx_string_tab[978]
#define __pyx_n_u_ERROR_INVALID_ARGUMENT __pyx_string_tab[979]
#define __pyx_n_u_ERROR_INVALID_STATE __pyx_string_tab[980]
#define __pyx_n_u_ERROR_IN_USE __pyx_string_tab[981]
#define __pyx_n_u_ERROR_IRQ_ISSUE __pyx_string_tab[982]
#define __pyx_n_u_ERROR_LIBRARY_NOT_FOUND __pyx_string_tab[983]
#define __pyx_n_u_ERROR_LIB_RM_VERSION_MISMATCH __pyx_string_tab[984]
#define __pyx_n_u_ERROR_MEMORY __pyx_string_tab[985]
#define __pyx_n_u_ERROR_NOT_FOUND __pyx_string_tab[986]
#define __pyx_n_u_ERROR_NOT_READY __pyx_string_tab[987]
#define __pyx_n_u_ERROR_NOT_SUPPORTED __pyx_string_tab[988]
#define __pyx_n_u_ERROR_NO_DATA __pyx_string_tab[989]
#define __pyx_n_u_ERROR_NO_PERMISSION __pyx_string_tab[990]
#define __pyx_n_u_ERROR_OPERATING_SYSTEM __pyx_string_tab[991]
#define __pyx_n_u_ERROR_RESET_REQUIRED __pyx_string_tab[992]
#define __pyx_n_u_ERROR_RESET_TYPE_NOT_SUPPORTED __pyx_string_tab[993]
#define __pyx_n_u_ERROR_TIMEOUT __pyx_string_tab[994]
#define __pyx_n_u_ERROR_UNINITIALIZED __pyx_string_tab[995]
#define __pyx_n_u_ERROR_UNKNOWN __pyx_string_tab[996]
#define __pyx_n_u_ERROR_VGPU_ECC_NOT_SUPPORTED __pyx_string_tab[997]
#define __pyx_n_u_EccCounterType __pyx_string_tab[998]
#define __pyx_n_u_EccSramErrorStatus_v1 __pyx_string_tab[999]
#define __pyx_n_u_EccSramErrorStatus_v1___reduce_c __pyx_string_tab[1000]
#define __pyx_n_u_EccSramErrorStatus_v1___setstate __pyx_string_tab[1001]
#define __pyx_n_u_EccSramErrorStatus_v1_from_data __pyx_string_tab[1002]
#define __pyx_n_u_EccSramErrorStatus_v1_from_ptr __pyx_string_tab[1003]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorCou __pyx_string_tab[1004]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorCou_2 __pyx_string_tab[1005]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorCou_3 __pyx_string_tab[1006]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorCou_4 __pyx_string_tab[1007]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorCou_5 __pyx_string_tab[1008]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorEnt __pyx_string_tab[1009]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorEnt_2 __pyx_string_tab[1010]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorEnt_3 __pyx_string_tab[1011]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorEnt_4 __pyx_string_tab[1012]
#define __pyx_n_u_EccSramUniqueUncorrectedErrorEnt_5 __pyx_string_tab[1013]
#define __pyx_n_u_Ellipsis __pyx_string_tab[1014]
#define __pyx_n_u_EnableState __pyx_string_tab[1015]
#define __pyx_n_u_EncoderSessionInfo __pyx_string_tab[1016]
#define __pyx_n_u_EncoderSessionInfo___reduce_cyth __pyx_string_tab[1017]
#define __pyx_n_u_EncoderSessionInfo___setstate_cy __pyx_string_tab[1018]
#define __pyx_n_u_EncoderSessionInfo_from_data __pyx_string_tab[1019]
#define __pyx_n_u_EncoderSessionInfo_from_ptr __pyx_string_tab[1020]
#define __pyx_n_u_EncoderType __pyx_string_tab[1021]
#define __pyx_n_u_EventData __pyx_string_tab[1022]
#define __pyx_n_u_EventData___reduce_cython __pyx_string_tab[1023]
#define __pyx_n_u_EventData___setstate_cython __pyx_string_tab[1024]
#define __pyx_n_u_EventData_from_data __pyx_string_tab[1025]
#define __pyx_n_u_EventData_from_ptr __pyx_string_tab[1026]
#define __pyx_n_u_ExcludedDeviceInfo __pyx_string_tab[1027]
#define __pyx_n_u_ExcludedDeviceInfo___reduce_cyth __pyx_string_tab[1028]
#define __pyx_n_u_ExcludedDeviceInfo___setstate_cy __pyx_string_tab[1029]
#define __pyx_n_u_ExcludedDeviceInfo_from_data __pyx_string_tab[1030]
#define __pyx_n_u_ExcludedDeviceInfo_from_ptr __pyx_string_tab[1031]
#define __pyx_n_u_FAN_FAILED __pyx_string_tab[1032]
#define __pyx_n_u_FAN_NORMAL __pyx_string_tab[1033]
#define __pyx_n_u_FBCSessionInfo __pyx_string_tab[1034]
#define __pyx_n_u_FBCSessionInfo___reduce_cython __pyx_string_tab[1035]
#define __pyx_n_u_FBCSessionInfo___setstate_cython __pyx_string_tab[1036]
#define __pyx_n_u_FBCSessionInfo_from_data __pyx_string_tab[1037]
#define __pyx_n_u_FBCSessionInfo_from_ptr __pyx_string_tab[1038]
#define __pyx_n_u_FBCSessionType __pyx_string_tab[1039]
#define __pyx_n_u_FBCStats __pyx_string_tab[1040]
#define __pyx_n_u_FBCStats___reduce_cython __pyx_string_tab[1041]
#define __pyx_n_u_FBCStats___setstate_cython __pyx_string_tab[1042]
#define __pyx_n_u_FBCStats_from_data __pyx_string_tab[1043]
#define __pyx_n_u_FBCStats_from_ptr __pyx_string_tab[1044]
#define __pyx_n_u_FEATURE_DISABLED __pyx_string_tab[1045]
#define __pyx_n_u_FEATURE_ENABLED __pyx_string_tab[1046]
#define __pyx_n_u_FI __pyx_string_tab[1047]
#define __pyx_n_u_FanSpeedInfo_v1 __pyx_string_tab[1048]
#define __pyx_n_u_FanSpeedInfo_v1___reduce_cython __pyx_string_tab[1049]
#define __pyx_n_u_FanSpeedInfo_v1___setstate_cytho __pyx_string_tab[1050]
#define __pyx_n_u_FanSpeedInfo_v1_from_data __pyx_string_tab[1051]
#define __pyx_n_u_FanSpeedInfo_v1_from_ptr __pyx_string_tab[1052]
#define __pyx_n_u_FanState __pyx_string_tab[1053]
#define __pyx_n_u_FieldValue __pyx_string_tab[1054]
#define __pyx_n_u_FieldValue___reduce_cython __pyx_string_tab[1055]
#define __pyx_n_u_FieldValue___setstate_cython __pyx_string_tab[1056]
#define __pyx_n_u_FieldValue_from_data __pyx_string_tab[1057]
#define __pyx_n_u_FieldValue_from_ptr __pyx_string_tab[1058]
#define __pyx_n_u_FreqNotSupportedError __pyx_string_tab[1059]
#define __pyx_n_u_FunctionNotFoundError __pyx_string_tab[1060]
#define __pyx_n_u_G781 __pyx_string_tab[1061]
#define __pyx_n_u_GAMING __pyx_string_tab[1062]
#define __pyx_n_u_GOM_ALL_ON __pyx_string_tab[1063]
#define __pyx_n_u_GOM_COMPUTE __pyx_string_tab[1064]
#define __pyx_n_u_GOM_LOW_DP __pyx_string_tab[1065]
#define __pyx_n_u_GPM_METRIC_ANY_TENSOR_UTIL __pyx_string_tab[1066]
#define __pyx_n_u_GPM_METRIC_C2C_DATA_RX_PER_SEC __pyx_string_tab[1067]
#define __pyx_n_u_GPM_METRIC_C2C_DATA_TX_PER_SEC __pyx_string_tab[1068]
#define __pyx_n_u_GPM_METRIC_C2C_LINK0_DATA_RX_PER __pyx_string_tab[1069]
#define __pyx_n_u_GPM_METRIC_C2C_LINK0_DATA_TX_PER __pyx_string_tab[1070]
#define __pyx_n_u_GPM_METRIC_C2C_LINK0_TOTAL_RX_PE __pyx_string_tab[1071]
#define __pyx_n_u_GPM_METRIC_C2C_LINK0_TOTAL_TX_PE __pyx_string_tab[1072]
#define __pyx_n_u_GPM_METRIC_C2C_LINK10_DATA_RX_PE __pyx_string_tab[1073]
#define __pyx_n_u_GPM_METRIC_C2C_LINK10_DATA_TX_PE __pyx_string_tab[1074]
#define __pyx_n_u_GPM_METRIC_C2C_LINK10_TOTAL_RX_P __pyx_string_tab[1075]
#define __pyx_n_u_GPM_METRIC_C2C_LINK10_TOTAL_TX_P __pyx_string_tab[1076]
#define __pyx_n_u_GPM_METRIC_C2C_LINK11_DATA_RX_PE __pyx_string_tab[1077]
#define __pyx_n_u_GPM_METRIC_C2C_LINK11_DATA_TX_PE __pyx_string_tab[1078]
#define __pyx_n_u_GPM_METRIC_C2C_LINK11_TOTAL_RX_P __pyx_string_tab[1079]
#define __pyx_n_u_GPM_METRIC_C2C_LINK11_TOTAL_TX_P __pyx_string_tab[1080]
#define __pyx_n_u_GPM_METRIC_C2C_LINK12_DATA_RX_PE __pyx_string_tab[1081]
#define __pyx_n_u_GPM_METRIC_C2C_LINK12_DATA_TX_PE __pyx_string_tab[1082]
#define __pyx_n_u_GPM_METRIC_C2C_LINK12_TOTAL_RX_P __pyx_string_tab[1083]
#define __pyx_n_u_GPM_METRIC_C2C_LINK12_TOTAL_TX_P __pyx_string_tab[1084]
#define __pyx_n_u_GPM_METRIC_C2C_LINK13_DATA_RX_PE __pyx_string_tab[1085]
#define __pyx_n_u_GPM_METRIC_C2C_LINK13_DATA_TX_PE __pyx_string_tab[1086]
#define __pyx_n_u_GPM_METRIC_C2C_LINK13_TOTAL_RX_P __pyx_string_tab[1087]
#define __pyx_n_u_GPM_METRIC_C2C_LINK13_TOTAL_TX_P __pyx_string_tab[1088]
#define __pyx_n_u_GPM_METRIC_C2C_LINK1_DATA_RX_PER __pyx_string_tab[1089]
#define __pyx_n_u_GPM_METRIC_C2C_LINK1_DATA_TX_PER __pyx_string_tab[1090]
#define __pyx_n_u_GPM_METRIC_C2C_LINK1_TOTAL_RX_PE __pyx_string_tab[1091]
#define __pyx_n_u_GPM_METRIC_C2C_LINK1_TOTAL_TX_PE __pyx_string_tab[1092]
#define __pyx_n_u_GPM_METRIC_C2C_LINK2_DATA_RX_PER __pyx_string_tab[1093]
#define __pyx_n_u_GPM_METRIC_C2C_LINK2_DATA_TX_PER __pyx_string_tab[1094]
#define __pyx_n_u_GPM_METRIC_C2C_LINK2_TOTAL_RX_PE __pyx_string_tab[1095]
#define __pyx_n_u_GPM_METRIC_C2C_LINK2_TOTAL_TX_PE __pyx_string_tab[1096]
#define __pyx_n_u_GPM_METRIC_C2C_LINK3_DATA_RX_PER __pyx_string_tab[1097]
#define __pyx_n_u_GPM_METRIC_C2C_LINK3_DATA_TX_PER __pyx_string_tab[1098]
#define __pyx_n_u_GPM_METRIC_C2C_LINK3_TOTAL_RX_PE __pyx_string_tab[1099]
#define __pyx_n_u_GPM_METRIC_C2C_LINK3_TOTAL_TX_PE __pyx_string_tab[1100]
#define __pyx_n_u_GPM_METRIC_C2C_LINK4_DATA_RX_PER __pyx_string_tab[1101]
#define __pyx_n_u_GPM_METRIC_C2C_LINK4_DATA_TX_PER __pyx_string_tab[1102]
#define __pyx_n_u_GPM_METRIC_C2C_LINK4_TOTAL_RX_PE __pyx_string_tab[1103]
#define __pyx_n_u_GPM_METRIC_C2C_LINK4_TOTAL_TX_PE __pyx_string_tab[1104]
#define __pyx_n_u_GPM_METRIC_C2C_LINK5_DATA_RX_PER __pyx_string_tab[1105]
#define __pyx_n_u_GPM_METRIC_C2C_LINK5_DATA_TX_PER __pyx_string_tab[1106]
#define __pyx_n_u_GPM_METRIC_C2C_LINK5_TOTAL_RX_PE __pyx_string_tab[1107]
#define __pyx_n_u_GPM_METRIC_C2C_LINK5_TOTAL_TX_PE __pyx_string_tab[1108]
#define __pyx_n_u_GPM_METRIC_C2C_LINK6_DATA_RX_PER __pyx_string_tab[1109]
#define __pyx_n_u_GPM_METRIC_C2C_LINK6_DATA_TX_PER __pyx_string_tab[1110]
#define __pyx_n_u_GPM_METRIC_C2C_LINK6_TOTAL_RX_PE __pyx_string_tab[1111]
#define __pyx_n_u_GPM_METRIC_C2C_LINK6_TOTAL_TX_PE __pyx_string_tab[1112]
#define __pyx_n_u_GPM_METRIC_C2C_LINK7_DATA_RX_PER __pyx_string_tab[1113]
#define __pyx_n_u_GPM_METRIC_C2C_LINK7_DATA_TX_PER __pyx_string_tab[1114]
#define __pyx_n_u_GPM_METRIC_C2C_LINK7_TOTAL_RX_PE __pyx_string_tab[1115]
#define __pyx_n_u_GPM_METRIC_C2C_LINK7_TOTAL_TX_PE __pyx_string_tab[1116]
#define __pyx_n_u_GPM_METRIC_C2C_LINK8_DATA_RX_PER __pyx_string_tab[1117]
#define __pyx_n_u_GPM_METRIC_C2C_LINK8_DATA_TX_PER __pyx_string_tab[1118]
#define __pyx_n_u_GPM_METRIC_C2C_LINK8_TOTAL_RX_PE __pyx_string_tab[1119]
#define __pyx_n_u_GPM_METRIC_C2C_LINK8_TOTAL_TX_PE __pyx_string_tab[1120]
#define __pyx_n_u_GPM_METRIC_C2C_LINK9_DATA_RX_PER __pyx_string_tab[1121]
#define __pyx_n_u_GPM_METRIC_C2C_LINK9_DATA_TX_PER __pyx_string_tab[1122]
#define __pyx_n_u_GPM_METRIC_C2C_LINK9_TOTAL_RX_PE __pyx_string_tab[1123]
#define __pyx_n_u_GPM_METRIC_C2C_LINK9_TOTAL_TX_PE __pyx_string_tab[1124]
#define __pyx_n_u_GPM_METRIC_C2C_TOTAL_RX_PER_SEC __pyx_string_tab[1125]
#define __pyx_n_u_GPM_METRIC_C2C_TOTAL_TX_PER_SEC __pyx_string_tab[1126]
#define __pyx_n_u_GPM_METRIC_DFMA_TENSOR_UTIL __pyx_string_tab[1127]
#define __pyx_n_u_GPM_METRIC_DRAM_BW_UTIL __pyx_string_tab[1128]
#define __pyx_n_u_GPM_METRIC_DRAM_CACHE_HIT __pyx_string_tab[1129]
#define __pyx_n_u_GPM_METRIC_DRAM_CACHE_MISS __pyx_string_tab[1130]
#define __pyx_n_u_GPM_METRIC_FP16_UTIL __pyx_string_tab[1131]
#define __pyx_n_u_GPM_METRIC_FP32_UTIL __pyx_string_tab[1132]
#define __pyx_n_u_GPM_METRIC_FP64_UTIL __pyx_string_tab[1133]
#define __pyx_n_u_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT __pyx_string_tab[1134]
#define __pyx_n_u_GPM_METRIC_GR0_CTXSW_CYCLES_ACTI __pyx_string_tab[1135]
#define __pyx_n_u_GPM_METRIC_GR0_CTXSW_CYCLES_ELAP __pyx_string_tab[1136]
#define __pyx_n_u_GPM_METRIC_GR0_CTXSW_CYCLES_PER __pyx_string_tab[1137]
#define __pyx_n_u_GPM_METRIC_GR0_CTXSW_REQUESTS __pyx_string_tab[1138]
#define __pyx_n_u_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT __pyx_string_tab[1139]
#define __pyx_n_u_GPM_METRIC_GR1_CTXSW_CYCLES_ACTI __pyx_string_tab[1140]
#define __pyx_n_u_GPM_METRIC_GR1_CTXSW_CYCLES_ELAP __pyx_string_tab[1141]
#define __pyx_n_u_GPM_METRIC_GR1_CTXSW_CYCLES_PER __pyx_string_tab[1142]
#define __pyx_n_u_GPM_METRIC_GR1_CTXSW_REQUESTS __pyx_string_tab[1143]
#define __pyx_n_u_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT __pyx_string_tab[1144]
#define __pyx_n_u_GPM_METRIC_GR2_CTXSW_CYCLES_ACTI __pyx_string_tab[1145]
#define __pyx_n_u_GPM_METRIC_GR2_CTXSW_CYCLES_ELAP __pyx_string_tab[1146]
#define __pyx_n_u_GPM_METRIC_GR2_CTXSW_CYCLES_PER __pyx_string_tab[1147]
#define __pyx_n_u_GPM_METRIC_GR2_CTXSW_REQUESTS __pyx_string_tab[1148]
#define __pyx_n_u_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT __pyx_string_tab[1149]
#define __pyx_n_u_GPM_METRIC_GR3_CTXSW_CYCLES_ACTI __pyx_string_tab[1150]
#define __pyx_n_u_GPM_METRIC_GR3_CTXSW_CYCLES_ELAP __pyx_string_tab[1151]
#define __pyx_n_u_GPM_METRIC_GR3_CTXSW_CYCLES_PER __pyx_string_tab[1152]
#define __pyx_n_u_GPM_METRIC_GR3_CTXSW_REQUESTS __pyx_string_tab[1153]
#define __pyx_n_u_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT __pyx_string_tab[1154]
#define __pyx_n_u_GPM_METRIC_GR4_CTXSW_CYCLES_ACTI __pyx_string_tab[1155]
#define __pyx_n_u_GPM_METRIC_GR4_CTXSW_CYCLES_ELAP __pyx_string_tab[1156]
#define __pyx_n_u_GPM_METRIC_GR4_CTXSW_CYCLES_PER __pyx_string_tab[1157]
#define __pyx_n_u_GPM_METRIC_GR4_CTXSW_REQUESTS __pyx_string_tab[1158]
#define __pyx_n_u_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT __pyx_string_tab[1159]
#define __pyx_n_u_GPM_METRIC_GR5_CTXSW_CYCLES_ACTI __pyx_string_tab[1160]
#define __pyx_n_u_GPM_METRIC_GR5_CTXSW_CYCLES_ELAP __pyx_string_tab[1161]
#define __pyx_n_u_GPM_METRIC_GR5_CTXSW_CYCLES_PER __pyx_string_tab[1162]
#define __pyx_n_u_GPM_METRIC_GR5_CTXSW_REQUESTS __pyx_string_tab[1163]
#define __pyx_n_u_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT __pyx_string_tab[1164]
#define __pyx_n_u_GPM_METRIC_GR6_CTXSW_CYCLES_ACTI __pyx_string_tab[1165]
#define __pyx_n_u_GPM_METRIC_GR6_CTXSW_CYCLES_ELAP __pyx_string_tab[1166]
#define __pyx_n_u_GPM_METRIC_GR6_CTXSW_CYCLES_PER __pyx_string_tab[1167]
#define __pyx_n_u_GPM_METRIC_GR6_CTXSW_REQUESTS __pyx_string_tab[1168]
#define __pyx_n_u_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT __pyx_string_tab[1169]
#define __pyx_n_u_GPM_METRIC_GR7_CTXSW_CYCLES_ACTI __pyx_string_tab[1170]
#define __pyx_n_u_GPM_METRIC_GR7_CTXSW_CYCLES_ELAP __pyx_string_tab[1171]
#define __pyx_n_u_GPM_METRIC_GR7_CTXSW_CYCLES_PER __pyx_string_tab[1172]
#define __pyx_n_u_GPM_METRIC_GR7_CTXSW_REQUESTS __pyx_string_tab[1173]
#define __pyx_n_u_GPM_METRIC_GRAPHICS_UTIL __pyx_string_tab[1174]
#define __pyx_n_u_GPM_METRIC_HMMA_TENSOR_UTIL __pyx_string_tab[1175]
#define __pyx_n_u_GPM_METRIC_HOSTMEM_CACHE_HIT __pyx_string_tab[1176]
#define __pyx_n_u_GPM_METRIC_HOSTMEM_CACHE_MISS __pyx_string_tab[1177]
#define __pyx_n_u_GPM_METRIC_IMMA_TENSOR_UTIL __pyx_string_tab[1178]
#define __pyx_n_u_GPM_METRIC_INTEGER_UTIL __pyx_string_tab[1179]
#define __pyx_n_u_GPM_METRIC_MAX __pyx_string_tab[1180]
#define __pyx_n_u_GPM_METRIC_NVDEC_0_UTIL __pyx_string_tab[1181]
#define __pyx_n_u_GPM_METRIC_NVDEC_1_UTIL __pyx_string_tab[1182]
#define __pyx_n_u_GPM_METRIC_NVDEC_2_UTIL __pyx_string_tab[1183]
#define __pyx_n_u_GPM_METRIC_NVDEC_3_UTIL __pyx_string_tab[1184]
#define __pyx_n_u_GPM_METRIC_NVDEC_4_UTIL __pyx_string_tab[1185]
#define __pyx_n_u_GPM_METRIC_NVDEC_5_UTIL __pyx_string_tab[1186]
#define __pyx_n_u_GPM_METRIC_NVDEC_6_UTIL __pyx_string_tab[1187]
#define __pyx_n_u_GPM_METRIC_NVDEC_7_UTIL __pyx_string_tab[1188]
#define __pyx_n_u_GPM_METRIC_NVENC_0_UTIL __pyx_string_tab[1189]
#define __pyx_n_u_GPM_METRIC_NVENC_1_UTIL __pyx_string_tab[1190]
#define __pyx_n_u_GPM_METRIC_NVENC_2_UTIL __pyx_string_tab[1191]
#define __pyx_n_u_GPM_METRIC_NVENC_3_UTIL __pyx_string_tab[1192]
#define __pyx_n_u_GPM_METRIC_NVJPG_0_UTIL __pyx_string_tab[1193]
#define __pyx_n_u_GPM_METRIC_NVJPG_1_UTIL __pyx_string_tab[1194]
#define __pyx_n_u_GPM_METRIC_NVJPG_2_UTIL __pyx_string_tab[1195]
#define __pyx_n_u_GPM_METRIC_NVJPG_3_UTIL __pyx_string_tab[1196]
#define __pyx_n_u_GPM_METRIC_NVJPG_4_UTIL __pyx_string_tab[1197]
#define __pyx_n_u_GPM_METRIC_NVJPG_5_UTIL __pyx_string_tab[1198]
#define __pyx_n_u_GPM_METRIC_NVJPG_6_UTIL __pyx_string_tab[1199]
#define __pyx_n_u_GPM_METRIC_NVJPG_7_UTIL __pyx_string_tab[1200]
#define __pyx_n_u_GPM_METRIC_NVLINK_L0_RX_PER_SEC __pyx_string_tab[1201]
#define __pyx_n_u_GPM_METRIC_NVLINK_L0_TX_PER_SEC __pyx_string_tab[1202]
#define __pyx_n_u_GPM_METRIC_NVLINK_L10_RX_PER_SEC __pyx_string_tab[1203]
#define __pyx_n_u_GPM_METRIC_NVLINK_L10_TX_PER_SEC __pyx_string_tab[1204]
#define __pyx_n_u_GPM_METRIC_NVLINK_L11_RX_PER_SEC __pyx_string_tab[1205]
#define __pyx_n_u_GPM_METRIC_NVLINK_L11_TX_PER_SEC __pyx_string_tab[1206]
#define __pyx_n_u_GPM_METRIC_NVLINK_L12_RX_PER_SEC __pyx_string_tab[1207]
#define __pyx_n_u_GPM_METRIC_NVLINK_L12_TX_PER_SEC __pyx_string_tab[1208]
#define __pyx_n_u_GPM_METRIC_NVLINK_L13_RX_PER_SEC __pyx_string_tab[1209]
#define __pyx_n_u_GPM_METRIC_NVLINK_L13_TX_PER_SEC __pyx_string_tab[1210]
#define __pyx_n_u_GPM_METRIC_NVLINK_L14_RX_PER_SEC __pyx_string_tab[1211]
#define __pyx_n_u_GPM_METRIC_NVLINK_L14_TX_PER_SEC __pyx_string_tab[1212]
#define __pyx_n_u_GPM_METRIC_NVLINK_L15_RX_PER_SEC __pyx_string_tab[1213]
#define __pyx_n_u_GPM_METRIC_NVLINK_L15_TX_PER_SEC __pyx_string_tab[1214]
#define __pyx_n_u_GPM_METRIC_NVLINK_L16_RX_PER_SEC __pyx_string_tab[1215]
#define __pyx_n_u_GPM_METRIC_NVLINK_L16_TX_PER_SEC __pyx_string_tab[1216]
#define __pyx_n_u_GPM_METRIC_NVLINK_L17_RX_PER_SEC __pyx_string_tab[1217]
#define __pyx_n_u_GPM_METRIC_NVLINK_L17_TX_PER_SEC __pyx_string_tab[1218]
#define __pyx_n_u_GPM_METRIC_NVLINK_L1_RX_PER_SEC __pyx_string_tab[1219]
#define __pyx_n_u_GPM_METRIC_NVLINK_L1_TX_PER_SEC __pyx_string_tab[1220]
#define __pyx_n_u_GPM_METRIC_NVLINK_L2_RX_PER_SEC __pyx_string_tab[1221]
#define __pyx_n_u_GPM_METRIC_NVLINK_L2_TX_PER_SEC __pyx_string_tab[1222]
#define __pyx_n_u_GPM_METRIC_NVLINK_L3_RX_PER_SEC __pyx_string_tab[1223]
#define __pyx_n_u_GPM_METRIC_NVLINK_L3_TX_PER_SEC __pyx_string_tab[1224]
#define __pyx_n_u_GPM_METRIC_NVLINK_L4_RX_PER_SEC __pyx_string_tab[1225]
#define __pyx_n_u_GPM_METRIC_NVLINK_L4_TX_PER_SEC __pyx_string_tab[1226]
#define __pyx_n_u_GPM_METRIC_NVLINK_L5_RX_PER_SEC __pyx_string_tab[1227]
#define __pyx_n_u_GPM_METRIC_NVLINK_L5_TX_PER_SEC __pyx_string_tab[1228]
#define __pyx_n_u_GPM_METRIC_NVLINK_L6_RX_PER_SEC __pyx_string_tab[1229]
#define __pyx_n_u_GPM_METRIC_NVLINK_L6_TX_PER_SEC __pyx_string_tab[1230]
#define __pyx_n_u_GPM_METRIC_NVLINK_L7_RX_PER_SEC __pyx_string_tab[1231]
#define __pyx_n_u_GPM_METRIC_NVLINK_L7_TX_PER_SEC __pyx_string_tab[1232]
#define __pyx_n_u_GPM_METRIC_NVLINK_L8_RX_PER_SEC __pyx_string_tab[1233]
#define __pyx_n_u_GPM_METRIC_NVLINK_L8_TX_PER_SEC __pyx_string_tab[1234]
#define __pyx_n_u_GPM_METRIC_NVLINK_L9_RX_PER_SEC __pyx_string_tab[1235]
#define __pyx_n_u_GPM_METRIC_NVLINK_L9_TX_PER_SEC __pyx_string_tab[1236]
#define __pyx_n_u_GPM_METRIC_NVLINK_TOTAL_RX_PER_S __pyx_string_tab[1237]
#define __pyx_n_u_GPM_METRIC_NVLINK_TOTAL_TX_PER_S __pyx_string_tab[1238]
#define __pyx_n_u_GPM_METRIC_NVOFA_0_UTIL __pyx_string_tab[1239]
#define __pyx_n_u_GPM_METRIC_NVOFA_1_UTIL __pyx_string_tab[1240]
#define __pyx_n_u_GPM_METRIC_PCIE_RX_PER_SEC __pyx_string_tab[1241]
#define __pyx_n_u_GPM_METRIC_PCIE_TX_PER_SEC __pyx_string_tab[1242]
#define __pyx_n_u_GPM_METRIC_PEERMEM_CACHE_HIT __pyx_string_tab[1243]
#define __pyx_n_u_GPM_METRIC_PEERMEM_CACHE_MISS __pyx_string_tab[1244]
#define __pyx_n_u_GPM_METRIC_SM_OCCUPANCY __pyx_string_tab[1245]
#define __pyx_n_u_GPM_METRIC_SM_UTIL __pyx_string_tab[1246]
#define __pyx_n_u_GPU __pyx_string_tab[1247]
#define __pyx_n_u_GPU_INTERNAL __pyx_string_tab[1248]
#define __pyx_n_u_GPU_RECOVERY_ACTION_DRAIN_AND_RE __pyx_string_tab[1249]
#define __pyx_n_u_GPU_RECOVERY_ACTION_DRAIN_P2P __pyx_string_tab[1250]
#define __pyx_n_u_GPU_RECOVERY_ACTION_GPU_RESET __pyx_string_tab[1251]
#define __pyx_n_u_GPU_RECOVERY_ACTION_NODE_REBOOT __pyx_string_tab[1252]
#define __pyx_n_u_GPU_RECOVERY_ACTION_NONE __pyx_string_tab[1253]
#define __pyx_n_u_GPU_UTILIZATION_DOMAIN_BUS __pyx_string_tab[1254]
#define __pyx_n_u_GPU_UTILIZATION_DOMAIN_FB __pyx_string_tab[1255]
#define __pyx_n_u_GPU_UTILIZATION_DOMAIN_GPU __pyx_string_tab[1256]
#define __pyx_n_u_GPU_UTILIZATION_DOMAIN_VID __pyx_string_tab[1257]
#define __pyx_n_u_GPU_UTILIZATION_SAMPLES __pyx_string_tab[1258]
#define __pyx_n_u_GREEN __pyx_string_tab[1259]
#define __pyx_n_u_GpmMetricId __pyx_string_tab[1260]
#define __pyx_n_u_GpmSupport __pyx_string_tab[1261]
#define __pyx_n_u_GpmSupport___reduce_cython __pyx_string_tab[1262]
#define __pyx_n_u_GpmSupport___setstate_cython __pyx_string_tab[1263]
#define __pyx_n_u_GpmSupport_from_data __pyx_string_tab[1264]
#define __pyx_n_u_GpmSupport_from_ptr __pyx_string_tab[1265]
#define __pyx_n_u_GpuDynamicPstatesInfo __pyx_string_tab[1266]
#define __pyx_n_u_GpuDynamicPstatesInfo___reduce_c __pyx_string_tab[1267]
#define __pyx_n_u_GpuDynamicPstatesInfo___setstate __pyx_string_tab[1268]
#define __pyx_n_u_GpuDynamicPstatesInfo_from_data __pyx_string_tab[1269]
#define __pyx_n_u_GpuDynamicPstatesInfo_from_ptr __pyx_string_tab[1270]
#define __pyx_n_u_GpuFabricInfo_v3 __pyx_string_tab[1271]
#define __pyx_n_u_GpuFabricInfo_v3___reduce_cython __pyx_string_tab[1272]
#define __pyx_n_u_GpuFabricInfo_v3___setstate_cyth __pyx_string_tab[1273]
#define __pyx_n_u_GpuFabricInfo_v3_from_data __pyx_string_tab[1274]
#define __pyx_n_u_GpuFabricInfo_v3_from_ptr __pyx_string_tab[1275]
#define __pyx_n_u_GpuInstanceInfo __pyx_string_tab[1276]
#define __pyx_n_u_GpuInstanceInfo___reduce_cython __pyx_string_tab[1277]
#define __pyx_n_u_GpuInstanceInfo___setstate_cytho __pyx_string_tab[1278]
#define __pyx_n_u_GpuInstanceInfo_from_data __pyx_string_tab[1279]
#define __pyx_n_u_GpuInstanceInfo_from_ptr __pyx_string_tab[1280]
#define __pyx_n_u_GpuInstancePlacement __pyx_string_tab[1281]
#define __pyx_n_u_GpuInstancePlacement___reduce_cy __pyx_string_tab[1282]
#define __pyx_n_u_GpuInstancePlacement___setstate __pyx_string_tab[1283]
#define __pyx_n_u_GpuInstancePlacement_from_data __pyx_string_tab[1284]
#define __pyx_n_u_GpuInstancePlacement_from_ptr __pyx_string_tab[1285]
#define __pyx_n_u_GpuInstanceProfileInfo_v2 __pyx_string_tab[1286]
#define __pyx_n_u_GpuInstanceProfileInfo_v2___redu __pyx_string_tab[1287]
#define __pyx_n_u_GpuInstanceProfileInfo_v2___sets __pyx_string_tab[1288]
#define __pyx_n_u_GpuInstanceProfileInfo_v2_from_d __pyx_string_tab[1289]
#define __pyx_n_u_GpuInstanceProfileInfo_v2_from_p __pyx_string_tab[1290]
#define __pyx_n_u_GpuInstanceProfileInfo_v3 __pyx_string_tab[1291]
#define __pyx_n_u_GpuInstanceProfileInfo_v3___redu __pyx_string_tab[1292]
#define __pyx_n_u_GpuInstanceProfileInfo_v3___sets __pyx_string_tab[1293]
#define __pyx_n_u_GpuInstanceProfileInfo_v3_from_d __pyx_string_tab[1294]
#define __pyx_n_u_GpuInstanceProfileInfo_v3_from_p __pyx_string_tab[1295]
#define __pyx_n_u_GpuIsLostError __pyx_string_tab[1296]
#define __pyx_n_u_GpuNotFoundError __pyx_string_tab[1297]
#define __pyx_n_u_GpuOperationMode __pyx_string_tab[1298]
#define __pyx_n_u_GpuP2PCapsIndex __pyx_string_tab[1299]
#define __pyx_n_u_GpuP2PStatus __pyx_string_tab[1300]
#define __pyx_n_u_GpuThermalSettings __pyx_string_tab[1301]
#define __pyx_n_u_GpuThermalSettings___reduce_cyth __pyx_string_tab[1302]
#define __pyx_n_u_GpuThermalSettings___setstate_cy __pyx_string_tab[1303]
#define __pyx_n_u_GpuThermalSettings_from_data __pyx_string_tab[1304]
#define __pyx_n_u_GpuThermalSettings_from_ptr __pyx_string_tab[1305]
#define __pyx_n_u_GpuTopologyLevel __pyx_string_tab[1306]
#define __pyx_n_u_GpuUtilizationDomainId __pyx_string_tab[1307]
#define __pyx_n_u_GpuVirtualizationMode __pyx_string_tab[1308]
#define __pyx_n_u_GridLicensableFeature __pyx_string_tab[1309]
#define __pyx_n_u_GridLicensableFeature___reduce_c __pyx_string_tab[1310]
#define __pyx_n_u_GridLicensableFeature___setstate __pyx_string_tab[1311]
#define __pyx_n_u_GridLicensableFeature_from_data __pyx_string_tab[1312]
#define __pyx_n_u_GridLicensableFeature_from_ptr __pyx_string_tab[1313]
#define __pyx_n_u_GridLicensableFeatures __pyx_string_tab[1314]
#define __pyx_n_u_GridLicensableFeatures___reduce __pyx_string_tab[1315]
#define __pyx_n_u_GridLicensableFeatures___setstat __pyx_string_tab[1316]
#define __pyx_n_u_GridLicensableFeatures_from_data __pyx_string_tab[1317]
#define __pyx_n_u_GridLicensableFeatures_from_ptr __pyx_string_tab[1318]
#define __pyx_n_u_GridLicenseExpiry __pyx_string_tab[1319]
#define __pyx_n_u_GridLicenseExpiry___reduce_cytho __pyx_string_tab[1320]
#define __pyx_n_u_GridLicenseExpiry___setstate_cyt __pyx_string_tab[1321]
#define __pyx_n_u_GridLicenseExpiry_from_data __pyx_string_tab[1322]
#define __pyx_n_u_GridLicenseExpiry_from_ptr __pyx_string_tab[1323]
#define __pyx_n_u_GridLicenseFeatureCode __pyx_string_tab[1324]
#define __pyx_n_u_HIBERNATE __pyx_string_tab[1325]
#define __pyx_n_u_HOST_VGPU __pyx_string_tab[1326]
#define __pyx_n_u_HOST_VSGA __pyx_string_tab[1327]
#define __pyx_n_u_HWENC __pyx_string_tab[1328]
#define __pyx_n_u_HostVgpuMode __pyx_string_tab[1329]
#define __pyx_n_u_HwbcEntry __pyx_string_tab[1330]
#define __pyx_n_u_HwbcEntry___reduce_cython __pyx_string_tab[1331]
#define __pyx_n_u_HwbcEntry___setstate_cython __pyx_string_tab[1332]
#define __pyx_n_u_HwbcEntry_from_data __pyx_string_tab[1333]
#define __pyx_n_u_HwbcEntry_from_ptr __pyx_string_tab[1334]
#define __pyx_n_u_I __pyx_string_tab[1335]
#define __pyx_n_u_INFOROM_COUNT __pyx_string_tab[1336]
#define __pyx_n_u_INFOROM_DEN __pyx_string_tab[1337]
#define __pyx_n_u_INFOROM_ECC __pyx_string_tab[1338]
#define __pyx_n_u_INFOROM_OEM __pyx_string_tab[1339]
#define __pyx_n_u_INFOROM_POWER __pyx_string_tab[1340]
#define __pyx_n_u_InUseError __pyx_string_tab[1341]
#define __pyx_n_u_InforomObject __pyx_string_tab[1342]
#define __pyx_n_u_InsufficientPowerError __pyx_string_tab[1343]
#define __pyx_n_u_InsufficientResourcesError __pyx_string_tab[1344]
#define __pyx_n_u_InsufficientSizeError __pyx_string_tab[1345]
#define __pyx_n_u_IntEnum __pyx_string_tab[1346]
#define __pyx_n_u_IntEnum_2 __pyx_string_tab[1347]
#define __pyx_n_u_IntNvLinkDeviceType __pyx_string_tab[1348]
#define __pyx_n_u_InvalidArgumentError __pyx_string_tab[1349]
#define __pyx_n_u_InvalidStateError __pyx_string_tab[1350]
#define __pyx_n_u_IrqIssueError __pyx_string_tab[1351]
#define __pyx_n_u_JPG_UTILIZATION_SAMPLES __pyx_string_tab[1352]
#define __pyx_n_u_L __pyx_string_tab[1353]
#define __pyx_n_u_L1_CACHE __pyx_string_tab[1354]
#define __pyx_n_u_L2_CACHE __pyx_string_tab[1355]
#define __pyx_n_u_LIVE __pyx_string_tab[1356]
#define __pyx_n_u_LM64 __pyx_string_tab[1357]
#define __pyx_n_u_LM89 __pyx_string_tab[1358]
#define __pyx_n_u_LM99 __pyx_string_tab[1359]
#define __pyx_n_u_LedColor __pyx_string_tab[1360]
#define __pyx_n_u_LedState __pyx_string_tab[1361]
#define __pyx_n_u_LedState___reduce_cython __pyx_string_tab[1362]
#define __pyx_n_u_LedState___setstate_cython __pyx_string_tab[1363]
#define __pyx_n_u_LedState_from_data __pyx_string_tab[1364]
#define __pyx_n_u_LedState_from_ptr __pyx_string_tab[1365]
#define __pyx_n_u_LibRmVersionMismatchError __pyx_string_tab[1366]
#define __pyx_n_u_LibraryNotFoundError __pyx_string_tab[1367]
#define __pyx_n_u_MAX __pyx_string_tab[1368]
#define __pyx_n_u_MAX1617 __pyx_string_tab[1369]
#define __pyx_n_u_MAX6649 __pyx_string_tab[1370]
#define __pyx_n_u_MAX6649R __pyx_string_tab[1371]
#define __pyx_n_u_MEMORY __pyx_string_tab[1372]
#define __pyx_n_u_MEMORY_CLK_SAMPLES __pyx_string_tab[1373]
#define __pyx_n_u_MEMORY_UTILIZATION_SAMPLES __pyx_string_tab[1374]
#define __pyx_n_u_MODULE_POWER_SAMPLES __pyx_string_tab[1375]
#define __pyx_n_u_MULTIPLE_SINGLE_BIT_ECC_ERRORS __pyx_string_tab[1376]
#define __pyx_n_u_MarginTemperature_v1 __pyx_string_tab[1377]
#define __pyx_n_u_MarginTemperature_v1___reduce_cy __pyx_string_tab[1378]
#define __pyx_n_u_MarginTemperature_v1___setstate __pyx_string_tab[1379]
#define __pyx_n_u_MarginTemperature_v1_from_data __pyx_string_tab[1380]
#define __pyx_n_u_MarginTemperature_v1_from_ptr __pyx_string_tab[1381]
#define __pyx_n_u_Memory __pyx_string_tab[1382]
#define __pyx_n_u_MemoryError __pyx_string_tab[1383]
#define __pyx_n_u_MemoryErrorType __pyx_string_tab[1384]
#define __pyx_n_u_MemoryLocation __pyx_string_tab[1385]
#define __pyx_n_u_Memory___reduce_cython __pyx_string_tab[1386]
#define __pyx_n_u_Memory___setstate_cython __pyx_string_tab[1387]
#define __pyx_n_u_Memory_from_data __pyx_string_tab[1388]
#define __pyx_n_u_Memory_from_ptr __pyx_string_tab[1389]
#define __pyx_n_u_Memory_v2 __pyx_string_tab[1390]
#define __pyx_n_u_Memory_v2___reduce_cython __pyx_string_tab[1391]
#define __pyx_n_u_Memory_v2___setstate_cython __pyx_string_tab[1392]
#define __pyx_n_u_Memory_v2_from_data __pyx_string_tab[1393]
#define __pyx_n_u_Memory_v2_from_ptr __pyx_string_tab[1394]
#define __pyx_n_u_NODE __pyx_string_tab[1395]
#define __pyx_n_u_NONE __pyx_string_tab[1396]
#define __pyx_n_u_NON_SRIOV __pyx_string_tab[1397]
#define __pyx_n_u_NVIDIA_RTX __pyx_string_tab[1398]
#define __pyx_n_u_NVLINK_CAP_COUNT __pyx_string_tab[1399]
#define __pyx_n_u_NVLINK_CAP_P2P_ATOMICS __pyx_string_tab[1400]
#define __pyx_n_u_NVLINK_CAP_P2P_SUPPORTED __pyx_string_tab[1401]
#define __pyx_n_u_NVLINK_CAP_SLI_BRIDGE __pyx_string_tab[1402]
#define __pyx_n_u_NVLINK_CAP_SYSMEM_ACCESS __pyx_string_tab[1403]
#define __pyx_n_u_NVLINK_CAP_SYSMEM_ATOMICS __pyx_string_tab[1404]
#define __pyx_n_u_NVLINK_CAP_VALID __pyx_string_tab[1405]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_ALL __pyx_string_tab[1406]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_FLUSH __pyx_string_tab[1407]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_NOP __pyx_string_tab[1408]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_NRATOM __pyx_string_tab[1409]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_RATOM __pyx_string_tab[1410]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_READ __pyx_string_tab[1411]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_RESPDAT __pyx_string_tab[1412]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_RESPNOD __pyx_string_tab[1413]
#define __pyx_n_u_NVLINK_COUNTER_PKTFILTER_WRITE __pyx_string_tab[1414]
#define __pyx_n_u_NVLINK_COUNTER_UNIT_BYTES __pyx_string_tab[1415]
#define __pyx_n_u_NVLINK_COUNTER_UNIT_COUNT __pyx_string_tab[1416]
#define __pyx_n_u_NVLINK_COUNTER_UNIT_CYCLES __pyx_string_tab[1417]
#define __pyx_n_u_NVLINK_COUNTER_UNIT_PACKETS __pyx_string_tab[1418]
#define __pyx_n_u_NVLINK_COUNTER_UNIT_RESERVED __pyx_string_tab[1419]
#define __pyx_n_u_NVLINK_DEVICE_TYPE_GPU __pyx_string_tab[1420]
#define __pyx_n_u_NVLINK_DEVICE_TYPE_IBMNPU __pyx_string_tab[1421]
#define __pyx_n_u_NVLINK_DEVICE_TYPE_SWITCH __pyx_string_tab[1422]
#define __pyx_n_u_NVLINK_DEVICE_TYPE_UNKNOWN __pyx_string_tab[1423]
#define __pyx_n_u_NVLINK_ERROR_COUNT __pyx_string_tab[1424]
#define __pyx_n_u_NVLINK_ERROR_DL_CRC_DATA __pyx_string_tab[1425]
#define __pyx_n_u_NVLINK_ERROR_DL_CRC_FLIT __pyx_string_tab[1426]
#define __pyx_n_u_NVLINK_ERROR_DL_ECC_DATA __pyx_string_tab[1427]
#define __pyx_n_u_NVLINK_ERROR_DL_RECOVERY __pyx_string_tab[1428]
#define __pyx_n_u_NVLINK_ERROR_DL_REPLAY __pyx_string_tab[1429]
#define __pyx_n_u_NVLINK_MAX_LINKS __pyx_string_tab[1430]
#define __pyx_n_u_NVSYSCON_CANOAS __pyx_string_tab[1431]
#define __pyx_n_u_NVSYSCON_E551 __pyx_string_tab[1432]
#define __pyx_n_u_NoDataError __pyx_string_tab[1433]
#define __pyx_n_u_NoPermissionError __pyx_string_tab[1434]
#define __pyx_n_u_NotFoundError __pyx_string_tab[1435]
#define __pyx_n_u_NotReadyError __pyx_string_tab[1436]
#define __pyx_n_u_NotSupportedError __pyx_string_tab[1437]
#define __pyx_n_u_NvLinkCapability __pyx_string_tab[1438]
#define __pyx_n_u_NvLinkErrorCounter __pyx_string_tab[1439]
#define __pyx_n_u_NvLinkInfo_v2 __pyx_string_tab[1440]
#define __pyx_n_u_NvLinkInfo_v2___reduce_cython __pyx_string_tab[1441]
#define __pyx_n_u_NvLinkInfo_v2___setstate_cython __pyx_string_tab[1442]
#define __pyx_n_u_NvLinkInfo_v2_from_data __pyx_string_tab[1443]
#define __pyx_n_u_NvLinkInfo_v2_from_ptr __pyx_string_tab[1444]
#define __pyx_n_u_NvLinkUtilizationCountPktTypes __pyx_string_tab[1445]
#define __pyx_n_u_NvLinkUtilizationCountUnits __pyx_string_tab[1446]
#define __pyx_n_u_NvlinkFirmwareInfo __pyx_string_tab[1447]
#define __pyx_n_u_NvlinkFirmwareInfo___reduce_cyth __pyx_string_tab[1448]
#define __pyx_n_u_NvlinkFirmwareInfo___setstate_cy __pyx_string_tab[1449]
#define __pyx_n_u_NvlinkFirmwareInfo_from_data __pyx_string_tab[1450]
#define __pyx_n_u_NvlinkFirmwareInfo_from_ptr __pyx_string_tab[1451]
#define __pyx_n_u_NvlinkFirmwareVersion __pyx_string_tab[1452]
#define __pyx_n_u_NvlinkFirmwareVersion___reduce_c __pyx_string_tab[1453]
#define __pyx_n_u_NvlinkFirmwareVersion___setstate __pyx_string_tab[1454]
#define __pyx_n_u_NvlinkFirmwareVersion_from_data __pyx_string_tab[1455]
#define __pyx_n_u_NvlinkFirmwareVersion_from_ptr __pyx_string_tab[1456]
#define __pyx_n_u_NvlinkGetBwMode_v1 __pyx_string_tab[1457]
#define __pyx_n_u_NvlinkGetBwMode_v1___reduce_cyth __pyx_string_tab[1458]
#define __pyx_n_u_NvlinkGetBwMode_v1___setstate_cy __pyx_string_tab[1459]
#define __pyx_n_u_NvlinkGetBwMode_v1_from_data __pyx_string_tab[1460]
#define __pyx_n_u_NvlinkGetBwMode_v1_from_ptr __pyx_string_tab[1461]
#define __pyx_n_u_NvlinkSetBwMode_v1 __pyx_string_tab[1462]
#define __pyx_n_u_NvlinkSetBwMode_v1___reduce_cyth __pyx_string_tab[1463]
#define __pyx_n_u_NvlinkSetBwMode_v1___setstate_cy __pyx_string_tab[1464]
#define __pyx_n_u_NvlinkSetBwMode_v1_from_data __pyx_string_tab[1465]
#define __pyx_n_u_NvlinkSetBwMode_v1_from_ptr __pyx_string_tab[1466]
#define __pyx_n_u_NvlinkSupportedBwModes_v1 __pyx_string_tab[1467]
#define __pyx_n_u_NvlinkSupportedBwModes_v1___redu __pyx_string_tab[1468]
#define __pyx_n_u_NvlinkSupportedBwModes_v1___sets __pyx_string_tab[1469]
#define __pyx_n_u_NvlinkSupportedBwModes_v1_from_d __pyx_string_tab[1470]
#define __pyx_n_u_NvlinkSupportedBwModes_v1_from_p __pyx_string_tab[1471]
#define __pyx_n_u_NvlinkVersion __pyx_string_tab[1472]
#define __pyx_n_u_NvmlError __pyx_string_tab[1473]
#define __pyx_n_u_NvmlError___init __pyx_string_tab[1474]
#define __pyx_n_u_NvmlError___reduce __pyx_string_tab[1475]
#define __pyx_n_u_OFA_UTILIZATION_SAMPLES __pyx_string_tab[1476]
#define __pyx_n_u_OS __pyx_string_tab[1477]
#define __pyx_n_u_OperatingSystemError __pyx_string_tab[1478]
#define __pyx_n_u_P2P_CAPS_INDEX_ATOMICS __pyx_string_tab[1479]
#define __pyx_n_u_P2P_CAPS_INDEX_NVLINK __pyx_string_tab[1480]
#define __pyx_n_u_P2P_CAPS_INDEX_PCI __pyx_string_tab[1481]
#define __pyx_n_u_P2P_CAPS_INDEX_PROP __pyx_string_tab[1482]
#define __pyx_n_u_P2P_CAPS_INDEX_READ __pyx_string_tab[1483]
#define __pyx_n_u_P2P_CAPS_INDEX_UNKNOWN __pyx_string_tab[1484]
#define __pyx_n_u_P2P_CAPS_INDEX_WRITE __pyx_string_tab[1485]
#define __pyx_n_u_P2P_STATUS_CHIPSET_NOT_SUPPORED __pyx_string_tab[1486]
#define __pyx_n_u_P2P_STATUS_CHIPSET_NOT_SUPPORTED __pyx_string_tab[1487]
#define __pyx_n_u_P2P_STATUS_DISABLED_BY_REGKEY __pyx_string_tab[1488]
#define __pyx_n_u_P2P_STATUS_GPU_NOT_SUPPORTED __pyx_string_tab[1489]
#define __pyx_n_u_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPP __pyx_string_tab[1490]
#define __pyx_n_u_P2P_STATUS_NOT_SUPPORTED __pyx_string_tab[1491]
#define __pyx_n_u_P2P_STATUS_OK __pyx_string_tab[1492]
#define __pyx_n_u_P2P_STATUS_UNKNOWN __pyx_string_tab[1493]
#define __pyx_n_u_PASSTHROUGH __pyx_string_tab[1494]
#define __pyx_n_u_PCIE_LINK_KEEP __pyx_string_tab[1495]
#define __pyx_n_u_PCIE_LINK_SHUT_DOWN __pyx_string_tab[1496]
#define __pyx_n_u_PCIE_UTIL_COUNT __pyx_string_tab[1497]
#define __pyx_n_u_PCIE_UTIL_RX_BYTES __pyx_string_tab[1498]
#define __pyx_n_u_PCIE_UTIL_TX_BYTES __pyx_string_tab[1499]
#define __pyx_n_u_PERF_POLICY_BOARD_LIMIT __pyx_string_tab[1500]
#define __pyx_n_u_PERF_POLICY_COUNT __pyx_string_tab[1501]
#define __pyx_n_u_PERF_POLICY_LOW_UTILIZATION __pyx_string_tab[1502]
#define __pyx_n_u_PERF_POLICY_POWER __pyx_string_tab[1503]
#define __pyx_n_u_PERF_POLICY_RELIABILITY __pyx_string_tab[1504]
#define __pyx_n_u_PERF_POLICY_SYNC_BOOST __pyx_string_tab[1505]
#define __pyx_n_u_PERF_POLICY_THERMAL __pyx_string_tab[1506]
#define __pyx_n_u_PERF_POLICY_TOTAL_APP_CLOCKS __pyx_string_tab[1507]
#define __pyx_n_u_PERF_POLICY_TOTAL_BASE_CLOCKS __pyx_string_tab[1508]
#define __pyx_n_u_POWER_PROFILE_BALANCED __pyx_string_tab[1509]
#define __pyx_n_u_POWER_PROFILE_COMPUTE __pyx_string_tab[1510]
#define __pyx_n_u_POWER_PROFILE_DCPCIE __pyx_string_tab[1511]
#define __pyx_n_u_POWER_PROFILE_HMMA_DENSE __pyx_string_tab[1512]
#define __pyx_n_u_POWER_PROFILE_HMMA_SPARSE __pyx_string_tab[1513]
#define __pyx_n_u_POWER_PROFILE_HPC __pyx_string_tab[1514]
#define __pyx_n_u_POWER_PROFILE_LLM_INFERENCE __pyx_string_tab[1515]
#define __pyx_n_u_POWER_PROFILE_LLM_TRAINING __pyx_string_tab[1516]
#define __pyx_n_u_POWER_PROFILE_MAX __pyx_string_tab[1517]
#define __pyx_n_u_POWER_PROFILE_MAX_P __pyx_string_tab[1518]
#define __pyx_n_u_POWER_PROFILE_MAX_Q __pyx_string_tab[1519]
#define __pyx_n_u_POWER_PROFILE_MEMORY_BOUND __pyx_string_tab[1520]
#define __pyx_n_u_POWER_PROFILE_MIG __pyx_string_tab[1521]
#define __pyx_n_u_POWER_PROFILE_NETWORK __pyx_string_tab[1522]
#define __pyx_n_u_POWER_PROFILE_RBM __pyx_string_tab[1523]
#define __pyx_n_u_POWER_PROFILE_SYNC_BALANCED __pyx_string_tab[1524]
#define __pyx_n_u_POWER_SUPPLY __pyx_string_tab[1525]
#define __pyx_n_u_PROCESSOR_CLK_SAMPLES __pyx_string_tab[1526]
#define __pyx_n_u_PSTATE_0 __pyx_string_tab[1527]
#define __pyx_n_u_PSTATE_1 __pyx_string_tab[1528]
#define __pyx_n_u_PSTATE_10 __pyx_string_tab[1529]
#define __pyx_n_u_PSTATE_11 __pyx_string_tab[1530]
#define __pyx_n_u_PSTATE_12 __pyx_string_tab[1531]
#define __pyx_n_u_PSTATE_13 __pyx_string_tab[1532]
#define __pyx_n_u_PSTATE_14 __pyx_string_tab[1533]
#define __pyx_n_u_PSTATE_15 __pyx_string_tab[1534]
#define __pyx_n_u_PSTATE_2 __pyx_string_tab[1535]
#define __pyx_n_u_PSTATE_3 __pyx_string_tab[1536]
#define __pyx_n_u_PSTATE_4 __pyx_string_tab[1537]
#define __pyx_n_u_PSTATE_5 __pyx_string_tab[1538]
#define __pyx_n_u_PSTATE_6 __pyx_string_tab[1539]
#define __pyx_n_u_PSTATE_7 __pyx_string_tab[1540]
#define __pyx_n_u_PSTATE_8 __pyx_string_tab[1541]
#define __pyx_n_u_PSTATE_9 __pyx_string_tab[1542]
#define __pyx_n_u_PSTATE_UNKNOWN __pyx_string_tab[1543]
#define __pyx_n_u_PSUInfo __pyx_string_tab[1544]
#define __pyx_n_u_PSUInfo___reduce_cython __pyx_string_tab[1545]
#define __pyx_n_u_PSUInfo___setstate_cython __pyx_string_tab[1546]
#define __pyx_n_u_PSUInfo_from_data __pyx_string_tab[1547]
#define __pyx_n_u_PSUInfo_from_ptr __pyx_string_tab[1548]
#define __pyx_n_u_PWR_SMOOTHING_ACTIVE_PRESET_PROF __pyx_string_tab[1549]
#define __pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_PER __pyx_string_tab[1550]
#define __pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_RAM __pyx_string_tab[1551]
#define __pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_RAM_2 __pyx_string_tab[1552]
#define __pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_RAM_3 __pyx_string_tab[1553]
#define __pyx_n_u_PWR_SMOOTHING_APPLIED_TMP_CEIL __pyx_string_tab[1554]
#define __pyx_n_u_PWR_SMOOTHING_APPLIED_TMP_FLOOR __pyx_string_tab[1555]
#define __pyx_n_u_PWR_SMOOTHING_ENABLED __pyx_string_tab[1556]
#define __pyx_n_u_PWR_SMOOTHING_HW_CIRCUITRY_PERCE __pyx_string_tab[1557]
#define __pyx_n_u_PWR_SMOOTHING_IMM_RAMP_DOWN_ENAB __pyx_string_tab[1558]
#define __pyx_n_u_PWR_SMOOTHING_MAX_NUM_PRESET_PRO __pyx_string_tab[1559]
#define __pyx_n_u_PWR_SMOOTHING_MAX_PERCENT_TMP_FL __pyx_string_tab[1560]
#define __pyx_n_u_PWR_SMOOTHING_MIN_PERCENT_TMP_FL __pyx_string_tab[1561]
#define __pyx_n_u_PWR_SMOOTHING_PRIV_LVL __pyx_string_tab[1562]
#define __pyx_n_u_PWR_SMOOTHING_PROFILE_PERCENT_TM __pyx_string_tab[1563]
#define __pyx_n_u_PWR_SMOOTHING_PROFILE_RAMP_DOWN __pyx_string_tab[1564]
#define __pyx_n_u_PWR_SMOOTHING_PROFILE_RAMP_DOWN_2 __pyx_string_tab[1565]
#define __pyx_n_u_PWR_SMOOTHING_PROFILE_RAMP_UP_RA __pyx_string_tab[1566]
#define __pyx_n_u_PageRetirementCause __pyx_string_tab[1567]
#define __pyx_n_u_PciInfo __pyx_string_tab[1568]
#define __pyx_n_u_PciInfoExt_v1 __pyx_string_tab[1569]
#define __pyx_n_u_PciInfoExt_v1___reduce_cython __pyx_string_tab[1570]
#define __pyx_n_u_PciInfoExt_v1___setstate_cython __pyx_string_tab[1571]
#define __pyx_n_u_PciInfoExt_v1_from_data __pyx_string_tab[1572]
#define __pyx_n_u_PciInfoExt_v1_from_ptr __pyx_string_tab[1573]
#define __pyx_n_u_PciInfo___reduce_cython __pyx_string_tab[1574]
#define __pyx_n_u_PciInfo___setstate_cython __pyx_string_tab[1575]
#define __pyx_n_u_PciInfo_from_data __pyx_string_tab[1576]
#define __pyx_n_u_PciInfo_from_ptr __pyx_string_tab[1577]
#define __pyx_n_u_PcieLinkState __pyx_string_tab[1578]
#define __pyx_n_u_PcieUtilCounter __pyx_string_tab[1579]
#define __pyx_n_u_Pdi_v1 __pyx_string_tab[1580]
#define __pyx_n_u_Pdi_v1___reduce_cython __pyx_string_tab[1581]
#define __pyx_n_u_Pdi_v1___setstate_cython __pyx_string_tab[1582]
#define __pyx_n_u_Pdi_v1_from_data __pyx_string_tab[1583]
#define __pyx_n_u_Pdi_v1_from_ptr __pyx_string_tab[1584]
#define __pyx_n_u_PerfPolicyType __pyx_string_tab[1585]
#define __pyx_n_u_PlatformInfo_v2 __pyx_string_tab[1586]
#define __pyx_n_u_PlatformInfo_v2___reduce_cython __pyx_string_tab[1587]
#define __pyx_n_u_PlatformInfo_v2___setstate_cytho __pyx_string_tab[1588]
#define __pyx_n_u_PlatformInfo_v2_from_data __pyx_string_tab[1589]
#define __pyx_n_u_PlatformInfo_v2_from_ptr __pyx_string_tab[1590]
#define __pyx_n_u_PowerProfileType __pyx_string_tab[1591]
#define __pyx_n_u_ProcessDetailList_v1 __pyx_string_tab[1592]
#define __pyx_n_u_ProcessDetailList_v1___reduce_cy __pyx_string_tab[1593]
#define __pyx_n_u_ProcessDetailList_v1___setstate __pyx_string_tab[1594]
#define __pyx_n_u_ProcessDetailList_v1_from_data __pyx_string_tab[1595]
#define __pyx_n_u_ProcessDetailList_v1_from_ptr __pyx_string_tab[1596]
#define __pyx_n_u_ProcessDetail_v1 __pyx_string_tab[1597]
#define __pyx_n_u_ProcessDetail_v1___reduce_cython __pyx_string_tab[1598]
#define __pyx_n_u_ProcessDetail_v1___setstate_cyth __pyx_string_tab[1599]
#define __pyx_n_u_ProcessDetail_v1_from_data __pyx_string_tab[1600]
#define __pyx_n_u_ProcessDetail_v1_from_ptr __pyx_string_tab[1601]
#define __pyx_n_u_ProcessInfo __pyx_string_tab[1602]
#define __pyx_n_u_ProcessInfo___reduce_cython __pyx_string_tab[1603]
#define __pyx_n_u_ProcessInfo___setstate_cython __pyx_string_tab[1604]
#define __pyx_n_u_ProcessInfo_from_data __pyx_string_tab[1605]
#define __pyx_n_u_ProcessInfo_from_ptr __pyx_string_tab[1606]
#define __pyx_n_u_ProcessUtilizationInfo_v1 __pyx_string_tab[1607]
#define __pyx_n_u_ProcessUtilizationInfo_v1___redu __pyx_string_tab[1608]
/*
 * Cython-generated interned-string aliases (machine-generated; do not edit by hand).
 *
 * Each macro maps a readable identifier for an interned Python unicode object
 * (the "__pyx_n_u_" prefix denotes a unicode name) to its slot in the module's
 * shared string table, __pyx_string_tab.  The identifier suffix is derived from
 * the (possibly truncated) string contents; a trailing "_2", "_3", ... suffix
 * disambiguates names whose truncated forms would otherwise collide
 * (e.g. __pyx_n_u_VgpuInstancesUtilizationInfo_v1_2 .. _6).
 *
 * NOTE(review): the numeric indices used here (1609-2170 in this chunk) must
 * stay in sync with the string-table initialization emitted elsewhere in this
 * generated file — regenerate via Cython rather than editing indices manually.
 */
#define __pyx_n_u_ProcessUtilizationInfo_v1___sets __pyx_string_tab[1609]
#define __pyx_n_u_ProcessUtilizationInfo_v1_from_d __pyx_string_tab[1610]
#define __pyx_n_u_ProcessUtilizationInfo_v1_from_p __pyx_string_tab[1611]
#define __pyx_n_u_ProcessUtilizationSample __pyx_string_tab[1612]
#define __pyx_n_u_ProcessUtilizationSample___reduc __pyx_string_tab[1613]
#define __pyx_n_u_ProcessUtilizationSample___setst __pyx_string_tab[1614]
#define __pyx_n_u_ProcessUtilizationSample_from_da __pyx_string_tab[1615]
#define __pyx_n_u_ProcessUtilizationSample_from_pt __pyx_string_tab[1616]
#define __pyx_n_u_ProcessesUtilizationInfo_v1 __pyx_string_tab[1617]
#define __pyx_n_u_ProcessesUtilizationInfo_v1___re __pyx_string_tab[1618]
#define __pyx_n_u_ProcessesUtilizationInfo_v1___se __pyx_string_tab[1619]
#define __pyx_n_u_ProcessesUtilizationInfo_v1_from __pyx_string_tab[1620]
#define __pyx_n_u_ProcessesUtilizationInfo_v1_from_2 __pyx_string_tab[1621]
#define __pyx_n_u_Pstates __pyx_string_tab[1622]
#define __pyx_n_u_Pyx_PyDict_NextRef __pyx_string_tab[1623]
#define __pyx_n_u_Q __pyx_string_tab[1624]
#define __pyx_n_u_RANGE_START __pyx_string_tab[1625]
#define __pyx_n_u_REGISTER_FILE __pyx_string_tab[1626]
#define __pyx_n_u_RepairStatus_v1 __pyx_string_tab[1627]
#define __pyx_n_u_RepairStatus_v1___reduce_cython __pyx_string_tab[1628]
#define __pyx_n_u_RepairStatus_v1___setstate_cytho __pyx_string_tab[1629]
#define __pyx_n_u_RepairStatus_v1_from_data __pyx_string_tab[1630]
#define __pyx_n_u_RepairStatus_v1_from_ptr __pyx_string_tab[1631]
#define __pyx_n_u_ResetRequiredError __pyx_string_tab[1632]
#define __pyx_n_u_ResetTypeNotSupportedError __pyx_string_tab[1633]
#define __pyx_n_u_RestrictedAPI __pyx_string_tab[1634]
#define __pyx_n_u_Return __pyx_string_tab[1635]
#define __pyx_n_u_RowRemapperHistogramValues __pyx_string_tab[1636]
#define __pyx_n_u_RowRemapperHistogramValues___red __pyx_string_tab[1637]
#define __pyx_n_u_RowRemapperHistogramValues___set __pyx_string_tab[1638]
#define __pyx_n_u_RowRemapperHistogramValues_from __pyx_string_tab[1639]
#define __pyx_n_u_RowRemapperHistogramValues_from_2 __pyx_string_tab[1640]
#define __pyx_n_u_SAMPLINGTYPE_COUNT __pyx_string_tab[1641]
#define __pyx_n_u_SBMAX6649 __pyx_string_tab[1642]
#define __pyx_n_u_SET_APPLICATION_CLOCKS __pyx_string_tab[1643]
#define __pyx_n_u_SET_AUTO_BOOSTED_CLOCKS __pyx_string_tab[1644]
#define __pyx_n_u_SIGNED_INT __pyx_string_tab[1645]
#define __pyx_n_u_SIGNED_LONG_LONG __pyx_string_tab[1646]
#define __pyx_n_u_SLEEP __pyx_string_tab[1647]
#define __pyx_n_u_SOCKET __pyx_string_tab[1648]
#define __pyx_n_u_SRAM __pyx_string_tab[1649]
#define __pyx_n_u_SRIOV __pyx_string_tab[1650]
#define __pyx_n_u_SUCCESS __pyx_string_tab[1651]
#define __pyx_n_u_Sample __pyx_string_tab[1652]
#define __pyx_n_u_Sample___reduce_cython __pyx_string_tab[1653]
#define __pyx_n_u_Sample___setstate_cython __pyx_string_tab[1654]
#define __pyx_n_u_Sample_from_data __pyx_string_tab[1655]
#define __pyx_n_u_Sample_from_ptr __pyx_string_tab[1656]
#define __pyx_n_u_SamplingType __pyx_string_tab[1657]
#define __pyx_n_u_Sequence __pyx_string_tab[1658]
#define __pyx_n_u_SystemConfComputeSettings_v1 __pyx_string_tab[1659]
#define __pyx_n_u_SystemConfComputeSettings_v1___r __pyx_string_tab[1660]
#define __pyx_n_u_SystemConfComputeSettings_v1___s __pyx_string_tab[1661]
#define __pyx_n_u_SystemConfComputeSettings_v1_fro __pyx_string_tab[1662]
#define __pyx_n_u_SystemConfComputeSettings_v1_fro_2 __pyx_string_tab[1663]
#define __pyx_n_u_TDP __pyx_string_tab[1664]
#define __pyx_n_u_TEMPERATURE_COUNT __pyx_string_tab[1665]
#define __pyx_n_u_TEMPERATURE_GPU __pyx_string_tab[1666]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_ACOUSTIC_C __pyx_string_tab[1667]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_ACOUSTIC_M __pyx_string_tab[1668]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_ACOUSTIC_M_2 __pyx_string_tab[1669]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_COUNT __pyx_string_tab[1670]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_GPS_CURR __pyx_string_tab[1671]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_GPU_MAX __pyx_string_tab[1672]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_MEM_MAX __pyx_string_tab[1673]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_SHUTDOWN __pyx_string_tab[1674]
#define __pyx_n_u_TEMPERATURE_THRESHOLD_SLOWDOWN __pyx_string_tab[1675]
#define __pyx_n_u_TEXTURE_MEMORY __pyx_string_tab[1676]
#define __pyx_n_u_TEXTURE_SHM __pyx_string_tab[1677]
#define __pyx_n_u_THERMAL_COOLER_SIGNAL_COUNT __pyx_string_tab[1678]
#define __pyx_n_u_THERMAL_COOLER_SIGNAL_NONE __pyx_string_tab[1679]
#define __pyx_n_u_THERMAL_COOLER_SIGNAL_TOGGLE __pyx_string_tab[1680]
#define __pyx_n_u_THERMAL_COOLER_SIGNAL_VARIABLE __pyx_string_tab[1681]
#define __pyx_n_u_THERMAL_GPU __pyx_string_tab[1682]
#define __pyx_n_u_THERMAL_GPU_RELATED __pyx_string_tab[1683]
#define __pyx_n_u_THERMAL_MEMORY __pyx_string_tab[1684]
#define __pyx_n_u_THERMAL_NONE __pyx_string_tab[1685]
#define __pyx_n_u_THERMAL_POWER_SUPPLY __pyx_string_tab[1686]
#define __pyx_n_u_TOPOLOGY_HOSTBRIDGE __pyx_string_tab[1687]
#define __pyx_n_u_TOPOLOGY_INTERNAL __pyx_string_tab[1688]
#define __pyx_n_u_TOPOLOGY_MULTIPLE __pyx_string_tab[1689]
#define __pyx_n_u_TOPOLOGY_NODE __pyx_string_tab[1690]
#define __pyx_n_u_TOPOLOGY_SINGLE __pyx_string_tab[1691]
#define __pyx_n_u_TOPOLOGY_SYSTEM __pyx_string_tab[1692]
#define __pyx_n_u_TOSYS __pyx_string_tab[1693]
#define __pyx_n_u_TOTAL_POWER_SAMPLES __pyx_string_tab[1694]
#define __pyx_n_u_TemperatureSensors __pyx_string_tab[1695]
#define __pyx_n_u_TemperatureThresholds __pyx_string_tab[1696]
#define __pyx_n_u_ThermalController __pyx_string_tab[1697]
#define __pyx_n_u_ThermalTarget __pyx_string_tab[1698]
#define __pyx_n_u_TimeoutError __pyx_string_tab[1699]
#define __pyx_n_u_UNCORRECTED __pyx_string_tab[1700]
#define __pyx_n_u_UNKNOWN __pyx_string_tab[1701]
#define __pyx_n_u_UNLIMITED __pyx_string_tab[1702]
#define __pyx_n_u_UNSIGNED_INT __pyx_string_tab[1703]
#define __pyx_n_u_UNSIGNED_LONG __pyx_string_tab[1704]
#define __pyx_n_u_UNSIGNED_LONG_LONG __pyx_string_tab[1705]
#define __pyx_n_u_UNSIGNED_SHORT __pyx_string_tab[1706]
#define __pyx_n_u_UUIDType __pyx_string_tab[1707]
#define __pyx_n_u_UninitializedError __pyx_string_tab[1708]
#define __pyx_n_u_UnitFanInfo __pyx_string_tab[1709]
#define __pyx_n_u_UnitFanInfo___reduce_cython __pyx_string_tab[1710]
#define __pyx_n_u_UnitFanInfo___setstate_cython __pyx_string_tab[1711]
#define __pyx_n_u_UnitFanInfo_from_data __pyx_string_tab[1712]
#define __pyx_n_u_UnitFanInfo_from_ptr __pyx_string_tab[1713]
#define __pyx_n_u_UnitFanSpeeds __pyx_string_tab[1714]
#define __pyx_n_u_UnitFanSpeeds___reduce_cython __pyx_string_tab[1715]
#define __pyx_n_u_UnitFanSpeeds___setstate_cython __pyx_string_tab[1716]
#define __pyx_n_u_UnitFanSpeeds_from_data __pyx_string_tab[1717]
#define __pyx_n_u_UnitFanSpeeds_from_ptr __pyx_string_tab[1718]
#define __pyx_n_u_UnitInfo __pyx_string_tab[1719]
#define __pyx_n_u_UnitInfo___reduce_cython __pyx_string_tab[1720]
#define __pyx_n_u_UnitInfo___setstate_cython __pyx_string_tab[1721]
#define __pyx_n_u_UnitInfo_from_data __pyx_string_tab[1722]
#define __pyx_n_u_UnitInfo_from_ptr __pyx_string_tab[1723]
#define __pyx_n_u_UnknownError __pyx_string_tab[1724]
#define __pyx_n_u_Utilization __pyx_string_tab[1725]
#define __pyx_n_u_Utilization___reduce_cython __pyx_string_tab[1726]
#define __pyx_n_u_Utilization___setstate_cython __pyx_string_tab[1727]
#define __pyx_n_u_Utilization_from_data __pyx_string_tab[1728]
#define __pyx_n_u_Utilization_from_ptr __pyx_string_tab[1729]
#define __pyx_n_u_VBIOSEVT __pyx_string_tab[1730]
#define __pyx_n_u_VCD_BOARD __pyx_string_tab[1731]
#define __pyx_n_u_VCD_INLET __pyx_string_tab[1732]
#define __pyx_n_u_VCD_OUTLET __pyx_string_tab[1733]
#define __pyx_n_u_VERSION_1_0 __pyx_string_tab[1734]
#define __pyx_n_u_VERSION_2_0 __pyx_string_tab[1735]
#define __pyx_n_u_VERSION_2_2 __pyx_string_tab[1736]
#define __pyx_n_u_VERSION_3_0 __pyx_string_tab[1737]
#define __pyx_n_u_VERSION_3_1 __pyx_string_tab[1738]
#define __pyx_n_u_VERSION_4_0 __pyx_string_tab[1739]
#define __pyx_n_u_VERSION_5_0 __pyx_string_tab[1740]
#define __pyx_n_u_VERSION_INVALID __pyx_string_tab[1741]
#define __pyx_n_u_VGPU __pyx_string_tab[1742]
#define __pyx_n_u_VGPU_CAP_COUNT __pyx_string_tab[1743]
#define __pyx_n_u_VGPU_CAP_EXCLUSIVE_SIZE __pyx_string_tab[1744]
#define __pyx_n_u_VGPU_CAP_EXCLUSIVE_TYPE __pyx_string_tab[1745]
#define __pyx_n_u_VGPU_CAP_GPUDIRECT __pyx_string_tab[1746]
#define __pyx_n_u_VGPU_CAP_MULTI_VGPU_EXCLUSIVE __pyx_string_tab[1747]
#define __pyx_n_u_VGPU_CAP_NVLINK_P2P __pyx_string_tab[1748]
#define __pyx_n_u_VGPU_COMPATIBILITY_LIMIT_GPU __pyx_string_tab[1749]
#define __pyx_n_u_VGPU_COMPATIBILITY_LIMIT_GUEST_D __pyx_string_tab[1750]
#define __pyx_n_u_VGPU_COMPATIBILITY_LIMIT_HOST_DR __pyx_string_tab[1751]
#define __pyx_n_u_VGPU_COMPATIBILITY_LIMIT_NONE __pyx_string_tab[1752]
#define __pyx_n_u_VGPU_COMPATIBILITY_LIMIT_OTHER __pyx_string_tab[1753]
#define __pyx_n_u_VGPU_DRIVER_CAP_COUNT __pyx_string_tab[1754]
#define __pyx_n_u_VGPU_DRIVER_CAP_HETEROGENEOUS_MU __pyx_string_tab[1755]
#define __pyx_n_u_VGPU_DRIVER_CAP_WARM_UPDATE __pyx_string_tab[1756]
#define __pyx_n_u_VGPU_INSTANCE_GUEST_INFO_STATE_I __pyx_string_tab[1757]
#define __pyx_n_u_VGPU_INSTANCE_GUEST_INFO_STATE_U __pyx_string_tab[1758]
#define __pyx_n_u_VGPU_VM_ID_DOMAIN_ID __pyx_string_tab[1759]
#define __pyx_n_u_VGPU_VM_ID_UUID __pyx_string_tab[1760]
#define __pyx_n_u_VID __pyx_string_tab[1761]
#define __pyx_n_u_VOLATILE_ECC __pyx_string_tab[1762]
#define __pyx_n_u_VWORKSTATION __pyx_string_tab[1763]
#define __pyx_n_u_Value __pyx_string_tab[1764]
#define __pyx_n_u_ValueType __pyx_string_tab[1765]
#define __pyx_n_u_Value___reduce_cython __pyx_string_tab[1766]
#define __pyx_n_u_Value___setstate_cython __pyx_string_tab[1767]
#define __pyx_n_u_Value_from_data __pyx_string_tab[1768]
#define __pyx_n_u_Value_from_ptr __pyx_string_tab[1769]
#define __pyx_n_u_VgpuCapability __pyx_string_tab[1770]
#define __pyx_n_u_VgpuCreatablePlacementInfo_v1 __pyx_string_tab[1771]
#define __pyx_n_u_VgpuCreatablePlacementInfo_v1_2 __pyx_string_tab[1772]
#define __pyx_n_u_VgpuCreatablePlacementInfo_v1_3 __pyx_string_tab[1773]
#define __pyx_n_u_VgpuCreatablePlacementInfo_v1_fr __pyx_string_tab[1774]
#define __pyx_n_u_VgpuCreatablePlacementInfo_v1_fr_2 __pyx_string_tab[1775]
#define __pyx_n_u_VgpuDriverCapability __pyx_string_tab[1776]
#define __pyx_n_u_VgpuEccNotSupportedError __pyx_string_tab[1777]
#define __pyx_n_u_VgpuGuestInfoState __pyx_string_tab[1778]
#define __pyx_n_u_VgpuHeterogeneousMode_v1 __pyx_string_tab[1779]
#define __pyx_n_u_VgpuHeterogeneousMode_v1___reduc __pyx_string_tab[1780]
#define __pyx_n_u_VgpuHeterogeneousMode_v1___setst __pyx_string_tab[1781]
#define __pyx_n_u_VgpuHeterogeneousMode_v1_from_da __pyx_string_tab[1782]
#define __pyx_n_u_VgpuHeterogeneousMode_v1_from_pt __pyx_string_tab[1783]
#define __pyx_n_u_VgpuInstanceUtilizationInfo_v1_3 __pyx_string_tab[1784]
#define __pyx_n_u_VgpuInstanceUtilizationInfo_v1_4 __pyx_string_tab[1785]
#define __pyx_n_u_VgpuInstanceUtilizationInfo_v1_5 __pyx_string_tab[1786]
#define __pyx_n_u_VgpuInstanceUtilizationInfo_v1_f __pyx_string_tab[1787]
#define __pyx_n_u_VgpuInstanceUtilizationInfo_v1_f_2 __pyx_string_tab[1788]
#define __pyx_n_u_VgpuInstancesUtilizationInfo_v1_2 __pyx_string_tab[1789]
#define __pyx_n_u_VgpuInstancesUtilizationInfo_v1_3 __pyx_string_tab[1790]
#define __pyx_n_u_VgpuInstancesUtilizationInfo_v1_4 __pyx_string_tab[1791]
#define __pyx_n_u_VgpuInstancesUtilizationInfo_v1_5 __pyx_string_tab[1792]
#define __pyx_n_u_VgpuInstancesUtilizationInfo_v1_6 __pyx_string_tab[1793]
#define __pyx_n_u_VgpuLicenseExpiry __pyx_string_tab[1794]
#define __pyx_n_u_VgpuLicenseExpiry___reduce_cytho __pyx_string_tab[1795]
#define __pyx_n_u_VgpuLicenseExpiry___setstate_cyt __pyx_string_tab[1796]
#define __pyx_n_u_VgpuLicenseExpiry_from_data __pyx_string_tab[1797]
#define __pyx_n_u_VgpuLicenseExpiry_from_ptr __pyx_string_tab[1798]
#define __pyx_n_u_VgpuLicenseInfo __pyx_string_tab[1799]
#define __pyx_n_u_VgpuLicenseInfo___reduce_cython __pyx_string_tab[1800]
#define __pyx_n_u_VgpuLicenseInfo___setstate_cytho __pyx_string_tab[1801]
#define __pyx_n_u_VgpuLicenseInfo_from_data __pyx_string_tab[1802]
#define __pyx_n_u_VgpuLicenseInfo_from_ptr __pyx_string_tab[1803]
#define __pyx_n_u_VgpuMetadata __pyx_string_tab[1804]
#define __pyx_n_u_VgpuMetadata___reduce_cython __pyx_string_tab[1805]
#define __pyx_n_u_VgpuMetadata___setstate_cython __pyx_string_tab[1806]
#define __pyx_n_u_VgpuMetadata_from_data __pyx_string_tab[1807]
#define __pyx_n_u_VgpuMetadata_from_ptr __pyx_string_tab[1808]
#define __pyx_n_u_VgpuPgpuCompatibility __pyx_string_tab[1809]
#define __pyx_n_u_VgpuPgpuCompatibilityLimitCode __pyx_string_tab[1810]
#define __pyx_n_u_VgpuPgpuCompatibility___reduce_c __pyx_string_tab[1811]
#define __pyx_n_u_VgpuPgpuCompatibility___setstate __pyx_string_tab[1812]
#define __pyx_n_u_VgpuPgpuCompatibility_from_data __pyx_string_tab[1813]
#define __pyx_n_u_VgpuPgpuCompatibility_from_ptr __pyx_string_tab[1814]
#define __pyx_n_u_VgpuPgpuMetadata __pyx_string_tab[1815]
#define __pyx_n_u_VgpuPgpuMetadata___reduce_cython __pyx_string_tab[1816]
#define __pyx_n_u_VgpuPgpuMetadata___setstate_cyth __pyx_string_tab[1817]
#define __pyx_n_u_VgpuPgpuMetadata_from_data __pyx_string_tab[1818]
#define __pyx_n_u_VgpuPgpuMetadata_from_ptr __pyx_string_tab[1819]
#define __pyx_n_u_VgpuPlacementId_v1 __pyx_string_tab[1820]
#define __pyx_n_u_VgpuPlacementId_v1___reduce_cyth __pyx_string_tab[1821]
#define __pyx_n_u_VgpuPlacementId_v1___setstate_cy __pyx_string_tab[1822]
#define __pyx_n_u_VgpuPlacementId_v1_from_data __pyx_string_tab[1823]
#define __pyx_n_u_VgpuPlacementId_v1_from_ptr __pyx_string_tab[1824]
#define __pyx_n_u_VgpuPlacementList_v2 __pyx_string_tab[1825]
#define __pyx_n_u_VgpuPlacementList_v2___reduce_cy __pyx_string_tab[1826]
#define __pyx_n_u_VgpuPlacementList_v2___setstate __pyx_string_tab[1827]
#define __pyx_n_u_VgpuPlacementList_v2_from_data __pyx_string_tab[1828]
#define __pyx_n_u_VgpuPlacementList_v2_from_ptr __pyx_string_tab[1829]
#define __pyx_n_u_VgpuProcessUtilizationInfo_v1 __pyx_string_tab[1830]
#define __pyx_n_u_VgpuProcessUtilizationInfo_v1_2 __pyx_string_tab[1831]
#define __pyx_n_u_VgpuProcessUtilizationInfo_v1_3 __pyx_string_tab[1832]
#define __pyx_n_u_VgpuProcessUtilizationInfo_v1_fr __pyx_string_tab[1833]
#define __pyx_n_u_VgpuProcessUtilizationInfo_v1_fr_2 __pyx_string_tab[1834]
#define __pyx_n_u_VgpuProcessesUtilizationInfo_v1_2 __pyx_string_tab[1835]
#define __pyx_n_u_VgpuProcessesUtilizationInfo_v1_3 __pyx_string_tab[1836]
#define __pyx_n_u_VgpuProcessesUtilizationInfo_v1_4 __pyx_string_tab[1837]
#define __pyx_n_u_VgpuProcessesUtilizationInfo_v1_5 __pyx_string_tab[1838]
#define __pyx_n_u_VgpuProcessesUtilizationInfo_v1_6 __pyx_string_tab[1839]
#define __pyx_n_u_VgpuRuntimeState_v1 __pyx_string_tab[1840]
#define __pyx_n_u_VgpuRuntimeState_v1___reduce_cyt __pyx_string_tab[1841]
#define __pyx_n_u_VgpuRuntimeState_v1___setstate_c __pyx_string_tab[1842]
#define __pyx_n_u_VgpuRuntimeState_v1_from_data __pyx_string_tab[1843]
#define __pyx_n_u_VgpuRuntimeState_v1_from_ptr __pyx_string_tab[1844]
#define __pyx_n_u_VgpuSchedulerCapabilities __pyx_string_tab[1845]
#define __pyx_n_u_VgpuSchedulerCapabilities___redu __pyx_string_tab[1846]
#define __pyx_n_u_VgpuSchedulerCapabilities___sets __pyx_string_tab[1847]
#define __pyx_n_u_VgpuSchedulerCapabilities_from_d __pyx_string_tab[1848]
#define __pyx_n_u_VgpuSchedulerCapabilities_from_p __pyx_string_tab[1849]
#define __pyx_n_u_VgpuSchedulerGetState __pyx_string_tab[1850]
#define __pyx_n_u_VgpuSchedulerGetState___reduce_c __pyx_string_tab[1851]
#define __pyx_n_u_VgpuSchedulerGetState___setstate __pyx_string_tab[1852]
#define __pyx_n_u_VgpuSchedulerGetState_from_data __pyx_string_tab[1853]
#define __pyx_n_u_VgpuSchedulerGetState_from_ptr __pyx_string_tab[1854]
#define __pyx_n_u_VgpuSchedulerLog __pyx_string_tab[1855]
#define __pyx_n_u_VgpuSchedulerLogEntry __pyx_string_tab[1856]
#define __pyx_n_u_VgpuSchedulerLogEntry___reduce_c __pyx_string_tab[1857]
#define __pyx_n_u_VgpuSchedulerLogEntry___setstate __pyx_string_tab[1858]
#define __pyx_n_u_VgpuSchedulerLogEntry_from_data __pyx_string_tab[1859]
#define __pyx_n_u_VgpuSchedulerLogEntry_from_ptr __pyx_string_tab[1860]
#define __pyx_n_u_VgpuSchedulerLogInfo_v1 __pyx_string_tab[1861]
#define __pyx_n_u_VgpuSchedulerLogInfo_v1___reduce __pyx_string_tab[1862]
#define __pyx_n_u_VgpuSchedulerLogInfo_v1___setsta __pyx_string_tab[1863]
#define __pyx_n_u_VgpuSchedulerLogInfo_v1_from_dat __pyx_string_tab[1864]
#define __pyx_n_u_VgpuSchedulerLogInfo_v1_from_ptr __pyx_string_tab[1865]
#define __pyx_n_u_VgpuSchedulerLog___reduce_cython __pyx_string_tab[1866]
#define __pyx_n_u_VgpuSchedulerLog___setstate_cyth __pyx_string_tab[1867]
#define __pyx_n_u_VgpuSchedulerLog_from_data __pyx_string_tab[1868]
#define __pyx_n_u_VgpuSchedulerLog_from_ptr __pyx_string_tab[1869]
#define __pyx_n_u_VgpuSchedulerParams __pyx_string_tab[1870]
#define __pyx_n_u_VgpuSchedulerParams___reduce_cyt __pyx_string_tab[1871]
#define __pyx_n_u_VgpuSchedulerParams___setstate_c __pyx_string_tab[1872]
#define __pyx_n_u_VgpuSchedulerParams_from_data __pyx_string_tab[1873]
#define __pyx_n_u_VgpuSchedulerParams_from_ptr __pyx_string_tab[1874]
#define __pyx_n_u_VgpuSchedulerSetParams __pyx_string_tab[1875]
#define __pyx_n_u_VgpuSchedulerSetParams___reduce __pyx_string_tab[1876]
#define __pyx_n_u_VgpuSchedulerSetParams___setstat __pyx_string_tab[1877]
#define __pyx_n_u_VgpuSchedulerSetParams_from_data __pyx_string_tab[1878]
#define __pyx_n_u_VgpuSchedulerSetParams_from_ptr __pyx_string_tab[1879]
#define __pyx_n_u_VgpuSchedulerStateInfo_v1 __pyx_string_tab[1880]
#define __pyx_n_u_VgpuSchedulerStateInfo_v1___redu __pyx_string_tab[1881]
#define __pyx_n_u_VgpuSchedulerStateInfo_v1___sets __pyx_string_tab[1882]
#define __pyx_n_u_VgpuSchedulerStateInfo_v1_from_d __pyx_string_tab[1883]
#define __pyx_n_u_VgpuSchedulerStateInfo_v1_from_p __pyx_string_tab[1884]
#define __pyx_n_u_VgpuSchedulerState_v1 __pyx_string_tab[1885]
#define __pyx_n_u_VgpuSchedulerState_v1___reduce_c __pyx_string_tab[1886]
#define __pyx_n_u_VgpuSchedulerState_v1___setstate __pyx_string_tab[1887]
#define __pyx_n_u_VgpuSchedulerState_v1_from_data __pyx_string_tab[1888]
#define __pyx_n_u_VgpuSchedulerState_v1_from_ptr __pyx_string_tab[1889]
#define __pyx_n_u_VgpuTypeBar1Info_v1 __pyx_string_tab[1890]
#define __pyx_n_u_VgpuTypeBar1Info_v1___reduce_cyt __pyx_string_tab[1891]
#define __pyx_n_u_VgpuTypeBar1Info_v1___setstate_c __pyx_string_tab[1892]
#define __pyx_n_u_VgpuTypeBar1Info_v1_from_data __pyx_string_tab[1893]
#define __pyx_n_u_VgpuTypeBar1Info_v1_from_ptr __pyx_string_tab[1894]
#define __pyx_n_u_VgpuTypeIdInfo_v1 __pyx_string_tab[1895]
#define __pyx_n_u_VgpuTypeIdInfo_v1___reduce_cytho __pyx_string_tab[1896]
#define __pyx_n_u_VgpuTypeIdInfo_v1___setstate_cyt __pyx_string_tab[1897]
#define __pyx_n_u_VgpuTypeIdInfo_v1_from_data __pyx_string_tab[1898]
#define __pyx_n_u_VgpuTypeIdInfo_v1_from_ptr __pyx_string_tab[1899]
#define __pyx_n_u_VgpuTypeMaxInstance_v1 __pyx_string_tab[1900]
#define __pyx_n_u_VgpuTypeMaxInstance_v1___reduce __pyx_string_tab[1901]
#define __pyx_n_u_VgpuTypeMaxInstance_v1___setstat __pyx_string_tab[1902]
#define __pyx_n_u_VgpuTypeMaxInstance_v1_from_data __pyx_string_tab[1903]
#define __pyx_n_u_VgpuTypeMaxInstance_v1_from_ptr __pyx_string_tab[1904]
#define __pyx_n_u_VgpuVersion __pyx_string_tab[1905]
#define __pyx_n_u_VgpuVersion___reduce_cython __pyx_string_tab[1906]
#define __pyx_n_u_VgpuVersion___setstate_cython __pyx_string_tab[1907]
#define __pyx_n_u_VgpuVersion_from_data __pyx_string_tab[1908]
#define __pyx_n_u_VgpuVersion_from_ptr __pyx_string_tab[1909]
#define __pyx_n_u_VgpuVmCompatibility __pyx_string_tab[1910]
#define __pyx_n_u_VgpuVmIdType __pyx_string_tab[1911]
#define __pyx_n_u_View_MemoryView __pyx_string_tab[1912]
#define __pyx_n_u_abc __pyx_string_tab[1913]
#define __pyx_n_u_accounting_stats_dtype __pyx_string_tab[1914]
#define __pyx_n_u_active_vgpu_instance_info_v1_dty __pyx_string_tab[1915]
#define __pyx_n_u_address __pyx_string_tab[1916]
#define __pyx_n_u_aggregate_cor __pyx_string_tab[1917]
#define __pyx_n_u_aggregate_unc_bucket_l2 __pyx_string_tab[1918]
#define __pyx_n_u_aggregate_unc_bucket_mcu __pyx_string_tab[1919]
#define __pyx_n_u_aggregate_unc_bucket_other __pyx_string_tab[1920]
#define __pyx_n_u_aggregate_unc_bucket_pcie __pyx_string_tab[1921]
#define __pyx_n_u_aggregate_unc_bucket_sm __pyx_string_tab[1922]
#define __pyx_n_u_aggregate_unc_parity __pyx_string_tab[1923]
#define __pyx_n_u_aggregate_unc_sec_ded __pyx_string_tab[1924]
#define __pyx_n_u_all __pyx_string_tab[1925]
#define __pyx_n_u_allocate_buffer __pyx_string_tab[1926]
#define __pyx_n_u_api_type __pyx_string_tab[1927]
#define __pyx_n_u_arr_mode __pyx_string_tab[1928]
#define __pyx_n_u_asarray __pyx_string_tab[1929]
#define __pyx_n_u_asyncio_coroutines __pyx_string_tab[1930]
#define __pyx_n_u_attacker_advantage __pyx_string_tab[1931]
#define __pyx_n_u_attestation_cert_chain __pyx_string_tab[1932]
#define __pyx_n_u_attestation_cert_chain_size __pyx_string_tab[1933]
#define __pyx_n_u_attestation_report __pyx_string_tab[1934]
#define __pyx_n_u_attestation_report_size __pyx_string_tab[1935]
#define __pyx_n_u_average_fps __pyx_string_tab[1936]
#define __pyx_n_u_average_latency __pyx_string_tab[1937]
#define __pyx_n_u_avg_factor __pyx_string_tab[1938]
#define __pyx_n_u_b_channel_repair_pending __pyx_string_tab[1939]
#define __pyx_n_u_b_global_status __pyx_string_tab[1940]
#define __pyx_n_u_b_is_best __pyx_string_tab[1941]
#define __pyx_n_u_b_is_present __pyx_string_tab[1942]
#define __pyx_n_u_b_set_best __pyx_string_tab[1943]
#define __pyx_n_u_b_threshold_exceeded __pyx_string_tab[1944]
#define __pyx_n_u_b_tpc_repair_pending __pyx_string_tab[1945]
#define __pyx_n_u_ba_r1memory_dtype __pyx_string_tab[1946]
#define __pyx_n_u_bar1_used __pyx_string_tab[1947]
#define __pyx_n_u_bar1free __pyx_string_tab[1948]
#define __pyx_n_u_bar1size __pyx_string_tab[1949]
#define __pyx_n_u_bar1total __pyx_string_tab[1950]
#define __pyx_n_u_base __pyx_string_tab[1951]
#define __pyx_n_u_base_class __pyx_string_tab[1952]
#define __pyx_n_u_bridge_chip_hierarchy_dtype __pyx_string_tab[1953]
#define __pyx_n_u_bridge_chip_info __pyx_string_tab[1954]
#define __pyx_n_u_bridge_chip_info_dtype __pyx_string_tab[1955]
#define __pyx_n_u_bridge_count __pyx_string_tab[1956]
#define __pyx_n_u_buf __pyx_string_tab[1957]
#define __pyx_n_u_buffer __pyx_string_tab[1958]
#define __pyx_n_u_bus __pyx_string_tab[1959]
#define __pyx_n_u_bus_id __pyx_string_tab[1960]
#define __pyx_n_u_bus_id_legacy __pyx_string_tab[1961]
#define __pyx_n_u_bw_mode __pyx_string_tab[1962]
#define __pyx_n_u_bw_modes __pyx_string_tab[1963]
#define __pyx_n_u_c __pyx_string_tab[1964]
#define __pyx_n_u_c2c_mode_info_v1_dtype __pyx_string_tab[1965]
#define __pyx_n_u_cap_mask __pyx_string_tab[1966]
#define __pyx_n_u_capabilities __pyx_string_tab[1967]
#define __pyx_n_u_capability __pyx_string_tab[1968]
#define __pyx_n_u_cause __pyx_string_tab[1969]
#define __pyx_n_u_cc_feature __pyx_string_tab[1970]
#define __pyx_n_u_cec_attestation_report __pyx_string_tab[1971]
#define __pyx_n_u_cec_attestation_report_size __pyx_string_tab[1972]
#define __pyx_n_u_cert_chain __pyx_string_tab[1973]
#define __pyx_n_u_cert_chain_size __pyx_string_tab[1974]
#define __pyx_n_u_chassis_serial_number __pyx_string_tab[1975]
#define __pyx_n_u_check_status __pyx_string_tab[1976]
#define __pyx_n_u_check_status_size __pyx_string_tab[1977]
#define __pyx_n_u_class __pyx_string_tab[1978]
#define __pyx_n_u_class_getitem __pyx_string_tab[1979]
#define __pyx_n_u_cline_in_traceback __pyx_string_tab[1980]
#define __pyx_n_u_clique_id __pyx_string_tab[1981]
#define __pyx_n_u_clk_api_domain __pyx_string_tab[1982]
#define __pyx_n_u_clk_domain_fault_mask __pyx_string_tab[1983]
#define __pyx_n_u_clk_mon_fault_info_dtype __pyx_string_tab[1984]
#define __pyx_n_u_clk_mon_list __pyx_string_tab[1985]
#define __pyx_n_u_clk_mon_list_size __pyx_string_tab[1986]
#define __pyx_n_u_clk_mon_status_dtype __pyx_string_tab[1987]
#define __pyx_n_u_clock_id __pyx_string_tab[1988]
#define __pyx_n_u_clock_offset_m_hz __pyx_string_tab[1989]
#define __pyx_n_u_clock_offset_v1_dtype __pyx_string_tab[1990]
#define __pyx_n_u_clock_type __pyx_string_tab[1991]
#define __pyx_n_u_cluster_uuid __pyx_string_tab[1992]
#define __pyx_n_u_codec_type __pyx_string_tab[1993]
#define __pyx_n_u_color __pyx_string_tab[1994]
#define __pyx_n_u_compatibility_limit_code __pyx_string_tab[1995]
#define __pyx_n_u_compute_instance __pyx_string_tab[1996]
#define __pyx_n_u_compute_instance_destroy __pyx_string_tab[1997]
#define __pyx_n_u_compute_instance_get_info_v2 __pyx_string_tab[1998]
#define __pyx_n_u_compute_instance_id __pyx_string_tab[1999]
#define __pyx_n_u_compute_instance_info_dtype __pyx_string_tab[2000]
#define __pyx_n_u_compute_instance_placement_dtype __pyx_string_tab[2001]
#define __pyx_n_u_compute_instance_profile_info_v2 __pyx_string_tab[2002]
#define __pyx_n_u_compute_instance_profile_info_v3 __pyx_string_tab[2003]
#define __pyx_n_u_compute_instance_slice_count __pyx_string_tab[2004]
#define __pyx_n_u_conf_compute_get_key_rotation_th __pyx_string_tab[2005]
#define __pyx_n_u_conf_compute_gpu_attestation_rep __pyx_string_tab[2006]
#define __pyx_n_u_conf_compute_gpu_certificate_dty __pyx_string_tab[2007]
#define __pyx_n_u_conf_compute_mem_size_info_dtype __pyx_string_tab[2008]
#define __pyx_n_u_conf_compute_system_caps_dtype __pyx_string_tab[2009]
#define __pyx_n_u_conf_compute_system_state_dtype __pyx_string_tab[2010]
#define __pyx_n_u_controller __pyx_string_tab[2011]
#define __pyx_n_u_cooler_info_v1_dtype __pyx_string_tab[2012]
#define __pyx_n_u_copy_engine_count __pyx_string_tab[2013]
#define __pyx_n_u_count __pyx_string_tab[2014]
#define __pyx_n_u_counter __pyx_string_tab[2015]
#define __pyx_n_u_counter_type __pyx_string_tab[2016]
#define __pyx_n_u_cpuNumber __pyx_string_tab[2017]
#define __pyx_n_u_cpu_caps __pyx_string_tab[2018]
#define __pyx_n_u_cpu_set_size __pyx_string_tab[2019]
#define __pyx_n_u_ctypes __pyx_string_tab[2020]
#define __pyx_n_u_cuda_bindings__nvml __pyx_string_tab[2021]
#define __pyx_n_u_cumulative_preemption_time __pyx_string_tab[2022]
#define __pyx_n_u_current __pyx_string_tab[2023]
#define __pyx_n_u_current_mode __pyx_string_tab[2024]
#define __pyx_n_u_current_state __pyx_string_tab[2025]
#define __pyx_n_u_current_temp __pyx_string_tab[2026]
#define __pyx_n_u_dVal __pyx_string_tab[2027]
#define __pyx_n_u_d_val __pyx_string_tab[2028]
#define __pyx_n_u_data __pyx_string_tab[2029]
#define __pyx_n_u_data_2 __pyx_string_tab[2030]
#define __pyx_n_u_day __pyx_string_tab[2031]
#define __pyx_n_u_decUtil __pyx_string_tab[2032]
#define __pyx_n_u_dec_threshold __pyx_string_tab[2033]
#define __pyx_n_u_dec_util __pyx_string_tab[2034]
#define __pyx_n_u_decoder_count __pyx_string_tab[2035]
#define __pyx_n_u_default_max_temp __pyx_string_tab[2036]
#define __pyx_n_u_default_min_temp __pyx_string_tab[2037]
#define __pyx_n_u_dev_tools_mode __pyx_string_tab[2038]
#define __pyx_n_u_device __pyx_string_tab[2039]
#define __pyx_n_u_device1 __pyx_string_tab[2040]
#define __pyx_n_u_device2 __pyx_string_tab[2041]
#define __pyx_n_u_deviceCount __pyx_string_tab[2042]
#define __pyx_n_u_device_2 __pyx_string_tab[2043]
#define __pyx_n_u_device_addressing_mode_v1_dtype __pyx_string_tab[2044]
#define __pyx_n_u_device_attributes_dtype __pyx_string_tab[2045]
#define __pyx_n_u_device_capabilities_v1_dtype __pyx_string_tab[2046]
#define __pyx_n_u_device_clear_accounting_pids __pyx_string_tab[2047]
#define __pyx_n_u_device_clear_cpu_affinity __pyx_string_tab[2048]
#define __pyx_n_u_device_clear_ecc_error_counts __pyx_string_tab[2049]
#define __pyx_n_u_device_clear_field_values __pyx_string_tab[2050]
#define __pyx_n_u_device_create_gpu_instance __pyx_string_tab[2051]
#define __pyx_n_u_device_create_gpu_instance_with __pyx_string_tab[2052]
#define __pyx_n_u_device_current_clock_freqs_v1_dt __pyx_string_tab[2053]
#define __pyx_n_u_device_discover_gpus __pyx_string_tab[2054]
#define __pyx_n_u_device_get_accounting_buffer_siz __pyx_string_tab[2055]
#define __pyx_n_u_device_get_accounting_mode __pyx_string_tab[2056]
#define __pyx_n_u_device_get_accounting_pids __pyx_string_tab[2057]
#define __pyx_n_u_device_get_accounting_stats __pyx_string_tab[2058]
#define __pyx_n_u_device_get_active_vgpus __pyx_string_tab[2059]
#define __pyx_n_u_device_get_adaptive_clock_info_s __pyx_string_tab[2060]
#define __pyx_n_u_device_get_addressing_mode __pyx_string_tab[2061]
#define __pyx_n_u_device_get_api_restriction __pyx_string_tab[2062]
#define __pyx_n_u_device_get_architecture __pyx_string_tab[2063]
#define __pyx_n_u_device_get_attributes_v2 __pyx_string_tab[2064]
#define __pyx_n_u_device_get_auto_boosted_clocks_e __pyx_string_tab[2065]
#define __pyx_n_u_device_get_bar1_memory_info __pyx_string_tab[2066]
#define __pyx_n_u_device_get_board_id __pyx_string_tab[2067]
#define __pyx_n_u_device_get_board_part_number __pyx_string_tab[2068]
#define __pyx_n_u_device_get_brand __pyx_string_tab[2069]
#define __pyx_n_u_device_get_bridge_chip_info __pyx_string_tab[2070]
#define __pyx_n_u_device_get_bus_type __pyx_string_tab[2071]
#define __pyx_n_u_device_get_c2c_mode_info_v __pyx_string_tab[2072]
#define __pyx_n_u_device_get_capabilities __pyx_string_tab[2073]
#define __pyx_n_u_device_get_clk_mon_status __pyx_string_tab[2074]
#define __pyx_n_u_device_get_clock __pyx_string_tab[2075]
#define __pyx_n_u_device_get_clock_info __pyx_string_tab[2076]
#define __pyx_n_u_device_get_clock_offsets __pyx_string_tab[2077]
#define __pyx_n_u_device_get_compute_instance_id __pyx_string_tab[2078]
#define __pyx_n_u_device_get_compute_mode __pyx_string_tab[2079]
#define __pyx_n_u_device_get_compute_running_proce __pyx_string_tab[2080]
#define __pyx_n_u_device_get_conf_compute_gpu_atte __pyx_string_tab[2081]
#define __pyx_n_u_device_get_conf_compute_gpu_cert __pyx_string_tab[2082]
#define __pyx_n_u_device_get_conf_compute_mem_size __pyx_string_tab[2083]
#define __pyx_n_u_device_get_conf_compute_protecte __pyx_string_tab[2084]
#define __pyx_n_u_device_get_cooler_info __pyx_string_tab[2085]
#define __pyx_n_u_device_get_count_v2 __pyx_string_tab[2086]
#define __pyx_n_u_device_get_cpu_affinity __pyx_string_tab[2087]
#define __pyx_n_u_device_get_cpu_affinity_within_s __pyx_string_tab[2088]
#define __pyx_n_u_device_get_creatable_vgpus __pyx_string_tab[2089]
#define __pyx_n_u_device_get_cuda_compute_capabili __pyx_string_tab[2090]
#define __pyx_n_u_device_get_curr_pcie_link_genera __pyx_string_tab[2091]
#define __pyx_n_u_device_get_curr_pcie_link_width __pyx_string_tab[2092]
#define __pyx_n_u_device_get_current_clock_freqs __pyx_string_tab[2093]
#define __pyx_n_u_device_get_current_clocks_event __pyx_string_tab[2094]
#define __pyx_n_u_device_get_decoder_utilization __pyx_string_tab[2095]
#define __pyx_n_u_device_get_default_ecc_mode __pyx_string_tab[2096]
#define __pyx_n_u_device_get_device_handle_from_mi __pyx_string_tab[2097]
#define __pyx_n_u_device_get_display_active __pyx_string_tab[2098]
#define __pyx_n_u_device_get_display_mode __pyx_string_tab[2099]
#define __pyx_n_u_device_get_dram_encryption_mode __pyx_string_tab[2100]
#define __pyx_n_u_device_get_driver_model_v2 __pyx_string_tab[2101]
#define __pyx_n_u_device_get_dynamic_pstates_info __pyx_string_tab[2102]
#define __pyx_n_u_device_get_ecc_mode __pyx_string_tab[2103]
#define __pyx_n_u_device_get_encoder_capacity __pyx_string_tab[2104]
#define __pyx_n_u_device_get_encoder_sessions __pyx_string_tab[2105]
#define __pyx_n_u_device_get_encoder_stats __pyx_string_tab[2106]
#define __pyx_n_u_device_get_encoder_utilization __pyx_string_tab[2107]
#define __pyx_n_u_device_get_enforced_power_limit __pyx_string_tab[2108]
#define __pyx_n_u_device_get_fan_control_policy_v2 __pyx_string_tab[2109]
#define __pyx_n_u_device_get_fan_speed __pyx_string_tab[2110]
#define __pyx_n_u_device_get_fan_speed_rpm __pyx_string_tab[2111]
#define __pyx_n_u_device_get_fan_speed_v2 __pyx_string_tab[2112]
#define __pyx_n_u_device_get_fbc_sessions __pyx_string_tab[2113]
#define __pyx_n_u_device_get_fbc_stats __pyx_string_tab[2114]
#define __pyx_n_u_device_get_field_values __pyx_string_tab[2115]
#define __pyx_n_u_device_get_gpc_clk_min_max_vf_of __pyx_string_tab[2116]
#define __pyx_n_u_device_get_gpc_clk_vf_offset __pyx_string_tab[2117]
#define __pyx_n_u_device_get_gpu_fabric_info_v __pyx_string_tab[2118]
#define __pyx_n_u_device_get_gpu_instance_by_id __pyx_string_tab[2119]
#define __pyx_n_u_device_get_gpu_instance_id __pyx_string_tab[2120]
#define __pyx_n_u_device_get_gpu_instance_possible __pyx_string_tab[2121]
#define __pyx_n_u_device_get_gpu_instance_profile __pyx_string_tab[2122]
#define __pyx_n_u_device_get_gpu_instance_profile_2 __pyx_string_tab[2123]
#define __pyx_n_u_device_get_gpu_instance_remainin __pyx_string_tab[2124]
#define __pyx_n_u_device_get_gpu_instances __pyx_string_tab[2125]
#define __pyx_n_u_device_get_gpu_max_pcie_link_gen __pyx_string_tab[2126]
#define __pyx_n_u_device_get_gpu_operation_mode __pyx_string_tab[2127]
#define __pyx_n_u_device_get_grid_licensable_featu __pyx_string_tab[2128]
#define __pyx_n_u_device_get_gsp_firmware_mode __pyx_string_tab[2129]
#define __pyx_n_u_device_get_gsp_firmware_version __pyx_string_tab[2130]
#define __pyx_n_u_device_get_handle_by_index_v2 __pyx_string_tab[2131]
#define __pyx_n_u_device_get_handle_by_pci_bus_id __pyx_string_tab[2132]
#define __pyx_n_u_device_get_handle_by_serial __pyx_string_tab[2133]
#define __pyx_n_u_device_get_handle_by_uuid __pyx_string_tab[2134]
#define __pyx_n_u_device_get_handle_by_uuidv __pyx_string_tab[2135]
#define __pyx_n_u_device_get_host_vgpu_mode __pyx_string_tab[2136]
#define __pyx_n_u_device_get_hostname_v1 __pyx_string_tab[2137]
#define __pyx_n_u_device_get_index __pyx_string_tab[2138]
#define __pyx_n_u_device_get_inforom_configuration __pyx_string_tab[2139]
#define __pyx_n_u_device_get_inforom_image_version __pyx_string_tab[2140]
#define __pyx_n_u_device_get_inforom_version __pyx_string_tab[2141]
#define __pyx_n_u_device_get_irq_num __pyx_string_tab[2142]
#define __pyx_n_u_device_get_jpg_utilization __pyx_string_tab[2143]
#define __pyx_n_u_device_get_last_bbx_flush_time __pyx_string_tab[2144]
#define __pyx_n_u_device_get_margin_temperature __pyx_string_tab[2145]
#define __pyx_n_u_device_get_max_clock_info __pyx_string_tab[2146]
#define __pyx_n_u_device_get_max_customer_boost_cl __pyx_string_tab[2147]
#define __pyx_n_u_device_get_max_mig_device_count __pyx_string_tab[2148]
#define __pyx_n_u_device_get_max_pcie_link_generat __pyx_string_tab[2149]
#define __pyx_n_u_device_get_max_pcie_link_width __pyx_string_tab[2150]
#define __pyx_n_u_device_get_mem_clk_min_max_vf_of __pyx_string_tab[2151]
#define __pyx_n_u_device_get_mem_clk_vf_offset __pyx_string_tab[2152]
#define __pyx_n_u_device_get_memory_affinity __pyx_string_tab[2153]
#define __pyx_n_u_device_get_memory_bus_width __pyx_string_tab[2154]
#define __pyx_n_u_device_get_memory_error_counter __pyx_string_tab[2155]
#define __pyx_n_u_device_get_memory_info_v2 __pyx_string_tab[2156]
#define __pyx_n_u_device_get_mig_device_handle_by __pyx_string_tab[2157]
#define __pyx_n_u_device_get_mig_mode __pyx_string_tab[2158]
#define __pyx_n_u_device_get_min_max_clock_of_p_st __pyx_string_tab[2159]
#define __pyx_n_u_device_get_min_max_fan_speed __pyx_string_tab[2160]
#define __pyx_n_u_device_get_minor_number __pyx_string_tab[2161]
#define __pyx_n_u_device_get_module_id __pyx_string_tab[2162]
#define __pyx_n_u_device_get_mps_compute_running_p __pyx_string_tab[2163]
#define __pyx_n_u_device_get_multi_gpu_board __pyx_string_tab[2164]
#define __pyx_n_u_device_get_name __pyx_string_tab[2165]
#define __pyx_n_u_device_get_num_fans __pyx_string_tab[2166]
#define __pyx_n_u_device_get_num_gpu_cores __pyx_string_tab[2167]
#define __pyx_n_u_device_get_numa_node_id __pyx_string_tab[2168]
#define __pyx_n_u_device_get_nvlink_bw_mode __pyx_string_tab[2169]
#define __pyx_n_u_device_get_nvlink_capability __pyx_string_tab[2170]
#define __pyx_n_u_device_get_nvlink_error_counter __pyx_string_tab[2171]
#define __pyx_n_u_device_get_nvlink_info __pyx_string_tab[2172]
#define __pyx_n_u_device_get_nvlink_remote_device __pyx_string_tab[2173]
#define __pyx_n_u_device_get_nvlink_remote_pci_inf __pyx_string_tab[2174]
#define __pyx_n_u_device_get_nvlink_state __pyx_string_tab[2175]
#define __pyx_n_u_device_get_nvlink_supported_bw_m __pyx_string_tab[2176]
#define __pyx_n_u_device_get_nvlink_version __pyx_string_tab[2177]
#define __pyx_n_u_device_get_ofa_utilization __pyx_string_tab[2178]
#define __pyx_n_u_device_get_p2p_status __pyx_string_tab[2179]
#define __pyx_n_u_device_get_pci_info_ext __pyx_string_tab[2180]
#define __pyx_n_u_device_get_pci_info_v3 __pyx_string_tab[2181]
#define __pyx_n_u_device_get_pcie_link_max_speed __pyx_string_tab[2182]
#define __pyx_n_u_device_get_pcie_replay_counter __pyx_string_tab[2183]
#define __pyx_n_u_device_get_pcie_speed __pyx_string_tab[2184]
#define __pyx_n_u_device_get_pcie_throughput __pyx_string_tab[2185]
#define __pyx_n_u_device_get_pdi __pyx_string_tab[2186]
#define __pyx_n_u_device_get_performance_modes __pyx_string_tab[2187]
#define __pyx_n_u_device_get_performance_state __pyx_string_tab[2188]
#define __pyx_n_u_device_get_persistence_mode __pyx_string_tab[2189]
#define __pyx_n_u_device_get_pgpu_metadata_string __pyx_string_tab[2190]
#define __pyx_n_u_device_get_platform_info __pyx_string_tab[2191]
#define __pyx_n_u_device_get_power_management_defa __pyx_string_tab[2192]
#define __pyx_n_u_device_get_power_management_limi __pyx_string_tab[2193]
#define __pyx_n_u_device_get_power_management_limi_2 __pyx_string_tab[2194]
#define __pyx_n_u_device_get_power_mizer_mode_v1 __pyx_string_tab[2195]
#define __pyx_n_u_device_get_power_source __pyx_string_tab[2196]
#define __pyx_n_u_device_get_power_state __pyx_string_tab[2197]
#define __pyx_n_u_device_get_power_usage __pyx_string_tab[2198]
#define __pyx_n_u_device_get_process_utilization __pyx_string_tab[2199]
#define __pyx_n_u_device_get_processes_utilization __pyx_string_tab[2200]
#define __pyx_n_u_device_get_remapped_rows __pyx_string_tab[2201]
#define __pyx_n_u_device_get_repair_status __pyx_string_tab[2202]
#define __pyx_n_u_device_get_retired_pages __pyx_string_tab[2203]
#define __pyx_n_u_device_get_retired_pages_pending __pyx_string_tab[2204]
#define __pyx_n_u_device_get_retired_pages_v2 __pyx_string_tab[2205]
#define __pyx_n_u_device_get_row_remapper_histogra __pyx_string_tab[2206]
#define __pyx_n_u_device_get_running_process_detai __pyx_string_tab[2207]
#define __pyx_n_u_device_get_samples __pyx_string_tab[2208]
#define __pyx_n_u_device_get_serial __pyx_string_tab[2209]
#define __pyx_n_u_device_get_sram_ecc_error_status __pyx_string_tab[2210]
#define __pyx_n_u_device_get_sram_unique_uncorrect __pyx_string_tab[2211]
#define __pyx_n_u_device_get_supported_clocks_even __pyx_string_tab[2212]
#define __pyx_n_u_device_get_supported_event_types __pyx_string_tab[2213]
#define __pyx_n_u_device_get_supported_graphics_cl __pyx_string_tab[2214]
#define __pyx_n_u_device_get_supported_memory_cloc __pyx_string_tab[2215]
#define __pyx_n_u_device_get_supported_performance __pyx_string_tab[2216]
#define __pyx_n_u_device_get_supported_vgpus __pyx_string_tab[2217]
#define __pyx_n_u_device_get_target_fan_speed __pyx_string_tab[2218]
#define __pyx_n_u_device_get_temperature_threshold __pyx_string_tab[2219]
#define __pyx_n_u_device_get_temperature_v __pyx_string_tab[2220]
#define __pyx_n_u_device_get_thermal_settings __pyx_string_tab[2221]
#define __pyx_n_u_device_get_topology_common_ances __pyx_string_tab[2222]
#define __pyx_n_u_device_get_topology_nearest_gpus __pyx_string_tab[2223]
#define __pyx_n_u_device_get_total_ecc_errors __pyx_string_tab[2224]
#define __pyx_n_u_device_get_total_energy_consumpt __pyx_string_tab[2225]
#define __pyx_n_u_device_get_utilization_rates __pyx_string_tab[2226]
#define __pyx_n_u_device_get_uuid __pyx_string_tab[2227]
#define __pyx_n_u_device_get_vbios_version __pyx_string_tab[2228]
#define __pyx_n_u_device_get_vgpu_capabilities __pyx_string_tab[2229]
#define __pyx_n_u_device_get_vgpu_heterogeneous_mo __pyx_string_tab[2230]
#define __pyx_n_u_device_get_vgpu_instances_utiliz __pyx_string_tab[2231]
#define __pyx_n_u_device_get_vgpu_metadata __pyx_string_tab[2232]
#define __pyx_n_u_device_get_vgpu_process_utilizat __pyx_string_tab[2233]
#define __pyx_n_u_device_get_vgpu_processes_utiliz __pyx_string_tab[2234]
#define __pyx_n_u_device_get_vgpu_scheduler_capabi __pyx_string_tab[2235]
#define __pyx_n_u_device_get_vgpu_scheduler_log __pyx_string_tab[2236]
#define __pyx_n_u_device_get_vgpu_scheduler_state __pyx_string_tab[2237]
#define __pyx_n_u_device_get_vgpu_type_creatable_p __pyx_string_tab[2238]
#define __pyx_n_u_device_get_vgpu_type_supported_p __pyx_string_tab[2239]
#define __pyx_n_u_device_get_vgpu_utilization __pyx_string_tab[2240]
#define __pyx_n_u_device_get_virtualization_mode __pyx_string_tab[2241]
#define __pyx_n_u_device_is_mig_device_handle __pyx_string_tab[2242]
#define __pyx_n_u_device_modify_drain_state __pyx_string_tab[2243]
#define __pyx_n_u_device_on_same_board __pyx_string_tab[2244]
#define __pyx_n_u_device_perf_modes_v1_dtype __pyx_string_tab[2245]
#define __pyx_n_u_device_power_mizer_modes_v1_dtyp __pyx_string_tab[2246]
#define __pyx_n_u_device_power_smoothing_activate __pyx_string_tab[2247]
#define __pyx_n_u_device_power_smoothing_set_state __pyx_string_tab[2248]
#define __pyx_n_u_device_power_smoothing_update_pr __pyx_string_tab[2249]
#define __pyx_n_u_device_query_drain_state __pyx_string_tab[2250]
#define __pyx_n_u_device_read_write_prm_v1 __pyx_string_tab[2251]
#define __pyx_n_u_device_register_events __pyx_string_tab[2252]
#define __pyx_n_u_device_remove_gpu_v2 __pyx_string_tab[2253]
#define __pyx_n_u_device_reset_gpu_locked_clocks __pyx_string_tab[2254]
#define __pyx_n_u_device_reset_memory_locked_clock __pyx_string_tab[2255]
#define __pyx_n_u_device_reset_nvlink_error_counte __pyx_string_tab[2256]
#define __pyx_n_u_device_set_accounting_mode __pyx_string_tab[2257]
#define __pyx_n_u_device_set_api_restriction __pyx_string_tab[2258]
#define __pyx_n_u_device_set_auto_boosted_clocks_e __pyx_string_tab[2259]
#define __pyx_n_u_device_set_clock_offsets __pyx_string_tab[2260]
#define __pyx_n_u_device_set_compute_mode __pyx_string_tab[2261]
#define __pyx_n_u_device_set_conf_compute_unprotec __pyx_string_tab[2262]
#define __pyx_n_u_device_set_cpu_affinity __pyx_string_tab[2263]
#define __pyx_n_u_device_set_default_auto_boosted __pyx_string_tab[2264]
#define __pyx_n_u_device_set_default_fan_speed_v2 __pyx_string_tab[2265]
#define __pyx_n_u_device_set_dram_encryption_mode __pyx_string_tab[2266]
#define __pyx_n_u_device_set_driver_model __pyx_string_tab[2267]
#define __pyx_n_u_device_set_ecc_mode __pyx_string_tab[2268]
#define __pyx_n_u_device_set_fan_control_policy __pyx_string_tab[2269]
#define __pyx_n_u_device_set_fan_speed_v2 __pyx_string_tab[2270]
#define __pyx_n_u_device_set_gpu_locked_clocks __pyx_string_tab[2271]
#define __pyx_n_u_device_set_gpu_operation_mode __pyx_string_tab[2272]
#define __pyx_n_u_device_set_hostname_v1 __pyx_string_tab[2273]
#define __pyx_n_u_device_set_memory_locked_clocks __pyx_string_tab[2274]
#define __pyx_n_u_device_set_mig_mode __pyx_string_tab[2275]
#define __pyx_n_u_device_set_nvlink_bw_mode __pyx_string_tab[2276]
#define __pyx_n_u_device_set_nvlink_device_low_pow __pyx_string_tab[2277]
#define __pyx_n_u_device_set_persistence_mode __pyx_string_tab[2278]
#define __pyx_n_u_device_set_power_management_limi __pyx_string_tab[2279]
#define __pyx_n_u_device_set_power_management_limi_2 __pyx_string_tab[2280]
#define __pyx_n_u_device_set_power_mizer_mode_v1 __pyx_string_tab[2281]
#define __pyx_n_u_device_set_temperature_threshold __pyx_string_tab[2282]
#define __pyx_n_u_device_set_vgpu_capabilities __pyx_string_tab[2283]
#define __pyx_n_u_device_set_vgpu_heterogeneous_mo __pyx_string_tab[2284]
#define __pyx_n_u_device_set_vgpu_scheduler_state __pyx_string_tab[2285]
#define __pyx_n_u_device_set_virtualization_mode __pyx_string_tab[2286]
#define __pyx_n_u_device_validate_inforom __pyx_string_tab[2287]
#define __pyx_n_u_device_workload_power_profile_cl __pyx_string_tab[2288]
#define __pyx_n_u_dict __pyx_string_tab[2289]
#define __pyx_n_u_dict_2 __pyx_string_tab[2290]
#define __pyx_n_u_display_ind_ex __pyx_string_tab[2291]
#define __pyx_n_u_display_ordinal __pyx_string_tab[2292]
#define __pyx_n_u_doc __pyx_string_tab[2293]
#define __pyx_n_u_domain __pyx_string_tab[2294]
#define __pyx_n_u_dram_encryption __pyx_string_tab[2295]
#define __pyx_n_u_driver_model __pyx_string_tab[2296]
#define __pyx_n_u_dtype __pyx_string_tab[2297]
#define __pyx_n_u_dtype_is_object __pyx_string_tab[2298]
#define __pyx_n_u_ecc __pyx_string_tab[2299]
#define __pyx_n_u_ecc_sram_error_status_v1_dtype __pyx_string_tab[2300]
#define __pyx_n_u_ecc_sram_unique_uncorrected_erro __pyx_string_tab[2301]
#define __pyx_n_u_ecc_sram_unique_uncorrected_erro_2 __pyx_string_tab[2302]
#define __pyx_n_u_empty __pyx_string_tab[2303]
#define __pyx_n_u_enable_arr_mode __pyx_string_tab[2304]
#define __pyx_n_u_enabled __pyx_string_tab[2305]
#define __pyx_n_u_encUtil __pyx_string_tab[2306]
#define __pyx_n_u_enc_util __pyx_string_tab[2307]
#define __pyx_n_u_encode __pyx_string_tab[2308]
#define __pyx_n_u_encoder_capacity __pyx_string_tab[2309]
#define __pyx_n_u_encoder_count __pyx_string_tab[2310]
#define __pyx_n_u_encoder_query_type __pyx_string_tab[2311]
#define __pyx_n_u_encoder_session_info_dtype __pyx_string_tab[2312]
#define __pyx_n_u_encryptionState __pyx_string_tab[2313]
#define __pyx_n_u_eng_profile __pyx_string_tab[2314]
#define __pyx_n_u_engine_id __pyx_string_tab[2315]
#define __pyx_n_u_entries __pyx_string_tab[2316]
#define __pyx_n_u_entries_count __pyx_string_tab[2317]
#define __pyx_n_u_entry_count __pyx_string_tab[2318]
#define __pyx_n_u_enum __pyx_string_tab[2319]
#define __pyx_n_u_enumerate __pyx_string_tab[2320]
#define __pyx_n_u_environment __pyx_string_tab[2321]
#define __pyx_n_u_error __pyx_string_tab[2322]
#define __pyx_n_u_error_string __pyx_string_tab[2323]
#define __pyx_n_u_error_type __pyx_string_tab[2324]
#define __pyx_n_u_event_data __pyx_string_tab[2325]
#define __pyx_n_u_event_data_dtype __pyx_string_tab[2326]
#define __pyx_n_u_event_set_create __pyx_string_tab[2327]
#define __pyx_n_u_event_set_free __pyx_string_tab[2328]
#define __pyx_n_u_event_set_wait_v2 __pyx_string_tab[2329]
#define __pyx_n_u_event_type __pyx_string_tab[2330]
#define __pyx_n_u_event_types __pyx_string_tab[2331]
#define __pyx_n_u_excluded_device_info_dtype __pyx_string_tab[2332]
#define __pyx_n_u_extlocation __pyx_string_tab[2333]
#define __pyx_n_u_fan __pyx_string_tab[2334]
#define __pyx_n_u_fan_speed_info_v1_dtype __pyx_string_tab[2335]
#define __pyx_n_u_fans __pyx_string_tab[2336]
#define __pyx_n_u_fbc_session_info_dtype __pyx_string_tab[2337]
#define __pyx_n_u_fbc_stats_dtype __pyx_string_tab[2338]
#define __pyx_n_u_feature_code __pyx_string_tab[2339]
#define __pyx_n_u_feature_enabled __pyx_string_tab[2340]
#define __pyx_n_u_feature_state __pyx_string_tab[2341]
#define __pyx_n_u_field_id __pyx_string_tab[2342]
#define __pyx_n_u_field_value_dtype __pyx_string_tab[2343]
#define __pyx_n_u_firmware_info __pyx_string_tab[2344]
#define __pyx_n_u_firmware_version __pyx_string_tab[2345]
#define __pyx_n_u_flag __pyx_string_tab[2346]
#define __pyx_n_u_flags __pyx_string_tab[2347]
#define __pyx_n_u_flags_2 __pyx_string_tab[2348]
#define __pyx_n_u_float64 __pyx_string_tab[2349]
#define __pyx_n_u_format __pyx_string_tab[2350]
#define __pyx_n_u_formats __pyx_string_tab[2351]
#define __pyx_n_u_fortran __pyx_string_tab[2352]
#define __pyx_n_u_free __pyx_string_tab[2353]
#define __pyx_n_u_frequency __pyx_string_tab[2354]
#define __pyx_n_u_from_data __pyx_string_tab[2355]
#define __pyx_n_u_from_ptr __pyx_string_tab[2356]
#define __pyx_n_u_func __pyx_string_tab[2357]
#define __pyx_n_u_fw_version __pyx_string_tab[2358]
#define __pyx_n_u_get_excluded_device_count __pyx_string_tab[2359]
#define __pyx_n_u_get_excluded_device_info_by_inde __pyx_string_tab[2360]
#define __pyx_n_u_get_vgpu_compatibility __pyx_string_tab[2361]
#define __pyx_n_u_get_vgpu_driver_capabilities __pyx_string_tab[2362]
#define __pyx_n_u_get_vgpu_version __pyx_string_tab[2363]
#define __pyx_n_u_getstate __pyx_string_tab[2364]
#define __pyx_n_u_gpm_mig_sample_get __pyx_string_tab[2365]
#define __pyx_n_u_gpm_query_device_support __pyx_string_tab[2366]
#define __pyx_n_u_gpm_query_if_streaming_enabled __pyx_string_tab[2367]
#define __pyx_n_u_gpm_sample __pyx_string_tab[2368]
#define __pyx_n_u_gpm_sample_get __pyx_string_tab[2369]
#define __pyx_n_u_gpm_set_streaming_enabled __pyx_string_tab[2370]
#define __pyx_n_u_gpm_support_dtype __pyx_string_tab[2371]
#define __pyx_n_u_gpu __pyx_string_tab[2372]
#define __pyx_n_u_gpu_dynamic_pstates_info_dtype __pyx_string_tab[2373]
#define __pyx_n_u_gpu_fabric_info_v3_dtype __pyx_string_tab[2374]
#define __pyx_n_u_gpu_instance __pyx_string_tab[2375]
#define __pyx_n_u_gpu_instance_create_compute_inst __pyx_string_tab[2376]
#define __pyx_n_u_gpu_instance_create_compute_inst_2 __pyx_string_tab[2377]
#define __pyx_n_u_gpu_instance_destroy __pyx_string_tab[2378]
#define __pyx_n_u_gpu_instance_get_active_vgpus __pyx_string_tab[2379]
#define __pyx_n_u_gpu_instance_get_compute_instanc __pyx_string_tab[2380]
#define __pyx_n_u_gpu_instance_get_compute_instanc_2 __pyx_string_tab[2381]
#define __pyx_n_u_gpu_instance_get_compute_instanc_3 __pyx_string_tab[2382]
#define __pyx_n_u_gpu_instance_get_compute_instanc_4 __pyx_string_tab[2383]
#define __pyx_n_u_gpu_instance_get_compute_instanc_5 __pyx_string_tab[2384]
#define __pyx_n_u_gpu_instance_get_creatable_vgpus __pyx_string_tab[2385]
#define __pyx_n_u_gpu_instance_get_info __pyx_string_tab[2386]
#define __pyx_n_u_gpu_instance_get_vgpu_heterogene __pyx_string_tab[2387]
#define __pyx_n_u_gpu_instance_get_vgpu_scheduler __pyx_string_tab[2388]
#define __pyx_n_u_gpu_instance_get_vgpu_scheduler_2 __pyx_string_tab[2389]
#define __pyx_n_u_gpu_instance_get_vgpu_type_creat __pyx_string_tab[2390]
#define __pyx_n_u_gpu_instance_id __pyx_string_tab[2391]
#define __pyx_n_u_gpu_instance_info_dtype __pyx_string_tab[2392]
#define __pyx_n_u_gpu_instance_placement_dtype __pyx_string_tab[2393]
#define __pyx_n_u_gpu_instance_profile_info_v2_dty __pyx_string_tab[2394]
#define __pyx_n_u_gpu_instance_profile_info_v3_dty __pyx_string_tab[2395]
#define __pyx_n_u_gpu_instance_set_vgpu_heterogene __pyx_string_tab[2396]
#define __pyx_n_u_gpu_instance_set_vgpu_scheduler __pyx_string_tab[2397]
#define __pyx_n_u_gpu_instance_slice_count __pyx_string_tab[2398]
#define __pyx_n_u_gpu_state __pyx_string_tab[2399]
#define __pyx_n_u_gpu_thermal_settings_dtype __pyx_string_tab[2400]
#define __pyx_n_u_gpu_utilization __pyx_string_tab[2401]
#define __pyx_n_u_gpus_caps __pyx_string_tab[2402]
#define __pyx_n_u_grid_licensable_feature_dtype __pyx_string_tab[2403]
#define __pyx_n_u_grid_licensable_features __pyx_string_tab[2404]
#define __pyx_n_u_grid_licensable_features_dtype __pyx_string_tab[2405]
#define __pyx_n_u_grid_license_expiry_dtype __pyx_string_tab[2406]
#define __pyx_n_u_guest_driver_version __pyx_string_tab[2407]
#define __pyx_n_u_guest_info_state __pyx_string_tab[2408]
#define __pyx_n_u_guest_vgpu_version __pyx_string_tab[2409]
#define __pyx_n_u_h_max_resolution __pyx_string_tab[2410]
#define __pyx_n_u_h_resolution __pyx_string_tab[2411]
#define __pyx_n_u_health_mask __pyx_string_tab[2412]
#define __pyx_n_u_health_summary __pyx_string_tab[2413]
#define __pyx_n_u_high __pyx_string_tab[2414]
#define __pyx_n_u_host_driver_version __pyx_string_tab[2415]
#define __pyx_n_u_host_id __pyx_string_tab[2416]
#define __pyx_n_u_host_supported_vgpu_range __pyx_string_tab[2417]
#define __pyx_n_u_hostname __pyx_string_tab[2418]
#define __pyx_n_u_hour __pyx_string_tab[2419]
#define __pyx_n_u_hwbc_entry_dtype __pyx_string_tab[2420]
#define __pyx_n_u_hwbc_id __pyx_string_tab[2421]
#define __pyx_n_u_i __pyx_string_tab[2422]
#define __pyx_n_u_ib_guid __pyx_string_tab[2423]
#define __pyx_n_u_id __pyx_string_tab[2424]
#define __pyx_n_u_import __pyx_string_tab[2425]
#define __pyx_n_u_inc_threshold __pyx_string_tab[2426]
#define __pyx_n_u_ind_ex __pyx_string_tab[2427]
#define __pyx_n_u_index __pyx_string_tab[2428]
#define __pyx_n_u_info __pyx_string_tab[2429]
#define __pyx_n_u_init __pyx_string_tab[2430]
#define __pyx_n_u_init_v2 __pyx_string_tab[2431]
#define __pyx_n_u_init_with_flags __pyx_string_tab[2432]
#define __pyx_n_u_instance_count __pyx_string_tab[2433]
#define __pyx_n_u_int32 __pyx_string_tab[2434]
#define __pyx_n_u_int64 __pyx_string_tab[2435]
#define __pyx_n_u_int8 __pyx_string_tab[2436]
#define __pyx_n_u_intp __pyx_string_tab[2437]
#define __pyx_n_u_is_accepting_work __pyx_string_tab[2438]
#define __pyx_n_u_is_arr_mode_supported __pyx_string_tab[2439]
#define __pyx_n_u_is_c2c_enabled __pyx_string_tab[2440]
#define __pyx_n_u_is_cec_attestation_report_presen __pyx_string_tab[2441]
#define __pyx_n_u_is_coroutine __pyx_string_tab[2442]
#define __pyx_n_u_is_grid_license_supported __pyx_string_tab[2443]
#define __pyx_n_u_is_licensed __pyx_string_tab[2444]
#define __pyx_n_u_is_nvle_enabled __pyx_string_tab[2445]
#define __pyx_n_u_is_p2p_supported __pyx_string_tab[2446]
#define __pyx_n_u_is_parity __pyx_string_tab[2447]
#define __pyx_n_u_is_restricted __pyx_string_tab[2448]
#define __pyx_n_u_is_running __pyx_string_tab[2449]
#define __pyx_n_u_is_supported_device __pyx_string_tab[2450]
#define __pyx_n_u_items __pyx_string_tab[2451]
#define __pyx_n_u_itemsize __pyx_string_tab[2452]
#define __pyx_n_u_jpeg_count __pyx_string_tab[2453]
#define __pyx_n_u_jpg_util __pyx_string_tab[2454]
#define __pyx_n_u_last_seen_time_stamp __pyx_string_tab[2455]
#define __pyx_n_u_latency_usec __pyx_string_tab[2456]
#define __pyx_n_u_led_state_dtype __pyx_string_tab[2457]
#define __pyx_n_u_level __pyx_string_tab[2458]
#define __pyx_n_u_licensable_features_count __pyx_string_tab[2459]
#define __pyx_n_u_license_expiry __pyx_string_tab[2460]
#define __pyx_n_u_license_info __pyx_string_tab[2461]
#define __pyx_n_u_limit __pyx_string_tab[2462]
#define __pyx_n_u_link __pyx_string_tab[2463]
#define __pyx_n_u_link_state __pyx_string_tab[2464]
#define __pyx_n_u_location __pyx_string_tab[2465]
#define __pyx_n_u_location_type __pyx_string_tab[2466]
#define __pyx_n_u_log_entries __pyx_string_tab[2467]
#define __pyx_n_u_low __pyx_string_tab[2468]
#define __pyx_n_u_main __pyx_string_tab[2469]
#define __pyx_n_u_major __pyx_string_tab[2470]
#define __pyx_n_u_margin_temperature __pyx_string_tab[2471]
#define __pyx_n_u_margin_temperature_v1_dtype __pyx_string_tab[2472]
#define __pyx_n_u_max __pyx_string_tab[2473]
#define __pyx_n_u_max_avg_factor_for_arr __pyx_string_tab[2474]
#define __pyx_n_u_max_clock_offset_m_hz __pyx_string_tab[2475]
#define __pyx_n_u_max_frequency_for_arr __pyx_string_tab[2476]
#define __pyx_n_u_max_gpu_clock_m_hz __pyx_string_tab[2477]
#define __pyx_n_u_max_instance_per_gi __pyx_string_tab[2478]
#define __pyx_n_u_max_mem_clock_m_hz __pyx_string_tab[2479]
#define __pyx_n_u_max_memory_usage __pyx_string_tab[2480]
#define __pyx_n_u_max_timeslice __pyx_string_tab[2481]
#define __pyx_n_u_max_version __pyx_string_tab[2482]
#define __pyx_n_u_memUtil __pyx_string_tab[2483]
#define __pyx_n_u_mem_util __pyx_string_tab[2484]
#define __pyx_n_u_memory __pyx_string_tab[2485]
#define __pyx_n_u_memory_clock_m_hz __pyx_string_tab[2486]
#define __pyx_n_u_memory_dtype __pyx_string_tab[2487]
#define __pyx_n_u_memory_size_mb __pyx_string_tab[2488]
#define __pyx_n_u_memory_utilization __pyx_string_tab[2489]
#define __pyx_n_u_memory_v2_dtype __pyx_string_tab[2490]
#define __pyx_n_u_memview __pyx_string_tab[2491]
#define __pyx_n_u_metaclass __pyx_string_tab[2492]
#define __pyx_n_u_mig_device __pyx_string_tab[2493]
#define __pyx_n_u_min __pyx_string_tab[2494]
#define __pyx_n_u_min_avg_factor_for_arr __pyx_string_tab[2495]
#define __pyx_n_u_min_clock_offset_m_hz __pyx_string_tab[2496]
#define __pyx_n_u_min_frequency_for_arr __pyx_string_tab[2497]
#define __pyx_n_u_min_gpu_clock_m_hz __pyx_string_tab[2498]
#define __pyx_n_u_min_mem_clock_m_hz __pyx_string_tab[2499]
#define __pyx_n_u_min_timeslice __pyx_string_tab[2500]
#define __pyx_n_u_min_version __pyx_string_tab[2501]
#define __pyx_n_u_minor __pyx_string_tab[2502]
#define __pyx_n_u_mode __pyx_string_tab[2503]
#define __pyx_n_u_module __pyx_string_tab[2504]
#define __pyx_n_u_module_id __pyx_string_tab[2505]
#define __pyx_n_u_month __pyx_string_tab[2506]
#define __pyx_n_u_mro_entries __pyx_string_tab[2507]
#define __pyx_n_u_multi_gpu_mode __pyx_string_tab[2508]
#define __pyx_n_u_multiprocessor_count __pyx_string_tab[2509]
#define __pyx_n_u_name __pyx_string_tab[2510]
#define __pyx_n_u_name_2 __pyx_string_tab[2511]
#define __pyx_n_u_names __pyx_string_tab[2512]
#define __pyx_n_u_ndarray __pyx_string_tab[2513]
#define __pyx_n_u_ndim __pyx_string_tab[2514]
#define __pyx_n_u_new __pyx_string_tab[2515]
#define __pyx_n_u_new_state __pyx_string_tab[2516]
#define __pyx_n_u_node_set_size __pyx_string_tab[2517]
#define __pyx_n_u_nonce __pyx_string_tab[2518]
#define __pyx_n_u_none __pyx_string_tab[2519]
#define __pyx_n_u_num_proc_array_entries __pyx_string_tab[2520]
#define __pyx_n_u_num_valid_entries __pyx_string_tab[2521]
#define __pyx_n_u_numpy __pyx_string_tab[2522]
#define __pyx_n_u_numpy_2 __pyx_string_tab[2523]
#define __pyx_n_u_nv_link_info_v2_dtype __pyx_string_tab[2524]
#define __pyx_n_u_nvlink_bw_mode __pyx_string_tab[2525]
#define __pyx_n_u_nvlink_firmware_info_dtype __pyx_string_tab[2526]
#define __pyx_n_u_nvlink_firmware_version_dtype __pyx_string_tab[2527]
#define __pyx_n_u_nvlink_get_bw_mode_v1_dtype __pyx_string_tab[2528]
#define __pyx_n_u_nvlink_set_bw_mode_v1_dtype __pyx_string_tab[2529]
#define __pyx_n_u_nvlink_supported_bw_modes_v1_dty __pyx_string_tab[2530]
#define __pyx_n_u_nvml_return __pyx_string_tab[2531]
#define __pyx_n_u_obj __pyx_string_tab[2532]
#define __pyx_n_u_object_2 __pyx_string_tab[2533]
#define __pyx_n_u_ofa_count __pyx_string_tab[2534]
#define __pyx_n_u_ofa_util __pyx_string_tab[2535]
#define __pyx_n_u_offsets __pyx_string_tab[2536]
#define __pyx_n_u_opaque_data __pyx_string_tab[2537]
#define __pyx_n_u_opaque_data_size __pyx_string_tab[2538]
#define __pyx_n_u_owner __pyx_string_tab[2539]
#define __pyx_n_u_p2p_ind_ex __pyx_string_tab[2540]
#define __pyx_n_u_p_heterogeneous_mode __pyx_string_tab[2541]
#define __pyx_n_u_p_key_rotation_thr_info __pyx_string_tab[2542]
#define __pyx_n_u_p_scheduler __pyx_string_tab[2543]
#define __pyx_n_u_p_scheduler_state __pyx_string_tab[2544]
#define __pyx_n_u_pack __pyx_string_tab[2545]
#define __pyx_n_u_partial __pyx_string_tab[2546]
#define __pyx_n_u_pci_bus_id __pyx_string_tab[2547]
#define __pyx_n_u_pci_device_id __pyx_string_tab[2548]
#define __pyx_n_u_pci_info __pyx_string_tab[2549]
#define __pyx_n_u_pci_info_dtype __pyx_string_tab[2550]
#define __pyx_n_u_pci_info_ext_v1_dtype __pyx_string_tab[2551]
#define __pyx_n_u_pci_sub_system_id __pyx_string_tab[2552]
#define __pyx_n_u_pdi_v1_dtype __pyx_string_tab[2553]
#define __pyx_n_u_peer_type __pyx_string_tab[2554]
#define __pyx_n_u_percentage __pyx_string_tab[2555]
#define __pyx_n_u_pgpu_metadata __pyx_string_tab[2556]
#define __pyx_n_u_pgpu_virtualization_caps __pyx_string_tab[2557]
#define __pyx_n_u_pid __pyx_string_tab[2558]
#define __pyx_n_u_placement __pyx_string_tab[2559]
#define __pyx_n_u_placement_id __pyx_string_tab[2560]
#define __pyx_n_u_placement_ids __pyx_string_tab[2561]
#define __pyx_n_u_placement_size __pyx_string_tab[2562]
#define __pyx_n_u_platform_info_v2_dtype __pyx_string_tab[2563]
#define __pyx_n_u_policy __pyx_string_tab[2564]
#define __pyx_n_u_pop __pyx_string_tab[2565]
#define __pyx_n_u_power __pyx_string_tab[2566]
#define __pyx_n_u_power_mizer_mode __pyx_string_tab[2567]
#define __pyx_n_u_power_value __pyx_string_tab[2568]
#define __pyx_n_u_prepare __pyx_string_tab[2569]
#define __pyx_n_u_proc_array __pyx_string_tab[2570]
#define __pyx_n_u_proc_util_array __pyx_string_tab[2571]
#define __pyx_n_u_processName __pyx_string_tab[2572]
#define __pyx_n_u_process_detail_list_v1_dtype __pyx_string_tab[2573]
#define __pyx_n_u_process_detail_v1_dtype __pyx_string_tab[2574]
#define __pyx_n_u_process_info_dtype __pyx_string_tab[2575]
#define __pyx_n_u_process_name __pyx_string_tab[2576]
#define __pyx_n_u_process_samples_count __pyx_string_tab[2577]
#define __pyx_n_u_process_utilization_info_v1_dtyp __pyx_string_tab[2578]
#define __pyx_n_u_process_utilization_sample_dtype __pyx_string_tab[2579]
#define __pyx_n_u_processes_utilization_info_v1_dt __pyx_string_tab[2580]
#define __pyx_n_u_product_name __pyx_string_tab[2581]
#define __pyx_n_u_profile __pyx_string_tab[2582]
#define __pyx_n_u_profile_id __pyx_string_tab[2583]
#define __pyx_n_u_protected_mem_size_kib __pyx_string_tab[2584]
#define __pyx_n_u_pstate __pyx_string_tab[2585]
#define __pyx_n_u_psu_info_dtype __pyx_string_tab[2586]
#define __pyx_n_u_ptr __pyx_string_tab[2587]
#define __pyx_n_u_py_anon_pod0 __pyx_string_tab[2588]
#define __pyx_n_u_py_anon_pod0___reduce_cython __pyx_string_tab[2589]
#define __pyx_n_u_py_anon_pod0___setstate_cython __pyx_string_tab[2590]
#define __pyx_n_u_py_anon_pod0_dtype __pyx_string_tab[2591]
#define __pyx_n_u_py_anon_pod0_from_data __pyx_string_tab[2592]
#define __pyx_n_u_py_anon_pod0_from_ptr __pyx_string_tab[2593]
#define __pyx_n_u_py_anon_pod1 __pyx_string_tab[2594]
#define __pyx_n_u_py_anon_pod1___reduce_cython __pyx_string_tab[2595]
#define __pyx_n_u_py_anon_pod1___setstate_cython __pyx_string_tab[2596]
#define __pyx_n_u_py_anon_pod1_dtype __pyx_string_tab[2597]
#define __pyx_n_u_py_anon_pod1_from_data __pyx_string_tab[2598]
#define __pyx_n_u_py_anon_pod1_from_ptr __pyx_string_tab[2599]
#define __pyx_n_u_py_anon_pod2 __pyx_string_tab[2600]
#define __pyx_n_u_py_anon_pod2___reduce_cython __pyx_string_tab[2601]
#define __pyx_n_u_py_anon_pod2___setstate_cython __pyx_string_tab[2602]
#define __pyx_n_u_py_anon_pod2_dtype __pyx_string_tab[2603]
#define __pyx_n_u_py_anon_pod2_from_data __pyx_string_tab[2604]
#define __pyx_n_u_py_anon_pod2_from_ptr __pyx_string_tab[2605]
#define __pyx_n_u_py_anon_pod3 __pyx_string_tab[2606]
#define __pyx_n_u_py_anon_pod3___reduce_cython __pyx_string_tab[2607]
#define __pyx_n_u_py_anon_pod3___setstate_cython __pyx_string_tab[2608]
#define __pyx_n_u_py_anon_pod3_dtype __pyx_string_tab[2609]
#define __pyx_n_u_py_anon_pod3_from_data __pyx_string_tab[2610]
#define __pyx_n_u_py_anon_pod3_from_ptr __pyx_string_tab[2611]
#define __pyx_n_u_py_anon_pod4 __pyx_string_tab[2612]
#define __pyx_n_u_py_anon_pod4___reduce_cython __pyx_string_tab[2613]
#define __pyx_n_u_py_anon_pod4___setstate_cython __pyx_string_tab[2614]
#define __pyx_n_u_py_anon_pod4_dtype __pyx_string_tab[2615]
#define __pyx_n_u_py_anon_pod4_from_data __pyx_string_tab[2616]
#define __pyx_n_u_py_anon_pod4_from_ptr __pyx_string_tab[2617]
#define __pyx_n_u_py_anon_pod5 __pyx_string_tab[2618]
#define __pyx_n_u_py_anon_pod5___reduce_cython __pyx_string_tab[2619]
#define __pyx_n_u_py_anon_pod5___setstate_cython __pyx_string_tab[2620]
#define __pyx_n_u_py_anon_pod5_dtype __pyx_string_tab[2621]
#define __pyx_n_u_py_anon_pod5_from_data __pyx_string_tab[2622]
#define __pyx_n_u_py_anon_pod5_from_ptr __pyx_string_tab[2623]
#define __pyx_n_u_pyx_capi __pyx_string_tab[2624]
#define __pyx_n_u_pyx_checksum __pyx_string_tab[2625]
#define __pyx_n_u_pyx_result __pyx_string_tab[2626]
#define __pyx_n_u_pyx_state __pyx_string_tab[2627]
#define __pyx_n_u_pyx_type __pyx_string_tab[2628]
#define __pyx_n_u_pyx_unpickle_BridgeChipInfo __pyx_string_tab[2629]
#define __pyx_n_u_pyx_unpickle_ClkMonFaultInfo __pyx_string_tab[2630]
#define __pyx_n_u_pyx_unpickle_ComputeInstancePl __pyx_string_tab[2631]
#define __pyx_n_u_pyx_unpickle_EccSramUniqueUnco __pyx_string_tab[2632]
#define __pyx_n_u_pyx_unpickle_EncoderSessionInf __pyx_string_tab[2633]
#define __pyx_n_u_pyx_unpickle_Enum __pyx_string_tab[2634]
#define __pyx_n_u_pyx_unpickle_FBCSessionInfo __pyx_string_tab[2635]
#define __pyx_n_u_pyx_unpickle_FieldValue __pyx_string_tab[2636]
#define __pyx_n_u_pyx_unpickle_GpuInstancePlacem __pyx_string_tab[2637]
#define __pyx_n_u_pyx_unpickle_GridLicensableFea __pyx_string_tab[2638]
#define __pyx_n_u_pyx_unpickle_HwbcEntry __pyx_string_tab[2639]
#define __pyx_n_u_pyx_unpickle_ProcessDetail_v1 __pyx_string_tab[2640]
#define __pyx_n_u_pyx_unpickle_ProcessInfo __pyx_string_tab[2641]
#define __pyx_n_u_pyx_unpickle_ProcessUtilizatio __pyx_string_tab[2642]
#define __pyx_n_u_pyx_unpickle_ProcessUtilizatio_2 __pyx_string_tab[2643]
#define __pyx_n_u_pyx_unpickle_Sample __pyx_string_tab[2644]
#define __pyx_n_u_pyx_unpickle_UnitFanInfo __pyx_string_tab[2645]
#define __pyx_n_u_pyx_unpickle_VgpuInstanceUtili __pyx_string_tab[2646]
#define __pyx_n_u_pyx_unpickle_VgpuProcessUtiliz __pyx_string_tab[2647]
#define __pyx_n_u_pyx_unpickle_VgpuSchedulerLogE __pyx_string_tab[2648]
#define __pyx_n_u_pyx_vtable __pyx_string_tab[2649]
#define __pyx_n_u_qualname __pyx_string_tab[2650]
#define __pyx_n_u_readonly __pyx_string_tab[2651]
#define __pyx_n_u_recarray __pyx_string_tab[2652]
#define __pyx_n_u_reduce __pyx_string_tab[2653]
#define __pyx_n_u_reduce_cython __pyx_string_tab[2654]
#define __pyx_n_u_reduce_ex __pyx_string_tab[2655]
#define __pyx_n_u_register __pyx_string_tab[2656]
#define __pyx_n_u_repair_status_v1_dtype __pyx_string_tab[2657]
#define __pyx_n_u_request __pyx_string_tab[2658]
#define __pyx_n_u_requested_profiles __pyx_string_tab[2659]
#define __pyx_n_u_reserved __pyx_string_tab[2660]
#define __pyx_n_u_result __pyx_string_tab[2661]
#define __pyx_n_u_revision __pyx_string_tab[2662]
#define __pyx_n_u_row_remapper_histogram_values_dt __pyx_string_tab[2663]
#define __pyx_n_u_s __pyx_string_tab[2664]
#define __pyx_n_u_sample_dtype __pyx_string_tab[2665]
#define __pyx_n_u_sample_val_type __pyx_string_tab[2666]
#define __pyx_n_u_sample_value __pyx_string_tab[2667]
#define __pyx_n_u_scheduler_params __pyx_string_tab[2668]
#define __pyx_n_u_scheduler_policy __pyx_string_tab[2669]
#define __pyx_n_u_scope __pyx_string_tab[2670]
#define __pyx_n_u_scope_id __pyx_string_tab[2671]
#define __pyx_n_u_sec __pyx_string_tab[2672]
#define __pyx_n_u_self __pyx_string_tab[2673]
#define __pyx_n_u_sensor __pyx_string_tab[2674]
#define __pyx_n_u_sensorType __pyx_string_tab[2675]
#define __pyx_n_u_sensor_ind_ex __pyx_string_tab[2676]
#define __pyx_n_u_serial __pyx_string_tab[2677]
#define __pyx_n_u_session_flags __pyx_string_tab[2678]
#define __pyx_n_u_session_id __pyx_string_tab[2679]
#define __pyx_n_u_session_type __pyx_string_tab[2680]
#define __pyx_n_u_sessions_count __pyx_string_tab[2681]
#define __pyx_n_u_set __pyx_string_tab[2682]
#define __pyx_n_u_set_bw_mode __pyx_string_tab[2683]
#define __pyx_n_u_set_name __pyx_string_tab[2684]
#define __pyx_n_u_set_vgpu_version __pyx_string_tab[2685]
#define __pyx_n_u_setdefault __pyx_string_tab[2686]
#define __pyx_n_u_setstate __pyx_string_tab[2687]
#define __pyx_n_u_setstate_cython __pyx_string_tab[2688]
#define __pyx_n_u_shape __pyx_string_tab[2689]
#define __pyx_n_u_shared_copy_engine_count __pyx_string_tab[2690]
#define __pyx_n_u_shared_decoder_count __pyx_string_tab[2691]
#define __pyx_n_u_shared_encoder_count __pyx_string_tab[2692]
#define __pyx_n_u_shared_jpeg_count __pyx_string_tab[2693]
#define __pyx_n_u_shared_ofa_count __pyx_string_tab[2694]
#define __pyx_n_u_shutdown __pyx_string_tab[2695]
#define __pyx_n_u_siVal __pyx_string_tab[2696]
#define __pyx_n_u_si_val __pyx_string_tab[2697]
#define __pyx_n_u_signal_type __pyx_string_tab[2698]
#define __pyx_n_u_size __pyx_string_tab[2699]
#define __pyx_n_u_size_2 __pyx_string_tab[2700]
#define __pyx_n_u_size_ki_b __pyx_string_tab[2701]
#define __pyx_n_u_slice_count __pyx_string_tab[2702]
#define __pyx_n_u_sllVal __pyx_string_tab[2703]
#define __pyx_n_u_sll_val __pyx_string_tab[2704]
#define __pyx_n_u_slot_number __pyx_string_tab[2705]
#define __pyx_n_u_smUtil __pyx_string_tab[2706]
#define __pyx_n_u_sm_util __pyx_string_tab[2707]
#define __pyx_n_u_speed __pyx_string_tab[2708]
#define __pyx_n_u_start __pyx_string_tab[2709]
#define __pyx_n_u_start_time __pyx_string_tab[2710]
#define __pyx_n_u_state __pyx_string_tab[2711]
#define __pyx_n_u_staticmethod __pyx_string_tab[2712]
#define __pyx_n_u_status __pyx_string_tab[2713]
#define __pyx_n_u_step __pyx_string_tab[2714]
#define __pyx_n_u_stop __pyx_string_tab[2715]
#define __pyx_n_u_str __pyx_string_tab[2716]
#define __pyx_n_u_struct __pyx_string_tab[2717]
#define __pyx_n_u_sub_class __pyx_string_tab[2718]
#define __pyx_n_u_sub_minor __pyx_string_tab[2719]
#define __pyx_n_u_sublocation __pyx_string_tab[2720]
#define __pyx_n_u_super __pyx_string_tab[2721]
#define __pyx_n_u_supported_power_mizer_modes __pyx_string_tab[2722]
#define __pyx_n_u_supported_schedulers __pyx_string_tab[2723]
#define __pyx_n_u_sw_runlist_id __pyx_string_tab[2724]
#define __pyx_n_u_system_conf_compute_settings_v1 __pyx_string_tab[2725]
#define __pyx_n_u_system_event_set_create __pyx_string_tab[2726]
#define __pyx_n_u_system_event_set_free __pyx_string_tab[2727]
#define __pyx_n_u_system_event_set_wait __pyx_string_tab[2728]
#define __pyx_n_u_system_get_conf_compute_capabili __pyx_string_tab[2729]
#define __pyx_n_u_system_get_conf_compute_gpus_rea __pyx_string_tab[2730]
#define __pyx_n_u_system_get_conf_compute_key_rota __pyx_string_tab[2731]
#define __pyx_n_u_system_get_conf_compute_settings __pyx_string_tab[2732]
#define __pyx_n_u_system_get_conf_compute_state __pyx_string_tab[2733]
#define __pyx_n_u_system_get_cuda_driver_version __pyx_string_tab[2734]
#define __pyx_n_u_system_get_cuda_driver_version_v __pyx_string_tab[2735]
#define __pyx_n_u_system_get_driver_branch __pyx_string_tab[2736]
#define __pyx_n_u_system_get_driver_version __pyx_string_tab[2737]
#define __pyx_n_u_system_get_hic_version __pyx_string_tab[2738]
#define __pyx_n_u_system_get_nvlink_bw_mode __pyx_string_tab[2739]
#define __pyx_n_u_system_get_nvml_version __pyx_string_tab[2740]
#define __pyx_n_u_system_get_process_name __pyx_string_tab[2741]
#define __pyx_n_u_system_get_topology_gpu_set __pyx_string_tab[2742]
#define __pyx_n_u_system_register_events __pyx_string_tab[2743]
#define __pyx_n_u_system_set_conf_compute_gpus_rea __pyx_string_tab[2744]
#define __pyx_n_u_system_set_conf_compute_key_rota __pyx_string_tab[2745]
#define __pyx_n_u_system_set_nvlink_bw_mode __pyx_string_tab[2746]
#define __pyx_n_u_target __pyx_string_tab[2747]
#define __pyx_n_u_target_time_slice __pyx_string_tab[2748]
#define __pyx_n_u_temp __pyx_string_tab[2749]
#define __pyx_n_u_test __pyx_string_tab[2750]
#define __pyx_n_u_threshold_type __pyx_string_tab[2751]
#define __pyx_n_u_time __pyx_string_tab[2752]
#define __pyx_n_u_timeStamp __pyx_string_tab[2753]
#define __pyx_n_u_time_run __pyx_string_tab[2754]
#define __pyx_n_u_time_run_total __pyx_string_tab[2755]
#define __pyx_n_u_time_stamp __pyx_string_tab[2756]
#define __pyx_n_u_timeoutms __pyx_string_tab[2757]
#define __pyx_n_u_timeslice __pyx_string_tab[2758]
#define __pyx_n_u_timestamp __pyx_string_tab[2759]
#define __pyx_n_u_total __pyx_string_tab[2760]
#define __pyx_n_u_total_bw_modes __pyx_string_tab[2761]
#define __pyx_n_u_tray_ind_ex __pyx_string_tab[2762]
#define __pyx_n_u_type __pyx_string_tab[2763]
#define __pyx_n_u_ucode_type __pyx_string_tab[2764]
#define __pyx_n_u_uiVal __pyx_string_tab[2765]
#define __pyx_n_u_ui_val __pyx_string_tab[2766]
#define __pyx_n_u_uint16 __pyx_string_tab[2767]
#define __pyx_n_u_uint32 __pyx_string_tab[2768]
#define __pyx_n_u_uint64 __pyx_string_tab[2769]
#define __pyx_n_u_uint8 __pyx_string_tab[2770]
#define __pyx_n_u_ulVal __pyx_string_tab[2771]
#define __pyx_n_u_ul_val __pyx_string_tab[2772]
#define __pyx_n_u_ullVal __pyx_string_tab[2773]
#define __pyx_n_u_ull_val __pyx_string_tab[2774]
#define __pyx_n_u_unit __pyx_string_tab[2775]
#define __pyx_n_u_unit_fan_info_dtype __pyx_string_tab[2776]
#define __pyx_n_u_unit_fan_speeds_dtype __pyx_string_tab[2777]
#define __pyx_n_u_unit_get_count __pyx_string_tab[2778]
#define __pyx_n_u_unit_get_devices __pyx_string_tab[2779]
#define __pyx_n_u_unit_get_fan_speed_info __pyx_string_tab[2780]
#define __pyx_n_u_unit_get_handle_by_index __pyx_string_tab[2781]
#define __pyx_n_u_unit_get_led_state __pyx_string_tab[2782]
#define __pyx_n_u_unit_get_psu_info __pyx_string_tab[2783]
#define __pyx_n_u_unit_get_temperature __pyx_string_tab[2784]
#define __pyx_n_u_unit_get_unit_info __pyx_string_tab[2785]
#define __pyx_n_u_unit_info_dtype __pyx_string_tab[2786]
#define __pyx_n_u_unit_set_led_state __pyx_string_tab[2787]
#define __pyx_n_u_unpack __pyx_string_tab[2788]
#define __pyx_n_u_unprotected_mem_size_kib __pyx_string_tab[2789]
#define __pyx_n_u_update __pyx_string_tab[2790]
#define __pyx_n_u_usVal __pyx_string_tab[2791]
#define __pyx_n_u_us_val __pyx_string_tab[2792]
#define __pyx_n_u_use_setstate __pyx_string_tab[2793]
#define __pyx_n_u_used __pyx_string_tab[2794]
#define __pyx_n_u_used_gpu_cc_protected_memory __pyx_string_tab[2795]
#define __pyx_n_u_used_gpu_memory __pyx_string_tab[2796]
#define __pyx_n_u_utilization __pyx_string_tab[2797]
#define __pyx_n_u_utilization_dtype __pyx_string_tab[2798]
#define __pyx_n_u_uuid __pyx_string_tab[2799]
#define __pyx_n_u_v_max_resolution __pyx_string_tab[2800]
#define __pyx_n_u_v_resolution __pyx_string_tab[2801]
#define __pyx_n_u_value __pyx_string_tab[2802]
#define __pyx_n_u_value_dtype __pyx_string_tab[2803]
#define __pyx_n_u_value_type __pyx_string_tab[2804]
#define __pyx_n_u_values __pyx_string_tab[2805]
#define __pyx_n_u_version __pyx_string_tab[2806]
#define __pyx_n_u_vgpuInstance __pyx_string_tab[2807]
#define __pyx_n_u_vgpu_count __pyx_string_tab[2808]
#define __pyx_n_u_vgpu_creatable_placement_info_v1 __pyx_string_tab[2809]
#define __pyx_n_u_vgpu_heterogeneous_mode_v1_dtype __pyx_string_tab[2810]
#define __pyx_n_u_vgpu_instance __pyx_string_tab[2811]
#define __pyx_n_u_vgpu_instance_clear_accounting_p __pyx_string_tab[2812]
#define __pyx_n_u_vgpu_instance_count __pyx_string_tab[2813]
#define __pyx_n_u_vgpu_instance_get_accounting_mod __pyx_string_tab[2814]
#define __pyx_n_u_vgpu_instance_get_accounting_pid __pyx_string_tab[2815]
#define __pyx_n_u_vgpu_instance_get_accounting_sta __pyx_string_tab[2816]
#define __pyx_n_u_vgpu_instance_get_ecc_mode __pyx_string_tab[2817]
#define __pyx_n_u_vgpu_instance_get_encoder_capaci __pyx_string_tab[2818]
#define __pyx_n_u_vgpu_instance_get_encoder_sessio __pyx_string_tab[2819]
#define __pyx_n_u_vgpu_instance_get_encoder_stats __pyx_string_tab[2820]
#define __pyx_n_u_vgpu_instance_get_fb_usage __pyx_string_tab[2821]
#define __pyx_n_u_vgpu_instance_get_fbc_sessions __pyx_string_tab[2822]
#define __pyx_n_u_vgpu_instance_get_fbc_stats __pyx_string_tab[2823]
#define __pyx_n_u_vgpu_instance_get_frame_rate_lim __pyx_string_tab[2824]
#define __pyx_n_u_vgpu_instance_get_gpu_instance_i __pyx_string_tab[2825]
#define __pyx_n_u_vgpu_instance_get_gpu_pci_id __pyx_string_tab[2826]
#define __pyx_n_u_vgpu_instance_get_license_info_v __pyx_string_tab[2827]
#define __pyx_n_u_vgpu_instance_get_license_status __pyx_string_tab[2828]
#define __pyx_n_u_vgpu_instance_get_mdev_uuid __pyx_string_tab[2829]
#define __pyx_n_u_vgpu_instance_get_metadata __pyx_string_tab[2830]
#define __pyx_n_u_vgpu_instance_get_placement_id __pyx_string_tab[2831]
#define __pyx_n_u_vgpu_instance_get_runtime_state __pyx_string_tab[2832]
#define __pyx_n_u_vgpu_instance_get_type __pyx_string_tab[2833]
#define __pyx_n_u_vgpu_instance_get_uuid __pyx_string_tab[2834]
#define __pyx_n_u_vgpu_instance_get_vm_driver_vers __pyx_string_tab[2835]
#define __pyx_n_u_vgpu_instance_get_vm_id __pyx_string_tab[2836]
#define __pyx_n_u_vgpu_instance_set_encoder_capaci __pyx_string_tab[2837]
#define __pyx_n_u_vgpu_instance_utilization_info_v __pyx_string_tab[2838]
#define __pyx_n_u_vgpu_instances __pyx_string_tab[2839]
#define __pyx_n_u_vgpu_instances_utilization_info __pyx_string_tab[2840]
#define __pyx_n_u_vgpu_license_expiry_dtype __pyx_string_tab[2841]
#define __pyx_n_u_vgpu_license_info_dtype __pyx_string_tab[2842]
#define __pyx_n_u_vgpu_metadata __pyx_string_tab[2843]
#define __pyx_n_u_vgpu_metadata_dtype __pyx_string_tab[2844]
#define __pyx_n_u_vgpu_pgpu_compatibility_dtype __pyx_string_tab[2845]
#define __pyx_n_u_vgpu_pgpu_metadata_dtype __pyx_string_tab[2846]
#define __pyx_n_u_vgpu_placement_id_v1_dtype __pyx_string_tab[2847]
#define __pyx_n_u_vgpu_placement_list_v2_dtype __pyx_string_tab[2848]
#define __pyx_n_u_vgpu_proc_util_array __pyx_string_tab[2849]
#define __pyx_n_u_vgpu_process_count __pyx_string_tab[2850]
#define __pyx_n_u_vgpu_process_utilization_info_v1 __pyx_string_tab[2851]
#define __pyx_n_u_vgpu_processes_utilization_info __pyx_string_tab[2852]
#define __pyx_n_u_vgpu_runtime_state_v1_dtype __pyx_string_tab[2853]
#define __pyx_n_u_vgpu_sched_data __pyx_string_tab[2854]
#define __pyx_n_u_vgpu_sched_data_with_arr __pyx_string_tab[2855]
#define __pyx_n_u_vgpu_scheduler_capabilities_dtyp __pyx_string_tab[2856]
#define __pyx_n_u_vgpu_scheduler_get_state_dtype __pyx_string_tab[2857]
#define __pyx_n_u_vgpu_scheduler_log_dtype __pyx_string_tab[2858]
#define __pyx_n_u_vgpu_scheduler_log_entry_dtype __pyx_string_tab[2859]
#define __pyx_n_u_vgpu_scheduler_log_info_v1_dtype __pyx_string_tab[2860]
#define __pyx_n_u_vgpu_scheduler_params_dtype __pyx_string_tab[2861]
#define __pyx_n_u_vgpu_scheduler_set_params_dtype __pyx_string_tab[2862]
#define __pyx_n_u_vgpu_scheduler_state_info_v1_dty __pyx_string_tab[2863]
#define __pyx_n_u_vgpu_scheduler_state_v1_dtype __pyx_string_tab[2864]
#define __pyx_n_u_vgpu_type_bar1info_v1_dtype __pyx_string_tab[2865]
#define __pyx_n_u_vgpu_type_get_bar1_info __pyx_string_tab[2866]
#define __pyx_n_u_vgpu_type_get_capabilities __pyx_string_tab[2867]
#define __pyx_n_u_vgpu_type_get_class __pyx_string_tab[2868]
#define __pyx_n_u_vgpu_type_get_device_id __pyx_string_tab[2869]
#define __pyx_n_u_vgpu_type_get_fb_reservation __pyx_string_tab[2870]
#define __pyx_n_u_vgpu_type_get_frame_rate_limit __pyx_string_tab[2871]
#define __pyx_n_u_vgpu_type_get_framebuffer_size __pyx_string_tab[2872]
#define __pyx_n_u_vgpu_type_get_gpu_instance_profi __pyx_string_tab[2873]
#define __pyx_n_u_vgpu_type_get_gsp_heap_size __pyx_string_tab[2874]
#define __pyx_n_u_vgpu_type_get_license __pyx_string_tab[2875]
#define __pyx_n_u_vgpu_type_get_max_instances __pyx_string_tab[2876]
#define __pyx_n_u_vgpu_type_get_max_instances_per __pyx_string_tab[2877]
#define __pyx_n_u_vgpu_type_get_max_instances_per_2 __pyx_string_tab[2878]
#define __pyx_n_u_vgpu_type_get_name __pyx_string_tab[2879]
#define __pyx_n_u_vgpu_type_get_num_display_heads __pyx_string_tab[2880]
#define __pyx_n_u_vgpu_type_get_resolution __pyx_string_tab[2881]
#define __pyx_n_u_vgpu_type_id __pyx_string_tab[2882]
#define __pyx_n_u_vgpu_type_id_info_v1_dtype __pyx_string_tab[2883]
#define __pyx_n_u_vgpu_type_ids __pyx_string_tab[2884]
#define __pyx_n_u_vgpu_type_license_string __pyx_string_tab[2885]
#define __pyx_n_u_vgpu_type_max_instance_v1_dtype __pyx_string_tab[2886]
#define __pyx_n_u_vgpu_util_array __pyx_string_tab[2887]
#define __pyx_n_u_vgpu_version __pyx_string_tab[2888]
#define __pyx_n_u_vgpu_version_dtype __pyx_string_tab[2889]
#define __pyx_n_u_vgpu_virtualization_caps __pyx_string_tab[2890]
#define __pyx_n_u_vgpu_vm_compatibility __pyx_string_tab[2891]
#define __pyx_n_u_view __pyx_string_tab[2892]
#define __pyx_n_u_virtual_mode __pyx_string_tab[2893]
#define __pyx_n_u_void __pyx_string_tab[2894]
#define __pyx_n_u_volatile_cor __pyx_string_tab[2895]
#define __pyx_n_u_volatile_unc_parity __pyx_string_tab[2896]
#define __pyx_n_u_volatile_unc_sec_ded __pyx_string_tab[2897]
#define __pyx_n_u_voltage __pyx_string_tab[2898]
#define __pyx_n_u_writeable __pyx_string_tab[2899]
#define __pyx_n_u_x __pyx_string_tab[2900]
#define __pyx_n_u_year __pyx_string_tab[2901]
#define __pyx_kp_b_PyObject_int___pyx_skip_dispatch __pyx_string_tab[2902]
#define __pyx_kp_b_char_const_nvmlReturn_t_nvmlRetu __pyx_string_tab[2903]
#define __pyx_kp_b_int___pyx_t_4cuda_8bindings_9_in __pyx_string_tab[2904]
#define __pyx_kp_b_iso88591_0_1 __pyx_string_tab[2905]
#define __pyx_kp_b_iso88591_0_1A_E_q __pyx_string_tab[2906]
#define __pyx_kp_b_iso88591_0_5 __pyx_string_tab[2907]
#define __pyx_kp_b_iso88591_0_5WIQ_Cq_Qhhm1_Yaz_a __pyx_string_tab[2908]
#define __pyx_kp_b_iso88591_0_5WIQ_Cq_q_Q __pyx_string_tab[2909]
#define __pyx_kp_b_iso88591_0_AQQ_iij __pyx_string_tab[2910]
#define __pyx_kp_b_iso88591_0_C1_I_W_a_9_a_9A_1_81_QhhN_nno __pyx_string_tab[2911]
#define __pyx_kp_b_iso88591_0_O7RS_Qa_uAS_1_6_i_A_URVVXXY_f __pyx_string_tab[2912]
#define __pyx_kp_b_iso88591_0_PWWX_Qa_uAS_1_6_i_EWEQVVZZ_vQ __pyx_string_tab[2913]
#define __pyx_kp_b_iso88591_0_Qa_Kq __pyx_string_tab[2914]
#define __pyx_kp_b_iso88591_0_Qa_Q __pyx_string_tab[2915]
#define __pyx_kp_b_iso88591_0_q __pyx_string_tab[2916]
#define __pyx_kp_b_iso88591_0_qPQ_M __pyx_string_tab[2917]
#define __pyx_kp_b_iso88591_0_q_11I_Tffoop_U_0_1 __pyx_string_tab[2918]
#define __pyx_kp_b_iso88591_0_q_2 __pyx_string_tab[2919]
#define __pyx_kp_b_iso88591_0_q_33M_Xjjsst_2_881_1 __pyx_string_tab[2920]
#define __pyx_kp_b_iso88591_0_s_7_A_j_8_9J_8SVVWWX_Qhhaq __pyx_string_tab[2921]
#define __pyx_kp_b_iso88591_0_woQ_q_q __pyx_string_tab[2922]
#define __pyx_kp_b_iso88591_1 __pyx_string_tab[2923]
#define __pyx_kp_b_iso88591_10J_44XXcclluuv_6axxq_1 __pyx_string_tab[2924]
#define __pyx_kp_b_iso88591_1A __pyx_string_tab[2925]
#define __pyx_kp_b_iso88591_1A_Oeer_s_D_D_S_S_a_a_b_Qa_Qa_5 __pyx_string_tab[2926]
#define __pyx_kp_b_iso88591_1F __pyx_string_tab[2927]
#define __pyx_kp_b_iso88591_1HIXYN_llmmn_5 __pyx_string_tab[2928]
#define __pyx_kp_b_iso88591_1M __pyx_string_tab[2929]
#define __pyx_kp_b_iso88591_1_0_1E_TZZiij_Qa_vQc_A_q_uAV1A __pyx_string_tab[2930]
#define __pyx_kp_b_iso88591_1_1 __pyx_string_tab[2931]
#define __pyx_kp_b_iso88591_1_1A_1 __pyx_string_tab[2932]
#define __pyx_kp_b_iso88591_1_1_5 __pyx_string_tab[2933]
#define __pyx_kp_b_iso88591_1_A __pyx_string_tab[2934]
#define __pyx_kp_b_iso88591_1_AXXQa_5 __pyx_string_tab[2935]
#define __pyx_kp_b_iso88591_1_BVVW __pyx_string_tab[2936]
#define __pyx_kp_b_iso88591_1_BXXYYjjk_l_I_I_J_J_K_E_q __pyx_string_tab[2937]
#define __pyx_kp_b_iso88591_1_JFWWbbrrs __pyx_string_tab[2938]
#define __pyx_kp_b_iso88591_1_Na __pyx_string_tab[2939]
#define __pyx_kp_b_iso88591_1_PQ __pyx_string_tab[2940]
#define __pyx_kp_b_iso88591_1_Q_a_Qa_a_AQ_77SS_kkttu_AS_1_q __pyx_string_tab[2941]
#define __pyx_kp_b_iso88591_1_nGSmmn __pyx_string_tab[2942]
#define __pyx_kp_b_iso88591_1_qPQ_L __pyx_string_tab[2943]
#define __pyx_kp_b_iso88591_1_t_Qha_iq_WG1_0_j_Q __pyx_string_tab[2944]
#define __pyx_kp_b_iso88591_21Fa_55TT__nnwwx_9_1 __pyx_string_tab[2945]
#define __pyx_kp_b_iso88591_21J_55XXccnnwwx_1HHA_1 __pyx_string_tab[2946]
#define __pyx_kp_b_iso88591_21Na_55_ggnnwwx_uA_B_P_ggttu_1 __pyx_string_tab[2947]
#define __pyx_kp_b_iso88591_2_4AXXV_Zqqr_Qa_00H_I____hhss_A __pyx_string_tab[2948]
#define __pyx_kp_b_iso88591_2_6 __pyx_string_tab[2949]
#define __pyx_kp_b_iso88591_2_81 __pyx_string_tab[2950]
#define __pyx_kp_b_iso88591_2_881IQa_E_q __pyx_string_tab[2951]
#define __pyx_kp_b_iso88591_2_885H_a __pyx_string_tab[2952]
#define __pyx_kp_b_iso88591_2_88CTTU __pyx_string_tab[2953]
#define __pyx_kp_b_iso88591_2_88CVVW __pyx_string_tab[2954]
#define __pyx_kp_b_iso88591_2_88CZZffttu __pyx_string_tab[2955]
#define __pyx_kp_b_iso88591_314FnTUUV_1 __pyx_string_tab[2956]
#define __pyx_kp_b_iso88591_314H_Wffuuv_Qa_Qm1A_22J_Uaajjk __pyx_string_tab[2957]
#define __pyx_kp_b_iso88591_31AQ_1 __pyx_string_tab[2958]
#define __pyx_kp_b_iso88591_31F __pyx_string_tab[2959]
#define __pyx_kp_b_iso88591_31HA __pyx_string_tab[2960]
#define __pyx_kp_b_iso88591_31HHDeef __pyx_string_tab[2961]
#define __pyx_kp_b_iso88591_31_5Qhhe1A __pyx_string_tab[2962]
#define __pyx_kp_b_iso88591_43Fa_77TT__rr_0_1 __pyx_string_tab[2963]
#define __pyx_kp_b_iso88591_43H_KK_eef_11G_T_55O_eef_Q_AATT __pyx_string_tab[2964]
#define __pyx_kp_b_iso88591_4A5I_XYYhhiivvwwx_O __pyx_string_tab[2965]
#define __pyx_kp_b_iso88591_4AXXE __pyx_string_tab[2966]
#define __pyx_kp_b_iso88591_4AXXEYYZ __pyx_string_tab[2967]
#define __pyx_kp_b_iso88591_4AXXEccd __pyx_string_tab[2968]
#define __pyx_kp_b_iso88591_4AXXEffg __pyx_string_tab[2969]
#define __pyx_kp_b_iso88591_4AXXQ __pyx_string_tab[2970]
#define __pyx_kp_b_iso88591_4AXXQiqPQ_2Oq __pyx_string_tab[2971]
#define __pyx_kp_b_iso88591_4AXXQmSTTU_M __pyx_string_tab[2972]
#define __pyx_kp_b_iso88591_54Fa_88TT__tt_1_QR_1 __pyx_string_tab[2973]
#define __pyx_kp_b_iso88591_54H_55PP_nnwwx_K_B_q_31HHA_1 __pyx_string_tab[2974]
#define __pyx_kp_b_iso88591_54H_55PP_nnwwx_QhhN_nno_1 __pyx_string_tab[2975]
#define __pyx_kp_b_iso88591_54J_55RR_nnwwx_B_q_aq_1 __pyx_string_tab[2976]
#define __pyx_kp_b_iso88591_54MQ_00SSaajjk_e1_A_2_a_Qa_s_S __pyx_string_tab[2977]
#define __pyx_kp_b_iso88591_54PPQ_55XXccnnwwx_K_B_q_5Qa_1 __pyx_string_tab[2978]
#define __pyx_kp_b_iso88591_5Q_6aq_1 __pyx_string_tab[2979]
#define __pyx_kp_b_iso88591_5Q_A_A_Qa_uAS_1_6_i_A_URVVXXY_f __pyx_string_tab[2980]
#define __pyx_kp_b_iso88591_5Q_C6_RS_6e1_5Qm_QR_Qa_s_S_q_fA __pyx_string_tab[2981]
#define __pyx_kp_b_iso88591_5Q_q_5 __pyx_string_tab[2982]
#define __pyx_kp_b_iso88591_5QhhFbbc __pyx_string_tab[2983]
#define __pyx_kp_b_iso88591_5QhhlRXXbbjjkk_A __pyx_string_tab[2984]
#define __pyx_kp_b_iso88591_5_axx_WWX __pyx_string_tab[2985]
#define __pyx_kp_b_iso88591_5_q_aq_a_q_A __pyx_string_tab[2986]
#define __pyx_kp_b_iso88591_66__ooxxy_DE_q_axxq_Qa_s_Cq_q_X __pyx_string_tab[2987]
#define __pyx_kp_b_iso88591_6a7K_Ziippq_Qa_uAS_1_6_i_EWEQVV __pyx_string_tab[2988]
#define __pyx_kp_b_iso88591_6avQ __pyx_string_tab[2989]
#define __pyx_kp_b_iso88591_6axq __pyx_string_tab[2990]
#define __pyx_kp_b_iso88591_6axxG__oouuv __pyx_string_tab[2991]
#define __pyx_kp_b_iso88591_6axxq __pyx_string_tab[2992]
#define __pyx_kp_b_iso88591_6axxq_TUUV_L __pyx_string_tab[2993]
#define __pyx_kp_b_iso88591_76K1_YYddx_y_B_B_C_4AXXQ_1 __pyx_string_tab[2994]
#define __pyx_kp_b_iso88591_7R_1M_q_1 __pyx_string_tab[2995]
#define __pyx_kp_b_iso88591_7_iWX_0_1E_TU_1 __pyx_string_tab[2996]
#define __pyx_kp_b_iso88591_7_iWX_881_1 __pyx_string_tab[2997]
#define __pyx_kp_b_iso88591_7q __pyx_string_tab[2998]
#define __pyx_kp_b_iso88591_7q8LO __pyx_string_tab[2999]
#define __pyx_kp_b_iso88591_7q8LO_jjyyz_Qa_Qm1A_66RR_iirrs __pyx_string_tab[3000]
#define __pyx_kp_b_iso88591_7q_Hllm __pyx_string_tab[3001]
#define __pyx_kp_b_iso88591_7q_PQQR_G1 __pyx_string_tab[3002]
#define __pyx_kp_b_iso88591_7q_S__eef_Qa_uAS_1_7_y_e4rQR_6 __pyx_string_tab[3003]
#define __pyx_kp_b_iso88591_7q_W___Qa_uAS_1_6_i_EWEQVVZZ_fA __pyx_string_tab[3004]
#define __pyx_kp_b_iso88591_7q_a __pyx_string_tab[3005]
#define __pyx_kp_b_iso88591_7q_aq_1 __pyx_string_tab[3006]
#define __pyx_kp_b_iso88591_7q_q_1 __pyx_string_tab[3007]
#define __pyx_kp_b_iso88591_7q_t_QfA_iq_e5_q_a_HAQ_Q __pyx_string_tab[3008]
#define __pyx_kp_b_iso88591_87PPQ_iiz_D_D_E_1HHA_1 __pyx_string_tab[3009]
#define __pyx_kp_b_iso88591_881LXYYZ_E_U __pyx_string_tab[3010]
#define __pyx_kp_b_iso88591_88_YZ __pyx_string_tab[3011]
#define __pyx_kp_b_iso88591_8_7_ST_0_Q_axxq_1 __pyx_string_tab[3012]
#define __pyx_kp_b_iso88591_8_9K_YZZ_1 __pyx_string_tab[3013]
#define __pyx_kp_b_iso88591_8_9MQ __pyx_string_tab[3014]
#define __pyx_kp_b_iso88591_8_FkQXXaab_5Qha_1 __pyx_string_tab[3015]
#define __pyx_kp_b_iso88591_8_I __pyx_string_tab[3016]
#define __pyx_kp_b_iso88591_8_R_5 __pyx_string_tab[3017]
#define __pyx_kp_b_iso88591_8_qPQ_5 __pyx_string_tab[3018]
#define __pyx_kp_b_iso88591_8_q_1 __pyx_string_tab[3019]
#define __pyx_kp_b_iso88591_98PPQ_ii_F_F_G_31HHA_1 __pyx_string_tab[3020]
#define __pyx_kp_b_iso88591_98SST_TTddmmn_Q_A_Qa_s_s_q_33LA __pyx_string_tab[3021]
#define __pyx_kp_b_iso88591_98WWX_66_hhqqr_DE_A_axxq_Qa_s_S __pyx_string_tab[3022]
#define __pyx_kp_b_iso88591_9AQ_E_q __pyx_string_tab[3023]
#define __pyx_kp_b_iso88591_9J_UV __pyx_string_tab[3024]
#define __pyx_kp_b_iso88591_9_J_a __pyx_string_tab[3025]
#define __pyx_kp_b_iso88591_9_J_llsst_Qa_uAS_1_6_i_EWEQVVZZ __pyx_string_tab[3026]
#define __pyx_kp_b_iso88591_9_Uppq __pyx_string_tab[3027]
#define __pyx_kp_b_iso88591_9_aq_A_YVW_axxuA_Qa_1 __pyx_string_tab[3028]
#define __pyx_kp_b_iso88591_AV1 __pyx_string_tab[3029]
#define __pyx_kp_b_iso88591_AV1_2 __pyx_string_tab[3030]
#define __pyx_kp_b_iso88591_AV1_3 __pyx_string_tab[3031]
#define __pyx_kp_b_iso88591_AXXQ_1 __pyx_string_tab[3032]
#define __pyx_kp_b_iso88591_A_1_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA __pyx_string_tab[3033]
#define __pyx_kp_b_iso88591_A_1_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_2 __pyx_string_tab[3034]
#define __pyx_kp_b_iso88591_A_21NhVWWX_4z_a_1A_4vS_AQ_4wc_AQ __pyx_string_tab[3035]
#define __pyx_kp_b_iso88591_A_2_881A_5 __pyx_string_tab[3036]
#define __pyx_kp_b_iso88591_A_32PPXXYYZ_4z_a_1A_4vS_AQ_4wc_A __pyx_string_tab[3037]
#define __pyx_kp_b_iso88591_A_4HAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D __pyx_string_tab[3038]
#define __pyx_kp_b_iso88591_A_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_q __pyx_string_tab[3039]
#define __pyx_kp_b_iso88591_A_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_q_2 __pyx_string_tab[3040]
#define __pyx_kp_b_iso88591_A_6 __pyx_string_tab[3041]
#define __pyx_kp_b_iso88591_A_6_1_q __pyx_string_tab[3042]
#define __pyx_kp_b_iso88591_A_6_2 __pyx_string_tab[3043]
#define __pyx_kp_b_iso88591_A_6_33C1 __pyx_string_tab[3044]
#define __pyx_kp_b_iso88591_A_6_44EQ __pyx_string_tab[3045]
#define __pyx_kp_b_iso88591_A_6_55Gq __pyx_string_tab[3046]
#define __pyx_kp_b_iso88591_A_6_66I __pyx_string_tab[3047]
#define __pyx_kp_b_iso88591_A_6_77K1 __pyx_string_tab[3048]
#define __pyx_kp_b_iso88591_A_6_88MQ __pyx_string_tab[3049]
#define __pyx_kp_b_iso88591_A_6_99Oq __pyx_string_tab[3050]
#define __pyx_kp_b_iso88591_A_6_AA __pyx_string_tab[3051]
#define __pyx_kp_b_iso88591_A_6_BBaab __pyx_string_tab[3052]
#define __pyx_kp_b_iso88591_A_6_CCccd __pyx_string_tab[3053]
#define __pyx_kp_b_iso88591_A_6_DDeef __pyx_string_tab[3054]
#define __pyx_kp_b_iso88591_A_6_EEggh __pyx_string_tab[3055]
#define __pyx_kp_b_iso88591_A_6_FFiij __pyx_string_tab[3056]
#define __pyx_kp_b_iso88591_A_6_GGkkl __pyx_string_tab[3057]
#define __pyx_kp_b_iso88591_A_6_HHmmn __pyx_string_tab[3058]
#define __pyx_kp_b_iso88591_A_6_JJqqr __pyx_string_tab[3059]
#define __pyx_kp_b_iso88591_A_6_KKsst __pyx_string_tab[3060]
#define __pyx_kp_b_iso88591_A_6_MMwwx __pyx_string_tab[3061]
#define __pyx_kp_b_iso88591_A_6_NNyyz __pyx_string_tab[3062]
#define __pyx_kp_b_iso88591_A_6_Q __pyx_string_tab[3063]
#define __pyx_kp_b_iso88591_A_6_QQR __pyx_string_tab[3064]
#define __pyx_kp_b_iso88591_A_6_SST __pyx_string_tab[3065]
#define __pyx_kp_b_iso88591_A_6_UUV __pyx_string_tab[3066]
#define __pyx_kp_b_iso88591_A_6_V_W_J_J_K __pyx_string_tab[3067]
#define __pyx_kp_b_iso88591_A_6_WWX __pyx_string_tab[3068]
#define __pyx_kp_b_iso88591_A_6_YYZ __pyx_string_tab[3069]
#define __pyx_kp_b_iso88591_A_6_Z_R_R_S __pyx_string_tab[3070]
#define __pyx_kp_b_iso88591_A_881A_1 __pyx_string_tab[3071]
#define __pyx_kp_b_iso88591_A_8_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA __pyx_string_tab[3072]
#define __pyx_kp_b_iso88591_A_9_ffggh_4z_a_1A_4vS_AQ_4wc_AQ __pyx_string_tab[3073]
#define __pyx_kp_b_iso88591_A_AXT __pyx_string_tab[3074]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_00LHTUUV_6_A_x_V1A_s_1 __pyx_string_tab[3075]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_6_A_x_fAQ_s_1 __pyx_string_tab[3076]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_6_A_x_fAQ_s_1_2 __pyx_string_tab[3077]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_N_1MQ_DA_5_DB __pyx_string_tab[3078]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h __pyx_string_tab[3079]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_4F_1_s_1_k_83h __pyx_string_tab[3080]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_F_1_s_1_k_83hg __pyx_string_tab[3081]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_1_N_1MQ_DA_5_2_F_vXQfG __pyx_string_tab[3082]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_1_N_1MQ_DA_5_6b_a_vXQf __pyx_string_tab[3083]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_22PPXXYYZ_N_1MQ_DA_5_E __pyx_string_tab[3084]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_33RRZZ_6_A_x_aq_s_1_k __pyx_string_tab[3085]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_33RRZZ_6_A_x_aq_s_1_k_2 __pyx_string_tab[3086]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_6_A_x_1_q_s_1_k_8 __pyx_string_tab[3087]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_N_1MQ_DA_5_7r_q_v __pyx_string_tab[3088]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_7vQa_s_1_k_83hgU __pyx_string_tab[3089]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_vQa_s_1_k_83hgU __pyx_string_tab[3090]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_vQa_s_1_k_83hgU_2 __pyx_string_tab[3091]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_6haq_6_A_x_2_s_1_k_83h __pyx_string_tab[3092]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_7_1_6_A_x_aq_s_1_k_83h __pyx_string_tab[3093]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU __pyx_string_tab[3094]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_8_N_1MQ_DA_5_9_6_vXQfG __pyx_string_tab[3095]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_99_ffggh_N_1MQ_DA_5_LB __pyx_string_tab[3096]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_B_1_6_A_x_8_aq_s_1_k_8 __pyx_string_tab[3097]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_DHAQ_6_A_x_9_q_s_1_k_8 __pyx_string_tab[3098]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_DHAQ_N_1MQ_DA_5_r_q_vX __pyx_string_tab[3099]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU __pyx_string_tab[3100]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_N_1MQ_DA_5_vXQfG5 __pyx_string_tab[3101]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_5V1A_s_1_k_8 __pyx_string_tab[3102]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_5V1A_s_1_k_8_2 __pyx_string_tab[3103]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_V1A_s_1_k_83 __pyx_string_tab[3104]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_N_1MQ_DA_5_2V1_vXQ __pyx_string_tab[3105]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_HHAQ_6_A_x_q_s_1_k_83h __pyx_string_tab[3106]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_H_PQQR_6_A_x_6_s_1_k_8 __pyx_string_tab[3107]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_J_RSST_6_A_x_F_1_s_1_k __pyx_string_tab[3108]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_N_1MQ_DA_5_5RvQ_vXQfG5 __pyx_string_tab[3109]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_N_1MQ_DA_5_RvQ_vXQfG5 __pyx_string_tab[3110]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_Yhaq_6_A_x_s_1_k_83hgU __pyx_string_tab[3111]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_Yhaq_N_1MQ_DA_5_0_vXQf __pyx_string_tab[3112]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_ffnnoop_6_A_x_J_PQQR_s __pyx_string_tab[3113]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8 __pyx_string_tab[3114]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83 __pyx_string_tab[3115]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83 __pyx_string_tab[3116]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83_2 __pyx_string_tab[3117]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_haq_N_1MQ_DA_5_BfA_vXQ __pyx_string_tab[3118]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_hhiij_6_A_x_GvQa_s_1_k __pyx_string_tab[3119]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_j_6_A_x_6_s_1_k_83hgU __pyx_string_tab[3120]
#define __pyx_kp_b_iso88591_A_A_4s_AQ_j_N_1MQ_DA_5_1_6_vXQfG __pyx_string_tab[3121]
#define __pyx_kp_b_iso88591_A_A_B_Gq_Uccooppq_1 __pyx_string_tab[3122]
#define __pyx_kp_b_iso88591_A_A_ST_wiq_q_q_Kq __pyx_string_tab[3123]
#define __pyx_kp_b_iso88591_A_B_Zccd_AV6_1 __pyx_string_tab[3124]
#define __pyx_kp_b_iso88591_A_DHAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D __pyx_string_tab[3125]
#define __pyx_kp_b_iso88591_A_Fhaq_4z_a_1A_4vS_AQ_4wc_AQ_9D __pyx_string_tab[3126]
#define __pyx_kp_b_iso88591_A_HAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D_Q __pyx_string_tab[3127]
#define __pyx_kp_b_iso88591_A_Ja_L_Qk_iq __pyx_string_tab[3128]
#define __pyx_kp_b_iso88591_A_O_Zbbkkl_2_1_1 __pyx_string_tab[3129]
#define __pyx_kp_b_iso88591_A_Qaq_1 __pyx_string_tab[3130]
#define __pyx_kp_b_iso88591_A_Qhhaq_5 __pyx_string_tab[3131]
#define __pyx_kp_b_iso88591_A_QhhlZ_I_I_J_J_K_Q __pyx_string_tab[3132]
#define __pyx_kp_b_iso88591_A_QkYZZ_Kq __pyx_string_tab[3133]
#define __pyx_kp_b_iso88591_A_RSS____Kq __pyx_string_tab[3134]
#define __pyx_kp_b_iso88591_A_R_iij __pyx_string_tab[3135]
#define __pyx_kp_b_iso88591_A_VVW_DDddo_p_M_M_V_V_W_1HHA_1 __pyx_string_tab[3136]
#define __pyx_kp_b_iso88591_A_VWWX_Q __pyx_string_tab[3137]
#define __pyx_kp_b_iso88591_A_X __pyx_string_tab[3138]
#define __pyx_kp_b_iso88591_A_Yhaq_4z_a_1A_4vS_AQ_4wc_AQ_9D __pyx_string_tab[3139]
#define __pyx_kp_b_iso88591_A_haq_4z_a_1A_4vS_AQ_4wc_AQ_9D_Q __pyx_string_tab[3140]
#define __pyx_kp_b_iso88591_A_j_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA __pyx_string_tab[3141]
#define __pyx_kp_b_iso88591_A_q0C6_Q __pyx_string_tab[3142]
#define __pyx_kp_b_iso88591_BBkkv_w_I_I_R_R_S_A_RS_1 __pyx_string_tab[3143]
#define __pyx_kp_b_iso88591_B_1HHAQ_1 __pyx_string_tab[3144]
#define __pyx_kp_b_iso88591_B_88S__eettu_Qa_q_Qa_66TT__iirr __pyx_string_tab[3145]
#define __pyx_kp_b_iso88591_B_88Sqqr __pyx_string_tab[3146]
#define __pyx_kp_b_iso88591_B_A___44_rr_B_q_9A_1_B_P___Qa_s __pyx_string_tab[3147]
#define __pyx_kp_b_iso88591_B_J_U_eef_9_a_0_1 __pyx_string_tab[3148]
#define __pyx_kp_b_iso88591_C1F __pyx_string_tab[3149]
#define __pyx_kp_b_iso88591_C1_4AXXQa_5 __pyx_string_tab[3150]
#define __pyx_kp_b_iso88591_C1_AXXQa_1 __pyx_string_tab[3151]
#define __pyx_kp_b_iso88591_Cq_6_i_FgURWW_0_I_ggllqqr_8_q_8 __pyx_string_tab[3152]
#define __pyx_kp_b_iso88591_Cq_6_i_FgURWW_0_I_ggllqqr_8_q_Q __pyx_string_tab[3153]
#define __pyx_kp_b_iso88591_DA_22RR_hhqqr_8_1 __pyx_string_tab[3154]
#define __pyx_kp_b_iso88591_DA_4AXXQa_1 __pyx_string_tab[3155]
#define __pyx_kp_b_iso88591_DA_AXXQa_1 __pyx_string_tab[3156]
#define __pyx_kp_b_iso88591_DA_Qhhaq_1 __pyx_string_tab[3157]
#define __pyx_kp_b_iso88591_DA_axxq __pyx_string_tab[3158]
#define __pyx_kp_b_iso88591_EQFuuv __pyx_string_tab[3159]
#define __pyx_kp_b_iso88591_EQ_Qhaq_Q __pyx_string_tab[3160]
#define __pyx_kp_b_iso88591_EQ_Qhhaq_1 __pyx_string_tab[3161]
#define __pyx_kp_b_iso88591_EQhhVttu __pyx_string_tab[3162]
#define __pyx_kp_b_iso88591_F_0_9IQ_A_RS_1 __pyx_string_tab[3163]
#define __pyx_kp_b_iso88591_Fa_11TT__ffoop_5_9_ST_1 __pyx_string_tab[3164]
#define __pyx_kp_b_iso88591_Fa_11TT__ffoop_5_QhhlZ_1 __pyx_string_tab[3165]
#define __pyx_kp_b_iso88591_Fa_1AQ_1 __pyx_string_tab[3166]
#define __pyx_kp_b_iso88591_Fa_t_Ql_iq_E_G1_a_5Qm_QRRS_Q __pyx_string_tab[3167]
#define __pyx_kp_b_iso88591_G_Rbbkkl_axxq_1 __pyx_string_tab[3168]
#define __pyx_kp_b_iso88591_Gq_q_1 __pyx_string_tab[3169]
#define __pyx_kp_b_iso88591_HA_2_WIQ_vQ_1 __pyx_string_tab[3170]
#define __pyx_kp_b_iso88591_H_0_1 __pyx_string_tab[3171]
#define __pyx_kp_b_iso88591_H_2_3J_VWWX_1 __pyx_string_tab[3172]
#define __pyx_kp_b_iso88591_H_H_I_4AXXEWWcct_u_C_C_T_T_c_c __pyx_string_tab[3173]
#define __pyx_kp_b_iso88591_H_SZZccd_2_3DDVVW_1 __pyx_string_tab[3174]
#define __pyx_kp_b_iso88591_H_S_eef_9_a_4AXXQ_1 __pyx_string_tab[3175]
#define __pyx_kp_b_iso88591_H_S_iij_q_1 __pyx_string_tab[3176]
#define __pyx_kp_b_iso88591_H_Sbbkkl_6a7K_Z_1 __pyx_string_tab[3177]
#define __pyx_kp_b_iso88591_H_Vddppv_w_F_F_G_Qa_Gq_Qa_ggqqz __pyx_string_tab[3178]
#define __pyx_kp_b_iso88591_I_RZ_I_I_J __pyx_string_tab[3179]
#define __pyx_kp_b_iso88591_I_iWX_9_a_81_881_Qa_s_s_q_A_q_8 __pyx_string_tab[3180]
#define __pyx_kp_b_iso88591_J_2_81A_Q __pyx_string_tab[3181]
#define __pyx_kp_b_iso88591_J_q0DOSTTU_5 __pyx_string_tab[3182]
#define __pyx_kp_b_iso88591_Ja_K_T_0_1 __pyx_string_tab[3183]
#define __pyx_kp_b_iso88591_K1_2_881A_1 __pyx_string_tab[3184]
#define __pyx_kp_b_iso88591_K1_31HHAQ_1 __pyx_string_tab[3185]
#define __pyx_kp_b_iso88591_K1_5 __pyx_string_tab[3186]
#define __pyx_kp_b_iso88591_K1_89HIQa_1 __pyx_string_tab[3187]
#define __pyx_kp_b_iso88591_LA_1HHAQ_5 __pyx_string_tab[3188]
#define __pyx_kp_b_iso88591_LA_1HHL_aq_1 __pyx_string_tab[3189]
#define __pyx_kp_b_iso88591_LA_31HHAQ_1 __pyx_string_tab[3190]
#define __pyx_kp_b_iso88591_LA_4AXXQa_1 __pyx_string_tab[3191]
#define __pyx_kp_b_iso88591_MQ_0_AQQ_5 __pyx_string_tab[3192]
#define __pyx_kp_b_iso88591_N __pyx_string_tab[3193]
#define __pyx_kp_b_iso88591_N_P___Qa_t1Cs_q_5_Qa_NJ_jjk_q __pyx_string_tab[3194]
#define __pyx_kp_b_iso88591_Na_6axxq_1 __pyx_string_tab[3195]
#define __pyx_kp_b_iso88591_Na_A_A_PQQR __pyx_string_tab[3196]
#define __pyx_kp_b_iso88591_O1_A_HT_1_q_1 __pyx_string_tab[3197]
#define __pyx_kp_b_iso88591_O1_A_HT_7q8LO_a_1 __pyx_string_tab[3198]
#define __pyx_kp_b_iso88591_O1_C_N_ggh_q_axxq_1 __pyx_string_tab[3199]
#define __pyx_kp_b_iso88591_Oq_QhhfAQ_5 __pyx_string_tab[3200]
#define __pyx_kp_b_iso88591_PPQ_7q_1 __pyx_string_tab[3201]
#define __pyx_kp_b_iso88591_PPQ_axx_6QRRS_1 __pyx_string_tab[3202]
#define __pyx_kp_b_iso88591_PPQ_i_j_C_C_L_L_M_4AXXQ_1 __pyx_string_tab[3203]
#define __pyx_kp_b_iso88591_P_P_Q_Fa_Tbbn_o_V_V_a_a_b_b_c_Q __pyx_string_tab[3204]
#define __pyx_kp_b_iso88591_QQR_6a7K_Z_5 __pyx_string_tab[3205]
#define __pyx_kp_b_iso88591_QQR_8_1 __pyx_string_tab[3206]
#define __pyx_kp_b_iso88591_Q_0_1E_T___q __pyx_string_tab[3207]
#define __pyx_kp_b_iso88591_Q_2 __pyx_string_tab[3208]
#define __pyx_kp_b_iso88591_Q_5_iq_e1_AXXQ_1 __pyx_string_tab[3209]
#define __pyx_kp_b_iso88591_Q_A_A_PVVW_q __pyx_string_tab[3210]
#define __pyx_kp_b_iso88591_Q_A_A_PVV_G __pyx_string_tab[3211]
#define __pyx_kp_b_iso88591_Q_E_PZZccd_8_Q_8_9M__1 __pyx_string_tab[3212]
#define __pyx_kp_b_iso88591_Q_IYYhhi_q __pyx_string_tab[3213]
#define __pyx_kp_b_iso88591_Q_Pyyz_Q_R_B_B_M_M_g_g_p_p_q_cc __pyx_string_tab[3214]
#define __pyx_kp_b_iso88591_Q_QQR __pyx_string_tab[3215]
#define __pyx_kp_b_iso88591_Q_RRS __pyx_string_tab[3216]
#define __pyx_kp_b_iso88591_Q_V___6e1_AXXQ_1 __pyx_string_tab[3217]
#define __pyx_kp_b_iso88591_Q_q_1 __pyx_string_tab[3218]
#define __pyx_kp_b_iso88591_Qa_0_A_Qa_y_Cq_6_i_EWEQVVZZ_fAW __pyx_string_tab[3219]
#define __pyx_kp_b_iso88591_Qa_Qhhk_Qa_y_Cq_6_i_EWEQVVZZ_aw __pyx_string_tab[3220]
#define __pyx_kp_b_iso88591_QfA __pyx_string_tab[3221]
#define __pyx_kp_b_iso88591_Qha __pyx_string_tab[3222]
#define __pyx_kp_b_iso88591_Qhhaq __pyx_string_tab[3223]
#define __pyx_kp_b_iso88591_Qhhaq_1 __pyx_string_tab[3224]
#define __pyx_kp_b_iso88591_Qhhaq_q __pyx_string_tab[3225]
#define __pyx_kp_b_iso88591_Qhhe1 __pyx_string_tab[3226]
#define __pyx_kp_b_iso88591_Qhhk __pyx_string_tab[3227]
#define __pyx_kp_b_iso88591_Qhhm_UV __pyx_string_tab[3228]
#define __pyx_kp_b_iso88591_QhhnA __pyx_string_tab[3229]
#define __pyx_kp_b_iso88591_QhhnNZ __pyx_string_tab[3230]
#define __pyx_kp_b_iso88591_Qhho__Qa_Qm1A_22J_Uaajjk_AS_1_q __pyx_string_tab[3231]
#define __pyx_kp_b_iso88591_Qj __pyx_string_tab[3232]
#define __pyx_kp_b_iso88591_RRS_1F_aq_1 __pyx_string_tab[3233]
#define __pyx_kp_b_iso88591_RRS_9_1_1 __pyx_string_tab[3234]
#define __pyx_kp_b_iso88591_RRS_ZZee_F_F_G_I_a_7q_ST_1 __pyx_string_tab[3235]
#define __pyx_kp_b_iso88591_SST_5Qhhaq_1 __pyx_string_tab[3236]
#define __pyx_kp_b_iso88591_SST_ffz_D_D_E_H_Q_8_1 __pyx_string_tab[3237]
#define __pyx_kp_b_iso88591_S_6_i_FgURWW_6_1B_Kbbiinnsst_9 __pyx_string_tab[3238]
#define __pyx_kp_b_iso88591_TTU_0_ASS_1 __pyx_string_tab[3239]
#define __pyx_kp_b_iso88591_TTU_881A_1 __pyx_string_tab[3240]
#define __pyx_kp_b_iso88591_TTU_gg_H_H_I_J_q_7q_1 __pyx_string_tab[3241]
#define __pyx_kp_b_iso88591_TTU_gg_H_H_I_J_q_A_XY_1 __pyx_string_tab[3242]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_0_AWKwa_0_AWK __pyx_string_tab[3243]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_1_Qg_q_1_Qg __pyx_string_tab[3244]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_34q_QR_34q __pyx_string_tab[3245]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_4q_4q __pyx_string_tab[3246]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_5T_GST_5T_A __pyx_string_tab[3247]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_6d_7_WTU_6d_7 __pyx_string_tab[3248]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_9_Qg_PWWX_9_Q __pyx_string_tab[3249]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_D_7_D_1 __pyx_string_tab[3250]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_Fd_7R_dde_Fd __pyx_string_tab[3251]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_awkQXXY_awkQR __pyx_string_tab[3252]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_awk_awk __pyx_string_tab[3253]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_d_7_U_d_7_UV __pyx_string_tab[3254]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_V_t1G_VW __pyx_string_tab[3255]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_gQ_t1G_a __pyx_string_tab[3256]
#define __pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_gQ_t1G_a_2 __pyx_string_tab[3257]
#define __pyx_kp_b_iso88591_UUV_2_3E_STTU_1 __pyx_string_tab[3258]
#define __pyx_kp_b_iso88591_VVW_314FnTUUV_1 __pyx_string_tab[3259]
#define __pyx_kp_b_iso88591_VVW_8_1 __pyx_string_tab[3260]
#define __pyx_kp_b_iso88591_VVW_Qhhaq_1 __pyx_string_tab[3261]
#define __pyx_kp_b_iso88591_VVW_Qhhe1A_1 __pyx_string_tab[3262]
#define __pyx_kp_b_iso88591_VVW_axxqPQ_1 __pyx_string_tab[3263]
#define __pyx_kp_b_iso88591_WWX_1_aq_Q __pyx_string_tab[3264]
#define __pyx_kp_b_iso88591_WWX___j_k_A_A_J_J_K_K5PQ_31HHA __pyx_string_tab[3265]
#define __pyx_kp_b_iso88591_XQ_3_hiq_vQ_1 __pyx_string_tab[3266]
#define __pyx_kp_b_iso88591_XXY_5Q6J_YZZ_1 __pyx_string_tab[3267]
#define __pyx_kp_b_iso88591_XXY_k_l_E_E_N_N_O_I_a_9_UV_1 __pyx_string_tab[3268]
#define __pyx_kp_b_iso88591_XXY_q0B_PQQR_1 __pyx_string_tab[3269]
#define __pyx_kp_b_iso88591_XXY_q0DOSTTU_1 __pyx_string_tab[3270]
#define __pyx_kp_b_iso88591_YYZ_1_2DNRSST_1 __pyx_string_tab[3271]
#define __pyx_kp_b_iso88591_YYZ_2_88CZZffggh_1 __pyx_string_tab[3272]
#define __pyx_kp_b_iso88591_YYZ_5Q6H_VWWX_1 __pyx_string_tab[3273]
#define __pyx_kp_b_iso88591_YYZ_5Q6J_YZZ_1 __pyx_string_tab[3274]
#define __pyx_kp_b_iso88591_YYZ_q_aq_1 __pyx_string_tab[3275]
#define __pyx_kp_b_iso88591_Ya_4Kwiq_5_a_1 __pyx_string_tab[3276]
#define __pyx_kp_b_iso88591_ZZ_0_aq_1 __pyx_string_tab[3277]
#define __pyx_kp_b_iso88591_ZZ_1HHAQ_1 __pyx_string_tab[3278]
#define __pyx_kp_b_iso88591_ZZ_6a7K_Z_1 __pyx_string_tab[3279]
#define __pyx_kp_b_iso88591_ZZ_7q8LO_1 __pyx_string_tab[3280]
#define __pyx_kp_b_iso88591_Zq_V___axxq_1 __pyx_string_tab[3281]
#define __pyx_kp_b_iso88591__10 __pyx_string_tab[3282]
#define __pyx_kp_b_iso88591__11 __pyx_string_tab[3283]
#define __pyx_kp_b_iso88591__12 __pyx_string_tab[3284]
#define __pyx_kp_b_iso88591__9 __pyx_string_tab[3285]
#define __pyx_kp_b_iso88591_a_1_Pddeef_1 __pyx_string_tab[3286]
#define __pyx_kp_b_iso88591_a_2_88CXXccdde_1 __pyx_string_tab[3287]
#define __pyx_kp_b_iso88591_a_6k_QR_uA_AXXQ_1 __pyx_string_tab[3288]
#define __pyx_kp_b_iso88591_a_7q_T_aab_1 __pyx_string_tab[3289]
#define __pyx_kp_b_iso88591_a_A_O___ffggh_F __pyx_string_tab[3290]
#define __pyx_kp_b_iso88591_a_FkQXXaab_7uA_axxq_1 __pyx_string_tab[3291]
#define __pyx_kp_b_iso88591_a_FkQ_eef_K_9_a_A_Q_1 __pyx_string_tab[3292]
#define __pyx_kp_b_iso88591_a_FkQ_ggh_k_q_2_881_1 __pyx_string_tab[3293]
#define __pyx_kp_b_iso88591_a_KyPYYZ_axxq_1 __pyx_string_tab[3294]
#define __pyx_kp_b_iso88591_a_axxq_5 __pyx_string_tab[3295]
#define __pyx_kp_b_iso88591_a_q_5 __pyx_string_tab[3296]
#define __pyx_kp_b_iso88591_aab_0_1C_Qbbnnoop_1 __pyx_string_tab[3297]
#define __pyx_kp_b_iso88591_aab_6axxG__ooppq_1 __pyx_string_tab[3298]
#define __pyx_kp_b_iso88591_aq __pyx_string_tab[3299]
#define __pyx_kp_b_iso88591_aq_1_BXX__nnz_A_A_B_Qa_z_S_awe9 __pyx_string_tab[3300]
#define __pyx_kp_b_iso88591_aq_2 __pyx_string_tab[3301]
#define __pyx_kp_b_iso88591_aq_9_Ya_0_1E_TYYZ_Qa_1 __pyx_string_tab[3302]
#define __pyx_kp_b_iso88591_aq_AXX__hhi_Qa_1A_YVW_z_S_q_AXX __pyx_string_tab[3303]
#define __pyx_kp_b_iso88591_aq_A_L_Qa_1Jaq_W_a_z_S_q_A_L_1 __pyx_string_tab[3304]
#define __pyx_kp_b_iso88591_aq_axx_UU_kkwwx_Qa_z_S_6_i_K7RW __pyx_string_tab[3305]
#define __pyx_kp_b_iso88591_aq_q_P__kkl_Qa_1A_YVW_z_S_q_q_P __pyx_string_tab[3306]
#define __pyx_kp_b_iso88591_avQ __pyx_string_tab[3307]
#define __pyx_kp_b_iso88591_axq __pyx_string_tab[3308]
#define __pyx_kp_b_iso88591_axx_6QR_Qa_uAS_1_7_y_e4rQR_vQgU __pyx_string_tab[3309]
#define __pyx_kp_b_iso88591_axx_QQR __pyx_string_tab[3310]
#define __pyx_kp_b_iso88591_axx_SST __pyx_string_tab[3311]
#define __pyx_kp_b_iso88591_axxq_1JVWWccdde_Kz_Q __pyx_string_tab[3312]
#define __pyx_kp_b_iso88591_axxq_qP___O __pyx_string_tab[3313]
#define __pyx_kp_b_iso88591_ay_87 __pyx_string_tab[3314]
#define __pyx_kp_b_iso88591_ccd_0_ASSaabbc_1 __pyx_string_tab[3315]
#define __pyx_kp_b_iso88591_ccd_8_QRRS_Q __pyx_string_tab[3316]
#define __pyx_kp_b_iso88591_dde_mmx_y_B_B_C_K5PQ_a_EQhhVW_Q __pyx_string_tab[3317]
#define __pyx_kp_b_iso88591_ggh_VZZ_Q __pyx_string_tab[3318]
#define __pyx_kp_b_iso88591_k_Xaab_2_881_1 __pyx_string_tab[3319]
#define __pyx_kp_b_iso88591_llm_2_886I_hhiij_1 __pyx_string_tab[3320]
#define __pyx_kp_b_iso88591_llm_9_Uaabbc_Q __pyx_string_tab[3321]
#define __pyx_kp_b_iso88591_ppq_0_ASS__pp_A_1 __pyx_string_tab[3322]
#define __pyx_kp_b_iso88591_ppq_axx_1 __pyx_string_tab[3323]
#define __pyx_kp_b_iso88591_q __pyx_string_tab[3324]
#define __pyx_kp_b_iso88591_q0A __pyx_string_tab[3325]
#define __pyx_kp_b_iso88591_q0_XY __pyx_string_tab[3326]
#define __pyx_kp_b_iso88591_q_0_5 __pyx_string_tab[3327]
#define __pyx_kp_b_iso88591_q_0_AQQYYbbc_q __pyx_string_tab[3328]
#define __pyx_kp_b_iso88591_q_0_kQR_0_7_q_nno_1 __pyx_string_tab[3329]
#define __pyx_kp_b_iso88591_q_0_kQR_1_7_1_2DNRS_1 __pyx_string_tab[3330]
#define __pyx_kp_b_iso88591_q_0_kQR_1_7_Abbppq_1 __pyx_string_tab[3331]
#define __pyx_kp_b_iso88591_q_0_kQR_6_7_1 __pyx_string_tab[3332]
#define __pyx_kp_b_iso88591_q_0_kQR_7_0_1B_PQ_1 __pyx_string_tab[3333]
#define __pyx_kp_b_iso88591_q_0_kQR_7_7q8PP___1 __pyx_string_tab[3334]
#define __pyx_kp_b_iso88591_q_0_kQR_81A_7_2_3FnTU_1 __pyx_string_tab[3335]
#define __pyx_kp_b_iso88591_q_0_kQR_81A_7_VVdde_1 __pyx_string_tab[3336]
#define __pyx_kp_b_iso88591_q_0_kQR_8_7_GqHpp_1 __pyx_string_tab[3337]
#define __pyx_kp_b_iso88591_q_0_kQR_9HAQ_7_1L_a_1 __pyx_string_tab[3338]
#define __pyx_kp_b_iso88591_q_0_kQR_HAQ_7_1_XXffg_1 __pyx_string_tab[3339]
#define __pyx_kp_b_iso88591_q_0_kQR_XQa_7_4A5J_XY_1 __pyx_string_tab[3340]
#define __pyx_kp_b_iso88591_q_0_kQR_XQa_7_A_1 __pyx_string_tab[3341]
#define __pyx_kp_b_iso88591_q_0_kQR_haq_7_QnN_1 __pyx_string_tab[3342]
#define __pyx_kp_b_iso88591_q_0_kQR_xq_7_6a7Nn_1 __pyx_string_tab[3343]
#define __pyx_kp_b_iso88591_q_1 __pyx_string_tab[3344]
#define __pyx_kp_b_iso88591_q_1_q_q __pyx_string_tab[3345]
#define __pyx_kp_b_iso88591_q_2 __pyx_string_tab[3346]
#define __pyx_kp_b_iso88591_q_3 __pyx_string_tab[3347]
#define __pyx_kp_b_iso88591_q_4AXXV_Z_Qa_3c_q_a_1_q_4AXX_Tc __pyx_string_tab[3348]
#define __pyx_kp_b_iso88591_q_5Qhhiq_q __pyx_string_tab[3349]
#define __pyx_kp_b_iso88591_q_7q8LO_dde_q __pyx_string_tab[3350]
#define __pyx_kp_b_iso88591_q_A __pyx_string_tab[3351]
#define __pyx_kp_b_iso88591_q_G_R_iij_5_314H_WX_1 __pyx_string_tab[3352]
#define __pyx_kp_b_iso88591_q_G_R_iij_5_axxq_1 __pyx_string_tab[3353]
#define __pyx_kp_b_iso88591_q_LPZZddeef_1 __pyx_string_tab[3354]
#define __pyx_kp_b_iso88591_q_PQ __pyx_string_tab[3355]
#define __pyx_kp_b_iso88591_q_Piij __pyx_string_tab[3356]
#define __pyx_kp_b_iso88591_q_Q __pyx_string_tab[3357]
#define __pyx_kp_b_iso88591_q_Qe6_q __pyx_string_tab[3358]
#define __pyx_kp_b_iso88591_q_Qiq_q __pyx_string_tab[3359]
#define __pyx_kp_b_iso88591_q_V___6e1_Qhha_1 __pyx_string_tab[3360]
#define __pyx_kp_b_iso88591_q_a __pyx_string_tab[3361]
#define __pyx_kp_b_iso88591_q_a_z_5 __pyx_string_tab[3362]
#define __pyx_kp_b_iso88591_q_axxvQ_q __pyx_string_tab[3363]
#define __pyx_kp_b_iso88591_q_axxy_q __pyx_string_tab[3364]
#define __pyx_kp_b_iso88591_q_fA_1 __pyx_string_tab[3365]
#define __pyx_kp_b_iso88591_q_q __pyx_string_tab[3366]
#define __pyx_kp_b_iso88591_q_q_fO_XY_Qa_3c_6_i_A_URVVXXY_f __pyx_string_tab[3367]
#define __pyx_kp_b_iso88591_q_q_q __pyx_string_tab[3368]
#define __pyx_kp_b_iso88591_q_q_q_axxq_a __pyx_string_tab[3369]
#define __pyx_kp_b_iso88591_q_uA_Qhha_1 __pyx_string_tab[3370]
#define __pyx_kp_b_iso88591_rrs_4AXXVK_iijjk_1 __pyx_string_tab[3371]
#define __pyx_kp_b_iso88591_uCq_6_i_EWEQVVZZ_AWW_cchhi_A_A __pyx_string_tab[3372]
#define __pyx_kp_b_iso88591_v __pyx_string_tab[3373]
#define __pyx_kp_b_iso88591_wc_Qa_7_Q __pyx_string_tab[3374]
#define __pyx_kp_b_iso88591_y_6k_ST_U_q_1 __pyx_string_tab[3375]
#define __pyx_n_b_O __pyx_string_tab[3376]
#define __pyx_int_0 __pyx_number_tab[0]
#define __pyx_int_neg_1 __pyx_number_tab[1]
#define __pyx_int_1 __pyx_number_tab[2]
#define __pyx_int_2 __pyx_number_tab[3]
#define __pyx_int_3 __pyx_number_tab[4]
#define __pyx_int_4 __pyx_number_tab[5]
#define __pyx_int_5 __pyx_number_tab[6]
#define __pyx_int_6 __pyx_number_tab[7]
#define __pyx_int_7 __pyx_number_tab[8]
#define __pyx_int_8 __pyx_number_tab[9]
#define __pyx_int_9 __pyx_number_tab[10]
#define __pyx_int_10 __pyx_number_tab[11]
#define __pyx_int_11 __pyx_number_tab[12]
#define __pyx_int_12 __pyx_number_tab[13]
#define __pyx_int_13 __pyx_number_tab[14]
#define __pyx_int_14 __pyx_number_tab[15]
#define __pyx_int_15 __pyx_number_tab[16]
#define __pyx_int_16 __pyx_number_tab[17]
#define __pyx_int_17 __pyx_number_tab[18]
#define __pyx_int_18 __pyx_number_tab[19]
#define __pyx_int_19 __pyx_number_tab[20]
#define __pyx_int_20 __pyx_number_tab[21]
#define __pyx_int_21 __pyx_number_tab[22]
#define __pyx_int_22 __pyx_number_tab[23]
#define __pyx_int_23 __pyx_number_tab[24]
#define __pyx_int_24 __pyx_number_tab[25]
#define __pyx_int_25 __pyx_number_tab[26]
#define __pyx_int_26 __pyx_number_tab[27]
#define __pyx_int_27 __pyx_number_tab[28]
#define __pyx_int_28 __pyx_number_tab[29]
#define __pyx_int_29 __pyx_number_tab[30]
#define __pyx_int_30 __pyx_number_tab[31]
#define __pyx_int_31 __pyx_number_tab[32]
#define __pyx_int_32 __pyx_number_tab[33]
#define __pyx_int_33 __pyx_number_tab[34]
#define __pyx_int_34 __pyx_number_tab[35]
#define __pyx_int_35 __pyx_number_tab[36]
#define __pyx_int_36 __pyx_number_tab[37]
#define __pyx_int_37 __pyx_number_tab[38]
#define __pyx_int_38 __pyx_number_tab[39]
#define __pyx_int_39 __pyx_number_tab[40]
#define __pyx_int_40 __pyx_number_tab[41]
#define __pyx_int_41 __pyx_number_tab[42]
#define __pyx_int_42 __pyx_number_tab[43]
#define __pyx_int_43 __pyx_number_tab[44]
#define __pyx_int_44 __pyx_number_tab[45]
#define __pyx_int_45 __pyx_number_tab[46]
#define __pyx_int_46 __pyx_number_tab[47]
#define __pyx_int_47 __pyx_number_tab[48]
#define __pyx_int_48 __pyx_number_tab[49]
#define __pyx_int_49 __pyx_number_tab[50]
#define __pyx_int_50 __pyx_number_tab[51]
#define __pyx_int_51 __pyx_number_tab[52]
#define __pyx_int_52 __pyx_number_tab[53]
#define __pyx_int_53 __pyx_number_tab[54]
#define __pyx_int_54 __pyx_number_tab[55]
#define __pyx_int_55 __pyx_number_tab[56]
#define __pyx_int_56 __pyx_number_tab[57]
#define __pyx_int_57 __pyx_number_tab[58]
#define __pyx_int_58 __pyx_number_tab[59]
#define __pyx_int_59 __pyx_number_tab[60]
#define __pyx_int_60 __pyx_number_tab[61]
#define __pyx_int_61 __pyx_number_tab[62]
#define __pyx_int_62 __pyx_number_tab[63]
#define __pyx_int_63 __pyx_number_tab[64]
#define __pyx_int_64 __pyx_number_tab[65]
#define __pyx_int_65 __pyx_number_tab[66]
#define __pyx_int_66 __pyx_number_tab[67]
#define __pyx_int_67 __pyx_number_tab[68]
#define __pyx_int_68 __pyx_number_tab[69]
#define __pyx_int_69 __pyx_number_tab[70]
#define __pyx_int_70 __pyx_number_tab[71]
#define __pyx_int_71 __pyx_number_tab[72]
#define __pyx_int_72 __pyx_number_tab[73]
#define __pyx_int_73 __pyx_number_tab[74]
#define __pyx_int_74 __pyx_number_tab[75]
#define __pyx_int_75 __pyx_number_tab[76]
#define __pyx_int_76 __pyx_number_tab[77]
#define __pyx_int_77 __pyx_number_tab[78]
#define __pyx_int_78 __pyx_number_tab[79]
#define __pyx_int_79 __pyx_number_tab[80]
#define __pyx_int_80 __pyx_number_tab[81]
#define __pyx_int_81 __pyx_number_tab[82]
#define __pyx_int_82 __pyx_number_tab[83]
#define __pyx_int_83 __pyx_number_tab[84]
#define __pyx_int_84 __pyx_number_tab[85]
#define __pyx_int_85 __pyx_number_tab[86]
#define __pyx_int_86 __pyx_number_tab[87]
#define __pyx_int_87 __pyx_number_tab[88]
#define __pyx_int_88 __pyx_number_tab[89]
#define __pyx_int_89 __pyx_number_tab[90]
#define __pyx_int_90 __pyx_number_tab[91]
#define __pyx_int_91 __pyx_number_tab[92]
#define __pyx_int_92 __pyx_number_tab[93]
#define __pyx_int_93 __pyx_number_tab[94]
#define __pyx_int_94 __pyx_number_tab[95]
#define __pyx_int_95 __pyx_number_tab[96]
#define __pyx_int_96 __pyx_number_tab[97]
#define __pyx_int_97 __pyx_number_tab[98]
#define __pyx_int_98 __pyx_number_tab[99]
#define __pyx_int_99 __pyx_number_tab[100]
#define __pyx_int_100 __pyx_number_tab[101]
#define __pyx_int_101 __pyx_number_tab[102]
#define __pyx_int_102 __pyx_number_tab[103]
#define __pyx_int_103 __pyx_number_tab[104]
#define __pyx_int_104 __pyx_number_tab[105]
#define __pyx_int_105 __pyx_number_tab[106]
#define __pyx_int_106 __pyx_number_tab[107]
#define __pyx_int_107 __pyx_number_tab[108]
#define __pyx_int_108 __pyx_number_tab[109]
#define __pyx_int_109 __pyx_number_tab[110]
#define __pyx_int_110 __pyx_number_tab[111]
#define __pyx_int_111 __pyx_number_tab[112]
#define __pyx_int_112 __pyx_number_tab[113]
#define __pyx_int_113 __pyx_number_tab[114]
#define __pyx_int_114 __pyx_number_tab[115]
#define __pyx_int_115 __pyx_number_tab[116]
#define __pyx_int_116 __pyx_number_tab[117]
#define __pyx_int_117 __pyx_number_tab[118]
#define __pyx_int_118 __pyx_number_tab[119]
#define __pyx_int_119 __pyx_number_tab[120]
#define __pyx_int_120 __pyx_number_tab[121]
#define __pyx_int_121 __pyx_number_tab[122]
#define __pyx_int_122 __pyx_number_tab[123]
#define __pyx_int_123 __pyx_number_tab[124]
#define __pyx_int_124 __pyx_number_tab[125]
#define __pyx_int_125 __pyx_number_tab[126]
#define __pyx_int_126 __pyx_number_tab[127]
#define __pyx_int_127 __pyx_number_tab[128]
#define __pyx_int_128 __pyx_number_tab[129]
#define __pyx_int_129 __pyx_number_tab[130]
#define __pyx_int_130 __pyx_number_tab[131]
#define __pyx_int_131 __pyx_number_tab[132]
#define __pyx_int_132 __pyx_number_tab[133]
#define __pyx_int_133 __pyx_number_tab[134]
#define __pyx_int_134 __pyx_number_tab[135]
#define __pyx_int_135 __pyx_number_tab[136]
#define __pyx_int_136 __pyx_number_tab[137]
#define __pyx_int_137 __pyx_number_tab[138]
#define __pyx_int_138 __pyx_number_tab[139]
#define __pyx_int_139 __pyx_number_tab[140]
#define __pyx_int_140 __pyx_number_tab[141]
#define __pyx_int_141 __pyx_number_tab[142]
#define __pyx_int_142 __pyx_number_tab[143]
#define __pyx_int_143 __pyx_number_tab[144]
#define __pyx_int_144 __pyx_number_tab[145]
#define __pyx_int_145 __pyx_number_tab[146]
#define __pyx_int_146 __pyx_number_tab[147]
#define __pyx_int_147 __pyx_number_tab[148]
#define __pyx_int_148 __pyx_number_tab[149]
#define __pyx_int_149 __pyx_number_tab[150]
#define __pyx_int_150 __pyx_number_tab[151]
#define __pyx_int_151 __pyx_number_tab[152]
#define __pyx_int_152 __pyx_number_tab[153]
#define __pyx_int_153 __pyx_number_tab[154]
#define __pyx_int_154 __pyx_number_tab[155]
#define __pyx_int_155 __pyx_number_tab[156]
#define __pyx_int_156 __pyx_number_tab[157]
#define __pyx_int_157 __pyx_number_tab[158]
#define __pyx_int_158 __pyx_number_tab[159]
#define __pyx_int_159 __pyx_number_tab[160]
#define __pyx_int_160 __pyx_number_tab[161]
#define __pyx_int_161 __pyx_number_tab[162]
#define __pyx_int_162 __pyx_number_tab[163]
#define __pyx_int_163 __pyx_number_tab[164]
#define __pyx_int_164 __pyx_number_tab[165]
#define __pyx_int_165 __pyx_number_tab[166]
#define __pyx_int_166 __pyx_number_tab[167]
#define __pyx_int_167 __pyx_number_tab[168]
#define __pyx_int_168 __pyx_number_tab[169]
#define __pyx_int_169 __pyx_number_tab[170]
#define __pyx_int_170 __pyx_number_tab[171]
#define __pyx_int_171 __pyx_number_tab[172]
#define __pyx_int_172 __pyx_number_tab[173]
#define __pyx_int_173 __pyx_number_tab[174]
#define __pyx_int_174 __pyx_number_tab[175]
#define __pyx_int_175 __pyx_number_tab[176]
#define __pyx_int_176 __pyx_number_tab[177]
#define __pyx_int_177 __pyx_number_tab[178]
#define __pyx_int_178 __pyx_number_tab[179]
#define __pyx_int_179 __pyx_number_tab[180]
#define __pyx_int_180 __pyx_number_tab[181]
#define __pyx_int_181 __pyx_number_tab[182]
#define __pyx_int_182 __pyx_number_tab[183]
#define __pyx_int_183 __pyx_number_tab[184]
#define __pyx_int_184 __pyx_number_tab[185]
#define __pyx_int_185 __pyx_number_tab[186]
#define __pyx_int_186 __pyx_number_tab[187]
#define __pyx_int_187 __pyx_number_tab[188]
#define __pyx_int_188 __pyx_number_tab[189]
#define __pyx_int_189 __pyx_number_tab[190]
#define __pyx_int_190 __pyx_number_tab[191]
#define __pyx_int_191 __pyx_number_tab[192]
#define __pyx_int_192 __pyx_number_tab[193]
#define __pyx_int_193 __pyx_number_tab[194]
#define __pyx_int_194 __pyx_number_tab[195]
#define __pyx_int_195 __pyx_number_tab[196]
#define __pyx_int_196 __pyx_number_tab[197]
#define __pyx_int_197 __pyx_number_tab[198]
#define __pyx_int_198 __pyx_number_tab[199]
#define __pyx_int_199 __pyx_number_tab[200]
#define __pyx_int_200 __pyx_number_tab[201]
#define __pyx_int_201 __pyx_number_tab[202]
#define __pyx_int_202 __pyx_number_tab[203]
#define __pyx_int_203 __pyx_number_tab[204]
#define __pyx_int_204 __pyx_number_tab[205]
#define __pyx_int_205 __pyx_number_tab[206]
#define __pyx_int_206 __pyx_number_tab[207]
#define __pyx_int_207 __pyx_number_tab[208]
#define __pyx_int_208 __pyx_number_tab[209]
#define __pyx_int_209 __pyx_number_tab[210]
#define __pyx_int_210 __pyx_number_tab[211]
#define __pyx_int_211 __pyx_number_tab[212]
#define __pyx_int_212 __pyx_number_tab[213]
#define __pyx_int_213 __pyx_number_tab[214]
#define __pyx_int_214 __pyx_number_tab[215]
#define __pyx_int_215 __pyx_number_tab[216]
#define __pyx_int_216 __pyx_number_tab[217]
#define __pyx_int_217 __pyx_number_tab[218]
#define __pyx_int_218 __pyx_number_tab[219]
#define __pyx_int_219 __pyx_number_tab[220]
#define __pyx_int_220 __pyx_number_tab[221]
#define __pyx_int_221 __pyx_number_tab[222]
#define __pyx_int_222 __pyx_number_tab[223]
#define __pyx_int_223 __pyx_number_tab[224]
#define __pyx_int_224 __pyx_number_tab[225]
#define __pyx_int_225 __pyx_number_tab[226]
#define __pyx_int_226 __pyx_number_tab[227]
#define __pyx_int_227 __pyx_number_tab[228]
#define __pyx_int_228 __pyx_number_tab[229]
#define __pyx_int_229 __pyx_number_tab[230]
#define __pyx_int_230 __pyx_number_tab[231]
#define __pyx_int_231 __pyx_number_tab[232]
#define __pyx_int_232 __pyx_number_tab[233]
#define __pyx_int_233 __pyx_number_tab[234]
#define __pyx_int_234 __pyx_number_tab[235]
#define __pyx_int_235 __pyx_number_tab[236]
#define __pyx_int_236 __pyx_number_tab[237]
#define __pyx_int_237 __pyx_number_tab[238]
#define __pyx_int_238 __pyx_number_tab[239]
#define __pyx_int_239 __pyx_number_tab[240]
#define __pyx_int_240 __pyx_number_tab[241]
#define __pyx_int_241 __pyx_number_tab[242]
#define __pyx_int_242 __pyx_number_tab[243]
#define __pyx_int_243 __pyx_number_tab[244]
#define __pyx_int_244 __pyx_number_tab[245]
#define __pyx_int_245 __pyx_number_tab[246]
#define __pyx_int_246 __pyx_number_tab[247]
#define __pyx_int_247 __pyx_number_tab[248]
#define __pyx_int_248 __pyx_number_tab[249]
#define __pyx_int_249 __pyx_number_tab[250]
#define __pyx_int_250 __pyx_number_tab[251]
#define __pyx_int_251 __pyx_number_tab[252]
#define __pyx_int_252 __pyx_number_tab[253]
#define __pyx_int_253 __pyx_number_tab[254]
#define __pyx_int_254 __pyx_number_tab[255]
#define __pyx_int_255 __pyx_number_tab[256]
#define __pyx_int_256 __pyx_number_tab[257]
#define __pyx_int_257 __pyx_number_tab[258]
#define __pyx_int_258 __pyx_number_tab[259]
#define __pyx_int_259 __pyx_number_tab[260]
#define __pyx_int_260 __pyx_number_tab[261]
#define __pyx_int_261 __pyx_number_tab[262]
#define __pyx_int_262 __pyx_number_tab[263]
#define __pyx_int_263 __pyx_number_tab[264]
#define __pyx_int_264 __pyx_number_tab[265]
#define __pyx_int_265 __pyx_number_tab[266]
#define __pyx_int_266 __pyx_number_tab[267]
#define __pyx_int_267 __pyx_number_tab[268]
#define __pyx_int_268 __pyx_number_tab[269]
#define __pyx_int_269 __pyx_number_tab[270]
#define __pyx_int_270 __pyx_number_tab[271]
#define __pyx_int_271 __pyx_number_tab[272]
#define __pyx_int_272 __pyx_number_tab[273]
#define __pyx_int_273 __pyx_number_tab[274]
#define __pyx_int_274 __pyx_number_tab[275]
#define __pyx_int_4096 __pyx_number_tab[276]
#define __pyx_int_5120 __pyx_number_tab[277]
#define __pyx_int_8192 __pyx_number_tab[278]
#define __pyx_int_136983863 __pyx_number_tab[279]
#define __pyx_int_175497610 __pyx_number_tab[280]
/* #### Code section: module_state_clear ### */
#if CYTHON_USE_MODULE_STATE
/*
 * m_clear slot for the extension module (per-module state, PEP 3121/489).
 *
 * Drops every strong reference held in the module's state struct so the
 * cyclic garbage collector can break reference cycles that run through the
 * module object. Py_CLEAR NULLs each slot before decref'ing, so re-entrant
 * code observing the state during teardown sees NULL rather than a freed
 * object. Returns 0 (success) unconditionally, per the m_clear contract.
 *
 * NOTE: generated by Cython from cuda/bindings/_nvml.pyx -- do not edit by
 * hand; the member list must match the __pyx_mstatetype declaration emitted
 * earlier in this file. Regenerate instead of patching.
 */
static CYTHON_SMALL_CODE int __pyx_m_clear(PyObject *m) {
  __pyx_mstatetype *clear_module_state = __Pyx_PyModule_GetState(m);
  /* No state allocated (e.g. clear during a failed init): nothing to do. */
  if (!clear_module_state) return 0;
  /* Core interpreter objects cached at module init (module dict, builtins,
   * cython_runtime, shared empty containers). */
  Py_CLEAR(clear_module_state->__pyx_d);
  Py_CLEAR(clear_module_state->__pyx_b);
  Py_CLEAR(clear_module_state->__pyx_cython_runtime);
  Py_CLEAR(clear_module_state->__pyx_empty_tuple);
  Py_CLEAR(clear_module_state->__pyx_empty_bytes);
  Py_CLEAR(clear_module_state->__pyx_empty_unicode);
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  /* Unregister this interpreter's module from Cython's interp-state map. */
  __Pyx_State_RemoveModule(NULL);
  #endif
  /* Borrowed-from-CPython builtin types cached for fast type checks. */
  Py_CLEAR(clear_module_state->__pyx_ptype_7cpython_4type_type);
  Py_CLEAR(clear_module_state->__pyx_ptype_7cpython_4bool_bool);
  Py_CLEAR(clear_module_state->__pyx_ptype_7cpython_7complex_complex);
  /* Extension types defined by this module. Each type has two slots: the
   * __pyx_ptype_* alias used by generated code and the __pyx_type_* heap
   * type object itself (both cleared; order within a pair is harmless). */
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_PciInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_Utilization);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Memory);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_Memory);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_Memory_v2);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_BAR1Memory);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Value);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_Value);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_HwbcEntry);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_LedState);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_LedState);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_UnitInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_PSUInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EventData);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_EventData);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_AccountingStats);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_FBCStats);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuVersion);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpmSupport);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_Pdi_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Sample);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_Sample);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_FieldValue);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures);
  Py_CLEAR(clear_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2);
  Py_CLEAR(clear_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2);
  /* Cython's internal memoryview support types (array, Enum, memoryview,
   * _memoryviewslice), same ptype/type pairing as above. */
  Py_CLEAR(clear_module_state->__pyx_array_type);
  Py_CLEAR(clear_module_state->__pyx_type___pyx_array);
  Py_CLEAR(clear_module_state->__pyx_MemviewEnum_type);
  Py_CLEAR(clear_module_state->__pyx_type___pyx_MemviewEnum);
  Py_CLEAR(clear_module_state->__pyx_memoryview_type);
  Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryview);
  Py_CLEAR(clear_module_state->__pyx_memoryviewslice_type);
  Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryviewslice);
  /* Cached constant tables; loop bounds must equal the generated array
   * sizes declared with the module state (slices, tuples, code objects,
   * interned strings, cached int objects). */
  for (int i=0; i<2; ++i) { Py_CLEAR(clear_module_state->__pyx_slice[i]); }
  for (int i=0; i<13; ++i) { Py_CLEAR(clear_module_state->__pyx_tuple[i]); }
  for (int i=0; i<814; ++i) { Py_CLEAR(clear_module_state->__pyx_codeobj_tab[i]); }
  for (int i=0; i<3377; ++i) { Py_CLEAR(clear_module_state->__pyx_string_tab[i]); }
  for (int i=0; i<281; ++i) { Py_CLEAR(clear_module_state->__pyx_number_tab[i]); }
/* #### Code section: module_state_clear_contents ### */
/* CommonTypesMetaclass.module_state_clear */
Py_CLEAR(clear_module_state->__pyx_CommonTypesMetaclassType);

/* CythonFunctionShared.module_state_clear */
Py_CLEAR(clear_module_state->__pyx_CyFunctionType);

/* #### Code section: module_state_clear_end ### */
/* m_clear always reports success. */
return 0;
}
#endif
/* #### Code section: module_state_traverse ### */
#if CYTHON_USE_MODULE_STATE
/* GC traversal hook (tp_traverse-style, installed as PyModuleDef.m_traverse)
 * for the per-module state when CYTHON_USE_MODULE_STATE is enabled.
 * Reports every owned PyObject* in the module state to the cyclic garbage
 * collector via Py_VISIT so reference cycles through this module can be
 * collected.  The body is generated by Cython and must mirror, member for
 * member, the Py_CLEAR list in __pyx_m_clear above. */
static CYTHON_SMALL_CODE int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) {
  __pyx_mstatetype *traverse_module_state = __Pyx_PyModule_GetState(m);
  /* No state yet (e.g. traversal before module exec finished): nothing to visit. */
  if (!traverse_module_state) return 0;
  /* Core per-module objects: __dict__, builtins, cython_runtime. */
  Py_VISIT(traverse_module_state->__pyx_d);
  Py_VISIT(traverse_module_state->__pyx_b);
  Py_VISIT(traverse_module_state->__pyx_cython_runtime);
  /* Interned empty constants (visited via the const-safe wrapper). */
  __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_tuple);
  __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_bytes);
  __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_unicode);
  /* Cached references to CPython builtin types. */
  Py_VISIT(traverse_module_state->__pyx_ptype_7cpython_4type_type);
  Py_VISIT(traverse_module_state->__pyx_ptype_7cpython_4bool_bool);
  Py_VISIT(traverse_module_state->__pyx_ptype_7cpython_7complex_complex);
  /* Extension types defined by cuda.bindings._nvml.  Each type is stored
   * twice: __pyx_ptype_* (the usable PyTypeObject pointer) and
   * __pyx_type_* (the heap-type object owning the reference). */
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_PciInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_Utilization);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Memory);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_Memory);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_Memory_v2);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_BAR1Memory);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Value);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_Value);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_HwbcEntry);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_LedState);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_LedState);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_UnitInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_PSUInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EventData);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_EventData);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_AccountingStats);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_FBCStats);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuVersion);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpmSupport);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_Pdi_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_Sample);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_Sample);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_FieldValue);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures);
  Py_VISIT(traverse_module_state->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2);
  Py_VISIT(traverse_module_state->__pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2);
  /* Internal View.MemoryView helper types (cython.array, Enum, memoryview,
   * memoryviewslice). */
  Py_VISIT(traverse_module_state->__pyx_array_type);
  Py_VISIT(traverse_module_state->__pyx_type___pyx_array);
  Py_VISIT(traverse_module_state->__pyx_MemviewEnum_type);
  Py_VISIT(traverse_module_state->__pyx_type___pyx_MemviewEnum);
  Py_VISIT(traverse_module_state->__pyx_memoryview_type);
  Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryview);
  Py_VISIT(traverse_module_state->__pyx_memoryviewslice_type);
  Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryviewslice);
  /* Cached constant tables: slices, tuples, code objects, interned strings,
   * numbers.  Loop bounds are generated to match the fixed array sizes in
   * __pyx_mstatetype and must match the Py_CLEAR loops in __pyx_m_clear. */
  for (int i=0; i<2; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_slice[i]); }
  for (int i=0; i<13; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_tuple[i]); }
  for (int i=0; i<814; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_codeobj_tab[i]); }
  for (int i=0; i<3377; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_string_tab[i]); }
  for (int i=0; i<281; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_number_tab[i]); }
/* #### Code section: module_state_traverse_contents ### */
/* CommonTypesMetaclass.module_state_traverse */
Py_VISIT(traverse_module_state->__pyx_CommonTypesMetaclassType);

/* CythonFunctionShared.module_state_traverse */
Py_VISIT(traverse_module_state->__pyx_CyFunctionType);

/* #### Code section: module_state_traverse_end ### */
/* tp_traverse contract: return 0 on success (Py_VISIT returns early on a
 * non-zero visitor result). */
return 0;
}
#endif
/* #### Code section: module_code ### */

/* "View.MemoryView":129
 *         cdef bint dtype_is_object
 * 
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 *                   mode="c", bint allocate_buffer=True):
 * 
*/

/* Python wrapper */
/* Python-level wrapper for View.MemoryView.array.__cinit__.
 * Unpacks the call (shape: tuple, itemsize: Py_ssize_t, format: not-None,
 * mode="c", allocate_buffer=True) from *args/**kwargs, applies defaults,
 * type-checks `shape` and the not-None constraint on `format`, then
 * delegates to the generated implementation function.
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * Fix vs. generated code: the kwargs-length error check read
 * `unlikely(__pyx_kwds_len) < 0`.  Because unlikely(x) expands to
 * __builtin_expect(!!(x), 0), that expression is `(0 or 1) < 0` and is
 * always false, so a negative (error) result from
 * __Pyx_NumKwargs_VARARGS was silently ignored.  The comparison now sits
 * inside the macro argument: `unlikely(__pyx_kwds_len < 0)`. */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_shape = 0;
  Py_ssize_t __pyx_v_itemsize;
  PyObject *__pyx_v_format = 0;
  PyObject *__pyx_v_mode = 0;
  int __pyx_v_allocate_buffer;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds one strong reference per parameter slot; every exit path
   * below decrefs the whole array. */
  PyObject* values[5] = {0,0,0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_shape,&__pyx_mstate_global->__pyx_n_u_itemsize,&__pyx_mstate_global->__pyx_n_u_format,&__pyx_mstate_global->__pyx_n_u_mode,&__pyx_mstate_global->__pyx_n_u_allocate_buffer,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* Bug fix: comparison moved inside unlikely() so a negative (error)
     * kwargs length actually triggers the error path. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 129, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect the positional args first, then
       * let __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  5:
        values[4] = __Pyx_ArgRef_VARARGS(__pyx_args, 4);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  4:
        values[3] = __Pyx_ArgRef_VARARGS(__pyx_args, 3);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  3:
        values[2] = __Pyx_ArgRef_VARARGS(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(1, 129, __pyx_L3_error)
      /* Default for mode: "c". (allocate_buffer's default is applied later,
       * outside this branch.) */
      if (!values[3]) values[3] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_n_u_c));
      /* shape, itemsize and format (slots 0..2) are required. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, i); __PYX_ERR(1, 129, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call: 3 to 5 arguments accepted. */
      switch (__pyx_nargs) {
        case  5:
        values[4] = __Pyx_ArgRef_VARARGS(__pyx_args, 4);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  4:
        values[3] = __Pyx_ArgRef_VARARGS(__pyx_args, 3);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(1, 129, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  3:
        values[2] = __Pyx_ArgRef_VARARGS(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 129, __pyx_L3_error)
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 129, __pyx_L3_error)
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 129, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[3]) values[3] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_n_u_c));
    }
    /* Convert collected slots into C-level locals. */
    __pyx_v_shape = ((PyObject*)values[0]);
    __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 129, __pyx_L3_error)
    __pyx_v_format = values[2];
    __pyx_v_mode = values[3];
    if (values[4]) {
      __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 130, __pyx_L3_error)
    } else {

      /* "View.MemoryView":130
 * 
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 *                   mode="c", bint allocate_buffer=True):             # <<<<<<<<<<<<<<
 * 
 *         cdef int idx
 */
      __pyx_v_allocate_buffer = ((int)1);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, __pyx_nargs); __PYX_ERR(1, 129, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop the references gathered so far and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  /* shape must be a tuple (exact or subclass, None rejected); format must
   * not be None per the `format not None` declaration. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 129, __pyx_L1_error)
  if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 129, __pyx_L1_error)
  }
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);

  /* "View.MemoryView":129
 *         cdef bint dtype_is_object
 * 
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 *                   mode="c", bint allocate_buffer=True):
 * 
 */

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation body of View.MemoryView.array.__cinit__ (Cython-generated).
 *
 * Behavior shown by the code below:
 *   - validates that `shape` is a non-empty tuple and `itemsize > 0`;
 *   - encodes `format` to ASCII bytes if it is not already bytes, and keeps
 *     a reference in self->_format while caching the raw char* in self->format;
 *   - allocates ONE PyObject_Malloc block of 2*ndim Py_ssize_t, using the
 *     first half as self->_shape and the second half as self->_strides;
 *   - copies each positive dimension from `shape`, raising ValueError on
 *     dim <= 0;
 *   - maps mode 'c'/'fortran' to order 'C'/'F' and fills contiguous strides;
 *   - optionally allocates the data buffer when allocate_buffer is true.
 *
 * Returns 0 on success, -1 on error (with a Python exception set).
 *
 * NOTE(review): machine-generated by Cython 3.2.2 — do not hand-edit the
 * logic; regenerate from the .pyx source instead. Comments only added here.
 */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
  int __pyx_v_idx;
  Py_ssize_t __pyx_v_dim;
  char __pyx_v_order;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  char *__pyx_t_7;
  int __pyx_t_8;
  Py_ssize_t __pyx_t_9;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11[5];
  PyObject *__pyx_t_12 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__cinit__", 0);
  /* `format` may be rebound below (encode), so own a reference for the
     duration of the function; released at __pyx_L0. */
  __Pyx_INCREF(__pyx_v_format);

  /* "View.MemoryView":135
 *         cdef Py_ssize_t dim
 * 
 *         self.ndim = <int> len(shape)             # <<<<<<<<<<<<<<
 *         self.itemsize = itemsize
 * 
 */
  if (unlikely(__pyx_v_shape == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(1, 135, __pyx_L1_error)
  }
  __pyx_t_1 = __Pyx_PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 135, __pyx_L1_error)
  __pyx_v_self->ndim = ((int)__pyx_t_1);

  /* "View.MemoryView":136
 * 
 *         self.ndim = <int> len(shape)
 *         self.itemsize = itemsize             # <<<<<<<<<<<<<<
 * 
 *         if not self.ndim:
 */
  __pyx_v_self->itemsize = __pyx_v_itemsize;

  /* "View.MemoryView":138
 *         self.itemsize = itemsize
 * 
 *         if not self.ndim:             # <<<<<<<<<<<<<<
 *             raise ValueError, "Empty shape tuple for cython.array"
 * 
 */
  __pyx_t_2 = (!(__pyx_v_self->ndim != 0));
  if (unlikely(__pyx_t_2)) {

    /* "View.MemoryView":139
 * 
 *         if not self.ndim:
 *             raise ValueError, "Empty shape tuple for cython.array"             # <<<<<<<<<<<<<<
 * 
 *         if itemsize <= 0:
 */
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_Empty_shape_tuple_for_cython_arr, 0, 0);
    __PYX_ERR(1, 139, __pyx_L1_error)

    /* "View.MemoryView":138
 *         self.itemsize = itemsize
 * 
 *         if not self.ndim:             # <<<<<<<<<<<<<<
 *             raise ValueError, "Empty shape tuple for cython.array"
 * 
 */
  }

  /* "View.MemoryView":141
 *             raise ValueError, "Empty shape tuple for cython.array"
 * 
 *         if itemsize <= 0:             # <<<<<<<<<<<<<<
 *             raise ValueError, "itemsize <= 0 for cython.array"
 * 
 */
  __pyx_t_2 = (__pyx_v_itemsize <= 0);
  if (unlikely(__pyx_t_2)) {

    /* "View.MemoryView":142
 * 
 *         if itemsize <= 0:
 *             raise ValueError, "itemsize <= 0 for cython.array"             # <<<<<<<<<<<<<<
 * 
 *         if not isinstance(format, bytes):
 */
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_itemsize_0_for_cython_array, 0, 0);
    __PYX_ERR(1, 142, __pyx_L1_error)

    /* "View.MemoryView":141
 *             raise ValueError, "Empty shape tuple for cython.array"
 * 
 *         if itemsize <= 0:             # <<<<<<<<<<<<<<
 *             raise ValueError, "itemsize <= 0 for cython.array"
 * 
 */
  }

  /* "View.MemoryView":144
 *             raise ValueError, "itemsize <= 0 for cython.array"
 * 
 *         if not isinstance(format, bytes):             # <<<<<<<<<<<<<<
 *             format = format.encode('ASCII')
 *         self._format = format  # keep a reference to the byte string
 */
  __pyx_t_2 = PyBytes_Check(__pyx_v_format); 
  __pyx_t_3 = (!__pyx_t_2);
  if (__pyx_t_3) {

    /* "View.MemoryView":145
 * 
 *         if not isinstance(format, bytes):
 *             format = format.encode('ASCII')             # <<<<<<<<<<<<<<
 *         self._format = format  # keep a reference to the byte string
 *         self.format = self._format
 */
    /* Vectorcall of format.encode('ASCII'); rebinds the local `format`. */
    __pyx_t_5 = __pyx_v_format;
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_n_u_ASCII};
      __pyx_t_4 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 145, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
    }
    __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_4);
    __pyx_t_4 = 0;

    /* "View.MemoryView":144
 *             raise ValueError, "itemsize <= 0 for cython.array"
 * 
 *         if not isinstance(format, bytes):             # <<<<<<<<<<<<<<
 *             format = format.encode('ASCII')
 *         self._format = format  # keep a reference to the byte string
 */
  }

  /* "View.MemoryView":146
 *         if not isinstance(format, bytes):
 *             format = format.encode('ASCII')
 *         self._format = format  # keep a reference to the byte string             # <<<<<<<<<<<<<<
 *         self.format = self._format
 * 
 */
  __pyx_t_4 = __pyx_v_format;
  __Pyx_INCREF(__pyx_t_4);
  if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_4))) __PYX_ERR(1, 146, __pyx_L1_error)
  __Pyx_GIVEREF(__pyx_t_4);
  __Pyx_GOTREF(__pyx_v_self->_format);
  __Pyx_DECREF(__pyx_v_self->_format);
  __pyx_v_self->_format = ((PyObject*)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "View.MemoryView":147
 *             format = format.encode('ASCII')
 *         self._format = format  # keep a reference to the byte string
 *         self.format = self._format             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* self->format borrows the internal buffer of self->_format; valid only
     while self->_format stays alive (it does — the object holds it). */
  if (unlikely(__pyx_v_self->_format == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(1, 147, __pyx_L1_error)
  }
  __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 147, __pyx_L1_error)
  __pyx_v_self->format = __pyx_t_7;

  /* "View.MemoryView":150
 * 
 * 
 *         self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)             # <<<<<<<<<<<<<<
 *         self._strides = self._shape + self.ndim
 * 
 */
  /* Single allocation holds both shape (first ndim slots) and strides
     (second ndim slots); freed together in __dealloc__. */
  __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));

  /* "View.MemoryView":151
 * 
 *         self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
 *         self._strides = self._shape + self.ndim             # <<<<<<<<<<<<<<
 * 
 *         if not self._shape:
 */
  __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);

  /* "View.MemoryView":153
 *         self._strides = self._shape + self.ndim
 * 
 *         if not self._shape:             # <<<<<<<<<<<<<<
 *             raise MemoryError, "unable to allocate shape and strides."
 * 
 */
  __pyx_t_3 = (!(__pyx_v_self->_shape != 0));
  if (unlikely(__pyx_t_3)) {

    /* "View.MemoryView":154
 * 
 *         if not self._shape:
 *             raise MemoryError, "unable to allocate shape and strides."             # <<<<<<<<<<<<<<
 * 
 * 
 */
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_MemoryError))), __pyx_mstate_global->__pyx_kp_u_unable_to_allocate_shape_and_str, 0, 0);
    __PYX_ERR(1, 154, __pyx_L1_error)

    /* "View.MemoryView":153
 *         self._strides = self._shape + self.ndim
 * 
 *         if not self._shape:             # <<<<<<<<<<<<<<
 *             raise MemoryError, "unable to allocate shape and strides."
 * 
 */
  }

  /* "View.MemoryView":157
 * 
 * 
 *         for idx, dim in enumerate(shape):             # <<<<<<<<<<<<<<
 *             if dim <= 0:
 *                 raise ValueError, f"Invalid shape in axis {idx}: {dim}."
 */
  /* Manual enumerate(): __pyx_t_8 is idx, __pyx_t_1 is the tuple cursor. */
  __pyx_t_8 = 0;
  __pyx_t_4 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_4);
  __pyx_t_1 = 0;
  for (;;) {
    {
      Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_4);
      #if !CYTHON_ASSUME_SAFE_SIZE
      if (unlikely((__pyx_temp < 0))) __PYX_ERR(1, 157, __pyx_L1_error)
      #endif
      if (__pyx_t_1 >= __pyx_temp) break;
    }
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_5 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_1));
    #else
    __pyx_t_5 = __Pyx_PySequence_ITEM(__pyx_t_4, __pyx_t_1);
    #endif
    ++__pyx_t_1;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 157, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 157, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_9;
    __pyx_v_idx = __pyx_t_8;
    __pyx_t_8 = (__pyx_t_8 + 1);

    /* "View.MemoryView":158
 * 
 *         for idx, dim in enumerate(shape):
 *             if dim <= 0:             # <<<<<<<<<<<<<<
 *                 raise ValueError, f"Invalid shape in axis {idx}: {dim}."
 *             self._shape[idx] = dim
 */
    __pyx_t_3 = (__pyx_v_dim <= 0);
    if (unlikely(__pyx_t_3)) {

      /* "View.MemoryView":159
 *         for idx, dim in enumerate(shape):
 *             if dim <= 0:
 *                 raise ValueError, f"Invalid shape in axis {idx}: {dim}."             # <<<<<<<<<<<<<<
 *             self._shape[idx] = dim
 * 
 */
      /* Build the f-string by joining 5 fragments (literal/idx/": "/dim/"."). */
      __pyx_t_5 = __Pyx_PyUnicode_From_int(__pyx_v_idx, 0, ' ', 'd'); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 159, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_10 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 159, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __pyx_t_11[0] = __pyx_mstate_global->__pyx_kp_u_Invalid_shape_in_axis;
      __pyx_t_11[1] = __pyx_t_5;
      __pyx_t_11[2] = __pyx_mstate_global->__pyx_kp_u_;
      __pyx_t_11[3] = __pyx_t_10;
      __pyx_t_11[4] = __pyx_mstate_global->__pyx_kp_u__2;
      __pyx_t_12 = __Pyx_PyUnicode_Join(__pyx_t_11, 5, 22 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5) + 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_10) + 1, 127);
      if (unlikely(!__pyx_t_12)) __PYX_ERR(1, 159, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_12);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_t_12, 0, 0);
      __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
      __PYX_ERR(1, 159, __pyx_L1_error)

      /* "View.MemoryView":158
 * 
 *         for idx, dim in enumerate(shape):
 *             if dim <= 0:             # <<<<<<<<<<<<<<
 *                 raise ValueError, f"Invalid shape in axis {idx}: {dim}."
 *             self._shape[idx] = dim
 */
    }

    /* "View.MemoryView":160
 *             if dim <= 0:
 *                 raise ValueError, f"Invalid shape in axis {idx}: {dim}."
 *             self._shape[idx] = dim             # <<<<<<<<<<<<<<
 * 
 *         cdef char order
 */
    (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;

    /* "View.MemoryView":157
 * 
 * 
 *         for idx, dim in enumerate(shape):             # <<<<<<<<<<<<<<
 *             if dim <= 0:
 *                 raise ValueError, f"Invalid shape in axis {idx}: {dim}."
 */
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "View.MemoryView":163
 * 
 *         cdef char order
 *         if mode == 'c':             # <<<<<<<<<<<<<<
 *             order = b'C'
 *             self.mode = u'c'
 */
  __pyx_t_3 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_mstate_global->__pyx_n_u_c, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 163, __pyx_L1_error)
  if (__pyx_t_3) {

    /* "View.MemoryView":164
 *         cdef char order
 *         if mode == 'c':
 *             order = b'C'             # <<<<<<<<<<<<<<
 *             self.mode = u'c'
 *         elif mode == 'fortran':
 */
    __pyx_v_order = 'C';

    /* "View.MemoryView":165
 *         if mode == 'c':
 *             order = b'C'
 *             self.mode = u'c'             # <<<<<<<<<<<<<<
 *         elif mode == 'fortran':
 *             order = b'F'
 */
    __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_c);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_c);
    __Pyx_GOTREF(__pyx_v_self->mode);
    __Pyx_DECREF(__pyx_v_self->mode);
    __pyx_v_self->mode = __pyx_mstate_global->__pyx_n_u_c;

    /* "View.MemoryView":163
 * 
 *         cdef char order
 *         if mode == 'c':             # <<<<<<<<<<<<<<
 *             order = b'C'
 *             self.mode = u'c'
 */
    goto __pyx_L11;
  }

  /* "View.MemoryView":166
 *             order = b'C'
 *             self.mode = u'c'
 *         elif mode == 'fortran':             # <<<<<<<<<<<<<<
 *             order = b'F'
 *             self.mode = u'fortran'
 */
  __pyx_t_3 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_mstate_global->__pyx_n_u_fortran, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 166, __pyx_L1_error)
  if (likely(__pyx_t_3)) {

    /* "View.MemoryView":167
 *             self.mode = u'c'
 *         elif mode == 'fortran':
 *             order = b'F'             # <<<<<<<<<<<<<<
 *             self.mode = u'fortran'
 *         else:
 */
    __pyx_v_order = 'F';

    /* "View.MemoryView":168
 *         elif mode == 'fortran':
 *             order = b'F'
 *             self.mode = u'fortran'             # <<<<<<<<<<<<<<
 *         else:
 *             raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
 */
    __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_fortran);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_fortran);
    __Pyx_GOTREF(__pyx_v_self->mode);
    __Pyx_DECREF(__pyx_v_self->mode);
    __pyx_v_self->mode = __pyx_mstate_global->__pyx_n_u_fortran;

    /* "View.MemoryView":166
 *             order = b'C'
 *             self.mode = u'c'
 *         elif mode == 'fortran':             # <<<<<<<<<<<<<<
 *             order = b'F'
 *             self.mode = u'fortran'
 */
    goto __pyx_L11;
  }

  /* "View.MemoryView":170
 *             self.mode = u'fortran'
 *         else:
 *             raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"             # <<<<<<<<<<<<<<
 * 
 *         self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
 */
  /*else*/ {
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_mode, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 170, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_12 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_t_4); if (unlikely(!__pyx_t_12)) __PYX_ERR(1, 170, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_12);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_t_12, 0, 0);
    __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
    __PYX_ERR(1, 170, __pyx_L1_error)
  }
  __pyx_L11:;

  /* "View.MemoryView":172
 *             raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
 * 
 *         self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)             # <<<<<<<<<<<<<<
 * 
 *         self.free_data = allocate_buffer
 */
  __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);

  /* "View.MemoryView":174
 *         self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
 * 
 *         self.free_data = allocate_buffer             # <<<<<<<<<<<<<<
 *         self.dtype_is_object = format == b'O'
 * 
 */
  __pyx_v_self->free_data = __pyx_v_allocate_buffer;

  /* "View.MemoryView":175
 * 
 *         self.free_data = allocate_buffer
 *         self.dtype_is_object = format == b'O'             # <<<<<<<<<<<<<<
 * 
 *         if allocate_buffer:
 */
  __pyx_t_12 = PyObject_RichCompare(__pyx_v_format, __pyx_mstate_global->__pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_12); if (unlikely(!__pyx_t_12)) __PYX_ERR(1, 175, __pyx_L1_error)
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 175, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  __pyx_v_self->dtype_is_object = __pyx_t_3;

  /* "View.MemoryView":177
 *         self.dtype_is_object = format == b'O'
 * 
 *         if allocate_buffer:             # <<<<<<<<<<<<<<
 *             _allocate_buffer(self)
 * 
 */
  if (__pyx_v_allocate_buffer) {

    /* "View.MemoryView":178
 * 
 *         if allocate_buffer:
 *             _allocate_buffer(self)             # <<<<<<<<<<<<<<
 * 
 *     @cname('getbuffer')
 */
    __pyx_t_8 = __pyx_array_allocate_buffer(__pyx_v_self); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(1, 178, __pyx_L1_error)

    /* "View.MemoryView":177
 *         self.dtype_is_object = format == b'O'
 * 
 *         if allocate_buffer:             # <<<<<<<<<<<<<<
 *             _allocate_buffer(self)
 * 
 */
  }

  /* "View.MemoryView":129
 *         cdef bint dtype_is_object
 * 
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 *                   mode="c", bint allocate_buffer=True):
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release any live temporaries, record traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_format);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":180
 *             _allocate_buffer(self)
 * 
 *     @cname('getbuffer')             # <<<<<<<<<<<<<<
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         cdef int bufmode = -1
*/

/* Python wrapper */
/*
 * C-level wrapper exposed as the buffer-protocol `bf_getbuffer` slot for
 * the array type (Cython @cname('getbuffer')). Forwards directly to the
 * implementation function below; returns its 0/-1 result.
 * NOTE(review): __pyx_args/__pyx_nargs come from generated macro context
 * (__Pyx_KwValues_VARARGS expansion) — do not hand-edit.
 */
CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of View.MemoryView.array.__getbuffer__ (buffer protocol).
 *
 * Fills *info from the array object:
 *   - rejects contiguity requests that conflict with the array's mode
 *     ('c' vs 'fortran') by raising ValueError;
 *   - with PyBUF_STRIDES: exports full ndim shape/strides;
 *     otherwise: exports a flat 1-D view (shape = &self->len iff PyBUF_ND);
 *   - format string only exported when PyBUF_FORMAT is requested;
 *   - always read-write (readonly = 0), no suboffsets;
 *   - on success info->obj owns a reference to self; on error the
 *     provisional Py_None reference in info->obj is released.
 *
 * Returns 0 on success, -1 on error with an exception set.
 * NOTE(review): Cython-generated; the info->obj ownership dance at the exit
 * labels is exact — comments only added here.
 */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_v_bufmode;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  char *__pyx_t_2;
  Py_ssize_t __pyx_t_3;
  int __pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  if (unlikely(__pyx_v_info == NULL)) {
    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
    return -1;
  }
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  /* Provisional: info->obj = None until the buffer is fully populated. */
  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(__pyx_v_info->obj);

  /* "View.MemoryView":182
 *     @cname('getbuffer')
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         cdef int bufmode = -1             # <<<<<<<<<<<<<<
 *         if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
 *             if self.mode == u"c":
 */
  __pyx_v_bufmode = -1;

  /* "View.MemoryView":183
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         cdef int bufmode = -1
 *         if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):             # <<<<<<<<<<<<<<
 *             if self.mode == u"c":
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
  __pyx_t_1 = ((__pyx_v_flags & ((PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS) | PyBUF_ANY_CONTIGUOUS)) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":184
 *         cdef int bufmode = -1
 *         if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
 *             if self.mode == u"c":             # <<<<<<<<<<<<<<
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             elif self.mode == u"fortran":
 */
    __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_mstate_global->__pyx_n_u_c, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 184, __pyx_L1_error)
    if (__pyx_t_1) {

      /* "View.MemoryView":185
 *         if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
 *             if self.mode == u"c":
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS             # <<<<<<<<<<<<<<
 *             elif self.mode == u"fortran":
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
      __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);

      /* "View.MemoryView":184
 *         cdef int bufmode = -1
 *         if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
 *             if self.mode == u"c":             # <<<<<<<<<<<<<<
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             elif self.mode == u"fortran":
 */
      goto __pyx_L4;
    }

    /* "View.MemoryView":186
 *             if self.mode == u"c":
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             elif self.mode == u"fortran":             # <<<<<<<<<<<<<<
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             if not (flags & bufmode):
 */
    __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_mstate_global->__pyx_n_u_fortran, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 186, __pyx_L1_error)
    if (__pyx_t_1) {

      /* "View.MemoryView":187
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             elif self.mode == u"fortran":
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS             # <<<<<<<<<<<<<<
 *             if not (flags & bufmode):
 *                 raise ValueError, "Can only create a buffer that is contiguous in memory."
 */
      __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);

      /* "View.MemoryView":186
 *             if self.mode == u"c":
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             elif self.mode == u"fortran":             # <<<<<<<<<<<<<<
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             if not (flags & bufmode):
 */
    }
    __pyx_L4:;

    /* "View.MemoryView":188
 *             elif self.mode == u"fortran":
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             if not (flags & bufmode):             # <<<<<<<<<<<<<<
 *                 raise ValueError, "Can only create a buffer that is contiguous in memory."
 *         info.buf = self.data
 */
    __pyx_t_1 = (!((__pyx_v_flags & __pyx_v_bufmode) != 0));
    if (unlikely(__pyx_t_1)) {

      /* "View.MemoryView":189
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             if not (flags & bufmode):
 *                 raise ValueError, "Can only create a buffer that is contiguous in memory."             # <<<<<<<<<<<<<<
 *         info.buf = self.data
 *         info.len = self.len
 */
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_Can_only_create_a_buffer_that_is, 0, 0);
      __PYX_ERR(1, 189, __pyx_L1_error)

      /* "View.MemoryView":188
 *             elif self.mode == u"fortran":
 *                 bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *             if not (flags & bufmode):             # <<<<<<<<<<<<<<
 *                 raise ValueError, "Can only create a buffer that is contiguous in memory."
 *         info.buf = self.data
 */
    }

    /* "View.MemoryView":183
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         cdef int bufmode = -1
 *         if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):             # <<<<<<<<<<<<<<
 *             if self.mode == u"c":
 *                 bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
  }

  /* "View.MemoryView":190
 *             if not (flags & bufmode):
 *                 raise ValueError, "Can only create a buffer that is contiguous in memory."
 *         info.buf = self.data             # <<<<<<<<<<<<<<
 *         info.len = self.len
 * 
 */
  __pyx_t_2 = __pyx_v_self->data;
  __pyx_v_info->buf = __pyx_t_2;

  /* "View.MemoryView":191
 *                 raise ValueError, "Can only create a buffer that is contiguous in memory."
 *         info.buf = self.data
 *         info.len = self.len             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_STRIDES:
 */
  __pyx_t_3 = __pyx_v_self->len;
  __pyx_v_info->len = __pyx_t_3;

  /* "View.MemoryView":193
 *         info.len = self.len
 * 
 *         if flags & PyBUF_STRIDES:             # <<<<<<<<<<<<<<
 *             info.ndim = self.ndim
 *             info.shape = self._shape
 */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":194
 * 
 *         if flags & PyBUF_STRIDES:
 *             info.ndim = self.ndim             # <<<<<<<<<<<<<<
 *             info.shape = self._shape
 *             info.strides = self._strides
 */
    __pyx_t_4 = __pyx_v_self->ndim;
    __pyx_v_info->ndim = __pyx_t_4;

    /* "View.MemoryView":195
 *         if flags & PyBUF_STRIDES:
 *             info.ndim = self.ndim
 *             info.shape = self._shape             # <<<<<<<<<<<<<<
 *             info.strides = self._strides
 *         else:
 */
    __pyx_t_5 = __pyx_v_self->_shape;
    __pyx_v_info->shape = __pyx_t_5;

    /* "View.MemoryView":196
 *             info.ndim = self.ndim
 *             info.shape = self._shape
 *             info.strides = self._strides             # <<<<<<<<<<<<<<
 *         else:
 *             info.ndim = 1
 */
    __pyx_t_5 = __pyx_v_self->_strides;
    __pyx_v_info->strides = __pyx_t_5;

    /* "View.MemoryView":193
 *         info.len = self.len
 * 
 *         if flags & PyBUF_STRIDES:             # <<<<<<<<<<<<<<
 *             info.ndim = self.ndim
 *             info.shape = self._shape
 */
    goto __pyx_L6;
  }

  /* "View.MemoryView":198
 *             info.strides = self._strides
 *         else:
 *             info.ndim = 1             # <<<<<<<<<<<<<<
 *             info.shape = &self.len if flags & PyBUF_ND else NULL
 *             info.strides = NULL
 */
  /*else*/ {
    /* No PyBUF_STRIDES requested: expose a flat 1-D byte view. */
    __pyx_v_info->ndim = 1;

    /* "View.MemoryView":199
 *         else:
 *             info.ndim = 1
 *             info.shape = &self.len if flags & PyBUF_ND else NULL             # <<<<<<<<<<<<<<
 *             info.strides = NULL
 * 
 */
    __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
    if (__pyx_t_1) {
      __pyx_t_5 = (&__pyx_v_self->len);
    } else {
      __pyx_t_5 = NULL;
    }
    __pyx_v_info->shape = __pyx_t_5;

    /* "View.MemoryView":200
 *             info.ndim = 1
 *             info.shape = &self.len if flags & PyBUF_ND else NULL
 *             info.strides = NULL             # <<<<<<<<<<<<<<
 * 
 *         info.suboffsets = NULL
 */
    __pyx_v_info->strides = NULL;
  }
  __pyx_L6:;

  /* "View.MemoryView":202
 *             info.strides = NULL
 * 
 *         info.suboffsets = NULL             # <<<<<<<<<<<<<<
 *         info.itemsize = self.itemsize
 *         info.readonly = 0
 */
  __pyx_v_info->suboffsets = NULL;

  /* "View.MemoryView":203
 * 
 *         info.suboffsets = NULL
 *         info.itemsize = self.itemsize             # <<<<<<<<<<<<<<
 *         info.readonly = 0
 *         info.format = self.format if flags & PyBUF_FORMAT else NULL
 */
  __pyx_t_3 = __pyx_v_self->itemsize;
  __pyx_v_info->itemsize = __pyx_t_3;

  /* "View.MemoryView":204
 *         info.suboffsets = NULL
 *         info.itemsize = self.itemsize
 *         info.readonly = 0             # <<<<<<<<<<<<<<
 *         info.format = self.format if flags & PyBUF_FORMAT else NULL
 *         info.obj = self
 */
  __pyx_v_info->readonly = 0;

  /* "View.MemoryView":205
 *         info.itemsize = self.itemsize
 *         info.readonly = 0
 *         info.format = self.format if flags & PyBUF_FORMAT else NULL             # <<<<<<<<<<<<<<
 *         info.obj = self
 * 
 */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {
    __pyx_t_2 = __pyx_v_self->format;
  } else {
    __pyx_t_2 = NULL;
  }
  __pyx_v_info->format = __pyx_t_2;

  /* "View.MemoryView":206
 *         info.readonly = 0
 *         info.format = self.format if flags & PyBUF_FORMAT else NULL
 *         info.obj = self             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(array self):
 */
  /* Replace the provisional None in info->obj with an owned ref to self. */
  __Pyx_INCREF((PyObject *)__pyx_v_self);
  __Pyx_GIVEREF((PyObject *)__pyx_v_self);
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);

  /* "View.MemoryView":180
 *             _allocate_buffer(self)
 * 
 *     @cname('getbuffer')             # <<<<<<<<<<<<<<
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         cdef int bufmode = -1
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: drop whatever landed in info->obj and signal failure. */
  __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  if (__pyx_v_info->obj != NULL) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
  }
  goto __pyx_L2;
  __pyx_L0:;
  /* Success path: only clear info->obj if it is still the provisional None
     (i.e. it was never replaced by self — defensive; normally replaced). */
  if (__pyx_v_info->obj == Py_None) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
  }
  __pyx_L2:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":208
 *         info.obj = self
 * 
 *     def __dealloc__(array self):             # <<<<<<<<<<<<<<
 *         if self.callback_free_data != NULL:
 *             self.callback_free_data(self.data)
*/

/* Python wrapper */
/*
 * tp_dealloc-level wrapper for View.MemoryView.array.__dealloc__.
 * Casts self to the concrete struct and forwards to the implementation;
 * returns nothing (dealloc must not fail).
 * NOTE(review): __pyx_args/__pyx_nargs come from generated macro context —
 * do not hand-edit; regenerate with Cython instead.
 */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of array.__dealloc__: releases the data buffer.
 * Order matters: a user-supplied callback_free_data takes precedence over
 * plain free(); object-typed buffers must have their element refcounts
 * decremented before the storage is released.  The _shape allocation
 * (which also holds _strides, allocated in one PyObject_Malloc block by
 * __cinit__ — not visible in this chunk, per the generated code's own
 * PyObject_Free pairing) is freed unconditionally at the end. */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
  int __pyx_t_1;
  int __pyx_t_2;

  /* "View.MemoryView":209
 * 
 *     def __dealloc__(array self):
 *         if self.callback_free_data != NULL:             # <<<<<<<<<<<<<<
 *             self.callback_free_data(self.data)
 *         elif self.free_data and self.data is not NULL:
 */
  __pyx_t_1 = (__pyx_v_self->callback_free_data != NULL);
  if (__pyx_t_1) {

    /* "View.MemoryView":210
 *     def __dealloc__(array self):
 *         if self.callback_free_data != NULL:
 *             self.callback_free_data(self.data)             # <<<<<<<<<<<<<<
 *         elif self.free_data and self.data is not NULL:
 *             if self.dtype_is_object:
 */
    __pyx_v_self->callback_free_data(__pyx_v_self->data);

    /* "View.MemoryView":209
 * 
 *     def __dealloc__(array self):
 *         if self.callback_free_data != NULL:             # <<<<<<<<<<<<<<
 *             self.callback_free_data(self.data)
 *         elif self.free_data and self.data is not NULL:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":211
 *         if self.callback_free_data != NULL:
 *             self.callback_free_data(self.data)
 *         elif self.free_data and self.data is not NULL:             # <<<<<<<<<<<<<<
 *             if self.dtype_is_object:
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
 */
  /* Short-circuit `and`: if free_data is true fall through to test
   * data != NULL; otherwise the result is free_data (false) directly. */
  if (__pyx_v_self->free_data) {
  } else {
    __pyx_t_1 = __pyx_v_self->free_data;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->data != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "View.MemoryView":212
 *             self.callback_free_data(self.data)
 *         elif self.free_data and self.data is not NULL:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
 *             free(self.data)
 */
    if (__pyx_v_self->dtype_is_object) {

      /* "View.MemoryView":213
 *         elif self.free_data and self.data is not NULL:
 *             if self.dtype_is_object:
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)             # <<<<<<<<<<<<<<
 *             free(self.data)
 *         PyObject_Free(self._shape)
 */
      /* inc=False (last arg 0): DECREF every PyObject* element before the
       * backing storage disappears. */
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);

      /* "View.MemoryView":212
 *             self.callback_free_data(self.data)
 *         elif self.free_data and self.data is not NULL:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
 *             free(self.data)
 */
    }

    /* "View.MemoryView":214
 *             if self.dtype_is_object:
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
 *             free(self.data)             # <<<<<<<<<<<<<<
 *         PyObject_Free(self._shape)
 * 
 */
    free(__pyx_v_self->data);

    /* "View.MemoryView":211
 *         if self.callback_free_data != NULL:
 *             self.callback_free_data(self.data)
 *         elif self.free_data and self.data is not NULL:             # <<<<<<<<<<<<<<
 *             if self.dtype_is_object:
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
 */
  }
  __pyx_L3:;

  /* "View.MemoryView":215
 *                 refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
 *             free(self.data)
 *         PyObject_Free(self._shape)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  PyObject_Free(__pyx_v_self->_shape);

  /* "View.MemoryView":208
 *         info.obj = self
 * 
 *     def __dealloc__(array self):             # <<<<<<<<<<<<<<
 *         if self.callback_free_data != NULL:
 *             self.callback_free_data(self.data)
 */

  /* function exit code */
}

/* "View.MemoryView":217
 *         PyObject_Free(self._shape)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memview(self):
 *         return self.get_memview()
*/

/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Property-getter slot wrapper for array.memview: downcasts self and
 * forwards to the implementation.  Returns a new reference, or NULL on
 * error (propagated from the impl). */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the array.memview property: `return self.get_memview()`.
 * Dispatches through the array vtable so subclass overrides of the cdef
 * method are honored.  Returns a new reference or NULL with an exception
 * set. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":219
 *     @property
 *     def memview(self):
 *         return self.get_memview()             # <<<<<<<<<<<<<<
 * 
 *     @cname('get_memview')
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 219, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":217
 *         PyObject_Free(self._shape)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memview(self):
 *         return self.get_memview()
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":221
 *         return self.get_memview()
 * 
 *     @cname('get_memview')             # <<<<<<<<<<<<<<
 *     cdef get_memview(self):
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
*/

/* cdef array.get_memview(): constructs and returns a new
 * View.MemoryView.memoryview wrapping this array with
 * ANY_CONTIGUOUS|FORMAT|WRITABLE buffer flags, i.e.
 * `memoryview(self, flags, self.dtype_is_object)`.
 * Returns a new reference or NULL (0) with an exception set. */
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_memview", 0);

  /* "View.MemoryView":223
 *     @cname('get_memview')
 *     cdef get_memview(self):
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE             # <<<<<<<<<<<<<<
 *         return  memoryview(self, flags, self.dtype_is_object)
 * 
 */
  __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);

  /* "View.MemoryView":224
 *     cdef get_memview(self):
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 *         return  memoryview(self, flags, self.dtype_is_object)             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  /* Vectorcall-style call: slot 0 is reserved for an (absent) bound-self,
   * so the argument array is offset by __pyx_t_5 == 1 and flagged with
   * __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET. */
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, ((PyObject *)__pyx_v_self), __pyx_t_3, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_memoryview_type, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 224, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_r = ((PyObject *)__pyx_t_1);
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":221
 *         return self.get_memview()
 * 
 *     @cname('get_memview')             # <<<<<<<<<<<<<<
 *     cdef get_memview(self):
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":226
 *         return  memoryview(self, flags, self.dtype_is_object)
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._shape[0]
 * 
*/

/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
/* sq_length slot wrapper for array.__len__: downcasts self and forwards.
 * The impl cannot fail, so no error translation is needed here. */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__len__: the length is the extent of the first
 * dimension, `self._shape[0]`. */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
  Py_ssize_t __pyx_r;

  /* "View.MemoryView":227
 * 
 *     def __len__(self):
 *         return self._shape[0]             # <<<<<<<<<<<<<<
 * 
 *     def __getattr__(self, attr):
 */
  __pyx_r = (__pyx_v_self->_shape[0]);
  goto __pyx_L0;

  /* "View.MemoryView":226
 *         return  memoryview(self, flags, self.dtype_is_object)
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._shape[0]
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":229
 *         return self._shape[0]
 * 
 *     def __getattr__(self, attr):             # <<<<<<<<<<<<<<
 *         return getattr(self.memview, attr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
/* tp_getattro fallback wrapper for array.__getattr__: downcasts self and
 * forwards; returns a new reference or NULL on error. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__getattr__: delegates unknown attribute lookups
 * to the wrapping memoryview, i.e. `return getattr(self.memview, attr)`.
 * Note this builds a fresh memoryview via the `memview` property on every
 * call.  Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getattr__", 0);

  /* "View.MemoryView":230
 * 
 *     def __getattr__(self, attr):
 *         return getattr(self.memview, attr)             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, item):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":229
 *         return self._shape[0]
 * 
 *     def __getattr__(self, attr):             # <<<<<<<<<<<<<<
 *         return getattr(self.memview, attr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":232
 *         return getattr(self.memview, attr)
 * 
 *     def __getitem__(self, item):             # <<<<<<<<<<<<<<
 *         return self.memview[item]
 * 
*/

/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
/* mp_subscript slot wrapper for array.__getitem__: downcasts self and
 * forwards; returns a new reference or NULL on error. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__getitem__: delegates indexing to the wrapping
 * memoryview, i.e. `return self.memview[item]`.  A fresh memoryview is
 * created via the `memview` property for each access.  Returns a new
 * reference or NULL with an exception set. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "View.MemoryView":233
 * 
 *     def __getitem__(self, item):
 *         return self.memview[item]             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, item, value):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":232
 *         return getattr(self.memview, attr)
 * 
 *     def __getitem__(self, item):             # <<<<<<<<<<<<<<
 *         return self.memview[item]
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":235
 *         return self.memview[item]
 * 
 *     def __setitem__(self, item, value):             # <<<<<<<<<<<<<<
 *         self.memview[item] = value
 * 
*/

/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
/* mp_ass_subscript slot wrapper for array.__setitem__: downcasts self and
 * forwards.  Returns 0 on success, -1 (from the impl) on error. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__setitem__: delegates item assignment to the
 * wrapping memoryview, i.e. `self.memview[item] = value`.  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "View.MemoryView":236
 * 
 *     def __setitem__(self, item, value):
 *         self.memview[item] = value             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (unlikely((PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0))) __PYX_ERR(1, 236, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "View.MemoryView":235
 *         return self.memview[item]
 * 
 *     def __setitem__(self, item, value):             # <<<<<<<<<<<<<<
 *         self.memview[item] = value
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* METH_FASTCALL/varargs wrapper for array.__reduce_cython__: rejects any
 * positional or keyword arguments, then forwards to the implementation
 * (which always raises TypeError — this type is not picklable). */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__reduce_cython__: unconditionally raises
 * TypeError because the array's non-trivial __cinit__ makes default
 * pickling unsafe.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
*/

/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* METH_FASTCALL/varargs wrapper for array.__setstate_cython__: unpacks the
 * single required positional/keyword argument `__pyx_state` and forwards
 * to the implementation (which always raises TypeError).
 *
 * FIX(review): the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`, a misplaced parenthesis.  Since
 * unlikely(x) expands to `__builtin_expect(!!(x), 0)`, its value is 0 or 1
 * and the `< 0` comparison could never be true, so a negative (error)
 * return from __Pyx_NumKwargs_FASTCALL (PyDict_Size in the non-fastcall
 * path) was silently ignored.  Corrected to the same form the sibling
 * __reduce_cython__ wrapper already uses: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — always false; see header */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__setstate_cython__: unconditionally raises
 * TypeError (pickling is unsupported for this type; see
 * __reduce_cython__).  The __pyx_state argument is ignored.  Always
 * returns NULL with an exception set. */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":245
 *         pass
 * 
 * @cname("__pyx_array_allocate_buffer")             # <<<<<<<<<<<<<<
 * cdef int _allocate_buffer(array self) except -1:
 * 
*/

/* cdef _allocate_buffer(array self) except -1: malloc()s self.len bytes
 * into self.data and marks the buffer as owned (free_data = True, so
 * __dealloc__ will free it).  For object-typed arrays every element slot
 * is initialised to an owned reference to Py_None so later refcounting in
 * the slice helpers is safe.  Returns 0 on success, -1 with MemoryError
 * (or ZeroDivisionError/OverflowError from the division guards) on
 * failure. */
static int __pyx_array_allocate_buffer(struct __pyx_array_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_i;
  PyObject **__pyx_v_p;
  int __pyx_r;
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "View.MemoryView":252
 *     cdef PyObject **p
 * 
 *     self.free_data = True             # <<<<<<<<<<<<<<
 *     self.data = <char *>malloc(self.len)
 *     if not self.data:
 */
  __pyx_v_self->free_data = 1;

  /* "View.MemoryView":253
 * 
 *     self.free_data = True
 *     self.data = <char *>malloc(self.len)             # <<<<<<<<<<<<<<
 *     if not self.data:
 *         raise MemoryError, "unable to allocate array data."
 */
  __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));

  /* "View.MemoryView":254
 *     self.free_data = True
 *     self.data = <char *>malloc(self.len)
 *     if not self.data:             # <<<<<<<<<<<<<<
 *         raise MemoryError, "unable to allocate array data."
 * 
 */
  __pyx_t_1 = (!(__pyx_v_self->data != 0));
  if (unlikely(__pyx_t_1)) {

    /* "View.MemoryView":255
 *     self.data = <char *>malloc(self.len)
 *     if not self.data:
 *         raise MemoryError, "unable to allocate array data."             # <<<<<<<<<<<<<<
 * 
 *     if self.dtype_is_object:
 */
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_MemoryError))), __pyx_mstate_global->__pyx_kp_u_unable_to_allocate_array_data, 0, 0);
    __PYX_ERR(1, 255, __pyx_L1_error)

    /* "View.MemoryView":254
 *     self.free_data = True
 *     self.data = <char *>malloc(self.len)
 *     if not self.data:             # <<<<<<<<<<<<<<
 *         raise MemoryError, "unable to allocate array data."
 * 
 */
  }

  /* "View.MemoryView":257
 *         raise MemoryError, "unable to allocate array data."
 * 
 *     if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *         p = <PyObject **> self.data
 *         for i in range(self.len // self.itemsize):
 */
  if (__pyx_v_self->dtype_is_object) {

    /* "View.MemoryView":258
 * 
 *     if self.dtype_is_object:
 *         p = <PyObject **> self.data             # <<<<<<<<<<<<<<
 *         for i in range(self.len // self.itemsize):
 *             p[i] = Py_None
 */
    __pyx_v_p = ((PyObject **)__pyx_v_self->data);

    /* "View.MemoryView":259
 *     if self.dtype_is_object:
 *         p = <PyObject **> self.data
 *         for i in range(self.len // self.itemsize):             # <<<<<<<<<<<<<<
 *             p[i] = Py_None
 *             Py_INCREF(Py_None)
 */
    /* Guard the Python floor-division semantics: explicit checks for
     * division by zero and for the len == PY_SSIZE_T_MIN / itemsize == -1
     * overflow case before calling the helper. */
    if (unlikely(__pyx_v_self->itemsize == 0)) {
      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
      __PYX_ERR(1, 259, __pyx_L1_error)
    }
    else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_self->itemsize == (Py_ssize_t)-1)  && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
      PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
      __PYX_ERR(1, 259, __pyx_L1_error)
    }
    __pyx_t_2 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_self->itemsize, 0);
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_i = __pyx_t_4;

      /* "View.MemoryView":260
 *         p = <PyObject **> self.data
 *         for i in range(self.len // self.itemsize):
 *             p[i] = Py_None             # <<<<<<<<<<<<<<
 *             Py_INCREF(Py_None)
 *     return 0
 */
      (__pyx_v_p[__pyx_v_i]) = Py_None;

      /* "View.MemoryView":261
 *         for i in range(self.len // self.itemsize):
 *             p[i] = Py_None
 *             Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *     return 0
 * 
 */
      Py_INCREF(Py_None);
    }

    /* "View.MemoryView":257
 *         raise MemoryError, "unable to allocate array data."
 * 
 *     if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *         p = <PyObject **> self.data
 *         for i in range(self.len // self.itemsize):
 */
  }

  /* "View.MemoryView":262
 *             p[i] = Py_None
 *             Py_INCREF(Py_None)
 *     return 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = 0;
  goto __pyx_L0;

  /* "View.MemoryView":245
 *         pass
 * 
 * @cname("__pyx_array_allocate_buffer")             # <<<<<<<<<<<<<<
 * cdef int _allocate_buffer(array self) except -1:
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView._allocate_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":265
 * 
 * 
 * @cname("__pyx_array_new")             # <<<<<<<<<<<<<<
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, const char *c_mode, char *buf):
 *     cdef array result
*/

/*
 * View.MemoryView `array_cwrapper` (cname "__pyx_array_new"): C-level factory
 * for the internal memoryview helper `array` type.
 *
 * shape    - Python tuple of extents (borrowed reference).
 * itemsize - element size in bytes (boxed to a Python int for __new__).
 * format   - struct-style format string; copied into a fresh bytes object.
 * c_mode   - C string; only c_mode[0] is inspected: 'f' selects mode
 *            "fortran", anything else selects "c".
 * buf      - optional pre-existing data pointer. NULL => the array allocates
 *            its own buffer; non-NULL => array.__new__ is called with
 *            allocate_buffer=False and `buf` is adopted as result.data
 *            (no copy; buffer lifetime/ownership is the caller's contract).
 *
 * Returns a new reference to the array object, or 0 with a Python error set.
 */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char const *__pyx_v_c_mode, char *__pyx_v_buf) {
  struct __pyx_array_obj *__pyx_v_result = 0;
  PyObject *__pyx_v_mode = 0;
  struct __pyx_array_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("array_cwrapper", 0);

  /* "View.MemoryView":268
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, const char *c_mode, char *buf):
 *     cdef array result
 *     cdef str mode = "fortran" if c_mode[0] == b'f' else "c"  # this often comes from a constant C string.             # <<<<<<<<<<<<<<
 * 
 *     if buf is NULL:
 */
  /* Select interned mode string; both branches hand an owned ref to __pyx_t_1. */
  __pyx_t_2 = ((__pyx_v_c_mode[0]) == 'f');
  if (__pyx_t_2) {
    __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_fortran);
    __pyx_t_1 = __pyx_mstate_global->__pyx_n_u_fortran;
  } else {
    __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_c);
    __pyx_t_1 = __pyx_mstate_global->__pyx_n_u_c;
  }
  __pyx_v_mode = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "View.MemoryView":270
 *     cdef str mode = "fortran" if c_mode[0] == b'f' else "c"  # this often comes from a constant C string.
 * 
 *     if buf is NULL:             # <<<<<<<<<<<<<<
 *         result = array.__new__(array, shape, itemsize, format, mode)
 *     else:
 */
  __pyx_t_2 = (__pyx_v_buf == NULL);
  if (__pyx_t_2) {

    /* "View.MemoryView":271
 * 
 *     if buf is NULL:
 *         result = array.__new__(array, shape, itemsize, format, mode)             # <<<<<<<<<<<<<<
 *     else:
 *         result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)
 */
    __pyx_t_1 = PyLong_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 271, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 271, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    /* Build the positional-args tuple (shape, itemsize, format, mode);
       SET_ITEM steals the refs given away via GIVEREF below. */
    __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 271, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_INCREF(__pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_shape) != (0)) __PYX_ERR(1, 271, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 271, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_3);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3) != (0)) __PYX_ERR(1, 271, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_mode);
    __Pyx_GIVEREF(__pyx_v_mode);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_mode) != (0)) __PYX_ERR(1, 271, __pyx_L1_error);
    /* Refs to t_1/t_3 were stolen by the tuple above; clear the temps so the
       error path does not double-decref them. */
    __pyx_t_1 = 0;
    __pyx_t_3 = 0;
    __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_mstate_global->__pyx_array_type), __pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 271, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3);
    __pyx_t_3 = 0;

    /* "View.MemoryView":270
 *     cdef str mode = "fortran" if c_mode[0] == b'f' else "c"  # this often comes from a constant C string.
 * 
 *     if buf is NULL:             # <<<<<<<<<<<<<<
 *         result = array.__new__(array, shape, itemsize, format, mode)
 *     else:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":273
 *         result = array.__new__(array, shape, itemsize, format, mode)
 *     else:
 *         result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         result.data = buf
 * 
 */
  /*else*/ {
    __pyx_t_3 = PyLong_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 273, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 273, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape) != (0)) __PYX_ERR(1, 273, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_3);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3) != (0)) __PYX_ERR(1, 273, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4) != (0)) __PYX_ERR(1, 273, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_mode);
    __Pyx_GIVEREF(__pyx_v_mode);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_mode) != (0)) __PYX_ERR(1, 273, __pyx_L1_error);
    /* Tuple stole the refs; clear temps before reuse. */
    __pyx_t_3 = 0;
    __pyx_t_4 = 0;
    /* kwargs = {"allocate_buffer": False} so tp_new does not malloc data. */
    __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 273, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False) < (0)) __PYX_ERR(1, 273, __pyx_L1_error)
    __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_mstate_global->__pyx_array_type), __pyx_t_1, __pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3);
    __pyx_t_3 = 0;

    /* "View.MemoryView":274
 *     else:
 *         result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)
 *         result.data = buf             # <<<<<<<<<<<<<<
 * 
 *     return result
 */
    /* Adopt the caller-supplied buffer (no copy). */
    __pyx_v_result->data = __pyx_v_buf;
  }
  __pyx_L3:;

  /* "View.MemoryView":276
 *         result.data = buf
 * 
 *     return result             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF((PyObject *)__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_result);
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* "View.MemoryView":265
 * 
 * 
 * @cname("__pyx_array_new")             # <<<<<<<<<<<<<<
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, const char *c_mode, char *buf):
 *     cdef array result
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_mode);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":302
 * cdef class Enum(object):
 *     cdef object name
 *     def __init__(self, name):             # <<<<<<<<<<<<<<
 *         self.name = name
 *     def __repr__(self):
*/

/* Python wrapper */
/*
 * tp_init slot wrapper for View.MemoryView.Enum.__init__(self, name).
 * Parses exactly one required argument ("name", positional or keyword),
 * then delegates to the implementation function. Returns 0 on success,
 * -1 with a Python exception set on bad arguments.
 */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references once filled; released on both the
     error path and the normal exit path below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_name,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() parenthesization looks misplaced here (the
       comparison is outside the macro); same pattern appears throughout this
       generated file — verify against the unlikely() macro definition. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 302, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 302, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(1, 302, __pyx_L3_error)
      /* Verify every required argument was supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, i); __PYX_ERR(1, 302, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 302, __pyx_L3_error)
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 302, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of Enum.__init__: stores the `name` argument on the
 * instance (self.name = name). Always succeeds and returns 0.
 */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "View.MemoryView":303
 *     cdef object name
 *     def __init__(self, name):
 *         self.name = name             # <<<<<<<<<<<<<<
 *     def __repr__(self):
 *         return self.name
 */
  /* INCREF the new value before DECREF'ing the old one so the swap is safe
     even if name aliases self.name. */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;

  /* "View.MemoryView":302
 * cdef class Enum(object):
 *     cdef object name
 *     def __init__(self, name):             # <<<<<<<<<<<<<<
 *         self.name = name
 *     def __repr__(self):
 */

  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":304
 *     def __init__(self, name):
 *         self.name = name
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return self.name
 * 
*/

/* Python wrapper */
/*
 * tp_repr slot wrapper for View.MemoryView.Enum.__repr__. A reprfunc slot
 * receives only `self` — it has no args tuple and no kwargs — so no argument
 * parsing is performed here; the call is forwarded straight to the
 * implementation function.
 *
 * Fix: the previous version declared an (unused) __pyx_kwvalues and
 * initialized it via __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs),
 * but neither __pyx_args nor __pyx_nargs exists in this unary slot's
 * scope — an undeclared-identifier compile error. Both lines removed.
 */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of Enum.__repr__: returns a new reference to self.name
 * (whatever object was stored by __init__; for the module's sentinel Enums
 * this is the descriptive string passed at construction). Cannot fail.
 */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "View.MemoryView":305
 *         self.name = name
 *     def __repr__(self):
 *         return self.name             # <<<<<<<<<<<<<<
 * 
 * cdef generic = Enum("<strided and direct or indirect>")
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;

  /* "View.MemoryView":304
 *     def __init__(self, name):
 *         self.name = name
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return self.name
 * 
 */

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/*
 * Python-callable wrapper for Enum.__reduce_cython__ (pickle support).
 * Takes no arguments beyond self: rejects any positional or keyword
 * arguments, then delegates to the implementation. Compiled either as a
 * METH_FASTCALL method (args vector + nargs) or a classic varargs method,
 * selected by CYTHON_METH_FASTCALL.
 */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Method accepts no arguments: any positional or keyword raises TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of Enum.__reduce_cython__ (pickle protocol).
 *
 * Builds state = (self.name,), appends the instance __dict__ when present
 * and non-empty, and returns either
 *   (__pyx_unpickle_Enum, (type(self), 0x82a3537, None), state)   — setstate
 * or
 *   (__pyx_unpickle_Enum, (type(self), 0x82a3537, state))         — no setstate
 * 0x82a3537 is the layout checksum verified by the unpickler.
 *
 * Fix: the else branch previously set use_setstate from the truth value of a
 * constant non-empty tuple (always true), making the two-tuple reduce form
 * unreachable. Restored the intended test `use_setstate = self.name is not
 * None`, matching Cython's standard pickling fragment.
 *
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self.name,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->name);
  __Pyx_GIVEREF(__pyx_v_self->name);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self.name,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self.name,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuit `and`: only truth-test _dict when it is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self.name is not None
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self.name,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self.name is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
 */
  /*else*/ {
    /* Use the __setstate__ path only when `name` carries a real value. */
    __pyx_t_2 = (__pyx_v_self->name != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self.name is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self.name is not None
 *     if use_setstate:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    /* Inner args tuple: (type(self), checksum, None). */
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_136983863);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_136983863);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_136983863) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    /* Outer 3-tuple: (unpickler, args, state) — state goes to __setstate__. */
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self.name is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
 *     else:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    /* Inner args tuple: (type(self), checksum, state) — no __setstate__ call. */
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_136983863);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_136983863);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_136983863) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/

/* Python wrapper */
/*
 * Python-callable wrapper for Enum.__setstate_cython__(self, __pyx_state).
 * Parses exactly one required argument ("__pyx_state", positional or
 * keyword) and delegates to the implementation. Compiled either as a
 * METH_FASTCALL method or a classic varargs method.
 */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references once filled; released on both paths. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() parenthesization looks misplaced here (the
       comparison is outside the macro); same pattern appears throughout this
       generated file — verify against the unlikely() macro definition. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of Enum.__setstate_cython__: type-checks __pyx_state as a
 * non-None tuple, then forwards it to __pyx_unpickle_Enum__set_state to
 * restore the instance. Returns None on success, NULL with an exception set
 * on a type error or unpickling failure.
 */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_Enum__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Enforce the `tuple __pyx_state not None` contract of the C helper. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":348
 *     cdef const __Pyx_TypeInfo *typeinfo
 * 
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
 *         self.obj = obj
 *         self.flags = flags
*/

/* Python wrapper */
/*
 * tp_new-time wrapper for memoryview.__cinit__(self, obj, int flags,
 * bint dtype_is_object=False). Parses 2 required arguments (obj, flags) and
 * one optional (dtype_is_object, default False), converts flags to C int and
 * dtype_is_object via truth test, then delegates to the implementation.
 * Returns 0 on success, -1 with an exception set otherwise.
 */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_obj = 0;
  int __pyx_v_flags;
  int __pyx_v_dtype_is_object;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references once filled; released on both paths. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_obj,&__pyx_mstate_global->__pyx_n_u_flags,&__pyx_mstate_global->__pyx_n_u_dtype_is_object,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() parenthesization looks misplaced here (the
       comparison is outside the macro); same pattern appears throughout this
       generated file — verify against the unlikely() macro definition. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 348, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals (fallthrough fills high-to-low),
         then merge keywords and verify the 2 required args are present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_VARARGS(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 348, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 348, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 348, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(1, 348, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, i); __PYX_ERR(1, 348, __pyx_L3_error) }
      }
    } else {
      /* No keywords: accept exactly 2 or 3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_VARARGS(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 348, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 348, __pyx_L3_error)
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 348, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_obj = values[0];
    __pyx_v_flags = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 348, __pyx_L3_error)
    if (values[2]) {
      __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 348, __pyx_L3_error)
    } else {
      /* Declared default: dtype_is_object=False. */
      __pyx_v_dtype_is_object = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, __pyx_nargs); __PYX_ERR(1, 348, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of View.MemoryView.memoryview.__cinit__.
 * Stores `obj`/`flags` on self, acquires the underlying Py_buffer via
 * PyObject_GetBuffer, lazily sets up a per-object thread lock when atomics
 * are unavailable, and derives `dtype_is_object` from the buffer format.
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE(review): generated code — do not hand-edit; regenerate from the .pyx. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  Py_intptr_t __pyx_t_4;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__cinit__", 0);

  /* "View.MemoryView":349
 * 
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *         self.obj = obj             # <<<<<<<<<<<<<<
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:
 */
  /* Standard Cython attribute-store sequence: INCREF the new value first,
   * then drop the reference to the old value before overwriting the slot,
   * so the object is never left holding a stale borrowed pointer. */
  __Pyx_INCREF(__pyx_v_obj);
  __Pyx_GIVEREF(__pyx_v_obj);
  __Pyx_GOTREF(__pyx_v_self->obj);
  __Pyx_DECREF(__pyx_v_self->obj);
  __pyx_v_self->obj = __pyx_v_obj;

  /* "View.MemoryView":350
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *         self.obj = obj
 *         self.flags = flags             # <<<<<<<<<<<<<<
 *         if type(self) is memoryview or obj is not None:
 *             PyObject_GetBuffer(obj, &self.view, flags)
 */
  __pyx_v_self->flags = __pyx_v_flags;

  /* "View.MemoryView":351
 *         self.obj = obj
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *             PyObject_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 */
  /* Short-circuit `or`: acquire the buffer when self is exactly the base
   * memoryview type, or whenever a real exporter object was passed. */
  __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_mstate_global->__pyx_memoryview_type));
  if (!__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_obj != Py_None);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "View.MemoryView":352
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:
 *             PyObject_GetBuffer(obj, &self.view, flags)             # <<<<<<<<<<<<<<
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 */
    __pyx_t_3 = PyObject_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 352, __pyx_L1_error)

    /* "View.MemoryView":353
 *         if type(self) is memoryview or obj is not None:
 *             PyObject_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)
 */
    /* Some exporters leave view.obj NULL; normalize that to an owned
     * Py_None so __dealloc__ can distinguish "no buffer held" from
     * "buffer held but anonymous" (see the matching Py_DECREF there). */
    __pyx_t_1 = (((PyObject *)__pyx_v_self->view.obj) == NULL);
    if (__pyx_t_1) {

      /* "View.MemoryView":354
 *             PyObject_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None             # <<<<<<<<<<<<<<
 *                 Py_INCREF(Py_None)
 * 
 */
      ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;

      /* "View.MemoryView":355
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 * 
 *         if not __PYX_CYTHON_ATOMICS_ENABLED():
 */
      Py_INCREF(Py_None);

      /* "View.MemoryView":353
 *         if type(self) is memoryview or obj is not None:
 *             PyObject_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)
 */
    }

    /* "View.MemoryView":351
 *         self.obj = obj
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *             PyObject_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 */
  }

  /* "View.MemoryView":357
 *                 Py_INCREF(Py_None)
 * 
 *         if not __PYX_CYTHON_ATOMICS_ENABLED():             # <<<<<<<<<<<<<<
 *             global __pyx_memoryview_thread_locks_used
 *             if (__pyx_memoryview_thread_locks_used < 8 and
 */
  /* Fallback path when atomic refcounting of acquisition_count is not
   * available: grab a lock from the small preallocated pool (8 entries),
   * or allocate a fresh one if the pool is exhausted. */
  __pyx_t_1 = (!__PYX_CYTHON_ATOMICS_ENABLED());
  if (__pyx_t_1) {

    /* "View.MemoryView":359
 *         if not __PYX_CYTHON_ATOMICS_ENABLED():
 *             global __pyx_memoryview_thread_locks_used
 *             if (__pyx_memoryview_thread_locks_used < 8 and             # <<<<<<<<<<<<<<
 * 
 *                     not __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING()):
 */
    __pyx_t_2 = (__pyx_memoryview_thread_locks_used < 8);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L9_bool_binop_done;
    }

    /* "View.MemoryView":361
 *             if (__pyx_memoryview_thread_locks_used < 8 and
 * 
 *                     not __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING()):             # <<<<<<<<<<<<<<
 *                 self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *                 __pyx_memoryview_thread_locks_used += 1
 */
    /* NOTE(review): the shared pool is skipped on free-threaded CPython —
     * presumably because the unsynchronized counter below would race;
     * confirm against Cython upstream if this matters. */
    __pyx_t_2 = (!__PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING());
    __pyx_t_1 = __pyx_t_2;
    __pyx_L9_bool_binop_done:;

    /* "View.MemoryView":359
 *         if not __PYX_CYTHON_ATOMICS_ENABLED():
 *             global __pyx_memoryview_thread_locks_used
 *             if (__pyx_memoryview_thread_locks_used < 8 and             # <<<<<<<<<<<<<<
 * 
 *                     not __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING()):
 */
    if (__pyx_t_1) {

      /* "View.MemoryView":362
 * 
 *                     not __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING()):
 *                 self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]             # <<<<<<<<<<<<<<
 *                 __pyx_memoryview_thread_locks_used += 1
 *             if self.lock is NULL:
 */
      __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);

      /* "View.MemoryView":363
 *                     not __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING()):
 *                 self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *                 __pyx_memoryview_thread_locks_used += 1             # <<<<<<<<<<<<<<
 *             if self.lock is NULL:
 *                 self.lock = PyThread_allocate_lock()
 */
      __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);

      /* "View.MemoryView":359
 *         if not __PYX_CYTHON_ATOMICS_ENABLED():
 *             global __pyx_memoryview_thread_locks_used
 *             if (__pyx_memoryview_thread_locks_used < 8 and             # <<<<<<<<<<<<<<
 * 
 *                     not __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING()):
 */
    }

    /* "View.MemoryView":364
 *                 self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *                 __pyx_memoryview_thread_locks_used += 1
 *             if self.lock is NULL:             # <<<<<<<<<<<<<<
 *                 self.lock = PyThread_allocate_lock()
 *                 if self.lock is NULL:
 */
    /* Pool slot was empty (or pool not used): allocate a dedicated lock. */
    __pyx_t_1 = (__pyx_v_self->lock == NULL);
    if (__pyx_t_1) {

      /* "View.MemoryView":365
 *                 __pyx_memoryview_thread_locks_used += 1
 *             if self.lock is NULL:
 *                 self.lock = PyThread_allocate_lock()             # <<<<<<<<<<<<<<
 *                 if self.lock is NULL:
 *                     raise MemoryError
 */
      __pyx_v_self->lock = PyThread_allocate_lock();

      /* "View.MemoryView":366
 *             if self.lock is NULL:
 *                 self.lock = PyThread_allocate_lock()
 *                 if self.lock is NULL:             # <<<<<<<<<<<<<<
 *                     raise MemoryError
 * 
 */
      __pyx_t_1 = (__pyx_v_self->lock == NULL);
      if (unlikely(__pyx_t_1)) {

        /* "View.MemoryView":367
 *                 self.lock = PyThread_allocate_lock()
 *                 if self.lock is NULL:
 *                     raise MemoryError             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_FORMAT:
 */
        PyErr_NoMemory(); __PYX_ERR(1, 367, __pyx_L1_error)

        /* "View.MemoryView":366
 *             if self.lock is NULL:
 *                 self.lock = PyThread_allocate_lock()
 *                 if self.lock is NULL:             # <<<<<<<<<<<<<<
 *                     raise MemoryError
 * 
 */
      }

      /* "View.MemoryView":364
 *                 self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *                 __pyx_memoryview_thread_locks_used += 1
 *             if self.lock is NULL:             # <<<<<<<<<<<<<<
 *                 self.lock = PyThread_allocate_lock()
 *                 if self.lock is NULL:
 */
    }

    /* "View.MemoryView":357
 *                 Py_INCREF(Py_None)
 * 
 *         if not __PYX_CYTHON_ATOMICS_ENABLED():             # <<<<<<<<<<<<<<
 *             global __pyx_memoryview_thread_locks_used
 *             if (__pyx_memoryview_thread_locks_used < 8 and
 */
  }

  /* "View.MemoryView":369
 *                     raise MemoryError
 * 
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 *         else:
 */
  /* With PyBUF_FORMAT the exporter filled view.format: the element type is
   * object iff the format string is exactly "O". Otherwise trust the
   * caller-supplied dtype_is_object flag. */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":370
 * 
 *         if flags & PyBUF_FORMAT:
 *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')             # <<<<<<<<<<<<<<
 *         else:
 *             self.dtype_is_object = dtype_is_object
 */
    __pyx_t_2 = ((__pyx_v_self->view.format[0]) == 'O');
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L14_bool_binop_done;
    }
    __pyx_t_2 = ((__pyx_v_self->view.format[1]) == '\x00');
    __pyx_t_1 = __pyx_t_2;
    __pyx_L14_bool_binop_done:;
    __pyx_v_self->dtype_is_object = __pyx_t_1;

    /* "View.MemoryView":369
 *                     raise MemoryError
 * 
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 *         else:
 */
    goto __pyx_L13;
  }

  /* "View.MemoryView":372
 *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 *         else:
 *             self.dtype_is_object = dtype_is_object             # <<<<<<<<<<<<<<
 * 
 *         assert <Py_intptr_t><void*>(&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0
 */
  /*else*/ {
    __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
  }
  __pyx_L13:;

  /* "View.MemoryView":374
 *             self.dtype_is_object = dtype_is_object
 * 
 *         assert <Py_intptr_t><void*>(&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0             # <<<<<<<<<<<<<<
 *         self.typeinfo = NULL
 * 
 */
  /* Runtime assert that acquisition_count is suitably aligned for atomic
   * access; compiled out entirely under CYTHON_WITHOUT_ASSERTIONS. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_4 = ((Py_intptr_t)((void *)(&__pyx_v_self->acquisition_count)));
    __pyx_t_5 = (sizeof(__pyx_atomic_int_type));
    if (unlikely(__pyx_t_5 == 0)) {
      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
      __PYX_ERR(1, 374, __pyx_L1_error)
    }
    __pyx_t_1 = ((__pyx_t_4 % __pyx_t_5) == 0);
    if (unlikely(!__pyx_t_1)) {
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), 0, 0, 0);
      __PYX_ERR(1, 374, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(1, 374, __pyx_L1_error)
  #endif

  /* "View.MemoryView":375
 * 
 *         assert <Py_intptr_t><void*>(&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0
 *         self.typeinfo = NULL             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(memoryview self):
 */
  __pyx_v_self->typeinfo = NULL;

  /* "View.MemoryView":348
 *     cdef const __Pyx_TypeInfo *typeinfo
 * 
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
 *         self.obj = obj
 *         self.flags = flags
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":377
 *         self.typeinfo = NULL
 * 
 *     def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
 *         if self.obj is not None:
 *             PyBuffer_Release(&self.view)
*/

/* Python wrapper */
/* Cython-generated tp_dealloc wrapper for memoryview.__dealloc__: casts the
 * PyObject* to the concrete struct and forwards to the implementation.
 * NOTE(review): `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)` compiles
 * here only because the macro presumably ignores its arguments in this
 * configuration (neither name is declared in this function) — generated
 * code, do not hand-edit. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Cython-generated implementation of memoryview.__dealloc__.
 * Releases the held Py_buffer (or drops the sentinel Py_None reference that
 * __cinit__ installed when the exporter left view.obj NULL), then returns
 * this object's thread lock to the shared pool or frees it. */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
  int __pyx_v_i;
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  int __pyx_t_4;
  PyThread_type_lock __pyx_t_5;
  PyThread_type_lock __pyx_t_6;

  /* "View.MemoryView":378
 * 
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:             # <<<<<<<<<<<<<<
 *             PyBuffer_Release(&self.view)
 *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
 */
  __pyx_t_1 = (__pyx_v_self->obj != Py_None);
  if (__pyx_t_1) {

    /* "View.MemoryView":379
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:
 *             PyBuffer_Release(&self.view)             # <<<<<<<<<<<<<<
 *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
 * 
 */
    PyBuffer_Release((&__pyx_v_self->view));

    /* "View.MemoryView":378
 * 
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:             # <<<<<<<<<<<<<<
 *             PyBuffer_Release(&self.view)
 *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":380
 *         if self.obj is not None:
 *             PyBuffer_Release(&self.view)
 *         elif (<__pyx_buffer *> &self.view).obj == Py_None:             # <<<<<<<<<<<<<<
 * 
 *             (<__pyx_buffer *> &self.view).obj = NULL
 */
  /* Undo the Py_None placeholder set in __cinit__: clear the slot first,
   * then drop the reference (mirror of the INCREF there). */
  __pyx_t_1 = (((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None);
  if (__pyx_t_1) {

    /* "View.MemoryView":382
 *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
 * 
 *             (<__pyx_buffer *> &self.view).obj = NULL             # <<<<<<<<<<<<<<
 *             Py_DECREF(Py_None)
 * 
 */
    ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;

    /* "View.MemoryView":383
 * 
 *             (<__pyx_buffer *> &self.view).obj = NULL
 *             Py_DECREF(Py_None)             # <<<<<<<<<<<<<<
 * 
 *         cdef int i
 */
    Py_DECREF(Py_None);

    /* "View.MemoryView":380
 *         if self.obj is not None:
 *             PyBuffer_Release(&self.view)
 *         elif (<__pyx_buffer *> &self.view).obj == Py_None:             # <<<<<<<<<<<<<<
 * 
 *             (<__pyx_buffer *> &self.view).obj = NULL
 */
  }
  __pyx_L3:;

  /* "View.MemoryView":387
 *         cdef int i
 *         global __pyx_memoryview_thread_locks_used
 *         if self.lock != NULL:             # <<<<<<<<<<<<<<
 *             for i in range(0 if __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() else __pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 */
  /* If the lock came from the shared pool, swap it back into the "unused"
   * tail of the pool array; otherwise (for-else falls through) it was
   * individually allocated and must be freed here. */
  __pyx_t_1 = (__pyx_v_self->lock != NULL);
  if (__pyx_t_1) {

    /* "View.MemoryView":388
 *         global __pyx_memoryview_thread_locks_used
 *         if self.lock != NULL:
 *             for i in range(0 if __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() else __pyx_memoryview_thread_locks_used):             # <<<<<<<<<<<<<<
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1
 */
    /* On free-threaded builds the pool is never used (see __cinit__), so the
     * search range is forced to 0 and the lock is always freed below. */
    __pyx_t_1 = __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING();
    if (__pyx_t_1) {
      __pyx_t_2 = 0;
    } else {
      __pyx_t_2 = __pyx_memoryview_thread_locks_used;
    }
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_i = __pyx_t_4;

      /* "View.MemoryView":389
 *         if self.lock != NULL:
 *             for i in range(0 if __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() else __pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:
 */
      __pyx_t_1 = ((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock);
      if (__pyx_t_1) {

        /* "View.MemoryView":390
 *             for i in range(0 if __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() else __pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1             # <<<<<<<<<<<<<<
 *                     if i != __pyx_memoryview_thread_locks_used:
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 */
        __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);

        /* "View.MemoryView":391
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 */
        __pyx_t_1 = (__pyx_v_i != __pyx_memoryview_thread_locks_used);
        if (__pyx_t_1) {

          /* "View.MemoryView":393
 *                     if i != __pyx_memoryview_thread_locks_used:
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])             # <<<<<<<<<<<<<<
 *                     break
 *             else:
 */
          /* Tuple-swap lowering: keep the released lock at the first unused
           * index so the pool stays densely packed. */
          __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
          __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]);

          /* "View.MemoryView":392
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (             # <<<<<<<<<<<<<<
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 *                     break
 */
          (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5;
          (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6;

          /* "View.MemoryView":391
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 */
        }

        /* "View.MemoryView":394
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 *                     break             # <<<<<<<<<<<<<<
 *             else:
 *                 PyThread_free_lock(self.lock)
 */
        goto __pyx_L6_break;

        /* "View.MemoryView":389
 *         if self.lock != NULL:
 *             for i in range(0 if __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() else __pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:
 */
      }
    }
    /*else*/ {

      /* "View.MemoryView":396
 *                     break
 *             else:
 *                 PyThread_free_lock(self.lock)             # <<<<<<<<<<<<<<
 * 
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
 */
      /* for-else branch: lock was not in the pool, so it was allocated by
       * PyThread_allocate_lock in __cinit__ and is freed here. */
      PyThread_free_lock(__pyx_v_self->lock);
    }
    __pyx_L6_break:;

    /* "View.MemoryView":387
 *         cdef int i
 *         global __pyx_memoryview_thread_locks_used
 *         if self.lock != NULL:             # <<<<<<<<<<<<<<
 *             for i in range(0 if __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() else __pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 */
  }

  /* "View.MemoryView":377
 *         self.typeinfo = NULL
 * 
 *     def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
 *         if self.obj is not None:
 *             PyBuffer_Release(&self.view)
 */

  /* function exit code */
}

/* "View.MemoryView":398
 *                 PyThread_free_lock(self.lock)
 * 
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf
*/

/* Cython-generated implementation of memoryview.get_item_pointer.
 * Walks `index` (any iterable of integer indices) dimension by dimension,
 * advancing a byte pointer into view.buf via __pyx_pybuffer_index.
 * Returns the element pointer, or NULL with an exception set on error
 * (out-of-range index, non-integer item, iteration failure). */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  Py_ssize_t __pyx_v_dim;
  char *__pyx_v_itemp;
  PyObject *__pyx_v_idx = NULL;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t __pyx_t_3;
  PyObject *(*__pyx_t_4)(PyObject *);
  PyObject *__pyx_t_5 = NULL;
  Py_ssize_t __pyx_t_6;
  char *__pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_item_pointer", 0);

  /* "View.MemoryView":400
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf             # <<<<<<<<<<<<<<
 * 
 *         for dim, idx in enumerate(index):
 */
  __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);

  /* "View.MemoryView":402
 *         cdef char *itemp = <char *> self.view.buf
 * 
 *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 * 
 */
  /* Lowered `for dim, idx in enumerate(index)`: fast path indexes
   * list/tuple directly (t_4 == NULL); slow path goes through the general
   * iterator protocol with t_4 as the tp_iternext slot. t_1 is the
   * enumerate counter. */
  __pyx_t_1 = 0;
  if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
    __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2);
    __pyx_t_3 = 0;
    __pyx_t_4 = NULL;
  } else {
    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 402, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 402, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_4)) {
      if (likely(PyList_CheckExact(__pyx_t_2))) {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(1, 402, __pyx_L1_error)
          #endif
          if (__pyx_t_3 >= __pyx_temp) break;
        }
        __pyx_t_5 = __Pyx_PyList_GetItemRefFast(__pyx_t_2, __pyx_t_3, __Pyx_ReferenceSharing_OwnStrongReference);
        ++__pyx_t_3;
      } else {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_2);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(1, 402, __pyx_L1_error)
          #endif
          if (__pyx_t_3 >= __pyx_temp) break;
        }
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_5 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3));
        #else
        __pyx_t_5 = __Pyx_PySequence_ITEM(__pyx_t_2, __pyx_t_3);
        #endif
        ++__pyx_t_3;
      }
      if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 402, __pyx_L1_error)
    } else {
      __pyx_t_5 = __pyx_t_4(__pyx_t_2);
      if (unlikely(!__pyx_t_5)) {
        /* NULL from tp_iternext means either exhaustion (StopIteration or
         * no exception) or a real error — only the latter propagates. */
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(1, 402, __pyx_L1_error)
          PyErr_Clear();
        }
        break;
      }
    }
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
    __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_1;
    __pyx_t_1 = (__pyx_t_1 + 1);

    /* "View.MemoryView":403
 * 
 *         for dim, idx in enumerate(index):
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)             # <<<<<<<<<<<<<<
 * 
 *         return itemp
 */
    __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 403, __pyx_L1_error)
    __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 403, __pyx_L1_error)
    __pyx_v_itemp = __pyx_t_7;

    /* "View.MemoryView":402
 *         cdef char *itemp = <char *> self.view.buf
 * 
 *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 * 
 */
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "View.MemoryView":405
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 * 
 *         return itemp             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_itemp;
  goto __pyx_L0;

  /* "View.MemoryView":398
 *                 PyThread_free_lock(self.lock)
 * 
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":408
 * 
 * 
 *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
 *         if index is Ellipsis:
 *             return self
*/

/* Python wrapper */
/* Cython-generated mp_subscript wrapper for memoryview.__getitem__:
 * casts self to the concrete struct and forwards to the implementation.
 * Generated code — do not hand-edit. */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of memoryview.__getitem__.
 * `...` returns self; otherwise _unellipsify normalizes the index into
 * (have_slices, indices). Slice indexing dispatches to memview_slice (a new
 * memoryview); full scalar indexing resolves an element pointer via the
 * vtable's get_item_pointer and converts it to a Python object.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_v_have_slices = NULL;
  PyObject *__pyx_v_indices = NULL;
  char *__pyx_v_itemp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  char *__pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "View.MemoryView":409
 * 
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 * 
 */
  __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
  if (__pyx_t_1) {

    /* "View.MemoryView":410
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:
 *             return self             # <<<<<<<<<<<<<<
 * 
 *         have_slices, indices = _unellipsify(index, self.view.ndim)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_self);
    __pyx_r = ((PyObject *)__pyx_v_self);
    goto __pyx_L0;

    /* "View.MemoryView":409
 * 
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 * 
 */
  }

  /* "View.MemoryView":412
 *             return self
 * 
 *         have_slices, indices = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 * 
 *         cdef char *itemp
 */
  /* Lowered 2-tuple unpacking of the _unellipsify result, with the usual
   * size checks (too many / not enough values / None not iterable). */
  __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (likely(__pyx_t_2 != Py_None)) {
    PyObject* sequence = __pyx_t_2;
    Py_ssize_t size = __Pyx_PyTuple_GET_SIZE(sequence);
    if (unlikely(size != 2)) {
      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
      __PYX_ERR(1, 412, __pyx_L1_error)
    }
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
    __Pyx_INCREF(__pyx_t_4);
    #else
    __pyx_t_3 = __Pyx_PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 412, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 412, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    #endif
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  } else {
    __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 412, __pyx_L1_error)
  }
  __pyx_v_have_slices = __pyx_t_3;
  __pyx_t_3 = 0;
  __pyx_v_indices = __pyx_t_4;
  __pyx_t_4 = 0;

  /* "View.MemoryView":415
 * 
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 415, __pyx_L1_error)
  if (__pyx_t_1) {

    /* "View.MemoryView":416
 *         cdef char *itemp
 *         if have_slices:
 *             return memview_slice(self, indices)             # <<<<<<<<<<<<<<
 *         else:
 *             itemp = self.get_item_pointer(indices)
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 416, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;

    /* "View.MemoryView":415
 * 
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
  }

  /* "View.MemoryView":418
 *             return memview_slice(self, indices)
 *         else:
 *             itemp = self.get_item_pointer(indices)             # <<<<<<<<<<<<<<
 *             return self.convert_item_to_object(itemp)
 * 
 */
  /* Scalar path: cdef methods are dispatched through the generated vtable. */
  /*else*/ {
    __pyx_t_5 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_5 == ((void *)NULL))) __PYX_ERR(1, 418, __pyx_L1_error)
    __pyx_v_itemp = __pyx_t_5;

    /* "View.MemoryView":419
 *         else:
 *             itemp = self.get_item_pointer(indices)
 *             return self.convert_item_to_object(itemp)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(memoryview self, object index, object value):
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 419, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }

  /* "View.MemoryView":408
 * 
 * 
 *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
 *         if index is Ellipsis:
 *             return self
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_have_slices);
  __Pyx_XDECREF(__pyx_v_indices);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":421
 *             return self.convert_item_to_object(itemp)
 * 
 *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
 *         if self.view.readonly:
 *             raise TypeError, "Cannot assign to read-only memoryview"
*/

/* Python wrapper */
/* CPython mp_ass_subscript slot wrapper for memoryview.__setitem__.
 * Casts the raw PyObject* self to the concrete memoryview struct and
 * forwards to the generated implementation below.  Returns 0 on success,
 * -1 on error, per the CPython assignment-slot convention. */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; this
   * relies on __Pyx_KwValues_VARARGS being a macro that does not actually
   * evaluate its arguments in this build configuration — generated code,
   * do not "fix" by hand. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__setitem__(index, value).
 *
 * Mirrors the Cython source echoed in the comments below:
 *   - raises TypeError if the underlying buffer is read-only;
 *   - normalizes `index` via _unellipsify(), which returns a
 *     (have_slices, index) 2-tuple;
 *   - if the index contains slices, assigns either another memoryview
 *     (setitem_slice_assignment) or a scalar broadcast
 *     (setitem_slice_assign_scalar) over the sliced view self[index];
 *   - otherwise performs a single-element assignment (setitem_indexed).
 *
 * Returns 0 on success, -1 on error.  Temporaries __pyx_t_1..3 are
 * PyObject* slots released on both the normal (L0) and error (L1) paths;
 * `index` is INCREF'd on entry because it is rebound by the tuple unpack. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  PyObject *__pyx_v_have_slices = NULL;
  PyObject *__pyx_v_obj = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);
  __Pyx_INCREF(__pyx_v_index);

  /* "View.MemoryView":422
 * 
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:             # <<<<<<<<<<<<<<
 *             raise TypeError, "Cannot assign to read-only memoryview"
 * 
 */
  if (unlikely(__pyx_v_self->view.readonly)) {

    /* "View.MemoryView":423
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:
 *             raise TypeError, "Cannot assign to read-only memoryview"             # <<<<<<<<<<<<<<
 * 
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 */
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_Cannot_assign_to_read_only_memor, 0, 0);
    __PYX_ERR(1, 423, __pyx_L1_error)

    /* "View.MemoryView":422
 * 
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:             # <<<<<<<<<<<<<<
 *             raise TypeError, "Cannot assign to read-only memoryview"
 * 
 */
  }

  /* "View.MemoryView":425
 *             raise TypeError, "Cannot assign to read-only memoryview"
 * 
 *         have_slices, index = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 * 
 *         if have_slices:
 */
  __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Unpack the 2-tuple (have_slices, index); a None result or wrong
   * arity raises the matching unpacking error. */
  if (likely(__pyx_t_1 != Py_None)) {
    PyObject* sequence = __pyx_t_1;
    Py_ssize_t size = __Pyx_PyTuple_GET_SIZE(sequence);
    if (unlikely(size != 2)) {
      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
      __PYX_ERR(1, 425, __pyx_L1_error)
    }
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
    __Pyx_INCREF(__pyx_t_2);
    __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
    __Pyx_INCREF(__pyx_t_3);
    #else
    __pyx_t_2 = __Pyx_PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 425, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    #endif
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  } else {
    __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 425, __pyx_L1_error)
  }
  __pyx_v_have_slices = __pyx_t_2;
  __pyx_t_2 = 0;
  /* Rebind `index` to the normalized index tuple (drops the old ref). */
  __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
  __pyx_t_3 = 0;

  /* "View.MemoryView":427
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 * 
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             obj = self.is_slice(value)
 *             if obj is not None:
 */
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 427, __pyx_L1_error)
  if (__pyx_t_4) {

    /* "View.MemoryView":428
 * 
 *         if have_slices:
 *             obj = self.is_slice(value)             # <<<<<<<<<<<<<<
 *             if obj is not None:
 *                 self.setitem_slice_assignment(self[index], obj)
 */
    /* is_slice() returns `value` coerced to a memoryview, or None if
     * `value` does not expose a buffer (then treated as a scalar). */
    __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 428, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_v_obj = __pyx_t_1;
    __pyx_t_1 = 0;

    /* "View.MemoryView":429
 *         if have_slices:
 *             obj = self.is_slice(value)
 *             if obj is not None:             # <<<<<<<<<<<<<<
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 */
    __pyx_t_4 = (__pyx_v_obj != Py_None);
    if (__pyx_t_4) {

      /* "View.MemoryView":430
 *             obj = self.is_slice(value)
 *             if obj is not None:
 *                 self.setitem_slice_assignment(self[index], obj)             # <<<<<<<<<<<<<<
 *             else:
 *                 self.setitem_slice_assign_scalar(self[index], value)
 */
      __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 430, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 430, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

      /* "View.MemoryView":429
 *         if have_slices:
 *             obj = self.is_slice(value)
 *             if obj is not None:             # <<<<<<<<<<<<<<
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 */
      goto __pyx_L5;
    }

    /* "View.MemoryView":432
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 *                 self.setitem_slice_assign_scalar(self[index], value)             # <<<<<<<<<<<<<<
 *         else:
 *             self.setitem_indexed(index, value)
 */
    /*else*/ {
      /* Scalar broadcast: self[index] must itself be a memoryview
       * (TypeTest enforces the cast below). */
      __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 432, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_memoryview_type))))) __PYX_ERR(1, 432, __pyx_L1_error)
      __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 432, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    }
    __pyx_L5:;

    /* "View.MemoryView":427
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 * 
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             obj = self.is_slice(value)
 *             if obj is not None:
 */
    goto __pyx_L4;
  }

  /* "View.MemoryView":434
 *                 self.setitem_slice_assign_scalar(self[index], value)
 *         else:
 *             self.setitem_indexed(index, value)             # <<<<<<<<<<<<<<
 * 
 *     cdef is_slice(self, obj):
 */
  /*else*/ {
    __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 434, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  __pyx_L4:;

  /* "View.MemoryView":421
 *             return self.convert_item_to_object(itemp)
 * 
 *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
 *         if self.view.readonly:
 *             raise TypeError, "Cannot assign to read-only memoryview"
 */

  /* function exit code: L1 releases temporaries and records the
   * traceback; L0 drops the local references held in either case. */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_have_slices);
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_index);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":436
 *             self.setitem_indexed(index, value)
 * 
 *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
 *         if not isinstance(obj, memoryview):
 *             try:
*/

/* cdef memoryview.is_slice(obj).
 *
 * If `obj` is already a memoryview (TypeCheck against
 * __pyx_memoryview_type) it is returned unchanged.  Otherwise the code
 * tries to wrap it:  memoryview(obj, flags, dtype_is_object)  with the
 * writable bit cleared and PyBUF_ANY_CONTIGUOUS added; a TypeError from
 * that constructor is swallowed and None is returned (caller treats the
 * value as a scalar).  Any other exception propagates (returns NULL).
 *
 * __pyx_t_3..5 hold the saved exception state for the generated
 * try/except; __pyx_t_6..9 are call temporaries released on the error
 * path before the handler runs. */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("is_slice", 0);
  __Pyx_INCREF(__pyx_v_obj);

  /* "View.MemoryView":437
 * 
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_mstate_global->__pyx_memoryview_type); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "View.MemoryView":438
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 */
    {
      __Pyx_PyThreadState_declare
      __Pyx_PyThreadState_assign
      /* Save the active exception state so the handler can restore it. */
      __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
      __Pyx_XGOTREF(__pyx_t_3);
      __Pyx_XGOTREF(__pyx_t_4);
      __Pyx_XGOTREF(__pyx_t_5);
      /*try:*/ {

        /* "View.MemoryView":439
 *         if not isinstance(obj, memoryview):
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
 *                                  self.dtype_is_object)
 *             except TypeError:
 */
        __pyx_t_7 = NULL;
        /* Buffer flags: keep self.flags minus writability, allow any
         * contiguous layout. */
        __pyx_t_8 = __Pyx_PyLong_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 439, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_8);

        /* "View.MemoryView":440
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)             # <<<<<<<<<<<<<<
 *             except TypeError:
 *                 return None
 */
        __pyx_t_9 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 440, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_9);
        __pyx_t_10 = 1;
        {
          /* Vectorcall of the memoryview type with 3 positional args. */
          PyObject *__pyx_callargs[4] = {__pyx_t_7, __pyx_v_obj, __pyx_t_8, __pyx_t_9};
          __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_memoryview_type, __pyx_callargs+__pyx_t_10, (4-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
          __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
          __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
          __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
          if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 439, __pyx_L4_error)
          __Pyx_GOTREF((PyObject *)__pyx_t_6);
        }
        __Pyx_DECREF_SET(__pyx_v_obj, ((PyObject *)__pyx_t_6));
        __pyx_t_6 = 0;

        /* "View.MemoryView":438
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 */
      }
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      goto __pyx_L9_try_end;
      __pyx_L4_error:;
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
      __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;

      /* "View.MemoryView":441
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 *             except TypeError:             # <<<<<<<<<<<<<<
 *                 return None
 * 
 */
      __pyx_t_11 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(((PyTypeObject*)PyExc_TypeError))));
      if (__pyx_t_11) {
        /* TypeError: discard the exception and fall through to
         * `return None`. */
        __Pyx_ErrRestore(0,0,0);

        /* "View.MemoryView":442
 *                                  self.dtype_is_object)
 *             except TypeError:
 *                 return None             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
        __Pyx_XDECREF(__pyx_r);
        __pyx_r = Py_None; __Pyx_INCREF(Py_None);
        goto __pyx_L7_except_return;
      }
      goto __pyx_L6_except_error;

      /* "View.MemoryView":438
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 */
      __pyx_L6_except_error:;
      /* Non-TypeError: restore the saved outer exception state and
       * propagate. */
      __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_XGIVEREF(__pyx_t_4);
      __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L1_error;
      __pyx_L7_except_return:;
      __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_XGIVEREF(__pyx_t_4);
      __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L0;
      __pyx_L9_try_end:;
    }

    /* "View.MemoryView":437
 * 
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 */
  }

  /* "View.MemoryView":444
 *                 return None
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     cdef setitem_slice_assignment(self, dst, src):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_obj);
  __pyx_r = __pyx_v_obj;
  goto __pyx_L0;

  /* "View.MemoryView":436
 *             self.setitem_indexed(index, value)
 * 
 *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
 *         if not isinstance(obj, memoryview):
 *             try:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":446
 *         return obj
 * 
 *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice dst_slice
 *         cdef __Pyx_memviewslice src_slice
*/

/* cdef memoryview.setitem_slice_assignment(dst, src).
 *
 * Copies the contents of the memoryview `src` into the memoryview `dst`
 * by extracting a __Pyx_memviewslice for each (get_slice_from_memview)
 * and delegating to memoryview_copy_contents with each side's Python
 * `ndim` attribute.  Both `dst` and `src` are type-checked against the
 * memoryview type (None is also tolerated by the test but would fail in
 * the cast — generated invariant, callers pass real memoryviews).
 * Returns Py_None on success, NULL on error. */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
  __Pyx_memviewslice __pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_src_slice;
  __Pyx_memviewslice __pyx_v_msrc;
  __Pyx_memviewslice __pyx_v_mdst;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_slice_assignment", 0);

  /* "View.MemoryView":449
 *         cdef __Pyx_memviewslice dst_slice
 *         cdef __Pyx_memviewslice src_slice
 *         cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0]             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0]
 * 
 */
  if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_mstate_global->__pyx_memoryview_type))))) __PYX_ERR(1, 449, __pyx_L1_error)
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((void *)NULL))) __PYX_ERR(1, 449, __pyx_L1_error)
  __pyx_v_msrc = (__pyx_t_1[0]);

  /* "View.MemoryView":450
 *         cdef __Pyx_memviewslice src_slice
 *         cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0]
 *         cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0]             # <<<<<<<<<<<<<<
 * 
 *         memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)
 */
  if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_mstate_global->__pyx_memoryview_type))))) __PYX_ERR(1, 450, __pyx_L1_error)
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_1 == ((void *)NULL))) __PYX_ERR(1, 450, __pyx_L1_error)
  __pyx_v_mdst = (__pyx_t_1[0]);

  /* "View.MemoryView":452
 *         cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0]
 * 
 *         memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)             # <<<<<<<<<<<<<<
 * 
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
  /* Fetch `ndim` as Python attributes and coerce to C int; each lookup
   * and conversion is individually error-checked. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyLong_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 452, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 452, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = __pyx_memoryview_copy_contents(__pyx_v_msrc, __pyx_v_mdst, __pyx_t_3, __pyx_t_4, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 452, __pyx_L1_error)

  /* "View.MemoryView":446
 *         return obj
 * 
 *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice dst_slice
 *         cdef __Pyx_memviewslice src_slice
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":454
 *         memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)
 * 
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
 *         cdef int array[128]
 *         cdef void *tmp = NULL
*/

/* cdef memoryview.setitem_slice_assign_scalar(dst, value).
 *
 * Broadcasts the scalar `value` over every element of the memoryview
 * `dst`.  The scalar is first packed into a temporary item buffer:
 * a 128-int stack array when self.view.itemsize fits, else a
 * PyMem_Malloc'd buffer (MemoryError on allocation failure).  For object
 * dtypes the PyObject* is stored directly; otherwise
 * assign_item_from_object performs the conversion.  After an optional
 * direct-dimension assertion (suboffsets present), slice_assign_scalar
 * copies the packed item across dst.  The generated try/finally block
 * guarantees PyMem_Free(tmp) on both the normal and the exception path
 * (tmp is NULL and free is a no-op when the stack array was used).
 * Returns Py_None on success, NULL on error. */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
  int __pyx_v_array[128];
  void *__pyx_v_tmp;
  void *__pyx_v_item;
  __Pyx_memviewslice *__pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_tmp_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice *__pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  int __pyx_t_5;
  char const *__pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);

  /* "View.MemoryView":456
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 *         cdef int array[128]
 *         cdef void *tmp = NULL             # <<<<<<<<<<<<<<
 *         cdef void *item
 * 
 */
  __pyx_v_tmp = NULL;

  /* "View.MemoryView":461
 *         cdef __Pyx_memviewslice *dst_slice
 *         cdef __Pyx_memviewslice tmp_slice
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)             # <<<<<<<<<<<<<<
 * 
 *         if <size_t>self.view.itemsize > sizeof(array):
 */
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((void *)NULL))) __PYX_ERR(1, 461, __pyx_L1_error)
  __pyx_v_dst_slice = __pyx_t_1;

  /* "View.MemoryView":463
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
 * 
 *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 */
  /* Choose the item staging buffer: heap only when the item does not
   * fit in the 128-int stack array. */
  __pyx_t_2 = (((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array)));
  if (__pyx_t_2) {

    /* "View.MemoryView":464
 * 
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)             # <<<<<<<<<<<<<<
 *             if tmp == NULL:
 *                 raise MemoryError
 */
    __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);

    /* "View.MemoryView":465
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError
 *             item = tmp
 */
    __pyx_t_2 = (__pyx_v_tmp == NULL);
    if (unlikely(__pyx_t_2)) {

      /* "View.MemoryView":466
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 *                 raise MemoryError             # <<<<<<<<<<<<<<
 *             item = tmp
 *         else:
 */
      PyErr_NoMemory(); __PYX_ERR(1, 466, __pyx_L1_error)

      /* "View.MemoryView":465
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError
 *             item = tmp
 */
    }

    /* "View.MemoryView":467
 *             if tmp == NULL:
 *                 raise MemoryError
 *             item = tmp             # <<<<<<<<<<<<<<
 *         else:
 *             item = <void *> array
 */
    __pyx_v_item = __pyx_v_tmp;

    /* "View.MemoryView":463
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
 * 
 *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":469
 *             item = tmp
 *         else:
 *             item = <void *> array             # <<<<<<<<<<<<<<
 * 
 *         try:
 */
  /*else*/ {
    __pyx_v_item = ((void *)__pyx_v_array);
  }
  __pyx_L3:;

  /* "View.MemoryView":471
 *             item = <void *> array
 * 
 *         try:             # <<<<<<<<<<<<<<
 *             if self.dtype_is_object:
 *                 (<PyObject **> item)[0] = <PyObject *> value
 */
  /*try:*/ {

    /* "View.MemoryView":472
 * 
 *         try:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 */
    if (__pyx_v_self->dtype_is_object) {

      /* "View.MemoryView":473
 *         try:
 *             if self.dtype_is_object:
 *                 (<PyObject **> item)[0] = <PyObject *> value             # <<<<<<<<<<<<<<
 *             else:
 *                 self.assign_item_from_object(<char *> item, value)
 */
      /* Borrowed pointer store: slice_assign_scalar below handles the
       * per-element refcounting for object dtypes. */
      (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);

      /* "View.MemoryView":472
 * 
 *         try:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 */
      goto __pyx_L8;
    }

    /* "View.MemoryView":475
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 *                 self.assign_item_from_object(<char *> item, value)             # <<<<<<<<<<<<<<
 * 
 * 
 */
    /*else*/ {
      __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    }
    __pyx_L8:;

    /* "View.MemoryView":479
 * 
 * 
 *             if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
    __pyx_t_2 = (__pyx_v_self->view.suboffsets != NULL);
    if (__pyx_t_2) {

      /* "View.MemoryView":480
 * 
 *             if self.view.suboffsets != NULL:
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)             # <<<<<<<<<<<<<<
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 *                                 item, self.dtype_is_object)
 */
      __pyx_t_4 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 480, __pyx_L6_error)

      /* "View.MemoryView":479
 * 
 * 
 *             if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
    }

    /* "View.MemoryView":481
 *             if self.view.suboffsets != NULL:
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,             # <<<<<<<<<<<<<<
 *                                 item, self.dtype_is_object)
 *         finally:
 */
    __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
  }

  /* "View.MemoryView":484
 *                                 item, self.dtype_is_object)
 *         finally:
 *             PyMem_Free(tmp)             # <<<<<<<<<<<<<<
 * 
 *     cdef setitem_indexed(self, index, value):
 */
  /*finally:*/ {
    /*normal exit:*/{
      /* PyMem_Free(NULL) is a no-op, so this is safe when the stack
       * array was used. */
      PyMem_Free(__pyx_v_tmp);
      goto __pyx_L7;
    }
    __pyx_L6_error:;
    /*exception exit:*/{
      /* Generated finally-on-exception path: stash the in-flight
       * exception (and lineno/filename), run PyMem_Free, then restore
       * the exception and jump to the error exit. */
      __Pyx_PyThreadState_declare
      __Pyx_PyThreadState_assign
      __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
       __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
      if ( unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
      __Pyx_XGOTREF(__pyx_t_7);
      __Pyx_XGOTREF(__pyx_t_8);
      __Pyx_XGOTREF(__pyx_t_9);
      __Pyx_XGOTREF(__pyx_t_10);
      __Pyx_XGOTREF(__pyx_t_11);
      __Pyx_XGOTREF(__pyx_t_12);
      __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
      {
        PyMem_Free(__pyx_v_tmp);
      }
      __Pyx_XGIVEREF(__pyx_t_10);
      __Pyx_XGIVEREF(__pyx_t_11);
      __Pyx_XGIVEREF(__pyx_t_12);
      __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
      __Pyx_XGIVEREF(__pyx_t_7);
      __Pyx_XGIVEREF(__pyx_t_8);
      __Pyx_XGIVEREF(__pyx_t_9);
      __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
      __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
      __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
      goto __pyx_L1_error;
    }
    __pyx_L7:;
  }

  /* "View.MemoryView":454
 *         memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)
 * 
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
 *         cdef int array[128]
 *         cdef void *tmp = NULL
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":486
 *             PyMem_Free(tmp)
 * 
 *     cdef setitem_indexed(self, index, value):             # <<<<<<<<<<<<<<
 *         cdef char *itemp = self.get_item_pointer(index)
 *         self.assign_item_from_object(itemp, value)
*/

/* cdef memoryview.setitem_indexed(index, value).
 *
 * Single-element assignment: resolves `index` to a raw item pointer via
 * the vtable's get_item_pointer, then converts/stores `value` at that
 * address via assign_item_from_object.  Both calls are error-checked;
 * returns Py_None on success, NULL on error. */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  char *__pyx_v_itemp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  char *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_indexed", 0);

  /* "View.MemoryView":487
 * 
 *     cdef setitem_indexed(self, index, value):
 *         cdef char *itemp = self.get_item_pointer(index)             # <<<<<<<<<<<<<<
 *         self.assign_item_from_object(itemp, value)
 * 
 */
  __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((void *)NULL))) __PYX_ERR(1, 487, __pyx_L1_error)
  __pyx_v_itemp = __pyx_t_1;

  /* "View.MemoryView":488
 *     cdef setitem_indexed(self, index, value):
 *         cdef char *itemp = self.get_item_pointer(index)
 *         self.assign_item_from_object(itemp, value)             # <<<<<<<<<<<<<<
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 */
  __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "View.MemoryView":486
 *             PyMem_Free(tmp)
 * 
 *     cdef setitem_indexed(self, index, value):             # <<<<<<<<<<<<<<
 *         cdef char *itemp = self.get_item_pointer(index)
 *         self.assign_item_from_object(itemp, value)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":490
 *         self.assign_item_from_object(itemp, value)
 * 
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         """Only used if instantiated manually by the user, or if Cython doesn't
 *         know how to convert the type"""
*/

/* Fallback item decoder for View.MemoryView.memoryview (Cython source line 490).
 * Copies self.view.itemsize raw bytes from `itemp` into a Python `bytes`
 * object and decodes it via struct.unpack(self.view.format, bytesitem).
 * Returns the sole element when the format string has length 1, otherwise
 * the full unpack tuple; a struct.error is re-raised as ValueError.
 * Returns NULL (with a Python exception set) on failure.
 * NOTE: generated code — statement order and RefNanny/exception-state
 * bookkeeping are load-bearing; do not hand-edit the logic. */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_v_struct = NULL;
  PyObject *__pyx_v_bytesitem = 0;
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  Py_ssize_t __pyx_t_8;
  int __pyx_t_9;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);

  /* "View.MemoryView":493
 *         """Only used if instantiated manually by the user, or if Cython doesn't
 *         know how to convert the type"""
 *         import struct             # <<<<<<<<<<<<<<
 *         cdef bytes bytesitem
 * 
*/
  /* Runtime `import struct`: done per call because this path is a rare
   * user-facing fallback, not a hot loop. */
  __pyx_t_2 = __Pyx_Import(__pyx_mstate_global->__pyx_n_u_struct, 0, 0, NULL, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 493, __pyx_L1_error)
  __pyx_t_1 = __pyx_t_2;
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_struct = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "View.MemoryView":496
 *         cdef bytes bytesitem
 * 
 *         bytesitem = itemp[:self.view.itemsize]             # <<<<<<<<<<<<<<
 *         try:
 *             result = struct.unpack(self.view.format, bytesitem)
*/
  /* Materialize the raw item bytes: bytesitem = itemp[0:itemsize]. */
  __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "View.MemoryView":497
 * 
 *         bytesitem = itemp[:self.view.itemsize]
 *         try:             # <<<<<<<<<<<<<<
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
*/
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    /* Save the active exception state (t_2..t_4) so it can be restored on
     * every exit path of the try/except below. */
    __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_4);
    /*try:*/ {

      /* "View.MemoryView":498
 *         bytesitem = itemp[:self.view.itemsize]
 *         try:
 *             result = struct.unpack(self.view.format, bytesitem)             # <<<<<<<<<<<<<<
 *         except struct.error:
 *             raise ValueError, "Unable to convert item to object"
*/
      __pyx_t_5 = __pyx_v_struct;
      __Pyx_INCREF(__pyx_t_5);
      __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 498, __pyx_L3_error)
      __Pyx_GOTREF(__pyx_t_6);
      __pyx_t_7 = 0;
      {
        /* Vectorcall-style method call: struct.unpack(format_bytes, bytesitem). */
        PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_t_6, __pyx_v_bytesitem};
        __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_unpack, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L3_error)
        __Pyx_GOTREF(__pyx_t_1);
      }
      __pyx_v_result = __pyx_t_1;
      __pyx_t_1 = 0;

      /* "View.MemoryView":497
 * 
 *         bytesitem = itemp[:self.view.itemsize]
 *         try:             # <<<<<<<<<<<<<<
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
*/
    }

    /* "View.MemoryView":502
 *             raise ValueError, "Unable to convert item to object"
 *         else:
 *             if len(self.view.format) == 1:             # <<<<<<<<<<<<<<
 *                 return result[0]
 *             return result
*/
    /*else:*/ {
      __pyx_t_8 = __Pyx_ssize_strlen(__pyx_v_self->view.format); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 502, __pyx_L5_except_error)
      __pyx_t_9 = (__pyx_t_8 == 1);
      if (__pyx_t_9) {

        /* "View.MemoryView":503
 *         else:
 *             if len(self.view.format) == 1:
 *                 return result[0]             # <<<<<<<<<<<<<<
 *             return result
 * 
*/
        /* Single-character format => unwrap the 1-tuple and return result[0]. */
        __Pyx_XDECREF(__pyx_r);
        __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 503, __pyx_L5_except_error)
        __Pyx_GOTREF(__pyx_t_1);
        __pyx_r = __pyx_t_1;
        __pyx_t_1 = 0;
        goto __pyx_L6_except_return;

        /* "View.MemoryView":502
 *             raise ValueError, "Unable to convert item to object"
 *         else:
 *             if len(self.view.format) == 1:             # <<<<<<<<<<<<<<
 *                 return result[0]
 *             return result
*/
      }

      /* "View.MemoryView":504
 *             if len(self.view.format) == 1:
 *                 return result[0]
 *             return result             # <<<<<<<<<<<<<<
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
*/
      __Pyx_XDECREF(__pyx_r);
      __Pyx_INCREF(__pyx_v_result);
      __pyx_r = __pyx_v_result;
      goto __pyx_L6_except_return;
    }
    __pyx_L3_error:;
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;

    /* "View.MemoryView":499
 *         try:
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:             # <<<<<<<<<<<<<<
 *             raise ValueError, "Unable to convert item to object"
 *         else:
*/
    /* Match the live exception against struct.error (looked up dynamically
     * on the imported module) without disturbing the error indicator. */
    __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5);
    __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_mstate_global->__pyx_n_u_error); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 499, __pyx_L5_except_error)
    __Pyx_GOTREF(__pyx_t_10);
    __pyx_t_11 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_10);
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_ErrRestore(__pyx_t_1, __pyx_t_6, __pyx_t_5);
    __pyx_t_1 = 0; __pyx_t_6 = 0; __pyx_t_5 = 0;
    if (__pyx_t_11) {
      __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_1) < 0) __PYX_ERR(1, 499, __pyx_L5_except_error)
      __Pyx_XGOTREF(__pyx_t_5);
      __Pyx_XGOTREF(__pyx_t_6);
      __Pyx_XGOTREF(__pyx_t_1);

      /* "View.MemoryView":500
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
 *             raise ValueError, "Unable to convert item to object"             # <<<<<<<<<<<<<<
 *         else:
 *             if len(self.view.format) == 1:
*/
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_Unable_to_convert_item_to_object, 0, 0);
      __PYX_ERR(1, 500, __pyx_L5_except_error)
    }
    goto __pyx_L5_except_error;

    /* "View.MemoryView":497
 * 
 *         bytesitem = itemp[:self.view.itemsize]
 *         try:             # <<<<<<<<<<<<<<
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
*/
    __pyx_L5_except_error:;
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_XGIVEREF(__pyx_t_4);
    __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
    goto __pyx_L1_error;
    __pyx_L6_except_return:;
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_XGIVEREF(__pyx_t_4);
    __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
    goto __pyx_L0;
  }

  /* "View.MemoryView":490
 *         self.assign_item_from_object(itemp, value)
 * 
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         """Only used if instantiated manually by the user, or if Cython doesn't
 *         know how to convert the type"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_struct);
  __Pyx_XDECREF(__pyx_v_bytesitem);
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":506
 *             return result
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         """Only used if instantiated manually by the user, or if Cython doesn't
 *         know how to convert the type"""
*/

/* Fallback item encoder for View.MemoryView.memoryview (Cython source line 506).
 * Inverse of convert_item_to_object: packs `value` into bytes with
 * struct.pack(self.view.format, ...) — splatting `value` when it is a
 * tuple, passing it as a single argument otherwise — then copies the
 * packed bytes into the buffer at `itemp` one byte at a time.
 * Returns Py_None on success, NULL with an exception set on failure.
 * NOTE: generated code — do not hand-edit the logic. */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
  PyObject *__pyx_v_struct = NULL;
  char __pyx_v_c;
  PyObject *__pyx_v_bytesvalue = 0;
  Py_ssize_t __pyx_v_i;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  Py_ssize_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  char *__pyx_t_10;
  char *__pyx_t_11;
  Py_ssize_t __pyx_t_12;
  char *__pyx_t_13;
  char *__pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("assign_item_from_object", 0);

  /* "View.MemoryView":509
 *         """Only used if instantiated manually by the user, or if Cython doesn't
 *         know how to convert the type"""
 *         import struct             # <<<<<<<<<<<<<<
 *         cdef char c
 *         cdef bytes bytesvalue
*/
  /* Runtime `import struct`; this fallback path is not performance-critical. */
  __pyx_t_2 = __Pyx_Import(__pyx_mstate_global->__pyx_n_u_struct, 0, 0, NULL, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 509, __pyx_L1_error)
  __pyx_t_1 = __pyx_t_2;
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_struct = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "View.MemoryView":514
 *         cdef Py_ssize_t i
 * 
 *         if isinstance(value, tuple):             # <<<<<<<<<<<<<<
 *             bytesvalue = struct.pack(self.view.format, *value)
 *         else:
*/
  __pyx_t_3 = PyTuple_Check(__pyx_v_value); 
  if (__pyx_t_3) {

    /* "View.MemoryView":515
 * 
 *         if isinstance(value, tuple):
 *             bytesvalue = struct.pack(self.view.format, *value)             # <<<<<<<<<<<<<<
 *         else:
 *             bytesvalue = struct.pack(self.view.format, value)
*/
    /* Tuple case: build the argument tuple (format,) + value and call
     * struct.pack(*args) via tp_call, i.e. star-unpacking `value`. */
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_mstate_global->__pyx_n_u_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 515, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_4))) __PYX_ERR(1, 515, __pyx_L1_error)
    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
    __pyx_t_4 = 0;

    /* "View.MemoryView":514
 *         cdef Py_ssize_t i
 * 
 *         if isinstance(value, tuple):             # <<<<<<<<<<<<<<
 *             bytesvalue = struct.pack(self.view.format, *value)
 *         else:
*/
    goto __pyx_L3;
  }

  /* "View.MemoryView":517
 *             bytesvalue = struct.pack(self.view.format, *value)
 *         else:
 *             bytesvalue = struct.pack(self.view.format, value)             # <<<<<<<<<<<<<<
 * 
 *         for i, c in enumerate(bytesvalue):
*/
  /*else*/ {
    /* Scalar case: fast vectorcall of struct.pack(format, value). */
    __pyx_t_6 = __pyx_v_struct;
    __Pyx_INCREF(__pyx_t_6);
    __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 517, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_7 = 0;
    {
      PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_t_1, __pyx_v_value};
      __pyx_t_4 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_pack, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 517, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
    }
    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_4))) __PYX_ERR(1, 517, __pyx_L1_error)
    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
    __pyx_t_4 = 0;
  }
  __pyx_L3:;

  /* "View.MemoryView":519
 *             bytesvalue = struct.pack(self.view.format, value)
 * 
 *         for i, c in enumerate(bytesvalue):             # <<<<<<<<<<<<<<
 *             itemp[i] = c
 * 
*/
  /* enumerate() over the bytes object is compiled down to a raw
   * char-pointer walk (t_11..t_14) with t_8 tracking the index `i`. */
  __pyx_t_8 = 0;
  if (unlikely(__pyx_v_bytesvalue == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
    __PYX_ERR(1, 519, __pyx_L1_error)
  }
  __Pyx_INCREF(__pyx_v_bytesvalue);
  __pyx_t_9 = __pyx_v_bytesvalue;
  __pyx_t_11 = __Pyx_PyBytes_AsWritableString(__pyx_t_9); if (unlikely(__pyx_t_11 == ((char *)NULL))) __PYX_ERR(1, 519, __pyx_L1_error)
  __pyx_t_12 = __Pyx_PyBytes_GET_SIZE(__pyx_t_9); if (unlikely(__pyx_t_12 == ((Py_ssize_t)-1))) __PYX_ERR(1, 519, __pyx_L1_error)
  __pyx_t_13 = (__pyx_t_11 + __pyx_t_12);
  for (__pyx_t_14 = __pyx_t_11; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
    __pyx_t_10 = __pyx_t_14;
    __pyx_v_c = (__pyx_t_10[0]);

    /* "View.MemoryView":520
 * 
 *         for i, c in enumerate(bytesvalue):
 *             itemp[i] = c             # <<<<<<<<<<<<<<
 * 
 *     @cname('getbuffer')
*/
    __pyx_v_i = __pyx_t_8;

    /* "View.MemoryView":519
 *             bytesvalue = struct.pack(self.view.format, value)
 * 
 *         for i, c in enumerate(bytesvalue):             # <<<<<<<<<<<<<<
 *             itemp[i] = c
 * 
*/
    __pyx_t_8 = (__pyx_t_8 + 1);

    /* "View.MemoryView":520
 * 
 *         for i, c in enumerate(bytesvalue):
 *             itemp[i] = c             # <<<<<<<<<<<<<<
 * 
 *     @cname('getbuffer')
*/
    (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
  }
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;

  /* "View.MemoryView":506
 *             return result
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         """Only used if instantiated manually by the user, or if Cython doesn't
 *         know how to convert the type"""
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_struct);
  __Pyx_XDECREF(__pyx_v_bytesvalue);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":522
 *             itemp[i] = c
 * 
 *     @cname('getbuffer')             # <<<<<<<<<<<<<<
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         if flags & PyBUF_WRITABLE and self.view.readonly:
*/

/* Python wrapper */
/* C-level entry point for memoryview.__getbuffer__ (buffer-protocol
 * bf_getbuffer slot).  Thin wrapper: casts `self` and forwards to the
 * implementation function.  NOTE(review): __Pyx_KwValues_VARARGS is passed
 * __pyx_args/__pyx_nargs which are not parameters here — presumably the
 * macro expands to ignore its arguments in this build configuration;
 * confirm against the utility-code definition if this ever fails to compile. */
CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__getbuffer__ (View.MemoryView:522).
 * Fills the caller-supplied Py_buffer `info` from self.view, exposing
 * shape/strides/suboffsets/format only when the corresponding PyBUF_*
 * flag is requested (NULL otherwise), and rejects writable requests on a
 * read-only view with ValueError.  Returns 0 on success, -1 on error with
 * info->obj cleared, per the CPython buffer protocol. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  char *__pyx_t_4;
  void *__pyx_t_5;
  int __pyx_t_6;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* Defensive NULL check mandated by the buffer protocol. */
  if (unlikely(__pyx_v_info == NULL)) {
    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
    return -1;
  }
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  /* Pre-set info->obj to None so the error path can safely release it;
   * replaced with `self` at the end on success. */
  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(__pyx_v_info->obj);

  /* "View.MemoryView":524
 *     @cname('getbuffer')
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         if flags & PyBUF_WRITABLE and self.view.readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError, "Cannot create writable memory view from read-only memoryview"
 * 
*/
  /* Short-circuit `and`: only consult view.readonly if WRITABLE was asked for. */
  __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __pyx_v_self->view.readonly;
  __pyx_L4_bool_binop_done:;
  if (unlikely(__pyx_t_1)) {

    /* "View.MemoryView":525
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         if flags & PyBUF_WRITABLE and self.view.readonly:
 *             raise ValueError, "Cannot create writable memory view from read-only memoryview"             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_ND:
*/
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_Cannot_create_writable_memory_vi, 0, 0);
    __PYX_ERR(1, 525, __pyx_L1_error)

    /* "View.MemoryView":524
 *     @cname('getbuffer')
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         if flags & PyBUF_WRITABLE and self.view.readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError, "Cannot create writable memory view from read-only memoryview"
 * 
*/
  }

  /* "View.MemoryView":527
 *             raise ValueError, "Cannot create writable memory view from read-only memoryview"
 * 
 *         if flags & PyBUF_ND:             # <<<<<<<<<<<<<<
 *             info.shape = self.view.shape
 *         else:
*/
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":528
 * 
 *         if flags & PyBUF_ND:
 *             info.shape = self.view.shape             # <<<<<<<<<<<<<<
 *         else:
 *             info.shape = NULL
*/
    __pyx_t_3 = __pyx_v_self->view.shape;
    __pyx_v_info->shape = __pyx_t_3;

    /* "View.MemoryView":527
 *             raise ValueError, "Cannot create writable memory view from read-only memoryview"
 * 
 *         if flags & PyBUF_ND:             # <<<<<<<<<<<<<<
 *             info.shape = self.view.shape
 *         else:
*/
    goto __pyx_L6;
  }

  /* "View.MemoryView":530
 *             info.shape = self.view.shape
 *         else:
 *             info.shape = NULL             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_STRIDES:
*/
  /*else*/ {
    __pyx_v_info->shape = NULL;
  }
  __pyx_L6:;

  /* "View.MemoryView":532
 *             info.shape = NULL
 * 
 *         if flags & PyBUF_STRIDES:             # <<<<<<<<<<<<<<
 *             info.strides = self.view.strides
 *         else:
*/
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":533
 * 
 *         if flags & PyBUF_STRIDES:
 *             info.strides = self.view.strides             # <<<<<<<<<<<<<<
 *         else:
 *             info.strides = NULL
*/
    __pyx_t_3 = __pyx_v_self->view.strides;
    __pyx_v_info->strides = __pyx_t_3;

    /* "View.MemoryView":532
 *             info.shape = NULL
 * 
 *         if flags & PyBUF_STRIDES:             # <<<<<<<<<<<<<<
 *             info.strides = self.view.strides
 *         else:
*/
    goto __pyx_L7;
  }

  /* "View.MemoryView":535
 *             info.strides = self.view.strides
 *         else:
 *             info.strides = NULL             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_INDIRECT:
*/
  /*else*/ {
    __pyx_v_info->strides = NULL;
  }
  __pyx_L7:;

  /* "View.MemoryView":537
 *             info.strides = NULL
 * 
 *         if flags & PyBUF_INDIRECT:             # <<<<<<<<<<<<<<
 *             info.suboffsets = self.view.suboffsets
 *         else:
*/
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":538
 * 
 *         if flags & PyBUF_INDIRECT:
 *             info.suboffsets = self.view.suboffsets             # <<<<<<<<<<<<<<
 *         else:
 *             info.suboffsets = NULL
*/
    __pyx_t_3 = __pyx_v_self->view.suboffsets;
    __pyx_v_info->suboffsets = __pyx_t_3;

    /* "View.MemoryView":537
 *             info.strides = NULL
 * 
 *         if flags & PyBUF_INDIRECT:             # <<<<<<<<<<<<<<
 *             info.suboffsets = self.view.suboffsets
 *         else:
*/
    goto __pyx_L8;
  }

  /* "View.MemoryView":540
 *             info.suboffsets = self.view.suboffsets
 *         else:
 *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_FORMAT:
*/
  /*else*/ {
    __pyx_v_info->suboffsets = NULL;
  }
  __pyx_L8:;

  /* "View.MemoryView":542
 *             info.suboffsets = NULL
 * 
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             info.format = self.view.format
 *         else:
*/
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":543
 * 
 *         if flags & PyBUF_FORMAT:
 *             info.format = self.view.format             # <<<<<<<<<<<<<<
 *         else:
 *             info.format = NULL
*/
    __pyx_t_4 = __pyx_v_self->view.format;
    __pyx_v_info->format = __pyx_t_4;

    /* "View.MemoryView":542
 *             info.suboffsets = NULL
 * 
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             info.format = self.view.format
 *         else:
*/
    goto __pyx_L9;
  }

  /* "View.MemoryView":545
 *             info.format = self.view.format
 *         else:
 *             info.format = NULL             # <<<<<<<<<<<<<<
 * 
 *         info.buf = self.view.buf
*/
  /*else*/ {
    __pyx_v_info->format = NULL;
  }
  __pyx_L9:;

  /* "View.MemoryView":547
 *             info.format = NULL
 * 
 *         info.buf = self.view.buf             # <<<<<<<<<<<<<<
 *         info.ndim = self.view.ndim
 *         info.itemsize = self.view.itemsize
*/
  /* Unconditional fields: copied straight from the wrapped view. */
  __pyx_t_5 = __pyx_v_self->view.buf;
  __pyx_v_info->buf = __pyx_t_5;

  /* "View.MemoryView":548
 * 
 *         info.buf = self.view.buf
 *         info.ndim = self.view.ndim             # <<<<<<<<<<<<<<
 *         info.itemsize = self.view.itemsize
 *         info.len = self.view.len
*/
  __pyx_t_6 = __pyx_v_self->view.ndim;
  __pyx_v_info->ndim = __pyx_t_6;

  /* "View.MemoryView":549
 *         info.buf = self.view.buf
 *         info.ndim = self.view.ndim
 *         info.itemsize = self.view.itemsize             # <<<<<<<<<<<<<<
 *         info.len = self.view.len
 *         info.readonly = self.view.readonly
*/
  __pyx_t_7 = __pyx_v_self->view.itemsize;
  __pyx_v_info->itemsize = __pyx_t_7;

  /* "View.MemoryView":550
 *         info.ndim = self.view.ndim
 *         info.itemsize = self.view.itemsize
 *         info.len = self.view.len             # <<<<<<<<<<<<<<
 *         info.readonly = self.view.readonly
 *         info.obj = self
*/
  __pyx_t_7 = __pyx_v_self->view.len;
  __pyx_v_info->len = __pyx_t_7;

  /* "View.MemoryView":551
 *         info.itemsize = self.view.itemsize
 *         info.len = self.view.len
 *         info.readonly = self.view.readonly             # <<<<<<<<<<<<<<
 *         info.obj = self
 * 
*/
  __pyx_t_1 = __pyx_v_self->view.readonly;
  __pyx_v_info->readonly = __pyx_t_1;

  /* "View.MemoryView":552
 *         info.len = self.view.len
 *         info.readonly = self.view.readonly
 *         info.obj = self             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Success: swap the placeholder None for a strong reference to self,
   * which keeps the exporter alive for the lifetime of the buffer. */
  __Pyx_INCREF((PyObject *)__pyx_v_self);
  __Pyx_GIVEREF((PyObject *)__pyx_v_self);
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);

  /* "View.MemoryView":522
 *             itemp[i] = c
 * 
 *     @cname('getbuffer')             # <<<<<<<<<<<<<<
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         if flags & PyBUF_WRITABLE and self.view.readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  if (__pyx_v_info->obj != NULL) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
  }
  goto __pyx_L2;
  __pyx_L0:;
  /* Only the untouched placeholder is dropped here; a successfully
   * installed `self` reference is released later by __releasebuffer__. */
  if (__pyx_v_info->obj == Py_None) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
  }
  __pyx_L2:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":555
 * 
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def T(self):
 *         cdef _memoryviewslice result = memoryview_copy(self)
*/

/* Python wrapper */
/* Python-level getter wrapper for the memoryview.T property: casts `self`
 * and delegates to the implementation.  NOTE(review): __Pyx_KwValues_VARARGS
 * receives __pyx_args/__pyx_nargs which are not parameters of this function —
 * presumably the macro ignores its arguments in this configuration; verify
 * against the utility-code definition if a build error points here. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the memoryview.T property (View.MemoryView:555):
 * makes a copy of the memoryview object, transposes the copy's slice
 * in place via transpose_memslice, and returns it.  Returns NULL with
 * an exception set on failure. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":557
 *     @property
 *     def T(self):
 *         cdef _memoryviewslice result = memoryview_copy(self)             # <<<<<<<<<<<<<<
 *         transpose_memslice(&result.from_slice)
 *         return result
*/
  /* memoryview_copy(self), then type-check the result is a _memoryviewslice. */
  __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_memoryviewslice_type))))) __PYX_ERR(1, 557, __pyx_L1_error)
  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "View.MemoryView":558
 *     def T(self):
 *         cdef _memoryviewslice result = memoryview_copy(self)
 *         transpose_memslice(&result.from_slice)             # <<<<<<<<<<<<<<
 *         return result
 * 
*/
  /* In-place transpose of the copied slice; -1 signals a Python error. */
  __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 558, __pyx_L1_error)

  /* "View.MemoryView":559
 *         cdef _memoryviewslice result = memoryview_copy(self)
 *         transpose_memslice(&result.from_slice)
 *         return result             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_result);
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;

  /* "View.MemoryView":555
 * 
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def T(self):
 *         cdef _memoryviewslice result = memoryview_copy(self)
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":561
 *         return result
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def base(self):
 *         return self._get_base()
*/

/* Python wrapper */
/* Python-level getter wrapper for the memoryview.base property: casts
 * `self` and delegates to the implementation function below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the memoryview.base property (View.MemoryView:561):
 * returns self._get_base(), dispatched through the vtable so subclasses
 * (e.g. _memoryviewslice) can override it. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":563
 *     @property
 *     def base(self):
 *         return self._get_base()             # <<<<<<<<<<<<<<
 * 
 *     cdef _get_base(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->_get_base(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":561
 *         return result
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def base(self):
 *         return self._get_base()
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.base.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":565
 *         return self._get_base()
 * 
 *     cdef _get_base(self):             # <<<<<<<<<<<<<<
 *         return self.obj
 * 
*/

/* memoryview._get_base (View.MemoryView:565): base-class implementation
 * that simply returns a new reference to the wrapped exporter object
 * self.obj.  Cannot fail. */
static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("_get_base", 0);

  /* "View.MemoryView":566
 * 
 *     cdef _get_base(self):
 *         return self.obj             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->obj);
  __pyx_r = __pyx_v_self->obj;
  goto __pyx_L0;

  /* "View.MemoryView":565
 *         return self._get_base()
 * 
 *     cdef _get_base(self):             # <<<<<<<<<<<<<<
 *         return self.obj
 * 
*/

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":568
 *         return self.obj
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shape(self):
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])
*/

/* Python wrapper */
/* Python wrapper for the `shape` property getter: casts self to the
 * concrete memoryview struct and delegates to the impl function.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here;
 * __Pyx_KwValues_VARARGS is presumably a macro that ignores its
 * arguments in this configuration — confirm against Cython utility code. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `shape` property: builds a Python tuple of
 * the Py_buffer shape entries, one int per dimension
 * (tuple([length for length in self.view.shape[:self.view.ndim]])). */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_7genexpr__pyx_v_length;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":570
 *     @property
 *     def shape(self):
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  { /* enter inner scope */
    __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 570, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Iterate the first ndim entries of the C shape array by pointer. */
    __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
    for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
      __pyx_t_2 = __pyx_t_4;
      __pyx_7genexpr__pyx_v_length = (__pyx_t_2[0]);
      __pyx_t_5 = PyLong_FromSsize_t(__pyx_7genexpr__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 570, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 570, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    }
  } /* exit inner scope */
  /* Convert the accumulated list into the final tuple result. */
  __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 570, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":568
 *         return self.obj
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shape(self):
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":572
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def strides(self):
 *         if self.view.strides == NULL:
*/

/* Python wrapper */
/* Python wrapper for the `strides` property getter; delegates to the
 * impl function after casting self to the concrete struct. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `strides` property: raises ValueError when the
 * underlying Py_buffer exposes no strides, otherwise returns a tuple of
 * the first ndim stride values (in bytes, per buffer protocol). */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_8genexpr1__pyx_v_stride;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":574
 *     @property
 *     def strides(self):
 *         if self.view.strides == NULL:             # <<<<<<<<<<<<<<
 * 
 *             raise ValueError, "Buffer view does not expose strides"
 */
  __pyx_t_1 = (__pyx_v_self->view.strides == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "View.MemoryView":576
 *         if self.view.strides == NULL:
 * 
 *             raise ValueError, "Buffer view does not expose strides"             # <<<<<<<<<<<<<<
 * 
 *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_Buffer_view_does_not_expose_stri, 0, 0);
    __PYX_ERR(1, 576, __pyx_L1_error)

    /* "View.MemoryView":574
 *     @property
 *     def strides(self):
 *         if self.view.strides == NULL:             # <<<<<<<<<<<<<<
 * 
 *             raise ValueError, "Buffer view does not expose strides"
 */
  }

  /* "View.MemoryView":578
 *             raise ValueError, "Buffer view does not expose strides"
 * 
 *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  { /* enter inner scope */
    __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    /* Walk the first ndim entries of the C strides array by pointer. */
    __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
    for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
      __pyx_t_3 = __pyx_t_5;
      __pyx_8genexpr1__pyx_v_stride = (__pyx_t_3[0]);
      __pyx_t_6 = PyLong_FromSsize_t(__pyx_8genexpr1__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 578, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 578, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    }
  } /* exit inner scope */
  __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":572
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def strides(self):
 *         if self.view.strides == NULL:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":580
 *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:
*/

/* Python wrapper */
/* Python wrapper for the `suboffsets` property getter; delegates to the
 * impl function after casting self to the concrete struct. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `suboffsets` property: when the buffer exposes
 * no suboffsets, returns (-1,) * ndim (the buffer-protocol convention
 * for "no indirection"); otherwise a tuple of the first ndim entries. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_8genexpr2__pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":582
 *     @property
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:             # <<<<<<<<<<<<<<
 *             return (-1,) * self.view.ndim
 * 
 */
  __pyx_t_1 = (__pyx_v_self->view.suboffsets == NULL);
  if (__pyx_t_1) {

    /* "View.MemoryView":583
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:
 *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
 * 
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
    __Pyx_XDECREF(__pyx_r);
    /* __pyx_tuple[1] is presumably the cached constant (-1,) built at
     * module init — repeated ndim times via sequence multiply. */
    __pyx_t_2 = __Pyx_PySequence_Multiply(__pyx_mstate_global->__pyx_tuple[1], __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 583, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;

    /* "View.MemoryView":582
 *     @property
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:             # <<<<<<<<<<<<<<
 *             return (-1,) * self.view.ndim
 * 
 */
  }

  /* "View.MemoryView":585
 *             return (-1,) * self.view.ndim
 * 
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  { /* enter inner scope */
    __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 585, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    /* Walk the first ndim entries of the C suboffsets array by pointer. */
    __pyx_t_4 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
    for (__pyx_t_5 = __pyx_v_self->view.suboffsets; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
      __pyx_t_3 = __pyx_t_5;
      __pyx_8genexpr2__pyx_v_suboffset = (__pyx_t_3[0]);
      __pyx_t_6 = PyLong_FromSsize_t(__pyx_8genexpr2__pyx_v_suboffset); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 585, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    }
  } /* exit inner scope */
  __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":580
 *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":587
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ndim(self):
 *         return self.view.ndim
*/

/* Python wrapper */
/* Python wrapper for the `ndim` property getter; delegates to the impl
 * function after casting self to the concrete struct. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ndim` property: returns the Py_buffer's
 * dimension count (self.view.ndim) as a Python int. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":589
 *     @property
 *     def ndim(self):
 *         return self.view.ndim             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 589, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":587
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ndim(self):
 *         return self.view.ndim
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":591
 *         return self.view.ndim
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def itemsize(self):
 *         return self.view.itemsize
*/

/* Python wrapper */
/* Python wrapper for the `itemsize` property getter; delegates to the
 * impl function after casting self to the concrete struct. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `itemsize` property: returns the Py_buffer's
 * per-element size in bytes (self.view.itemsize) as a Python int. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":593
 *     @property
 *     def itemsize(self):
 *         return self.view.itemsize             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 593, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":591
 *         return self.view.ndim
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def itemsize(self):
 *         return self.view.itemsize
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":595
 *         return self.view.itemsize
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def nbytes(self):
 *         return self.size * self.view.itemsize
*/

/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":597
 *     @property
 *     def nbytes(self):
 *         return self.size * self.view.itemsize             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":595
 *         return self.view.itemsize
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def nbytes(self):
 *         return self.size * self.view.itemsize
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":599
 *         return self.size * self.view.itemsize
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size(self):
 *         if self._size is None:
*/

/* Python wrapper */
/* Python wrapper for the `size` property getter; delegates to the impl
 * function after casting self to the concrete struct. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `size` property: lazily computes the total
 * element count (product of shape[0..ndim)) as a Python int, caches it
 * in self->_size on first access, and returns the cached value. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_v_length = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":601
 *     @property
 *     def size(self):
 *         if self._size is None:             # <<<<<<<<<<<<<<
 *             result = 1
 * 
 */
  __pyx_t_1 = (__pyx_v_self->_size == Py_None);
  if (__pyx_t_1) {

    /* "View.MemoryView":602
 *     def size(self):
 *         if self._size is None:
 *             result = 1             # <<<<<<<<<<<<<<
 * 
 *             for length in self.view.shape[:self.view.ndim]:
 */
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_1);
    __pyx_v_result = __pyx_mstate_global->__pyx_int_1;

    /* "View.MemoryView":604
 *             result = 1
 * 
 *             for length in self.view.shape[:self.view.ndim]:             # <<<<<<<<<<<<<<
 *                 result *= length
 * 
 */
    /* Multiply every shape entry into `result` using object-level
     * arithmetic (exact for arbitrarily large products). */
    __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
    for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
      __pyx_t_2 = __pyx_t_4;
      __pyx_t_5 = PyLong_FromSsize_t((__pyx_t_2[0])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 604, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_5);
      __pyx_t_5 = 0;

      /* "View.MemoryView":605
 * 
 *             for length in self.view.shape[:self.view.ndim]:
 *                 result *= length             # <<<<<<<<<<<<<<
 * 
 *             self._size = result
 */
      __pyx_t_5 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 605, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_5);
      __pyx_t_5 = 0;
    }

    /* "View.MemoryView":607
 *                 result *= length
 * 
 *             self._size = result             # <<<<<<<<<<<<<<
 * 
 *         return self._size
 */
    /* Cache the product on the instance; replaces the old _size value
     * (previously Py_None) with proper refcount bookkeeping. */
    __Pyx_INCREF(__pyx_v_result);
    __Pyx_GIVEREF(__pyx_v_result);
    __Pyx_GOTREF(__pyx_v_self->_size);
    __Pyx_DECREF(__pyx_v_self->_size);
    __pyx_v_self->_size = __pyx_v_result;

    /* "View.MemoryView":601
 *     @property
 *     def size(self):
 *         if self._size is None:             # <<<<<<<<<<<<<<
 *             result = 1
 * 
 */
  }

  /* "View.MemoryView":609
 *             self._size = result
 * 
 *         return self._size             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_size);
  __pyx_r = __pyx_v_self->_size;
  goto __pyx_L0;

  /* "View.MemoryView":599
 *         return self.size * self.view.itemsize
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size(self):
 *         if self._size is None:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_length);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":611
 *         return self._size
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         if self.view.ndim >= 1:
 *             return self.view.shape[0]
*/

/* Python wrapper */
/* sq_length slot wrapper for memoryview.__len__: casts self and
 * delegates to the impl function.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here;
 * __Pyx_KwValues_VARARGS is presumably a macro that ignores its
 * arguments in this configuration — confirm against Cython utility code. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__len__ (View.MemoryView:611-615):
 * len(mv) is the extent of the first dimension, or 0 when the view is
 * zero-dimensional. Pure C, no Python-API calls, cannot fail. */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
  /* A 0-dim buffer has no axes to measure; report length 0 instead of
   * reading past the shape array. */
  if (__pyx_v_self->view.ndim < 1) {
    return 0;
  }
  /* Otherwise len() is shape[0], matching CPython memoryview semantics. */
  return __pyx_v_self->view.shape[0];
}

/* "View.MemoryView":617
 *         return 0
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 *                                                id(self))
*/

/* Python wrapper */
/* tp_repr slot wrapper for memoryview.__repr__; delegates to the impl
 * function after casting self to the concrete struct. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__repr__: formats
 * "<MemoryView of %r at 0x%x>" from repr(self.base.__class__.__name__)
 * and hex(id(self)), joined from 5 unicode fragments. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "View.MemoryView":618
 * 
 *     def __repr__(self):
 *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,             # <<<<<<<<<<<<<<
 *                                                id(self))
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* Chain of attribute lookups: self.base -> .__class__ -> .__name__. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* %r conversion: repr() of the class name, then plain format. */
  __pyx_t_2 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Repr(__pyx_t_1), __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "View.MemoryView":619
 *     def __repr__(self):
 *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 *                                                id(self))             # <<<<<<<<<<<<<<
 * 
 *     def __str__(self):
 */
  /* %x conversion: id(self) formatted with the "x" format spec. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyObject_Format(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_x); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Fragments: "<MemoryView of ", repr(name), " at 0x", hex(id), ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u_MemoryView_of;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_at_0x;
  __pyx_t_4[3] = __pyx_t_3;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;

  /* "View.MemoryView":618
 * 
 *     def __repr__(self):
 *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,             # <<<<<<<<<<<<<<
 *                                                id(self))
 * 
 */
  /* Join with precomputed total length and max char value so the
   * result buffer is allocated exactly once. */
  __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 15 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 6 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 1, 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":617
 *         return 0
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 *                                                id(self))
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":621
 *                                                id(self))
 * 
 *     def __str__(self):             # <<<<<<<<<<<<<<
 *         return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 * 
*/

/* Python wrapper */
/* tp_str slot wrapper for memoryview.__str__; delegates to the impl
 * function after casting self to the concrete struct. */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__str__: formats
 * "<MemoryView of %r object>" from repr(self.base.__class__.__name__),
 * joined from 3 unicode fragments. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3[3];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__str__", 0);

  /* "View.MemoryView":622
 * 
 *     def __str__(self):
 *         return "<MemoryView of %r object>" % (self.base.__class__.__name__,)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* Chain of attribute lookups: self.base -> .__class__ -> .__name__. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* %r conversion: repr() of the class name, then plain format. */
  __pyx_t_2 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Repr(__pyx_t_1), __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Fragments: "<MemoryView of ", repr(name), " object>". */
  __pyx_t_3[0] = __pyx_mstate_global->__pyx_kp_u_MemoryView_of;
  __pyx_t_3[1] = __pyx_t_2;
  __pyx_t_3[2] = __pyx_mstate_global->__pyx_kp_u_object;
  /* Single-allocation join with precomputed length/max char value. */
  __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, 15 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 8, 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":621
 *                                                id(self))
 * 
 *     def __str__(self):             # <<<<<<<<<<<<<<
 *         return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":625
 * 
 * 
 *     def is_c_contig(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice *mslice
 *         cdef __Pyx_memviewslice tmp
*/

/* Python wrapper */
/* Argument-checking wrapper for memoryview.is_c_contig().
 * Takes no positional and no keyword arguments; after validation it
 * delegates to the C implementation function.  The parameter list differs
 * depending on whether the METH_FASTCALL calling convention is compiled in. */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* is_c_contig() accepts no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("is_c_contig", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("is_c_contig", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.is_c_contig(): materialises a
 * __Pyx_memviewslice describing `self` and returns a Python bool telling
 * whether the slice is C-contiguous over all view.ndim dimensions. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice *__pyx_v_mslice;
  __Pyx_memviewslice __pyx_v_tmp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("is_c_contig", 0);

  /* "View.MemoryView":628
 *         cdef __Pyx_memviewslice *mslice
 *         cdef __Pyx_memviewslice tmp
 *         mslice = get_slice_from_memview(self, &tmp)             # <<<<<<<<<<<<<<
 *         return slice_is_contig(mslice[0], 'C', self.view.ndim)
 * 
 */
  /* A NULL return means the helper already set a Python exception. */
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((void *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
  __pyx_v_mslice = __pyx_t_1;

  /* "View.MemoryView":629
 *         cdef __Pyx_memviewslice tmp
 *         mslice = get_slice_from_memview(self, &tmp)
 *         return slice_is_contig(mslice[0], 'C', self.view.ndim)             # <<<<<<<<<<<<<<
 * 
 *     def is_f_contig(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the C contiguity flag ('C' order) into a new Python bool. */
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":625
 * 
 * 
 *     def is_c_contig(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice *mslice
 *         cdef __Pyx_memviewslice tmp
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":631
 *         return slice_is_contig(mslice[0], 'C', self.view.ndim)
 * 
 *     def is_f_contig(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice *mslice
 *         cdef __Pyx_memviewslice tmp
*/

/* Python wrapper */
/* Argument-checking wrapper for memoryview.is_f_contig().
 * Takes no positional and no keyword arguments; after validation it
 * delegates to the C implementation function.  Mirrors the is_c_contig
 * wrapper above. */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* is_f_contig() accepts no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("is_f_contig", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("is_f_contig", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.is_f_contig(): materialises a
 * __Pyx_memviewslice describing `self` and returns a Python bool telling
 * whether the slice is Fortran-contiguous over all view.ndim dimensions. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice *__pyx_v_mslice;
  __Pyx_memviewslice __pyx_v_tmp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("is_f_contig", 0);

  /* "View.MemoryView":634
 *         cdef __Pyx_memviewslice *mslice
 *         cdef __Pyx_memviewslice tmp
 *         mslice = get_slice_from_memview(self, &tmp)             # <<<<<<<<<<<<<<
 *         return slice_is_contig(mslice[0], 'F', self.view.ndim)
 * 
 */
  /* A NULL return means the helper already set a Python exception. */
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((void *)NULL))) __PYX_ERR(1, 634, __pyx_L1_error)
  __pyx_v_mslice = __pyx_t_1;

  /* "View.MemoryView":635
 *         cdef __Pyx_memviewslice tmp
 *         mslice = get_slice_from_memview(self, &tmp)
 *         return slice_is_contig(mslice[0], 'F', self.view.ndim)             # <<<<<<<<<<<<<<
 * 
 *     def copy(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the C contiguity flag ('F' order) into a new Python bool. */
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":631
 *         return slice_is_contig(mslice[0], 'C', self.view.ndim)
 * 
 *     def is_f_contig(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice *mslice
 *         cdef __Pyx_memviewslice tmp
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":637
 *         return slice_is_contig(mslice[0], 'F', self.view.ndim)
 * 
 *     def copy(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice mslice
 *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/

/* Python wrapper */
/* Argument-checking wrapper for memoryview.copy().
 * Takes no positional and no keyword arguments; after validation it
 * delegates to the C implementation function. */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* copy() accepts no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("copy", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("copy", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.copy(): makes a C-contiguous ("c" order)
 * copy of this view's data and returns it wrapped in a new memoryview
 * object.  The F-contiguous buffer flag is stripped and the C-contiguous
 * flag is forced before the copy is made. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice __pyx_v_mslice;
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("copy", 0);

  /* "View.MemoryView":639
 *     def copy(self):
 *         cdef __Pyx_memviewslice mslice
 *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS             # <<<<<<<<<<<<<<
 * 
 *         slice_copy(self, &mslice)
 */
  /* Drop the Fortran-contiguity request; C order is forced further below. */
  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));

  /* "View.MemoryView":641
 *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 * 
 *         slice_copy(self, &mslice)             # <<<<<<<<<<<<<<
 *         mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 *                                    self.view.itemsize,
 */
  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));

  /* "View.MemoryView":642
 * 
 *         slice_copy(self, &mslice)
 *         mslice = slice_copy_contig(&mslice, "c", self.view.ndim,             # <<<<<<<<<<<<<<
 *                                    self.view.itemsize,
 *                                    flags|PyBUF_C_CONTIGUOUS,
 */
  /* Allocates a fresh C-contiguous buffer; errors surface via PyErr_Occurred(). */
  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 642, __pyx_L1_error)
  __pyx_v_mslice = __pyx_t_1;

  /* "View.MemoryView":647
 *                                    self.dtype_is_object)
 * 
 *         return memoryview_copy_from_slice(self, &mslice)             # <<<<<<<<<<<<<<
 * 
 *     def copy_fortran(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":637
 *         return slice_is_contig(mslice[0], 'F', self.view.ndim)
 * 
 *     def copy(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice mslice
 *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":649
 *         return memoryview_copy_from_slice(self, &mslice)
 * 
 *     def copy_fortran(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice src, dst
 *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/

/* Python wrapper */
/* Argument-checking wrapper for memoryview.copy_fortran().
 * Takes no positional and no keyword arguments; after validation it
 * delegates to the C implementation function. */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* copy_fortran() accepts no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("copy_fortran", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("copy_fortran", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.copy_fortran(): makes a Fortran-contiguous
 * ("fortran" order) copy of this view's data and returns it wrapped in a
 * new memoryview object.  The C-contiguous buffer flag is stripped and the
 * F-contiguous flag is forced before the copy is made. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice __pyx_v_src;
  __Pyx_memviewslice __pyx_v_dst;
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("copy_fortran", 0);

  /* "View.MemoryView":651
 *     def copy_fortran(self):
 *         cdef __Pyx_memviewslice src, dst
 *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS             # <<<<<<<<<<<<<<
 * 
 *         slice_copy(self, &src)
 */
  /* Drop the C-contiguity request; Fortran order is forced further below. */
  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));

  /* "View.MemoryView":653
 *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 * 
 *         slice_copy(self, &src)             # <<<<<<<<<<<<<<
 *         dst = slice_copy_contig(&src, "fortran", self.view.ndim,
 *                                 self.view.itemsize,
 */
  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));

  /* "View.MemoryView":654
 * 
 *         slice_copy(self, &src)
 *         dst = slice_copy_contig(&src, "fortran", self.view.ndim,             # <<<<<<<<<<<<<<
 *                                 self.view.itemsize,
 *                                 flags|PyBUF_F_CONTIGUOUS,
 */
  /* Allocates a fresh F-contiguous buffer; errors surface via PyErr_Occurred(). */
  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 654, __pyx_L1_error)
  __pyx_v_dst = __pyx_t_1;

  /* "View.MemoryView":659
 *                                 self.dtype_is_object)
 * 
 *         return memoryview_copy_from_slice(self, &dst)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":649
 *         return memoryview_copy_from_slice(self, &mslice)
 * 
 *     def copy_fortran(self):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice src, dst
 *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Argument-checking wrapper for memoryview.__reduce_cython__().
 * Takes no positional and no keyword arguments; after validation it
 * delegates to the implementation, which unconditionally raises TypeError
 * (this type cannot be pickled with the default protocol). */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__() accepts no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__reduce_cython__(): always raises
 * TypeError because the type has a non-trivial __cinit__ and therefore no
 * default pickling support.  Never returns a value. */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
*/

/* Python wrapper */
/* Argument-parsing wrapper for memoryview.__setstate_cython__(__pyx_state).
 * Unpacks exactly one argument (positional or keyword "__pyx_state") and
 * delegates to the implementation, which unconditionally raises TypeError. */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `unlikely(__pyx_kwds_len) < 0`.  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which normalises its operand to 0/1, so
     * the original comparison could never be true and a negative (error)
     * keyword count was silently ignored.  The other wrappers in this file
     * all test `unlikely(__pyx_kwds_len < 0)`; match them. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then let
       * __Pyx_ParseKeywords fill the remaining slots from keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Common fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Release any argument references collected before the failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of memoryview.__setstate_cython__(): always raises
 * TypeError because the type has a non-trivial __cinit__ and therefore no
 * default pickling support.  The state argument is ignored. */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":662
 * 
 * 
 * @cname('__pyx_memoryview_new')             # <<<<<<<<<<<<<<
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, const __Pyx_TypeInfo *typeinfo):
 *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
*/

/* C-level factory for memoryview objects (cname __pyx_memoryview_new):
 * boxes `flags` (int) and `dtype_is_object` (bool), vectorcalls the
 * memoryview type with (o, flags, dtype_is_object), stores the raw
 * `typeinfo` pointer on the new instance, and returns it as a new
 * reference (NULL on error with a Python exception set). */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo const *__pyx_v_typeinfo) {
  struct __pyx_memoryview_obj *__pyx_v_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_cwrapper", 0);

  /* "View.MemoryView":664
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, const __Pyx_TypeInfo *typeinfo):
 *     cdef memoryview result = memoryview(o, flags, dtype_is_object)             # <<<<<<<<<<<<<<
 *     result.typeinfo = typeinfo
 *     return result
 */
  /* Box the C arguments so they can be passed through the Python call. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  /* Slot 0 is reserved for a bound "self"; offset 1 means three real args. */
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_v_o, __pyx_t_3, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_memoryview_type, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 664, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "View.MemoryView":665
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, const __Pyx_TypeInfo *typeinfo):
 *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
 *     result.typeinfo = typeinfo             # <<<<<<<<<<<<<<
 *     return result
 * 
 */
  /* Raw pointer store; the caller keeps the typeinfo alive (static data). */
  __pyx_v_result->typeinfo = __pyx_v_typeinfo;

  /* "View.MemoryView":666
 *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
 *     result.typeinfo = typeinfo
 *     return result             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_check')
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_result);
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;

  /* "View.MemoryView":662
 * 
 * 
 * @cname('__pyx_memoryview_new')             # <<<<<<<<<<<<<<
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, const __Pyx_TypeInfo *typeinfo):
 *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":668
 *     return result
 * 
 * @cname('__pyx_memoryview_check')             # <<<<<<<<<<<<<<
 * cdef inline bint memoryview_check(object o) noexcept:
 *     return isinstance(o, memoryview)
*/

/* C-level equivalent of `isinstance(o, memoryview)` for the Cython
 * memoryview type stored in the module state.  Pure predicate: no
 * reference counting, never raises (the Cython source declares it
 * `noexcept`).  The temporary variable / goto scaffolding of the
 * generated form is collapsed into a direct return. */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
  return __Pyx_TypeCheck(__pyx_v_o, __pyx_mstate_global->__pyx_memoryview_type);
}

/* "View.MemoryView":672
 *     return isinstance(o, memoryview)
 * 
 * cdef tuple _unellipsify(object index, int ndim):             # <<<<<<<<<<<<<<
 *     """
 *     Replace all ellipses with full slices and fill incomplete indices with
*/

/* Normalize an indexing expression for an ndim-dimensional memoryview:
 * wraps a non-tuple index into a 1-tuple, replaces the first `...`
 * (Ellipsis) with enough full slices to pad the index out to ndim entries,
 * and rejects any item that is neither a slice nor an integer index.
 * Returns a 2-tuple `(have_slices or nslices, tuple(result))` where the
 * first element is truthy iff the result still contains slice objects
 * (i.e. the access yields a sub-view rather than a scalar), or NULL with
 * an exception set on error (generated from View.MemoryView:672). */
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
  Py_ssize_t __pyx_v_idx;
  PyObject *__pyx_v_tup = NULL;
  PyObject *__pyx_v_result = NULL;
  int __pyx_v_have_slices;
  int __pyx_v_seen_ellipsis;
  PyObject *__pyx_v_item = NULL;
  Py_ssize_t __pyx_v_nslices;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  PyObject *__pyx_t_6[3];
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_unellipsify", 0);

  /* "View.MemoryView":678
 *     """
 *     cdef Py_ssize_t idx
 *     tup = <tuple>index if isinstance(index, tuple) else (index,)             # <<<<<<<<<<<<<<
 * 
 *     result = [slice(None)] * ndim
 */
  /* Normalize: a bare index becomes a 1-element tuple so the loop below
   * can treat both forms uniformly. */
  __pyx_t_2 = PyTuple_Check(__pyx_v_index); 
  if (__pyx_t_2) {
    __Pyx_INCREF(((PyObject*)__pyx_v_index));
    __pyx_t_1 = __pyx_v_index;
  } else {
    __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 678, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_INCREF(__pyx_v_index);
    __Pyx_GIVEREF(__pyx_v_index);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index) != (0)) __PYX_ERR(1, 678, __pyx_L1_error);
    __pyx_t_1 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_tup = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "View.MemoryView":680
 *     tup = <tuple>index if isinstance(index, tuple) else (index,)
 * 
 *     result = [slice(None)] * ndim             # <<<<<<<<<<<<<<
 *     have_slices = False
 *     seen_ellipsis = False
 */
  /* Pre-fill result with ndim copies of the interned `slice(None)`
   * (__pyx_slice[0]); positions not overwritten below stay full slices. */
  __pyx_t_1 = PyList_New(1 * ((__pyx_v_ndim<0) ? 0:__pyx_v_ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  { Py_ssize_t __pyx_temp;
    for (__pyx_temp=0; __pyx_temp < __pyx_v_ndim; __pyx_temp++) {
      __Pyx_INCREF(__pyx_mstate_global->__pyx_slice[0]);
      __Pyx_GIVEREF(__pyx_mstate_global->__pyx_slice[0]);
      if (__Pyx_PyList_SET_ITEM(__pyx_t_1, __pyx_temp, __pyx_mstate_global->__pyx_slice[0]) != (0)) __PYX_ERR(1, 680, __pyx_L1_error);
    }
  }
  __pyx_v_result = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "View.MemoryView":681
 * 
 *     result = [slice(None)] * ndim
 *     have_slices = False             # <<<<<<<<<<<<<<
 *     seen_ellipsis = False
 *     idx = 0
 */
  __pyx_v_have_slices = 0;

  /* "View.MemoryView":682
 *     result = [slice(None)] * ndim
 *     have_slices = False
 *     seen_ellipsis = False             # <<<<<<<<<<<<<<
 *     idx = 0
 *     for item in tup:
 */
  __pyx_v_seen_ellipsis = 0;

  /* "View.MemoryView":683
 *     have_slices = False
 *     seen_ellipsis = False
 *     idx = 0             # <<<<<<<<<<<<<<
 *     for item in tup:
 *         if item is Ellipsis:
 */
  __pyx_v_idx = 0;

  /* "View.MemoryView":684
 *     seen_ellipsis = False
 *     idx = 0
 *     for item in tup:             # <<<<<<<<<<<<<<
 *         if item is Ellipsis:
 *             if not seen_ellipsis:
 */
  if (unlikely(__pyx_v_tup == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
    __PYX_ERR(1, 684, __pyx_L1_error)
  }
  __pyx_t_1 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_1);
  __pyx_t_4 = 0;
  for (;;) {
    {
      Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
      #if !CYTHON_ASSUME_SAFE_SIZE
      if (unlikely((__pyx_temp < 0))) __PYX_ERR(1, 684, __pyx_L1_error)
      #endif
      if (__pyx_t_4 >= __pyx_temp) break;
    }
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_3 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4));
    #else
    __pyx_t_3 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_4);
    #endif
    ++__pyx_t_4;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 684, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_3);
    __pyx_t_3 = 0;

    /* "View.MemoryView":685
 *     idx = 0
 *     for item in tup:
 *         if item is Ellipsis:             # <<<<<<<<<<<<<<
 *             if not seen_ellipsis:
 *                 idx += ndim - len(tup)
 */
    __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
    if (__pyx_t_2) {

      /* "View.MemoryView":686
 *     for item in tup:
 *         if item is Ellipsis:
 *             if not seen_ellipsis:             # <<<<<<<<<<<<<<
 *                 idx += ndim - len(tup)
 *                 seen_ellipsis = True
 */
      /* Only the first Ellipsis expands; it skips forward over the
       * positions implicitly covered by the pre-filled full slices. */
      __pyx_t_2 = (!__pyx_v_seen_ellipsis);
      if (__pyx_t_2) {

        /* "View.MemoryView":687
 *         if item is Ellipsis:
 *             if not seen_ellipsis:
 *                 idx += ndim - len(tup)             # <<<<<<<<<<<<<<
 *                 seen_ellipsis = True
 *             have_slices = True
 */
        if (unlikely(__pyx_v_tup == Py_None)) {
          PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
          __PYX_ERR(1, 687, __pyx_L1_error)
        }
        __pyx_t_5 = __Pyx_PyTuple_GET_SIZE(__pyx_v_tup); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 687, __pyx_L1_error)
        __pyx_v_idx = (__pyx_v_idx + (__pyx_v_ndim - __pyx_t_5));

        /* "View.MemoryView":688
 *             if not seen_ellipsis:
 *                 idx += ndim - len(tup)
 *                 seen_ellipsis = True             # <<<<<<<<<<<<<<
 *             have_slices = True
 *         else:
 */
        __pyx_v_seen_ellipsis = 1;

        /* "View.MemoryView":686
 *     for item in tup:
 *         if item is Ellipsis:
 *             if not seen_ellipsis:             # <<<<<<<<<<<<<<
 *                 idx += ndim - len(tup)
 *                 seen_ellipsis = True
 */
      }

      /* "View.MemoryView":689
 *                 idx += ndim - len(tup)
 *                 seen_ellipsis = True
 *             have_slices = True             # <<<<<<<<<<<<<<
 *         else:
 *             if isinstance(item, slice):
 */
      __pyx_v_have_slices = 1;

      /* "View.MemoryView":685
 *     idx = 0
 *     for item in tup:
 *         if item is Ellipsis:             # <<<<<<<<<<<<<<
 *             if not seen_ellipsis:
 *                 idx += ndim - len(tup)
 */
      goto __pyx_L5;
    }

    /* "View.MemoryView":691
 *             have_slices = True
 *         else:
 *             if isinstance(item, slice):             # <<<<<<<<<<<<<<
 *                 have_slices = True
 *             elif not PyIndex_Check(item):
 */
    /*else*/ {
      __pyx_t_2 = PySlice_Check(__pyx_v_item); 
      if (__pyx_t_2) {

        /* "View.MemoryView":692
 *         else:
 *             if isinstance(item, slice):
 *                 have_slices = True             # <<<<<<<<<<<<<<
 *             elif not PyIndex_Check(item):
 *                 raise TypeError, f"Cannot index with type '{type(item)}'"
 */
        __pyx_v_have_slices = 1;

        /* "View.MemoryView":691
 *             have_slices = True
 *         else:
 *             if isinstance(item, slice):             # <<<<<<<<<<<<<<
 *                 have_slices = True
 *             elif not PyIndex_Check(item):
 */
        goto __pyx_L7;
      }

      /* "View.MemoryView":693
 *             if isinstance(item, slice):
 *                 have_slices = True
 *             elif not PyIndex_Check(item):             # <<<<<<<<<<<<<<
 *                 raise TypeError, f"Cannot index with type '{type(item)}'"
 *             result[idx] = item
 */
      /* Anything that is neither a slice nor usable as an integer index
       * (PyIndex_Check) is a TypeError; the f-string is assembled from
       * three parts via __Pyx_PyUnicode_Join. */
      __pyx_t_2 = (!(PyIndex_Check(__pyx_v_item) != 0));
      if (unlikely(__pyx_t_2)) {

        /* "View.MemoryView":694
 *                 have_slices = True
 *             elif not PyIndex_Check(item):
 *                 raise TypeError, f"Cannot index with type '{type(item)}'"             # <<<<<<<<<<<<<<
 *             result[idx] = item
 *         idx += 1
 */
        __pyx_t_3 = __Pyx_PyObject_FormatSimple(((PyObject *)Py_TYPE(__pyx_v_item)), __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 694, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
        __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u_Cannot_index_with_type;
        __pyx_t_6[1] = __pyx_t_3;
        __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u__4;
        __pyx_t_7 = __Pyx_PyUnicode_Join(__pyx_t_6, 3, 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 1, 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
        if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 694, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_7);
        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_t_7, 0, 0);
        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
        __PYX_ERR(1, 694, __pyx_L1_error)

        /* "View.MemoryView":693
 *             if isinstance(item, slice):
 *                 have_slices = True
 *             elif not PyIndex_Check(item):             # <<<<<<<<<<<<<<
 *                 raise TypeError, f"Cannot index with type '{type(item)}'"
 *             result[idx] = item
 */
      }
      __pyx_L7:;

      /* "View.MemoryView":695
 *             elif not PyIndex_Check(item):
 *                 raise TypeError, f"Cannot index with type '{type(item)}'"
 *             result[idx] = item             # <<<<<<<<<<<<<<
 *         idx += 1
 * 
 */
      if (unlikely((__Pyx_SetItemInt(__pyx_v_result, __pyx_v_idx, __pyx_v_item, Py_ssize_t, 1, PyLong_FromSsize_t, 1, 1, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference) < 0))) __PYX_ERR(1, 695, __pyx_L1_error)
    }
    __pyx_L5:;

    /* "View.MemoryView":696
 *                 raise TypeError, f"Cannot index with type '{type(item)}'"
 *             result[idx] = item
 *         idx += 1             # <<<<<<<<<<<<<<
 * 
 *     nslices = ndim - idx
 */
    __pyx_v_idx = (__pyx_v_idx + 1);

    /* "View.MemoryView":684
 *     seen_ellipsis = False
 *     idx = 0
 *     for item in tup:             # <<<<<<<<<<<<<<
 *         if item is Ellipsis:
 *             if not seen_ellipsis:
 */
  }
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "View.MemoryView":698
 *         idx += 1
 * 
 *     nslices = ndim - idx             # <<<<<<<<<<<<<<
 *     return have_slices or nslices, tuple(result)
 * 
 */
  __pyx_v_nslices = (__pyx_v_ndim - __pyx_v_idx);

  /* "View.MemoryView":699
 * 
 *     nslices = ndim - idx
 *     return have_slices or nslices, tuple(result)             # <<<<<<<<<<<<<<
 * 
 * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1:
 */
  /* Python `or` short-circuit: the first tuple element is True when
   * have_slices is set, otherwise the integer nslices (trailing
   * dimensions that remained full slices). */
  __Pyx_XDECREF(__pyx_r);
  if (!__pyx_v_have_slices) {
  } else {
    __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 699, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_1 = __pyx_t_7;
    __pyx_t_7 = 0;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = PyLong_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_1 = __pyx_t_7;
  __pyx_t_7 = 0;
  __pyx_L9_bool_binop_done:;
  __pyx_t_7 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_1);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(1, 699, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7) != (0)) __PYX_ERR(1, 699, __pyx_L1_error);
  __pyx_t_1 = 0;
  __pyx_t_7 = 0;
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":672
 *     return isinstance(o, memoryview)
 * 
 * cdef tuple _unellipsify(object index, int ndim):             # <<<<<<<<<<<<<<
 *     """
 *     Replace all ellipses with full slices and fill incomplete indices with
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_tup);
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_item);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":701
 *     return have_slices or nslices, tuple(result)
 * 
 * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1:             # <<<<<<<<<<<<<<
 *     for suboffset in suboffsets[:ndim]:
 *         if suboffset >= 0:
*/

/* "View.MemoryView":701
 *     return have_slices or nslices, tuple(result)
 * 
 * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim)
 *     except -1
 */

/* Verify that every one of the first ndim suboffsets is negative, i.e.
 * that no dimension is "indirect" (pointer-chasing, PIL-style buffers).
 * Returns 0 on success; raises ValueError and returns -1 otherwise — the
 * return value exists purely as the Cython `except -1` error flag. */
static int assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* A suboffset >= 0 marks an indirect dimension, which the memoryview
   * slicing machinery in this module does not support. */
  for (int __pyx_i = 0; __pyx_i < __pyx_v_ndim; __pyx_i++) {
    Py_ssize_t __pyx_v_suboffset = __pyx_v_suboffsets[__pyx_i];
    if (unlikely(__pyx_v_suboffset >= 0)) {
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_mstate_global->__pyx_kp_u_Indirect_dimensions_not_supporte, 0, 0);
      __PYX_ERR(1, 704, __pyx_L1_error)
    }
  }

  /* All dimensions are direct. */
  __pyx_r = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":711
 * 
 * 
 * @cname('__pyx_memview_slice')             # <<<<<<<<<<<<<<
 * cdef memoryview memview_slice(memoryview memview, object indices):
 *     cdef int new_ndim = 0, suboffset_dim = -1, dim
*/

static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
  int __pyx_v_new_ndim;
  int __pyx_v_suboffset_dim;
  int __pyx_v_dim;
  __Pyx_memviewslice __pyx_v_src;
  __Pyx_memviewslice __pyx_v_dst;
  __Pyx_memviewslice *__pyx_v_p_src;
  struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
  __Pyx_memviewslice *__pyx_v_p_dst;
  int *__pyx_v_p_suboffset_dim;
  Py_ssize_t __pyx_v_start;
  Py_ssize_t __pyx_v_stop;
  Py_ssize_t __pyx_v_step;
  Py_ssize_t __pyx_v_cindex;
  int __pyx_v_have_start;
  int __pyx_v_have_stop;
  int __pyx_v_have_step;
  PyObject *__pyx_v_index = NULL;
  struct __pyx_memoryview_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  struct __pyx_memoryview_obj *__pyx_t_3;
  char *__pyx_t_4;
  int __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  PyObject *(*__pyx_t_7)(PyObject *);
  PyObject *__pyx_t_8 = NULL;
  Py_ssize_t __pyx_t_9;
  int __pyx_t_10;
  Py_ssize_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memview_slice", 0);

  /* "View.MemoryView":713
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices):
 *     cdef int new_ndim = 0, suboffset_dim = -1, dim             # <<<<<<<<<<<<<<
 *     cdef bint negative_step
 *     cdef __Pyx_memviewslice src, dst
*/
  __pyx_v_new_ndim = 0;
  __pyx_v_suboffset_dim = -1;

  /* "View.MemoryView":720
 * 
 * 
 *     memset(&dst, 0, sizeof(dst))             # <<<<<<<<<<<<<<
 * 
 *     cdef _memoryviewslice memviewsliceobj
*/
  (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));

  /* "View.MemoryView":724
 *     cdef _memoryviewslice memviewsliceobj
 * 
 *     assert memview.view.ndim > 0             # <<<<<<<<<<<<<<
 * 
 *     if isinstance(memview, _memoryviewslice):
*/
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = (__pyx_v_memview->view.ndim > 0);
    if (unlikely(!__pyx_t_1)) {
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), 0, 0, 0);
      __PYX_ERR(1, 724, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(1, 724, __pyx_L1_error)
  #endif

  /* "View.MemoryView":726
 *     assert memview.view.ndim > 0
 * 
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         memviewsliceobj = memview
 *         p_src = &memviewsliceobj.from_slice
*/
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_mstate_global->__pyx_memoryviewslice_type); 
  if (__pyx_t_1) {

    /* "View.MemoryView":727
 * 
 *     if isinstance(memview, _memoryviewslice):
 *         memviewsliceobj = memview             # <<<<<<<<<<<<<<
 *         p_src = &memviewsliceobj.from_slice
 *     else:
*/
    __pyx_t_2 = ((PyObject *)__pyx_v_memview);
    __Pyx_INCREF(__pyx_t_2);
    if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_mstate_global->__pyx_memoryviewslice_type))))) __PYX_ERR(1, 727, __pyx_L1_error)
    __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
    __pyx_t_2 = 0;

    /* "View.MemoryView":728
 *     if isinstance(memview, _memoryviewslice):
 *         memviewsliceobj = memview
 *         p_src = &memviewsliceobj.from_slice             # <<<<<<<<<<<<<<
 *     else:
 *         slice_copy(memview, &src)
*/
    __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);

    /* "View.MemoryView":726
 *     assert memview.view.ndim > 0
 * 
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         memviewsliceobj = memview
 *         p_src = &memviewsliceobj.from_slice
*/
    goto __pyx_L3;
  }

  /* "View.MemoryView":730
 *         p_src = &memviewsliceobj.from_slice
 *     else:
 *         slice_copy(memview, &src)             # <<<<<<<<<<<<<<
 *         p_src = &src
 * 
*/
  /*else*/ {
    __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));

    /* "View.MemoryView":731
 *     else:
 *         slice_copy(memview, &src)
 *         p_src = &src             # <<<<<<<<<<<<<<
 * 
 * 
*/
    __pyx_v_p_src = (&__pyx_v_src);
  }
  __pyx_L3:;

  /* "View.MemoryView":737
 * 
 * 
 *     dst.memview = p_src.memview             # <<<<<<<<<<<<<<
 *     dst.data = p_src.data
 * 
*/
  __pyx_t_3 = __pyx_v_p_src->memview;
  __pyx_v_dst.memview = __pyx_t_3;

  /* "View.MemoryView":738
 * 
 *     dst.memview = p_src.memview
 *     dst.data = p_src.data             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_4 = __pyx_v_p_src->data;
  __pyx_v_dst.data = __pyx_t_4;

  /* "View.MemoryView":743
 * 
 * 
 *     cdef __Pyx_memviewslice *p_dst = &dst             # <<<<<<<<<<<<<<
 *     cdef int *p_suboffset_dim = &suboffset_dim
 *     cdef Py_ssize_t start, stop, step, cindex
*/
  __pyx_v_p_dst = (&__pyx_v_dst);

  /* "View.MemoryView":744
 * 
 *     cdef __Pyx_memviewslice *p_dst = &dst
 *     cdef int *p_suboffset_dim = &suboffset_dim             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t start, stop, step, cindex
 *     cdef bint have_start, have_stop, have_step
*/
  __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);

  /* "View.MemoryView":748
 *     cdef bint have_start, have_stop, have_step
 * 
 *     for dim, index in enumerate(indices):             # <<<<<<<<<<<<<<
 *         if PyIndex_Check(index):
 *             cindex = index
*/
  __pyx_t_5 = 0;
  if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
    __pyx_t_2 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_2);
    __pyx_t_6 = 0;
    __pyx_t_7 = NULL;
  } else {
    __pyx_t_6 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 748, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_7 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 748, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_7)) {
      if (likely(PyList_CheckExact(__pyx_t_2))) {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(1, 748, __pyx_L1_error)
          #endif
          if (__pyx_t_6 >= __pyx_temp) break;
        }
        __pyx_t_8 = __Pyx_PyList_GetItemRefFast(__pyx_t_2, __pyx_t_6, __Pyx_ReferenceSharing_OwnStrongReference);
        ++__pyx_t_6;
      } else {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_2);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(1, 748, __pyx_L1_error)
          #endif
          if (__pyx_t_6 >= __pyx_temp) break;
        }
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_8 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_6));
        #else
        __pyx_t_8 = __Pyx_PySequence_ITEM(__pyx_t_2, __pyx_t_6);
        #endif
        ++__pyx_t_6;
      }
      if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 748, __pyx_L1_error)
    } else {
      __pyx_t_8 = __pyx_t_7(__pyx_t_2);
      if (unlikely(!__pyx_t_8)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(1, 748, __pyx_L1_error)
          PyErr_Clear();
        }
        break;
      }
    }
    __Pyx_GOTREF(__pyx_t_8);
    __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_8);
    __pyx_t_8 = 0;
    __pyx_v_dim = __pyx_t_5;
    __pyx_t_5 = (__pyx_t_5 + 1);

    /* "View.MemoryView":749
 * 
 *     for dim, index in enumerate(indices):
 *         if PyIndex_Check(index):             # <<<<<<<<<<<<<<
 *             cindex = index
 *             slice_memviewslice(
*/
    __pyx_t_1 = (PyIndex_Check(__pyx_v_index) != 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":750
 *     for dim, index in enumerate(indices):
 *         if PyIndex_Check(index):
 *             cindex = index             # <<<<<<<<<<<<<<
 *             slice_memviewslice(
 *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
      __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 750, __pyx_L1_error)
      __pyx_v_cindex = __pyx_t_9;

      /* "View.MemoryView":751
 *         if PyIndex_Check(index):
 *             cindex = index
 *             slice_memviewslice(             # <<<<<<<<<<<<<<
 *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 *                 dim, new_ndim, p_suboffset_dim,
*/
      __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_cindex, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 751, __pyx_L1_error)

      /* "View.MemoryView":749
 * 
 *     for dim, index in enumerate(indices):
 *         if PyIndex_Check(index):             # <<<<<<<<<<<<<<
 *             cindex = index
 *             slice_memviewslice(
*/
      goto __pyx_L6;
    }

    /* "View.MemoryView":757
 *                 0, 0, 0, # have_{start,stop,step}
 *                 False)
 *         elif index is None:             # <<<<<<<<<<<<<<
 *             p_dst.shape[new_ndim] = 1
 *             p_dst.strides[new_ndim] = 0
*/
    __pyx_t_1 = (__pyx_v_index == Py_None);
    if (__pyx_t_1) {

      /* "View.MemoryView":758
 *                 False)
 *         elif index is None:
 *             p_dst.shape[new_ndim] = 1             # <<<<<<<<<<<<<<
 *             p_dst.strides[new_ndim] = 0
 *             p_dst.suboffsets[new_ndim] = -1
*/
      (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;

      /* "View.MemoryView":759
 *         elif index is None:
 *             p_dst.shape[new_ndim] = 1
 *             p_dst.strides[new_ndim] = 0             # <<<<<<<<<<<<<<
 *             p_dst.suboffsets[new_ndim] = -1
 *             new_ndim += 1
*/
      (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;

      /* "View.MemoryView":760
 *             p_dst.shape[new_ndim] = 1
 *             p_dst.strides[new_ndim] = 0
 *             p_dst.suboffsets[new_ndim] = -1             # <<<<<<<<<<<<<<
 *             new_ndim += 1
 *         else:
*/
      (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;

      /* "View.MemoryView":761
 *             p_dst.strides[new_ndim] = 0
 *             p_dst.suboffsets[new_ndim] = -1
 *             new_ndim += 1             # <<<<<<<<<<<<<<
 *         else:
 *             start = index.start or 0
*/
      __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);

      /* "View.MemoryView":757
 *                 0, 0, 0, # have_{start,stop,step}
 *                 False)
 *         elif index is None:             # <<<<<<<<<<<<<<
 *             p_dst.shape[new_ndim] = 1
 *             p_dst.strides[new_ndim] = 0
*/
      goto __pyx_L6;
    }

    /* "View.MemoryView":763
 *             new_ndim += 1
 *         else:
 *             start = index.start or 0             # <<<<<<<<<<<<<<
 *             stop = index.stop or 0
 *             step = index.step or 0
*/
    /*else*/ {
      __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_mstate_global->__pyx_n_u_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 763, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 763, __pyx_L1_error)
      if (!__pyx_t_1) {
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      } else {
        __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 763, __pyx_L1_error)
        __pyx_t_9 = __pyx_t_11;
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        goto __pyx_L7_bool_binop_done;
      }
      __pyx_t_9 = 0;
      __pyx_L7_bool_binop_done:;
      __pyx_v_start = __pyx_t_9;

      /* "View.MemoryView":764
 *         else:
 *             start = index.start or 0
 *             stop = index.stop or 0             # <<<<<<<<<<<<<<
 *             step = index.step or 0
 * 
*/
      __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_mstate_global->__pyx_n_u_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 764, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 764, __pyx_L1_error)
      if (!__pyx_t_1) {
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      } else {
        __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 764, __pyx_L1_error)
        __pyx_t_9 = __pyx_t_11;
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        goto __pyx_L9_bool_binop_done;
      }
      __pyx_t_9 = 0;
      __pyx_L9_bool_binop_done:;
      __pyx_v_stop = __pyx_t_9;

      /* "View.MemoryView":765
 *             start = index.start or 0
 *             stop = index.stop or 0
 *             step = index.step or 0             # <<<<<<<<<<<<<<
 * 
 *             have_start = index.start is not None
*/
      __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_mstate_global->__pyx_n_u_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 765, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 765, __pyx_L1_error)
      if (!__pyx_t_1) {
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      } else {
        __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 765, __pyx_L1_error)
        __pyx_t_9 = __pyx_t_11;
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        goto __pyx_L11_bool_binop_done;
      }
      __pyx_t_9 = 0;
      __pyx_L11_bool_binop_done:;
      __pyx_v_step = __pyx_t_9;

      /* "View.MemoryView":767
 *             step = index.step or 0
 * 
 *             have_start = index.start is not None             # <<<<<<<<<<<<<<
 *             have_stop = index.stop is not None
 *             have_step = index.step is not None
*/
      __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_mstate_global->__pyx_n_u_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 767, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_1 = (__pyx_t_8 != Py_None);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __pyx_v_have_start = __pyx_t_1;

      /* "View.MemoryView":768
 * 
 *             have_start = index.start is not None
 *             have_stop = index.stop is not None             # <<<<<<<<<<<<<<
 *             have_step = index.step is not None
 * 
*/
      __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_mstate_global->__pyx_n_u_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 768, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_1 = (__pyx_t_8 != Py_None);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __pyx_v_have_stop = __pyx_t_1;

      /* "View.MemoryView":769
 *             have_start = index.start is not None
 *             have_stop = index.stop is not None
 *             have_step = index.step is not None             # <<<<<<<<<<<<<<
 * 
 *             slice_memviewslice(
*/
      __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_mstate_global->__pyx_n_u_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 769, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_1 = (__pyx_t_8 != Py_None);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __pyx_v_have_step = __pyx_t_1;

      /* "View.MemoryView":771
 *             have_step = index.step is not None
 * 
 *             slice_memviewslice(             # <<<<<<<<<<<<<<
 *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 *                 dim, new_ndim, p_suboffset_dim,
*/
      __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 771, __pyx_L1_error)

      /* "View.MemoryView":777
 *                 have_start, have_stop, have_step,
 *                 True)
 *             new_ndim += 1             # <<<<<<<<<<<<<<
 * 
 *     if isinstance(memview, _memoryviewslice):
*/
      __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
    }
    __pyx_L6:;

    /* "View.MemoryView":748
 *     cdef bint have_start, have_stop, have_step
 * 
 *     for dim, index in enumerate(indices):             # <<<<<<<<<<<<<<
 *         if PyIndex_Check(index):
 *             cindex = index
*/
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "View.MemoryView":779
 *             new_ndim += 1
 * 
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         return memoryview_fromslice(dst, new_ndim,
 *                                     memviewsliceobj.to_object_func,
*/
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_mstate_global->__pyx_memoryviewslice_type); 
  if (__pyx_t_1) {

    /* "View.MemoryView":780
 * 
 *     if isinstance(memview, _memoryviewslice):
 *         return memoryview_fromslice(dst, new_ndim,             # <<<<<<<<<<<<<<
 *                                     memviewsliceobj.to_object_func,
 *                                     memviewsliceobj.to_dtype_func,
*/
    __Pyx_XDECREF((PyObject *)__pyx_r);

    /* "View.MemoryView":781
 *     if isinstance(memview, _memoryviewslice):
 *         return memoryview_fromslice(dst, new_ndim,
 *                                     memviewsliceobj.to_object_func,             # <<<<<<<<<<<<<<
 *                                     memviewsliceobj.to_dtype_func,
 *                                     memview.dtype_is_object)
*/
    if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 781, __pyx_L1_error) }

    /* "View.MemoryView":782
 *         return memoryview_fromslice(dst, new_ndim,
 *                                     memviewsliceobj.to_object_func,
 *                                     memviewsliceobj.to_dtype_func,             # <<<<<<<<<<<<<<
 *                                     memview.dtype_is_object)
 *     else:
*/
    if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 782, __pyx_L1_error) }

    /* "View.MemoryView":780
 * 
 *     if isinstance(memview, _memoryviewslice):
 *         return memoryview_fromslice(dst, new_ndim,             # <<<<<<<<<<<<<<
 *                                     memviewsliceobj.to_object_func,
 *                                     memviewsliceobj.to_dtype_func,
*/
    __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 780, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_mstate_global->__pyx_memoryview_type))))) __PYX_ERR(1, 780, __pyx_L1_error)
    __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2);
    __pyx_t_2 = 0;
    goto __pyx_L0;

    /* "View.MemoryView":779
 *             new_ndim += 1
 * 
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         return memoryview_fromslice(dst, new_ndim,
 *                                     memviewsliceobj.to_object_func,
*/
  }

  /* "View.MemoryView":785
 *                                     memview.dtype_is_object)
 *     else:
 *         return memoryview_fromslice(dst, new_ndim, NULL, NULL,             # <<<<<<<<<<<<<<
 *                                     memview.dtype_is_object)
 * 
*/
  /*else*/ {
    __Pyx_XDECREF((PyObject *)__pyx_r);

    /* "View.MemoryView":786
 *     else:
 *         return memoryview_fromslice(dst, new_ndim, NULL, NULL,
 *                                     memview.dtype_is_object)             # <<<<<<<<<<<<<<
 * 
 * 
*/
    __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 785, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);

    /* "View.MemoryView":785
 *                                     memview.dtype_is_object)
 *     else:
 *         return memoryview_fromslice(dst, new_ndim, NULL, NULL,             # <<<<<<<<<<<<<<
 *                                     memview.dtype_is_object)
 * 
*/
    if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_mstate_global->__pyx_memoryview_type))))) __PYX_ERR(1, 785, __pyx_L1_error)
    __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2);
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }

  /* "View.MemoryView":711
 * 
 * 
 * @cname('__pyx_memview_slice')             # <<<<<<<<<<<<<<
 * cdef memoryview memview_slice(memoryview memview, object indices):
 *     cdef int new_ndim = 0, suboffset_dim = -1, dim
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
  __Pyx_XDECREF(__pyx_v_index);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":793
 * slice_memviewslice: normalize a single index or slice along one axis.
 *
 * For a scalar index (is_slice == 0): wrap a negative index by `shape`
 * and bounds-check it. For a slice: substitute Python slice defaults,
 * clamp start/stop into range, and write the resulting shape, stride
 * and suboffset into *dst at position new_ndim.
 *
 * The start offset is then folded into dst->data, or into the pending
 * indirect dimension recorded in *suboffset_dim. Indirect (suboffset
 * >= 0) dimensions are dereferenced immediately when fully indexed,
 * or registered in *suboffset_dim when sliced.
 *
 * Runs without the GIL; the error path re-acquires it to set the
 * Python exception traceback. Returns 0 on success, -1 on error.
 */

static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
  Py_ssize_t __pyx_v_new_shape;
  int __pyx_v_negative_step;
  int __pyx_r;
  int __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  if (!__pyx_v_is_slice) {
    /* Scalar index: wrap a negative index once, then bounds-check. */
    if (__pyx_v_start < 0) {
      __pyx_v_start += __pyx_v_shape;
    }
    if (!((0 <= __pyx_v_start) && (__pyx_v_start < __pyx_v_shape))) {
      __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_mstate_global->__pyx_kp_u_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 819, __pyx_L1_error)
    }
  } else {
    /* Slice: establish the step (default 1) and reject step == 0. */
    if (__pyx_v_have_step != 0) {
      __pyx_v_negative_step = (__pyx_v_step < 0);
      if (__pyx_v_step == 0) {
        __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_mstate_global->__pyx_kp_u_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 825, __pyx_L1_error)
      }
    } else {
      __pyx_v_negative_step = 0;
      __pyx_v_step = 1;
    }

    /* Normalize start: wrap negatives, clamp to [0, shape] (or
       shape-1 for a negative step); default depends on direction. */
    if (__pyx_v_have_start != 0) {
      if (__pyx_v_start < 0) {
        __pyx_v_start += __pyx_v_shape;
        if (__pyx_v_start < 0) {
          __pyx_v_start = 0;
        }
      } else if (__pyx_v_start >= __pyx_v_shape) {
        __pyx_v_start = __pyx_v_negative_step ? (__pyx_v_shape - 1) : __pyx_v_shape;
      }
    } else {
      __pyx_v_start = __pyx_v_negative_step ? (__pyx_v_shape - 1) : 0;
    }

    /* Normalize stop the same way; default is -1 (one before the
       first element) for a negative step, shape otherwise. */
    if (__pyx_v_have_stop != 0) {
      if (__pyx_v_stop < 0) {
        __pyx_v_stop += __pyx_v_shape;
        if (__pyx_v_stop < 0) {
          __pyx_v_stop = 0;
        }
      } else if (__pyx_v_stop > __pyx_v_shape) {
        __pyx_v_stop = __pyx_v_shape;
      }
    } else {
      __pyx_v_stop = __pyx_v_negative_step ? -1L : __pyx_v_shape;
    }

    /* Length of the slice: ceil((stop - start) / step) using plain C
       (truncating) division, clamped at zero for empty slices. */
    __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
    if ((((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0)) {
      __pyx_v_new_shape = (__pyx_v_new_shape + 1);
    }
    if (__pyx_v_new_shape < 0) {
      __pyx_v_new_shape = 0;
    }

    /* Record the sliced dimension in the destination view. */
    (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
    (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
    (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
  }

  /* Fold the start offset into the data pointer, or into the pending
     indirect dimension's suboffset when one precedes this axis. */
  if ((__pyx_v_suboffset_dim[0]) < 0) {
    __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
  } else {
    __pyx_t_3 = (__pyx_v_suboffset_dim[0]);
    (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
  }

  if (__pyx_v_suboffset >= 0) {
    if (!__pyx_v_is_slice) {
      if (__pyx_v_new_ndim == 0) {
        /* All preceding dimensions indexed away: dereference the
           indirect pointer now and apply the suboffset. */
        __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
      } else {
        __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_mstate_global->__pyx_kp_u_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 886, __pyx_L1_error)
      }
    } else {
      /* Sliced indirect dimension: remember it for later offsets. */
      (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
    }
  }

  __pyx_r = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":896
 * 
 * 
 * @cname('__pyx_pybuffer_index')             # <<<<<<<<<<<<<<
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 *                           Py_ssize_t dim) except NULL:
*/

static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
  Py_ssize_t __pyx_v_shape;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_suboffset;
  Py_ssize_t __pyx_v_itemsize;
  char *__pyx_v_resultp;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[3];
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("pybuffer_index", 0);

  /* "View.MemoryView":899
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t itemsize = view.itemsize
 *     cdef char *resultp
*/
  __pyx_v_suboffset = -1L;

  /* "View.MemoryView":900
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1
 *     cdef Py_ssize_t itemsize = view.itemsize             # <<<<<<<<<<<<<<
 *     cdef char *resultp
 * 
*/
  __pyx_t_1 = __pyx_v_view->itemsize;
  __pyx_v_itemsize = __pyx_t_1;

  /* "View.MemoryView":903
 *     cdef char *resultp
 * 
 *     if view.ndim == 0:             # <<<<<<<<<<<<<<
 *         shape = view.len // itemsize
 *         stride = itemsize
*/
  __pyx_t_2 = (__pyx_v_view->ndim == 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":904
 * 
 *     if view.ndim == 0:
 *         shape = view.len // itemsize             # <<<<<<<<<<<<<<
 *         stride = itemsize
 *     else:
*/
    if (unlikely(__pyx_v_itemsize == 0)) {
      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
      __PYX_ERR(1, 904, __pyx_L1_error)
    }
    else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1)  && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
      PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
      __PYX_ERR(1, 904, __pyx_L1_error)
    }
    __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize, 0);

    /* "View.MemoryView":905
 *     if view.ndim == 0:
 *         shape = view.len // itemsize
 *         stride = itemsize             # <<<<<<<<<<<<<<
 *     else:
 *         shape = view.shape[dim]
*/
    __pyx_v_stride = __pyx_v_itemsize;

    /* "View.MemoryView":903
 *     cdef char *resultp
 * 
 *     if view.ndim == 0:             # <<<<<<<<<<<<<<
 *         shape = view.len // itemsize
 *         stride = itemsize
*/
    goto __pyx_L3;
  }

  /* "View.MemoryView":907
 *         stride = itemsize
 *     else:
 *         shape = view.shape[dim]             # <<<<<<<<<<<<<<
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:
*/
  /*else*/ {
    __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);

    /* "View.MemoryView":908
 *     else:
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]             # <<<<<<<<<<<<<<
 *         if view.suboffsets != NULL:
 *             suboffset = view.suboffsets[dim]
*/
    __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);

    /* "View.MemoryView":909
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             suboffset = view.suboffsets[dim]
 * 
*/
    __pyx_t_2 = (__pyx_v_view->suboffsets != NULL);
    if (__pyx_t_2) {

      /* "View.MemoryView":910
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:
 *             suboffset = view.suboffsets[dim]             # <<<<<<<<<<<<<<
 * 
 *     if index < 0:
*/
      __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);

      /* "View.MemoryView":909
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             suboffset = view.suboffsets[dim]
 * 
*/
    }
  }
  __pyx_L3:;

  /* "View.MemoryView":912
 *             suboffset = view.suboffsets[dim]
 * 
 *     if index < 0:             # <<<<<<<<<<<<<<
 *         index += view.shape[dim]
 *         if index < 0:
*/
  __pyx_t_2 = (__pyx_v_index < 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":913
 * 
 *     if index < 0:
 *         index += view.shape[dim]             # <<<<<<<<<<<<<<
 *         if index < 0:
 *             raise IndexError, f"Out of bounds on buffer access (axis {dim})"
*/
    __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));

    /* "View.MemoryView":914
 *     if index < 0:
 *         index += view.shape[dim]
 *         if index < 0:             # <<<<<<<<<<<<<<
 *             raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
*/
    __pyx_t_2 = (__pyx_v_index < 0);
    if (unlikely(__pyx_t_2)) {

      /* "View.MemoryView":915
 *         index += view.shape[dim]
 *         if index < 0:
 *             raise IndexError, f"Out of bounds on buffer access (axis {dim})"             # <<<<<<<<<<<<<<
 * 
 *     if index >= shape:
*/
      __pyx_t_3 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 915, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u_Out_of_bounds_on_buffer_access_a;
      __pyx_t_4[1] = __pyx_t_3;
      __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u__5;
      __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_4, 3, 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 1, 127);
      if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 915, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_IndexError))), __pyx_t_5, 0, 0);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      __PYX_ERR(1, 915, __pyx_L1_error)

      /* "View.MemoryView":914
 *     if index < 0:
 *         index += view.shape[dim]
 *         if index < 0:             # <<<<<<<<<<<<<<
 *             raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
*/
    }

    /* "View.MemoryView":912
 *             suboffset = view.suboffsets[dim]
 * 
 *     if index < 0:             # <<<<<<<<<<<<<<
 *         index += view.shape[dim]
 *         if index < 0:
*/
  }

  /* "View.MemoryView":917
 *             raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
 *     if index >= shape:             # <<<<<<<<<<<<<<
 *         raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
*/
  __pyx_t_2 = (__pyx_v_index >= __pyx_v_shape);
  if (unlikely(__pyx_t_2)) {

    /* "View.MemoryView":918
 * 
 *     if index >= shape:
 *         raise IndexError, f"Out of bounds on buffer access (axis {dim})"             # <<<<<<<<<<<<<<
 * 
 *     resultp = bufp + index * stride
*/
    __pyx_t_5 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 918, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u_Out_of_bounds_on_buffer_access_a;
    __pyx_t_4[1] = __pyx_t_5;
    __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u__5;
    __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 3, 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5) + 1, 127);
    if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 918, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_IndexError))), __pyx_t_3, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 918, __pyx_L1_error)

    /* "View.MemoryView":917
 *             raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
 *     if index >= shape:             # <<<<<<<<<<<<<<
 *         raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
*/
  }

  /* "View.MemoryView":920
 *         raise IndexError, f"Out of bounds on buffer access (axis {dim})"
 * 
 *     resultp = bufp + index * stride             # <<<<<<<<<<<<<<
 *     if suboffset >= 0:
 *         resultp = (<char **> resultp)[0] + suboffset
*/
  __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));

  /* "View.MemoryView":921
 * 
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
*/
  __pyx_t_2 = (__pyx_v_suboffset >= 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":922
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:
 *         resultp = (<char **> resultp)[0] + suboffset             # <<<<<<<<<<<<<<
 * 
 *     return resultp
*/
    __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);

    /* "View.MemoryView":921
 * 
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
*/
  }

  /* "View.MemoryView":924
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
 *     return resultp             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_resultp;
  goto __pyx_L0;

  /* "View.MemoryView":896
 * 
 * 
 * @cname('__pyx_pybuffer_index')             # <<<<<<<<<<<<<<
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 *                           Py_ssize_t dim) except NULL:
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":929
 * 
 * 
 * @cname('__pyx_memslice_transpose')             # <<<<<<<<<<<<<<
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil:
 *     cdef int ndim = memslice.memview.view.ndim
*/

/* Reverse the axis order of a memoryview slice in place.
 *
 * For each symmetric axis pair (i, ndim-1-i) it swaps shape[i]<->shape[j]
 * and strides[i]<->strides[j].  Dimensions with suboffsets[dim] >= 0 are
 * indirect (PEP 3118 pointer-chasing) and cannot be transposed this way, so
 * the function raises ValueError for them.  Runs without the GIL; on error
 * it acquires the GIL only long enough to record a Python traceback, then
 * returns -1 (the `except -1 nogil` contract from the Cython source shown
 * in the interleaved comments). */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
  int __pyx_v_ndim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  int __pyx_v_i;
  int __pyx_v_j;
  int __pyx_r;
  int __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  long __pyx_t_3;
  long __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "View.MemoryView":931
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil:
 *     cdef int ndim = memslice.memview.view.ndim             # <<<<<<<<<<<<<<
 * 
 *     cdef Py_ssize_t *shape = memslice.shape
 */
  __pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
  __pyx_v_ndim = __pyx_t_1;

  /* "View.MemoryView":933
 *     cdef int ndim = memslice.memview.view.ndim
 * 
 *     cdef Py_ssize_t *shape = memslice.shape             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t *strides = memslice.strides
 * 
 */
  __pyx_t_2 = __pyx_v_memslice->shape;
  __pyx_v_shape = __pyx_t_2;

  /* "View.MemoryView":934
 * 
 *     cdef Py_ssize_t *shape = memslice.shape
 *     cdef Py_ssize_t *strides = memslice.strides             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_v_memslice->strides;
  __pyx_v_strides = __pyx_t_2;

  /* "View.MemoryView":938
 * 
 *     cdef int i, j
 *     for i in range(ndim // 2):             # <<<<<<<<<<<<<<
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 */
  /* Only the first ndim//2 axes need visiting: each iteration swaps axis i
   * with its mirror j = ndim-1-i, so the upper half is handled implicitly. */
  __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2, 1);
  __pyx_t_4 = __pyx_t_3;
  for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
    __pyx_v_i = __pyx_t_1;

    /* "View.MemoryView":939
 *     cdef int i, j
 *     for i in range(ndim // 2):
 *         j = ndim - 1 - i             # <<<<<<<<<<<<<<
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]
 */
    __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);

    /* "View.MemoryView":940
 *     for i in range(ndim // 2):
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]             # <<<<<<<<<<<<<<
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 */
    /* Tuple-swap lowering: read both sides into temporaries before writing. */
    __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
    __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
    (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
    (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;

    /* "View.MemoryView":941
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]             # <<<<<<<<<<<<<<
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 */
    __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
    __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
    (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
    (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;

    /* "View.MemoryView":943
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 *             _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 */
    /* Short-circuit `or`: if the first operand is true we jump straight to
     * the combined result; suboffset >= 0 marks an indirect dimension. */
    __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0);
    if (!__pyx_t_8) {
    } else {
      __pyx_t_7 = __pyx_t_8;
      goto __pyx_L6_bool_binop_done;
    }
    __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0);
    __pyx_t_7 = __pyx_t_8;
    __pyx_L6_bool_binop_done:;
    if (__pyx_t_7) {

      /* "View.MemoryView":944
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 *             _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")             # <<<<<<<<<<<<<<
 * 
 *     return 0
 */
      __pyx_t_9 = __pyx_memoryview_err(PyExc_ValueError, __pyx_mstate_global->__pyx_kp_u_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 944, __pyx_L1_error)

      /* "View.MemoryView":943
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 *             _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 */
    }
  }

  /* "View.MemoryView":946
 *             _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 *     return 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = 0;
  goto __pyx_L0;

  /* "View.MemoryView":929
 * 
 * 
 * @cname('__pyx_memslice_transpose')             # <<<<<<<<<<<<<<
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil:
 *     cdef int ndim = memslice.memview.view.ndim
 */

  /* function exit code */
  /* Error exit: this function runs nogil, so the GIL must be re-acquired
   * before recording the traceback, and released again before returning. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":964
 *     cdef int (*to_dtype_func)(char *, object) except 0
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
 * 
*/

/* Python wrapper */
/* CPython-level wrapper for _memoryviewslice.__dealloc__: casts `self` to the
 * concrete struct type and forwards to the implementation function below. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * presumably __Pyx_KwValues_VARARGS is a macro that does not evaluate its
   * arguments (dealloc takes none) -- confirm against the utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of _memoryviewslice.__dealloc__: releases the reference
 * this object holds on its source slice (the second argument `1` matches the
 * call in the Cython source shown below). */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {

  /* "View.MemoryView":965
 * 
 *     def __dealloc__(self):
 *         __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)             # <<<<<<<<<<<<<<
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 */
  __PYX_XCLEAR_MEMVIEW((&__pyx_v_self->from_slice), 1);

  /* "View.MemoryView":964
 *     cdef int (*to_dtype_func)(char *, object) except 0
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
 * 
 */

  /* function exit code */
}

/* "View.MemoryView":967
 *         __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
 * 
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)
*/

/* Convert the raw item at `itemp` to a Python object.
 *
 * Uses the slice's specialized converter (to_object_func) when one was
 * installed; otherwise falls back to the generic base-class conversion.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);

  /* "View.MemoryView":968
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 *             return self.to_object_func(itemp)
 *         else:
 */
  __pyx_t_1 = (__pyx_v_self->to_object_func != NULL);
  if (__pyx_t_1) {

    /* "View.MemoryView":969
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)             # <<<<<<<<<<<<<<
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)
 */
    __Pyx_XDECREF(__pyx_r);
    /* to_object_func signals failure by returning NULL. */
    __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;

    /* "View.MemoryView":968
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 *             return self.to_object_func(itemp)
 *         else:
 */
  }

  /* "View.MemoryView":971
 *             return self.to_object_func(itemp)
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)             # <<<<<<<<<<<<<<
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    /* Static (non-virtual) call of the base-class implementation. */
    __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 971, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }

  /* "View.MemoryView":967
 *         __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
 * 
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":973
 *             return memoryview.convert_item_to_object(self, itemp)
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)
*/

/* Store Python object `value` into the raw item slot at `itemp`.
 *
 * Uses the slice's specialized writer (to_dtype_func) when installed,
 * otherwise delegates to the generic base-class implementation.  Returns
 * None on success, NULL with an exception set on failure. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("assign_item_from_object", 0);

  /* "View.MemoryView":974
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 *             self.to_dtype_func(itemp, value)
 *         else:
 */
  __pyx_t_1 = (__pyx_v_self->to_dtype_func != NULL);
  if (__pyx_t_1) {

    /* "View.MemoryView":975
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)             # <<<<<<<<<<<<<<
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)
 */
    /* to_dtype_func is declared `except 0`, so 0 means an exception is set. */
    __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 975, __pyx_L1_error)

    /* "View.MemoryView":974
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 *             self.to_dtype_func(itemp, value)
 *         else:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":977
 *             self.to_dtype_func(itemp, value)
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)             # <<<<<<<<<<<<<<
 * 
 *     cdef _get_base(self):
 */
  /*else*/ {
    /* Base-class call returns an object whose value is discarded here. */
    __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 977, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  }
  __pyx_L3:;

  /* "View.MemoryView":973
 *             return memoryview.convert_item_to_object(self, itemp)
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":979
 *             memoryview.assign_item_from_object(self, itemp, value)
 * 
 *     cdef _get_base(self):             # <<<<<<<<<<<<<<
 *         return self.from_object
 * 
*/

/* Return the object this sliced memoryview exposes as its base
 * (self.from_object).  Always succeeds; returns a new reference. */
static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("_get_base", 0);

  /* "View.MemoryView":980
 * 
 *     cdef _get_base(self):
 *         return self.from_object             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->from_object);
  __pyx_r = __pyx_v_self->from_object;
  goto __pyx_L0;

  /* "View.MemoryView":979
 *             memoryview.assign_item_from_object(self, itemp, value)
 * 
 *     cdef _get_base(self):             # <<<<<<<<<<<<<<
 *         return self.from_object
 * 
 */

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython-level wrapper for _memoryviewslice.__reduce_cython__():
 * validates that no positional or keyword arguments were passed (handling
 * both METH_FASTCALL and tuple calling conventions), then forwards `self`
 * to the implementation function. */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Tuple convention: recover the positional-argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _memoryviewslice.__reduce_cython__: pickling is not
 * supported for this type, so it unconditionally raises TypeError. */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
*/

/* Python wrapper */
/* CPython-level wrapper for _memoryviewslice.__setstate_cython__(self, __pyx_state):
 * parses exactly one positional-or-keyword argument ("__pyx_state") from
 * either the METH_FASTCALL or the tuple calling convention, then forwards
 * to the implementation function.  Returns NULL with an exception set when
 * argument parsing fails. */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Tuple convention: recover the positional-argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`, which compares the
     * 0/1 result of unlikely() with 0 and can never be true, silently
     * dropping the error check.  The comparison belongs inside unlikely()
     * (see the sibling __reduce_cython__ wrapper above). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then merge
       * keyword values and verify all required arguments were supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _memoryviewslice.__setstate_cython__: unpickling is not
 * supported for this type, so it unconditionally raises TypeError
 * (the state argument is intentionally unused). */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":999
 *     pass  # ignore failure, it's a minor issue
 * 
 * @cname('__pyx_memoryview_fromslice')             # <<<<<<<<<<<<<<
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,
 *                           int ndim,
*/

static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_v_length = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  __Pyx_TypeInfo const *__pyx_t_4;
  Py_buffer __pyx_t_5;
  Py_ssize_t *__pyx_t_6;
  Py_ssize_t *__pyx_t_7;
  Py_ssize_t *__pyx_t_8;
  Py_ssize_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_fromslice", 0);

  /* "View.MemoryView":1008
 *     cdef _memoryviewslice result
 * 
 *     if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
 *         return None
 * 
*/
  __pyx_t_1 = (((PyObject *)__pyx_v_memviewslice.memview) == Py_None);
  if (__pyx_t_1) {

    /* "View.MemoryView":1009
 * 
 *     if <PyObject *> memviewslice.memview == Py_None:
 *         return None             # <<<<<<<<<<<<<<
 * 
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
    goto __pyx_L0;

    /* "View.MemoryView":1008
 *     cdef _memoryviewslice result
 * 
 *     if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
 *         return None
 * 
*/
  }

  /* "View.MemoryView":1014
 * 
 * 
 *     result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object)             # <<<<<<<<<<<<<<
 * 
 *     result.from_slice = memviewslice
*/
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1014, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None) != (0)) __PYX_ERR(1, 1014, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_0);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_mstate_global->__pyx_int_0) != (0)) __PYX_ERR(1, 1014, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_2);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2) != (0)) __PYX_ERR(1, 1014, __pyx_L1_error);
  __pyx_t_2 = 0;
  __pyx_t_2 = ((PyObject *)__pyx_tp_new__memoryviewslice(((PyTypeObject *)__pyx_mstate_global->__pyx_memoryviewslice_type), __pyx_t_3, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "View.MemoryView":1016
 *     result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object)
 * 
 *     result.from_slice = memviewslice             # <<<<<<<<<<<<<<
 *     __PYX_INC_MEMVIEW(&memviewslice, 1)
 * 
*/
  __pyx_v_result->from_slice = __pyx_v_memviewslice;

  /* "View.MemoryView":1017
 * 
 *     result.from_slice = memviewslice
 *     __PYX_INC_MEMVIEW(&memviewslice, 1)             # <<<<<<<<<<<<<<
 * 
 *     result.from_object = (<memoryview> memviewslice.memview)._get_base()
*/
  __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);

  /* "View.MemoryView":1019
 *     __PYX_INC_MEMVIEW(&memviewslice, 1)
 * 
 *     result.from_object = (<memoryview> memviewslice.memview)._get_base()             # <<<<<<<<<<<<<<
 *     result.typeinfo = memviewslice.memview.typeinfo
 * 
*/
  __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->__pyx_vtab)->_get_base(((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_result->from_object);
  __Pyx_DECREF(__pyx_v_result->from_object);
  __pyx_v_result->from_object = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "View.MemoryView":1020
 * 
 *     result.from_object = (<memoryview> memviewslice.memview)._get_base()
 *     result.typeinfo = memviewslice.memview.typeinfo             # <<<<<<<<<<<<<<
 * 
 *     result.view = memviewslice.memview.view
*/
  __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
  __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;

  /* "View.MemoryView":1022
 *     result.typeinfo = memviewslice.memview.typeinfo
 * 
 *     result.view = memviewslice.memview.view             # <<<<<<<<<<<<<<
 *     result.view.buf = <void *> memviewslice.data
 *     result.view.ndim = ndim
*/
  __pyx_t_5 = __pyx_v_memviewslice.memview->view;
  __pyx_v_result->__pyx_base.view = __pyx_t_5;

  /* "View.MemoryView":1023
 * 
 *     result.view = memviewslice.memview.view
 *     result.view.buf = <void *> memviewslice.data             # <<<<<<<<<<<<<<
 *     result.view.ndim = ndim
 *     (<__pyx_buffer *> &result.view).obj = Py_None
*/
  __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);

  /* "View.MemoryView":1024
 *     result.view = memviewslice.memview.view
 *     result.view.buf = <void *> memviewslice.data
 *     result.view.ndim = ndim             # <<<<<<<<<<<<<<
 *     (<__pyx_buffer *> &result.view).obj = Py_None
 *     Py_INCREF(Py_None)
*/
  __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;

  /* "View.MemoryView":1025
 *     result.view.buf = <void *> memviewslice.data
 *     result.view.ndim = ndim
 *     (<__pyx_buffer *> &result.view).obj = Py_None             # <<<<<<<<<<<<<<
 *     Py_INCREF(Py_None)
 * 
*/
  ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;

  /* "View.MemoryView":1026
 *     result.view.ndim = ndim
 *     (<__pyx_buffer *> &result.view).obj = Py_None
 *     Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 * 
 *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
  Py_INCREF(Py_None);

  /* "View.MemoryView":1028
 *     Py_INCREF(Py_None)
 * 
 *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:             # <<<<<<<<<<<<<<
 *         result.flags = PyBUF_RECORDS
 *     else:
*/
  __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":1029
 * 
 *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
 *         result.flags = PyBUF_RECORDS             # <<<<<<<<<<<<<<
 *     else:
 *         result.flags = PyBUF_RECORDS_RO
*/
    __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;

    /* "View.MemoryView":1028
 *     Py_INCREF(Py_None)
 * 
 *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:             # <<<<<<<<<<<<<<
 *         result.flags = PyBUF_RECORDS
 *     else:
*/
    goto __pyx_L4;
  }

  /* "View.MemoryView":1031
 *         result.flags = PyBUF_RECORDS
 *     else:
 *         result.flags = PyBUF_RECORDS_RO             # <<<<<<<<<<<<<<
 * 
 *     result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
  /*else*/ {
    __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
  }
  __pyx_L4:;

  /* "View.MemoryView":1033
 *         result.flags = PyBUF_RECORDS_RO
 * 
 *     result.view.shape = <Py_ssize_t *> result.from_slice.shape             # <<<<<<<<<<<<<<
 *     result.view.strides = <Py_ssize_t *> result.from_slice.strides
 * 
*/
  __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);

  /* "View.MemoryView":1034
 * 
 *     result.view.shape = <Py_ssize_t *> result.from_slice.shape
 *     result.view.strides = <Py_ssize_t *> result.from_slice.strides             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);

  /* "View.MemoryView":1037
 * 
 * 
 *     result.view.suboffsets = NULL             # <<<<<<<<<<<<<<
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:
*/
  __pyx_v_result->__pyx_base.view.suboffsets = NULL;

  /* "View.MemoryView":1038
 * 
 *     result.view.suboffsets = NULL
 *     for suboffset in result.from_slice.suboffsets[:ndim]:             # <<<<<<<<<<<<<<
 *         if suboffset >= 0:
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
  __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
  for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
    __pyx_t_6 = __pyx_t_8;
    __pyx_v_suboffset = (__pyx_t_6[0]);

    /* "View.MemoryView":1039
 *     result.view.suboffsets = NULL
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:             # <<<<<<<<<<<<<<
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *             break
*/
    __pyx_t_1 = (__pyx_v_suboffset >= 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":1040
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets             # <<<<<<<<<<<<<<
 *             break
 * 
*/
      __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);

      /* "View.MemoryView":1041
 *         if suboffset >= 0:
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *             break             # <<<<<<<<<<<<<<
 * 
 *     result.view.len = result.view.itemsize
*/
      goto __pyx_L6_break;

      /* "View.MemoryView":1039
 *     result.view.suboffsets = NULL
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:             # <<<<<<<<<<<<<<
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *             break
*/
    }
  }
  __pyx_L6_break:;

  /* "View.MemoryView":1043
 *             break
 * 
 *     result.view.len = result.view.itemsize             # <<<<<<<<<<<<<<
 *     for length in result.view.shape[:ndim]:
 *         result.view.len *= length
*/
  __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
  __pyx_v_result->__pyx_base.view.len = __pyx_t_9;

  /* "View.MemoryView":1044
 * 
 *     result.view.len = result.view.itemsize
 *     for length in result.view.shape[:ndim]:             # <<<<<<<<<<<<<<
 *         result.view.len *= length
 * 
*/
  __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
  for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
    __pyx_t_6 = __pyx_t_8;
    __pyx_t_2 = PyLong_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
    __pyx_t_2 = 0;

    /* "View.MemoryView":1045
 *     result.view.len = result.view.itemsize
 *     for length in result.view.shape[:ndim]:
 *         result.view.len *= length             # <<<<<<<<<<<<<<
 * 
 *     result.to_object_func = to_object_func
*/
    __pyx_t_2 = PyLong_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1045, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_result->__pyx_base.view.len = __pyx_t_9;
  }

  /* "View.MemoryView":1047
 *         result.view.len *= length
 * 
 *     result.to_object_func = to_object_func             # <<<<<<<<<<<<<<
 *     result.to_dtype_func = to_dtype_func
 * 
*/
  __pyx_v_result->to_object_func = __pyx_v_to_object_func;

  /* "View.MemoryView":1048
 * 
 *     result.to_object_func = to_object_func
 *     result.to_dtype_func = to_dtype_func             # <<<<<<<<<<<<<<
 * 
 *     return result
*/
  __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;

  /* "View.MemoryView":1050
 *     result.to_dtype_func = to_dtype_func
 * 
 *     return result             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_result);
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;

  /* "View.MemoryView":999
 *     pass  # ignore failure, it's a minor issue
 * 
 * @cname('__pyx_memoryview_fromslice')             # <<<<<<<<<<<<<<
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,
 *                           int ndim,
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_length);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":1052
 *     return result
 * 
 * @cname('__pyx_memoryview_get_slice_from_memoryview')             # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview,
 *                                                    __Pyx_memviewslice *mslice) except NULL:
*/

/* get_slice_from_memview: return a pointer to the C-level slice describing
 * `memview`.  If `memview` is actually a _memoryviewslice wrapper, the
 * embedded `from_slice` is returned directly; otherwise the view's geometry
 * is flattened into the caller-provided scratch `*mslice` via slice_copy and
 * that pointer is returned.  Declared `except NULL` in the Cython source,
 * hence the __pyx_L1_error path that returns NULL. */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
  struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
  __Pyx_memviewslice *__pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_slice_from_memview", 0);

  /* "View.MemoryView":1056
 *                                                    __Pyx_memviewslice *mslice) except NULL:
 *     cdef _memoryviewslice obj
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         obj = memview
 *         return &obj.from_slice
 */
  /* Subtype check: is this the richer _memoryviewslice wrapper? */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_mstate_global->__pyx_memoryviewslice_type); 
  if (__pyx_t_1) {

    /* "View.MemoryView":1057
 *     cdef _memoryviewslice obj
 *     if isinstance(memview, _memoryviewslice):
 *         obj = memview             # <<<<<<<<<<<<<<
 *         return &obj.from_slice
 *     else:
 */
    __pyx_t_2 = ((PyObject *)__pyx_v_memview);
    __Pyx_INCREF(__pyx_t_2);
    /* Runtime type re-check for the typed assignment (raises on mismatch). */
    if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_mstate_global->__pyx_memoryviewslice_type))))) __PYX_ERR(1, 1057, __pyx_L1_error)
    __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
    __pyx_t_2 = 0;

    /* "View.MemoryView":1058
 *     if isinstance(memview, _memoryviewslice):
 *         obj = memview
 *         return &obj.from_slice             # <<<<<<<<<<<<<<
 *     else:
 *         slice_copy(memview, mslice)
 */
    /* Borrow the slice already stored inside the wrapper object. */
    __pyx_r = (&__pyx_v_obj->from_slice);
    goto __pyx_L0;

    /* "View.MemoryView":1056
 *                                                    __Pyx_memviewslice *mslice) except NULL:
 *     cdef _memoryviewslice obj
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         obj = memview
 *         return &obj.from_slice
 */
  }

  /* "View.MemoryView":1060
 *         return &obj.from_slice
 *     else:
 *         slice_copy(memview, mslice)             # <<<<<<<<<<<<<<
 *         return mslice
 * 
 */
  /*else*/ {
    /* Plain memoryview: materialize its geometry into the scratch slice. */
    __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);

    /* "View.MemoryView":1061
 *     else:
 *         slice_copy(memview, mslice)
 *         return mslice             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_slice_copy')
 */
    __pyx_r = __pyx_v_mslice;
    goto __pyx_L0;
  }

  /* "View.MemoryView":1052
 *     return result
 * 
 * @cname('__pyx_memoryview_get_slice_from_memoryview')             # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview,
 *                                                    __Pyx_memviewslice *mslice) except NULL:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":1063
 *         return mslice
 * 
 * @cname('__pyx_memoryview_slice_copy')             # <<<<<<<<<<<<<<
 * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst) noexcept:
 *     cdef int dim
*/

static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
  int __pyx_v_dim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  Py_ssize_t *__pyx_v_suboffsets;
  Py_ssize_t *__pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_t_6;

  /* "View.MemoryView":1068
 *     cdef (Py_ssize_t*) shape, strides, suboffsets
 * 
 *     shape = memview.view.shape             # <<<<<<<<<<<<<<
 *     strides = memview.view.strides
 *     suboffsets = memview.view.suboffsets
*/
  __pyx_t_1 = __pyx_v_memview->view.shape;
  __pyx_v_shape = __pyx_t_1;

  /* "View.MemoryView":1069
 * 
 *     shape = memview.view.shape
 *     strides = memview.view.strides             # <<<<<<<<<<<<<<
 *     suboffsets = memview.view.suboffsets
 * 
*/
  __pyx_t_1 = __pyx_v_memview->view.strides;
  __pyx_v_strides = __pyx_t_1;

  /* "View.MemoryView":1070
 *     shape = memview.view.shape
 *     strides = memview.view.strides
 *     suboffsets = memview.view.suboffsets             # <<<<<<<<<<<<<<
 * 
 *     dst.memview = <__pyx_memoryview *> memview
*/
  __pyx_t_1 = __pyx_v_memview->view.suboffsets;
  __pyx_v_suboffsets = __pyx_t_1;

  /* "View.MemoryView":1072
 *     suboffsets = memview.view.suboffsets
 * 
 *     dst.memview = <__pyx_memoryview *> memview             # <<<<<<<<<<<<<<
 *     dst.data = <char *> memview.view.buf
 * 
*/
  __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);

  /* "View.MemoryView":1073
 * 
 *     dst.memview = <__pyx_memoryview *> memview
 *     dst.data = <char *> memview.view.buf             # <<<<<<<<<<<<<<
 * 
 *     for dim in range(memview.view.ndim):
*/
  __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);

  /* "View.MemoryView":1075
 *     dst.data = <char *> memview.view.buf
 * 
 *     for dim in range(memview.view.ndim):             # <<<<<<<<<<<<<<
 *         dst.shape[dim] = shape[dim]
 *         dst.strides[dim] = strides[dim]
*/
  __pyx_t_2 = __pyx_v_memview->view.ndim;
  __pyx_t_3 = __pyx_t_2;
  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
    __pyx_v_dim = __pyx_t_4;

    /* "View.MemoryView":1076
 * 
 *     for dim in range(memview.view.ndim):
 *         dst.shape[dim] = shape[dim]             # <<<<<<<<<<<<<<
 *         dst.strides[dim] = strides[dim]
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
    (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);

    /* "View.MemoryView":1077
 *     for dim in range(memview.view.ndim):
 *         dst.shape[dim] = shape[dim]
 *         dst.strides[dim] = strides[dim]             # <<<<<<<<<<<<<<
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
 * 
*/
    (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);

    /* "View.MemoryView":1078
 *         dst.shape[dim] = shape[dim]
 *         dst.strides[dim] = strides[dim]
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_copy_object')
*/
    __pyx_t_6 = (__pyx_v_suboffsets != 0);
    if (__pyx_t_6) {
      __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
    } else {
      __pyx_t_5 = -1L;
    }
    (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
  }

  /* "View.MemoryView":1063
 *         return mslice
 * 
 * @cname('__pyx_memoryview_slice_copy')             # <<<<<<<<<<<<<<
 * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst) noexcept:
 *     cdef int dim
*/

  /* function exit code */
}

/* "View.MemoryView":1080
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
 * 
 * @cname('__pyx_memoryview_copy_object')             # <<<<<<<<<<<<<<
 * cdef memoryview_copy(memoryview memview):
 *     "Create a new memoryview object"
*/

/* memoryview_copy: create a new Python memoryview object cloning `memview`.
 * Flattens the view into a stack-local __Pyx_memviewslice, then delegates to
 * memoryview_copy_from_slice.  Returns a new reference, or 0 (NULL) with a
 * Python exception set on failure. */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
  __Pyx_memviewslice __pyx_v_memviewslice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_copy", 0);

  /* "View.MemoryView":1084
 *     "Create a new memoryview object"
 *     cdef __Pyx_memviewslice memviewslice
 *     slice_copy(memview, &memviewslice)             # <<<<<<<<<<<<<<
 *     return memoryview_copy_from_slice(memview, &memviewslice)
 * 
 */
  /* Fill the stack slice with the view's geometry (cannot fail). */
  __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));

  /* "View.MemoryView":1085
 *     cdef __Pyx_memviewslice memviewslice
 *     slice_copy(memview, &memviewslice)
 *     return memoryview_copy_from_slice(memview, &memviewslice)             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1085, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":1080
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
 * 
 * @cname('__pyx_memoryview_copy_object')             # <<<<<<<<<<<<<<
 * cdef memoryview_copy(memoryview memview):
 *     "Create a new memoryview object"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":1087
 *     return memoryview_copy_from_slice(memview, &memviewslice)
 * 
 * @cname('__pyx_memoryview_copy_object_from_slice')             # <<<<<<<<<<<<<<
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice):
 *     """
*/

/* memoryview_copy_from_slice: build a new memoryview object from a C-level
 * slice.  If the source is a _memoryviewslice wrapper, its dtype conversion
 * callbacks (to_object_func / to_dtype_func) are propagated to the new
 * object; otherwise NULL callbacks are used.  Returns a new reference, or 0
 * (NULL) with a Python exception set on failure. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
  PyObject *(*__pyx_v_to_object_func)(char *);
  int (*__pyx_v_to_dtype_func)(char *, PyObject *);
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *(*__pyx_t_2)(char *);
  int (*__pyx_t_3)(char *, PyObject *);
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);

  /* "View.MemoryView":1095
 *     cdef int (*to_dtype_func)(char *, object) except 0
 * 
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         to_object_func = (<_memoryviewslice> memview).to_object_func
 *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
  /* Only the _memoryviewslice subtype carries dtype conversion callbacks. */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_mstate_global->__pyx_memoryviewslice_type); 
  if (__pyx_t_1) {

    /* "View.MemoryView":1096
 * 
 *     if isinstance(memview, _memoryviewslice):
 *         to_object_func = (<_memoryviewslice> memview).to_object_func             # <<<<<<<<<<<<<<
 *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 *     else:
 */
    __pyx_t_2 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
    __pyx_v_to_object_func = __pyx_t_2;

    /* "View.MemoryView":1097
 *     if isinstance(memview, _memoryviewslice):
 *         to_object_func = (<_memoryviewslice> memview).to_object_func
 *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func             # <<<<<<<<<<<<<<
 *     else:
 *         to_object_func = NULL
 */
    __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
    __pyx_v_to_dtype_func = __pyx_t_3;

    /* "View.MemoryView":1095
 *     cdef int (*to_dtype_func)(char *, object) except 0
 * 
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         to_object_func = (<_memoryviewslice> memview).to_object_func
 *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":1099
 *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 *     else:
 *         to_object_func = NULL             # <<<<<<<<<<<<<<
 *         to_dtype_func = NULL
 * 
 */
  /*else*/ {
    __pyx_v_to_object_func = NULL;

    /* "View.MemoryView":1100
 *     else:
 *         to_object_func = NULL
 *         to_dtype_func = NULL             # <<<<<<<<<<<<<<
 * 
 *     return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 */
    __pyx_v_to_dtype_func = NULL;
  }
  __pyx_L3:;

  /* "View.MemoryView":1102
 *         to_dtype_func = NULL
 * 
 *     return memoryview_fromslice(memviewslice[0], memview.view.ndim,             # <<<<<<<<<<<<<<
 *                                 to_object_func, to_dtype_func,
 *                                 memview.dtype_is_object)
 */
  __Pyx_XDECREF(__pyx_r);

  /* "View.MemoryView":1104
 *     return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 *                                 to_object_func, to_dtype_func,
 *                                 memview.dtype_is_object)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* The slice is passed by value; fromslice takes over ownership handling. */
  __pyx_t_4 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_r = __pyx_t_4;
  __pyx_t_4 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":1087
 *     return memoryview_copy_from_slice(memview, &memviewslice)
 * 
 * @cname('__pyx_memoryview_copy_object_from_slice')             # <<<<<<<<<<<<<<
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice):
 *     """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":1110
 * 
 * 
 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil:             # <<<<<<<<<<<<<<
 *     return -arg if arg < 0 else arg
 * 
*/

/* abs_py_ssize_t: absolute value of a Py_ssize_t.  `noexcept nogil` in the
 * Cython source — pure arithmetic, no Python API involved.
 * NOTE(review): like the original, negating PY_SSIZE_T_MIN would overflow;
 * callers only pass strides, where that value cannot occur. */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
  if (__pyx_v_arg < 0) {
    return -__pyx_v_arg;
  }
  return __pyx_v_arg;
}

/* "View.MemoryView":1113
 *     return -arg if arg < 0 else arg
 * 
 * @cname('__pyx_get_best_slice_order')             # <<<<<<<<<<<<<<
 * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) noexcept nogil:
 *     """
*/

/* get_best_order: guess whether the slice is laid out closer to C order or
 * Fortran order.  The stride of the last dimension with extent > 1 is the
 * "C stride"; the stride of the first such dimension is the "F stride".
 * If |c_stride| <= |f_stride| the innermost axis varies fastest and 'C' is
 * returned, otherwise 'F'.  Dimensions of extent <= 1 are skipped because
 * their strides are meaningless.  `noexcept nogil` in the Cython source. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  Py_ssize_t c_stride = 0;
  Py_ssize_t f_stride = 0;
  int dim;

  /* Scan backwards: last dimension with more than one element. */
  for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      c_stride = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* Scan forwards: first dimension with more than one element. */
  for (dim = 0; dim < __pyx_v_ndim; dim++) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      f_stride = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* Ties (including the all-singleton case, 0 vs 0) favor C order. */
  if (abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride)) {
    return 'C';
  }
  return 'F';
}

/* "View.MemoryView":1137
 *         return 'F'
 * 
 * @cython.cdivision(True)             # <<<<<<<<<<<<<<
 * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,
 *                                    char *dst_data, Py_ssize_t *dst_strides,
*/

static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
  Py_ssize_t __pyx_v_dst_extent;
  Py_ssize_t __pyx_v_src_stride;
  Py_ssize_t __pyx_v_dst_stride;
  int __pyx_t_1;
  int __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;

  /* "View.MemoryView":1145
 * 
 *     cdef Py_ssize_t i
 *     cdef Py_ssize_t src_extent = src_shape[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t dst_extent = dst_shape[0]
 *     cdef Py_ssize_t src_stride = src_strides[0]
*/
  __pyx_v_src_extent = (__pyx_v_src_shape[0]);

  /* "View.MemoryView":1146
 *     cdef Py_ssize_t i
 *     cdef Py_ssize_t src_extent = src_shape[0]
 *     cdef Py_ssize_t dst_extent = dst_shape[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t src_stride = src_strides[0]
 *     cdef Py_ssize_t dst_stride = dst_strides[0]
*/
  __pyx_v_dst_extent = (__pyx_v_dst_shape[0]);

  /* "View.MemoryView":1147
 *     cdef Py_ssize_t src_extent = src_shape[0]
 *     cdef Py_ssize_t dst_extent = dst_shape[0]
 *     cdef Py_ssize_t src_stride = src_strides[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t dst_stride = dst_strides[0]
 * 
*/
  __pyx_v_src_stride = (__pyx_v_src_strides[0]);

  /* "View.MemoryView":1148
 *     cdef Py_ssize_t dst_extent = dst_shape[0]
 *     cdef Py_ssize_t src_stride = src_strides[0]
 *     cdef Py_ssize_t dst_stride = dst_strides[0]             # <<<<<<<<<<<<<<
 * 
 *     if ndim == 1:
*/
  __pyx_v_dst_stride = (__pyx_v_dst_strides[0]);

  /* "View.MemoryView":1150
 *     cdef Py_ssize_t dst_stride = dst_strides[0]
 * 
 *     if ndim == 1:             # <<<<<<<<<<<<<<
 *         if (src_stride > 0 and dst_stride > 0 and
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
  __pyx_t_1 = (__pyx_v_ndim == 1);
  if (__pyx_t_1) {

    /* "View.MemoryView":1151
 * 
 *     if ndim == 1:
 *         if (src_stride > 0 and dst_stride > 0 and             # <<<<<<<<<<<<<<
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):
 *             memcpy(dst_data, src_data, itemsize * dst_extent)
*/
    __pyx_t_2 = (__pyx_v_src_stride > 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_2 = (__pyx_v_dst_stride > 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }

    /* "View.MemoryView":1152
 *     if ndim == 1:
 *         if (src_stride > 0 and dst_stride > 0 and
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):             # <<<<<<<<<<<<<<
 *             memcpy(dst_data, src_data, itemsize * dst_extent)
 *         else:
*/
    __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
    if (__pyx_t_2) {
      __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
    }
    __pyx_t_1 = __pyx_t_2;
    __pyx_L5_bool_binop_done:;

    /* "View.MemoryView":1151
 * 
 *     if ndim == 1:
 *         if (src_stride > 0 and dst_stride > 0 and             # <<<<<<<<<<<<<<
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):
 *             memcpy(dst_data, src_data, itemsize * dst_extent)
*/
    if (__pyx_t_1) {

      /* "View.MemoryView":1153
 *         if (src_stride > 0 and dst_stride > 0 and
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):
 *             memcpy(dst_data, src_data, itemsize * dst_extent)             # <<<<<<<<<<<<<<
 *         else:
 *             for i in range(dst_extent):
*/
      (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));

      /* "View.MemoryView":1151
 * 
 *     if ndim == 1:
 *         if (src_stride > 0 and dst_stride > 0 and             # <<<<<<<<<<<<<<
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):
 *             memcpy(dst_data, src_data, itemsize * dst_extent)
*/
      goto __pyx_L4;
    }

    /* "View.MemoryView":1155
 *             memcpy(dst_data, src_data, itemsize * dst_extent)
 *         else:
 *             for i in range(dst_extent):             # <<<<<<<<<<<<<<
 *                 memcpy(dst_data, src_data, itemsize)
 *                 src_data += src_stride
*/
    /*else*/ {
      __pyx_t_3 = __pyx_v_dst_extent;
      __pyx_t_4 = __pyx_t_3;
      for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
        __pyx_v_i = __pyx_t_5;

        /* "View.MemoryView":1156
 *         else:
 *             for i in range(dst_extent):
 *                 memcpy(dst_data, src_data, itemsize)             # <<<<<<<<<<<<<<
 *                 src_data += src_stride
 *                 dst_data += dst_stride
*/
        (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));

        /* "View.MemoryView":1157
 *             for i in range(dst_extent):
 *                 memcpy(dst_data, src_data, itemsize)
 *                 src_data += src_stride             # <<<<<<<<<<<<<<
 *                 dst_data += dst_stride
 *     else:
*/
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);

        /* "View.MemoryView":1158
 *                 memcpy(dst_data, src_data, itemsize)
 *                 src_data += src_stride
 *                 dst_data += dst_stride             # <<<<<<<<<<<<<<
 *     else:
 *         for i in range(dst_extent):
*/
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
    }
    __pyx_L4:;

    /* "View.MemoryView":1150
 *     cdef Py_ssize_t dst_stride = dst_strides[0]
 * 
 *     if ndim == 1:             # <<<<<<<<<<<<<<
 *         if (src_stride > 0 and dst_stride > 0 and
 *                 <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
    goto __pyx_L3;
  }

  /* "View.MemoryView":1160
 *                 dst_data += dst_stride
 *     else:
 *         for i in range(dst_extent):             # <<<<<<<<<<<<<<
 *             _copy_strided_to_strided(src_data, src_strides + 1,
 *                                      dst_data, dst_strides + 1,
*/
  /*else*/ {
    __pyx_t_3 = __pyx_v_dst_extent;
    __pyx_t_4 = __pyx_t_3;
    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
      __pyx_v_i = __pyx_t_5;

      /* "View.MemoryView":1161
 *     else:
 *         for i in range(dst_extent):
 *             _copy_strided_to_strided(src_data, src_strides + 1,             # <<<<<<<<<<<<<<
 *                                      dst_data, dst_strides + 1,
 *                                      src_shape + 1, dst_shape + 1,
*/
      _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);

      /* "View.MemoryView":1165
 *                                      src_shape + 1, dst_shape + 1,
 *                                      ndim - 1, itemsize)
 *             src_data += src_stride             # <<<<<<<<<<<<<<
 *             dst_data += dst_stride
 * 
*/
      __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);

      /* "View.MemoryView":1166
 *                                      ndim - 1, itemsize)
 *             src_data += src_stride
 *             dst_data += dst_stride             # <<<<<<<<<<<<<<
 * 
 * cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
      __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
    }
  }
  __pyx_L3:;

  /* "View.MemoryView":1137
 *         return 'F'
 * 
 * @cython.cdivision(True)             # <<<<<<<<<<<<<<
 * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,
 *                                    char *dst_data, Py_ssize_t *dst_strides,
*/

  /* function exit code */
}

/* "View.MemoryView":1168
 *             dst_data += dst_stride
 * 
 * cdef void copy_strided_to_strided(__Pyx_memviewslice *src,             # <<<<<<<<<<<<<<
 *                                   __Pyx_memviewslice *dst,
 *                                   int ndim, size_t itemsize) noexcept nogil:
*/

/* copy_strided_to_strided: copy every element of slice *src into slice *dst
 * (same shape, possibly different strides) by delegating to the recursive
 * per-dimension worker _copy_strided_to_strided.
 * Generated from "View.MemoryView":1168 (cdef ... noexcept nogil). */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  /* Unpack the two slice descriptors and forward everything to the worker. */
  char *src_base = __pyx_v_src->data;
  char *dst_base = __pyx_v_dst->data;
  _copy_strided_to_strided(src_base, __pyx_v_src->strides,
                           dst_base, __pyx_v_dst->strides,
                           __pyx_v_src->shape, __pyx_v_dst->shape,
                           __pyx_v_ndim, __pyx_v_itemsize);
}

/* "View.MemoryView":1174
 *                              src.shape, dst.shape, ndim, itemsize)
 * 
 * @cname('__pyx_memoryview_slice_get_size')             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil:
 *     "Return the size of the memory occupied by the slice in number of bytes"
*/

/* slice_get_size: return the number of bytes occupied by the slice, i.e. the
 * element size times the product of its first `ndim` extents.
 * Generated from "View.MemoryView":1174 (@cname '__pyx_memoryview_slice_get_size'). */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  /* Start from the per-element size recorded in the underlying buffer view. */
  Py_ssize_t total = __pyx_v_src->memview->view.itemsize;
  int dim;

  /* Multiply in each extent: total = itemsize * shape[0] * ... * shape[ndim-1]. */
  for (dim = 0; dim < __pyx_v_ndim; dim++) {
    total *= __pyx_v_src->shape[dim];
  }

  return total;
}

/* "View.MemoryView":1184
 *     return size
 * 
 * @cname('__pyx_fill_contig_strides_array')             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t fill_contig_strides_array(
 *                 Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
*/

/* fill_contig_strides_array: fill `strides` so that an array of the given
 * `shape` is contiguous in the requested `order`, starting from an initial
 * `stride` (normally the item size).
 *   - order == 'F': strides grow from the first dimension outward.
 *   - any other order (i.e. 'C'): strides grow from the last dimension outward.
 * Returns the final accumulated stride, i.e. stride * product(shape).
 * Generated from "View.MemoryView":1184 (@cname '__pyx_fill_contig_strides_array'). */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  /* Walk the dimensions forward for Fortran order, backward otherwise;
   * the per-dimension work is identical either way. */
  int step  = (__pyx_v_order == 'F') ? 1 : -1;
  int idx   = (__pyx_v_order == 'F') ? 0 : (__pyx_v_ndim - 1);

  for (; idx >= 0 && idx < __pyx_v_ndim; idx += step) {
    __pyx_v_strides[idx] = __pyx_v_stride;
    __pyx_v_stride *= __pyx_v_shape[idx];
  }

  return __pyx_v_stride;
}

/* "View.MemoryView":1205
 *     return stride
 * 
 * @cname('__pyx_memoryview_copy_data_to_temp')             # <<<<<<<<<<<<<<
 * cdef void *copy_data_to_temp(__Pyx_memviewslice *src,
 *                              __Pyx_memviewslice *tmpslice,
*/

/* copy_data_to_temp: malloc a scratch buffer large enough for *src, describe
 * it in *tmpslice as a contiguous copy (in the requested 'C'/'F' `order`), and
 * fill it with src's data — by a single memcpy when src is already contiguous
 * in that order, otherwise by a strided element-wise copy.
 * Returns the malloc'd buffer on success (ownership passes to the caller),
 * or NULL after raising a Python exception on failure.
 * NOTE(review): auto-generated by Cython from "View.MemoryView":1205; edit the
 * .pyx source rather than this file. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "View.MemoryView":1217
 *     cdef void *result
 * 
 *     cdef size_t itemsize = src.memview.view.itemsize             # <<<<<<<<<<<<<<
 *     cdef size_t size = slice_get_size(src, ndim)
 * 
 */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;

  /* "View.MemoryView":1218
 * 
 *     cdef size_t itemsize = src.memview.view.itemsize
 *     cdef size_t size = slice_get_size(src, ndim)             # <<<<<<<<<<<<<<
 * 
 *     result = malloc(size)
 */
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);

  /* "View.MemoryView":1220
 *     cdef size_t size = slice_get_size(src, ndim)
 * 
 *     result = malloc(size)             # <<<<<<<<<<<<<<
 *     if not result:
 *         _err_no_memory()
 */
  __pyx_v_result = malloc(__pyx_v_size);

  /* "View.MemoryView":1221
 * 
 *     result = malloc(size)
 *     if not result:             # <<<<<<<<<<<<<<
 *         _err_no_memory()
 * 
 */
  __pyx_t_2 = (!(__pyx_v_result != 0));
  if (__pyx_t_2) {

    /* "View.MemoryView":1222
 *     result = malloc(size)
 *     if not result:
 *         _err_no_memory()             # <<<<<<<<<<<<<<
 * 
 * 
 */
    /* _err_no_memory() raises MemoryError and always returns -1, so this
     * jumps straight to the error exit below. */
    __pyx_t_3 = __pyx_memoryview_err_no_memory(); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1222, __pyx_L1_error)

    /* "View.MemoryView":1221
 * 
 *     result = malloc(size)
 *     if not result:             # <<<<<<<<<<<<<<
 *         _err_no_memory()
 * 
 */
  }

  /* "View.MemoryView":1225
 * 
 * 
 *     tmpslice.data = <char *> result             # <<<<<<<<<<<<<<
 *     tmpslice.memview = src.memview
 *     for i in range(ndim):
 */
  /* Build the temporary slice descriptor: same owner/shape as src, new data
   * buffer, no indirect (suboffset) dimensions. */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);

  /* "View.MemoryView":1226
 * 
 *     tmpslice.data = <char *> result
 *     tmpslice.memview = src.memview             # <<<<<<<<<<<<<<
 *     for i in range(ndim):
 *         tmpslice.shape[i] = src.shape[i]
 */
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;

  /* "View.MemoryView":1227
 *     tmpslice.data = <char *> result
 *     tmpslice.memview = src.memview
 *     for i in range(ndim):             # <<<<<<<<<<<<<<
 *         tmpslice.shape[i] = src.shape[i]
 *         tmpslice.suboffsets[i] = -1
 */
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;

    /* "View.MemoryView":1228
 *     tmpslice.memview = src.memview
 *     for i in range(ndim):
 *         tmpslice.shape[i] = src.shape[i]             # <<<<<<<<<<<<<<
 *         tmpslice.suboffsets[i] = -1
 * 
 */
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);

    /* "View.MemoryView":1229
 *     for i in range(ndim):
 *         tmpslice.shape[i] = src.shape[i]
 *         tmpslice.suboffsets[i] = -1             # <<<<<<<<<<<<<<
 * 
 *     fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order)
 */
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
  }

  /* "View.MemoryView":1231
 *         tmpslice.suboffsets[i] = -1
 * 
 *     fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Compute contiguous strides for the scratch buffer in the chosen order. */
  (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));

  /* "View.MemoryView":1234
 * 
 * 
 *     for i in range(ndim):             # <<<<<<<<<<<<<<
 *         if tmpslice.shape[i] == 1:
 *             tmpslice.strides[i] = 0
 */
  /* Zero the stride of every extent-1 dimension in the temporary slice. */
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;

    /* "View.MemoryView":1235
 * 
 *     for i in range(ndim):
 *         if tmpslice.shape[i] == 1:             # <<<<<<<<<<<<<<
 *             tmpslice.strides[i] = 0
 * 
 */
    __pyx_t_2 = ((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1);
    if (__pyx_t_2) {

      /* "View.MemoryView":1236
 *     for i in range(ndim):
 *         if tmpslice.shape[i] == 1:
 *             tmpslice.strides[i] = 0             # <<<<<<<<<<<<<<
 * 
 *     if slice_is_contig(src[0], order, ndim):
 */
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;

      /* "View.MemoryView":1235
 * 
 *     for i in range(ndim):
 *         if tmpslice.shape[i] == 1:             # <<<<<<<<<<<<<<
 *             tmpslice.strides[i] = 0
 * 
 */
    }
  }

  /* "View.MemoryView":1238
 *             tmpslice.strides[i] = 0
 * 
 *     if slice_is_contig(src[0], order, ndim):             # <<<<<<<<<<<<<<
 *         memcpy(result, src.data, size)
 *     else:
 */
  /* Fast path: source already contiguous in `order` -> one raw memcpy.
   * Slow path: element-wise strided copy into the contiguous buffer. */
  __pyx_t_2 = __pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim);
  if (__pyx_t_2) {

    /* "View.MemoryView":1239
 * 
 *     if slice_is_contig(src[0], order, ndim):
 *         memcpy(result, src.data, size)             # <<<<<<<<<<<<<<
 *     else:
 *         copy_strided_to_strided(src, tmpslice, ndim, itemsize)
 */
    (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));

    /* "View.MemoryView":1238
 *             tmpslice.strides[i] = 0
 * 
 *     if slice_is_contig(src[0], order, ndim):             # <<<<<<<<<<<<<<
 *         memcpy(result, src.data, size)
 *     else:
 */
    goto __pyx_L9;
  }

  /* "View.MemoryView":1241
 *         memcpy(result, src.data, size)
 *     else:
 *         copy_strided_to_strided(src, tmpslice, ndim, itemsize)             # <<<<<<<<<<<<<<
 * 
 *     return result
 */
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;

  /* "View.MemoryView":1243
 *         copy_strided_to_strided(src, tmpslice, ndim, itemsize)
 * 
 *     return result             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* "View.MemoryView":1205
 *     return stride
 * 
 * @cname('__pyx_memoryview_copy_data_to_temp')             # <<<<<<<<<<<<<<
 * cdef void *copy_data_to_temp(__Pyx_memviewslice *src,
 *                              __Pyx_memviewslice *tmpslice,
 */

  /* function exit code */
  /* Error path: re-acquire the GIL to record the traceback, return NULL.
   * (This function normally runs without the GIL — `noexcept nogil` body.) */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1247
 * 
 * 
 * @cname('__pyx_memoryview_err_extents')             # <<<<<<<<<<<<<<
 * cdef int _err_extents(int i, Py_ssize_t extent1,
 *                              Py_ssize_t extent2) except -1 with gil:
*/

/* _err_extents: raise
 *   ValueError(f"got differing extents in dimension {i} (got {extent1} and {extent2})")
 * Acquires the GIL for the whole call (generated from a `with gil` block) and
 * always returns -1, the `except -1` error sentinel.
 * NOTE(review): auto-generated by Cython from "View.MemoryView":1247. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[7];
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_RefNannySetupContext("_err_extents", 0);

  /* "View.MemoryView":1250
 * cdef int _err_extents(int i, Py_ssize_t extent1,
 *                              Py_ssize_t extent2) except -1 with gil:
 *     raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})"             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_err_dim')
 */
  /* Format i / extent1 / extent2 as decimal unicode objects... */
  __pyx_t_1 = __Pyx_PyUnicode_From_int(__pyx_v_i, 0, ' ', 'd'); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_extent1, 0, ' ', 'd'); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_extent2, 0, ' ', 'd'); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  /* ...then join them with the interned literal fragments of the f-string.
   * The length argument pre-sums the fixed fragment lengths (35+6+5+1) plus
   * the three formatted numbers; 127 is the max-char hint (ASCII). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u_got_differing_extents_in_dimensi;
  __pyx_t_4[1] = __pyx_t_1;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_got;
  __pyx_t_4[3] = __pyx_t_2;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u_and;
  __pyx_t_4[5] = __pyx_t_3;
  __pyx_t_4[6] = __pyx_mstate_global->__pyx_kp_u__5;
  __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_4, 7, 35 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 6 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 5 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 1, 127);
  if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_ValueError))), __pyx_t_5, 0, 0);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __PYX_ERR(1, 1250, __pyx_L1_error)

  /* "View.MemoryView":1247
 * 
 * 
 * @cname('__pyx_memoryview_err_extents')             # <<<<<<<<<<<<<<
 * cdef int _err_extents(int i, Py_ssize_t extent1,
 *                              Py_ssize_t extent2) except -1 with gil:
 */

  /* function exit code */
  /* Sole exit: the raise above always lands here with -1. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  return __pyx_r;
}

/* "View.MemoryView":1252
 *     raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})"
 * 
 * @cname('__pyx_memoryview_err_dim')             # <<<<<<<<<<<<<<
 * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil:
 *     raise <object>error, msg % dim
*/

/* _err_dim: raise the given exception type `error` with message `msg % dim`.
 * Acquires the GIL for the whole call (generated from a `with gil` block) and
 * always returns -1, the `except -1` error sentinel.
 * NOTE(review): auto-generated by Cython from "View.MemoryView":1252. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, PyObject *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_msg);

  /* "View.MemoryView":1254
 * @cname('__pyx_memoryview_err_dim')
 * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil:
 *     raise <object>error, msg % dim             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_err')
 */
  /* Box dim, apply %-formatting to msg, then raise error with the result. */
  __pyx_t_1 = __Pyx_PyLong_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyUnicode_FormatSafe(__pyx_v_msg, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_Raise(((PyObject *)__pyx_v_error), __pyx_t_2, 0, 0);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __PYX_ERR(1, 1254, __pyx_L1_error)

  /* "View.MemoryView":1252
 *     raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})"
 * 
 * @cname('__pyx_memoryview_err_dim')             # <<<<<<<<<<<<<<
 * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil:
 *     raise <object>error, msg % dim
 */

  /* function exit code */
  /* Sole exit: the raise above always lands here with -1. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_msg);
  __Pyx_RefNannyFinishContext();
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  return __pyx_r;
}

/* "View.MemoryView":1256
 *     raise <object>error, msg % dim
 * 
 * @cname('__pyx_memoryview_err')             # <<<<<<<<<<<<<<
 * cdef int _err(PyObject *error, str msg) except -1 with gil:
 *     raise <object>error, msg
*/

/* _err: raise the given exception type `error` with message `msg` unchanged.
 * Acquires the GIL for the whole call (generated from a `with gil` block) and
 * always returns -1, the `except -1` error sentinel.
 * NOTE(review): auto-generated by Cython from "View.MemoryView":1256. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, PyObject *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_RefNannySetupContext("_err", 0);
  __Pyx_INCREF(__pyx_v_msg);

  /* "View.MemoryView":1258
 * @cname('__pyx_memoryview_err')
 * cdef int _err(PyObject *error, str msg) except -1 with gil:
 *     raise <object>error, msg             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_err_no_memory')
 */
  __Pyx_Raise(((PyObject *)__pyx_v_error), __pyx_v_msg, 0, 0);
  __PYX_ERR(1, 1258, __pyx_L1_error)

  /* "View.MemoryView":1256
 *     raise <object>error, msg % dim
 * 
 * @cname('__pyx_memoryview_err')             # <<<<<<<<<<<<<<
 * cdef int _err(PyObject *error, str msg) except -1 with gil:
 *     raise <object>error, msg
 */

  /* function exit code */
  /* Sole exit: the raise above always lands here with -1. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_msg);
  __Pyx_RefNannyFinishContext();
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  return __pyx_r;
}

/* "View.MemoryView":1260
 *     raise <object>error, msg
 * 
 * @cname('__pyx_memoryview_err_no_memory')             # <<<<<<<<<<<<<<
 * cdef int _err_no_memory() except -1 with gil:
 *     raise MemoryError
*/

/* _err_no_memory: raise MemoryError (via PyErr_NoMemory, which uses the
 * preallocated MemoryError instance). Acquires the GIL for the whole call
 * (generated from a `with gil` block) and always returns -1, the `except -1`
 * error sentinel.
 * NOTE(review): auto-generated by Cython from "View.MemoryView":1260. */
static int __pyx_memoryview_err_no_memory(void) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();

  /* "View.MemoryView":1262
 * @cname('__pyx_memoryview_err_no_memory')
 * cdef int _err_no_memory() except -1 with gil:
 *     raise MemoryError             # <<<<<<<<<<<<<<
 * 
 * 
 */
  PyErr_NoMemory(); __PYX_ERR(1, 1262, __pyx_L1_error)

  /* "View.MemoryView":1260
 *     raise <object>error, msg
 * 
 * @cname('__pyx_memoryview_err_no_memory')             # <<<<<<<<<<<<<<
 * cdef int _err_no_memory() except -1 with gil:
 *     raise MemoryError
 */

  /* function exit code */
  /* Sole exit: the raise above always lands here with -1. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView._err_no_memory", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  return __pyx_r;
}

/* "View.MemoryView":1265
 * 
 * 
 * @cname('__pyx_memoryview_copy_contents')             # <<<<<<<<<<<<<<
 * cdef int memoryview_copy_contents(__Pyx_memviewslice src,
 *                                   __Pyx_memviewslice dst,
*/

static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
  void *__pyx_v_tmpdata;
  size_t __pyx_v_itemsize;
  int __pyx_v_i;
  char __pyx_v_order;
  int __pyx_v_broadcasting;
  int __pyx_v_direct_copy;
  __Pyx_memviewslice __pyx_v_tmp;
  int __pyx_v_ndim;
  int __pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  void *__pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "View.MemoryView":1274
 *     Check for overlapping memory and verify the shapes.
 *     """
 *     cdef void *tmpdata = NULL             # <<<<<<<<<<<<<<
 *     cdef size_t itemsize = src.memview.view.itemsize
 *     cdef int i
*/
  __pyx_v_tmpdata = NULL;

  /* "View.MemoryView":1275
 *     """
 *     cdef void *tmpdata = NULL
 *     cdef size_t itemsize = src.memview.view.itemsize             # <<<<<<<<<<<<<<
 *     cdef int i
 *     cdef char order = get_best_order(&src, src_ndim)
*/
  __pyx_t_1 = __pyx_v_src.memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;

  /* "View.MemoryView":1277
 *     cdef size_t itemsize = src.memview.view.itemsize
 *     cdef int i
 *     cdef char order = get_best_order(&src, src_ndim)             # <<<<<<<<<<<<<<
 *     cdef bint broadcasting = False
 *     cdef bint direct_copy = False
*/
  __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);

  /* "View.MemoryView":1278
 *     cdef int i
 *     cdef char order = get_best_order(&src, src_ndim)
 *     cdef bint broadcasting = False             # <<<<<<<<<<<<<<
 *     cdef bint direct_copy = False
 *     cdef __Pyx_memviewslice tmp
*/
  __pyx_v_broadcasting = 0;

  /* "View.MemoryView":1279
 *     cdef char order = get_best_order(&src, src_ndim)
 *     cdef bint broadcasting = False
 *     cdef bint direct_copy = False             # <<<<<<<<<<<<<<
 *     cdef __Pyx_memviewslice tmp
 * 
*/
  __pyx_v_direct_copy = 0;

  /* "View.MemoryView":1282
 *     cdef __Pyx_memviewslice tmp
 * 
 *     if src_ndim < dst_ndim:             # <<<<<<<<<<<<<<
 *         broadcast_leading(&src, src_ndim, dst_ndim)
 *     elif dst_ndim < src_ndim:
*/
  __pyx_t_2 = (__pyx_v_src_ndim < __pyx_v_dst_ndim);
  if (__pyx_t_2) {

    /* "View.MemoryView":1283
 * 
 *     if src_ndim < dst_ndim:
 *         broadcast_leading(&src, src_ndim, dst_ndim)             # <<<<<<<<<<<<<<
 *     elif dst_ndim < src_ndim:
 *         broadcast_leading(&dst, dst_ndim, src_ndim)
*/
    __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);

    /* "View.MemoryView":1282
 *     cdef __Pyx_memviewslice tmp
 * 
 *     if src_ndim < dst_ndim:             # <<<<<<<<<<<<<<
 *         broadcast_leading(&src, src_ndim, dst_ndim)
 *     elif dst_ndim < src_ndim:
*/
    goto __pyx_L3;
  }

  /* "View.MemoryView":1284
 *     if src_ndim < dst_ndim:
 *         broadcast_leading(&src, src_ndim, dst_ndim)
 *     elif dst_ndim < src_ndim:             # <<<<<<<<<<<<<<
 *         broadcast_leading(&dst, dst_ndim, src_ndim)
 * 
*/
  __pyx_t_2 = (__pyx_v_dst_ndim < __pyx_v_src_ndim);
  if (__pyx_t_2) {

    /* "View.MemoryView":1285
 *         broadcast_leading(&src, src_ndim, dst_ndim)
 *     elif dst_ndim < src_ndim:
 *         broadcast_leading(&dst, dst_ndim, src_ndim)             # <<<<<<<<<<<<<<
 * 
 *     cdef int ndim = max(src_ndim, dst_ndim)
*/
    __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);

    /* "View.MemoryView":1284
 *     if src_ndim < dst_ndim:
 *         broadcast_leading(&src, src_ndim, dst_ndim)
 *     elif dst_ndim < src_ndim:             # <<<<<<<<<<<<<<
 *         broadcast_leading(&dst, dst_ndim, src_ndim)
 * 
*/
  }
  __pyx_L3:;

  /* "View.MemoryView":1287
 *         broadcast_leading(&dst, dst_ndim, src_ndim)
 * 
 *     cdef int ndim = max(src_ndim, dst_ndim)             # <<<<<<<<<<<<<<
 * 
 *     for i in range(ndim):
*/
  __pyx_t_3 = __pyx_v_dst_ndim;
  __pyx_t_4 = __pyx_v_src_ndim;
  __pyx_t_2 = (__pyx_t_3 > __pyx_t_4);
  if (__pyx_t_2) {
    __pyx_t_5 = __pyx_t_3;
  } else {
    __pyx_t_5 = __pyx_t_4;
  }
  __pyx_v_ndim = __pyx_t_5;

  /* "View.MemoryView":1289
 *     cdef int ndim = max(src_ndim, dst_ndim)
 * 
 *     for i in range(ndim):             # <<<<<<<<<<<<<<
 *         if src.shape[i] != dst.shape[i]:
 *             if src.shape[i] == 1:
*/
  __pyx_t_5 = __pyx_v_ndim;
  __pyx_t_3 = __pyx_t_5;
  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
    __pyx_v_i = __pyx_t_4;

    /* "View.MemoryView":1290
 * 
 *     for i in range(ndim):
 *         if src.shape[i] != dst.shape[i]:             # <<<<<<<<<<<<<<
 *             if src.shape[i] == 1:
 *                 broadcasting = True
*/
    __pyx_t_2 = ((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i]));
    if (__pyx_t_2) {

      /* "View.MemoryView":1291
 *     for i in range(ndim):
 *         if src.shape[i] != dst.shape[i]:
 *             if src.shape[i] == 1:             # <<<<<<<<<<<<<<
 *                 broadcasting = True
 *                 src.strides[i] = 0
*/
      __pyx_t_2 = ((__pyx_v_src.shape[__pyx_v_i]) == 1);
      if (__pyx_t_2) {

        /* "View.MemoryView":1292
 *         if src.shape[i] != dst.shape[i]:
 *             if src.shape[i] == 1:
 *                 broadcasting = True             # <<<<<<<<<<<<<<
 *                 src.strides[i] = 0
 *             else:
*/
        __pyx_v_broadcasting = 1;

        /* "View.MemoryView":1293
 *             if src.shape[i] == 1:
 *                 broadcasting = True
 *                 src.strides[i] = 0             # <<<<<<<<<<<<<<
 *             else:
 *                 _err_extents(i, dst.shape[i], src.shape[i])
*/
        (__pyx_v_src.strides[__pyx_v_i]) = 0;

        /* "View.MemoryView":1291
 *     for i in range(ndim):
 *         if src.shape[i] != dst.shape[i]:
 *             if src.shape[i] == 1:             # <<<<<<<<<<<<<<
 *                 broadcasting = True
 *                 src.strides[i] = 0
*/
        goto __pyx_L7;
      }

      /* "View.MemoryView":1295
 *                 src.strides[i] = 0
 *             else:
 *                 _err_extents(i, dst.shape[i], src.shape[i])             # <<<<<<<<<<<<<<
 * 
 *         if src.suboffsets[i] >= 0:
*/
      /*else*/ {
        __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1295, __pyx_L1_error)
      }
      __pyx_L7:;

      /* "View.MemoryView":1290
 * 
 *     for i in range(ndim):
 *         if src.shape[i] != dst.shape[i]:             # <<<<<<<<<<<<<<
 *             if src.shape[i] == 1:
 *                 broadcasting = True
*/
    }

    /* "View.MemoryView":1297
 *                 _err_extents(i, dst.shape[i], src.shape[i])
 * 
 *         if src.suboffsets[i] >= 0:             # <<<<<<<<<<<<<<
 *             _err_dim(PyExc_ValueError, "Dimension %d is not direct", i)
 * 
*/
    __pyx_t_2 = ((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1298
 * 
 *         if src.suboffsets[i] >= 0:
 *             _err_dim(PyExc_ValueError, "Dimension %d is not direct", i)             # <<<<<<<<<<<<<<
 * 
 *     if slices_overlap(&src, &dst, ndim, itemsize):
*/
      __pyx_t_6 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_mstate_global->__pyx_kp_u_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1298, __pyx_L1_error)

      /* "View.MemoryView":1297
 *                 _err_extents(i, dst.shape[i], src.shape[i])
 * 
 *         if src.suboffsets[i] >= 0:             # <<<<<<<<<<<<<<
 *             _err_dim(PyExc_ValueError, "Dimension %d is not direct", i)
 * 
*/
    }
  }

  /* "View.MemoryView":1300
 *             _err_dim(PyExc_ValueError, "Dimension %d is not direct", i)
 * 
 *     if slices_overlap(&src, &dst, ndim, itemsize):             # <<<<<<<<<<<<<<
 * 
 *         if not slice_is_contig(src, order, ndim):
*/
  __pyx_t_2 = __pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
  if (__pyx_t_2) {

    /* "View.MemoryView":1302
 *     if slices_overlap(&src, &dst, ndim, itemsize):
 * 
 *         if not slice_is_contig(src, order, ndim):             # <<<<<<<<<<<<<<
 *             order = get_best_order(&dst, ndim)
 * 
*/
    __pyx_t_2 = (!__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim));
    if (__pyx_t_2) {

      /* "View.MemoryView":1303
 * 
 *         if not slice_is_contig(src, order, ndim):
 *             order = get_best_order(&dst, ndim)             # <<<<<<<<<<<<<<
 * 
 *         tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
      __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);

      /* "View.MemoryView":1302
 *     if slices_overlap(&src, &dst, ndim, itemsize):
 * 
 *         if not slice_is_contig(src, order, ndim):             # <<<<<<<<<<<<<<
 *             order = get_best_order(&dst, ndim)
 * 
*/
    }

    /* "View.MemoryView":1305
 *             order = get_best_order(&dst, ndim)
 * 
 *         tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)             # <<<<<<<<<<<<<<
 *         src = tmp
 * 
*/
    __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1305, __pyx_L1_error)
    __pyx_v_tmpdata = __pyx_t_7;

    /* "View.MemoryView":1306
 * 
 *         tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
 *         src = tmp             # <<<<<<<<<<<<<<
 * 
 *     if not broadcasting:
*/
    __pyx_v_src = __pyx_v_tmp;

    /* "View.MemoryView":1300
 *             _err_dim(PyExc_ValueError, "Dimension %d is not direct", i)
 * 
 *     if slices_overlap(&src, &dst, ndim, itemsize):             # <<<<<<<<<<<<<<
 * 
 *         if not slice_is_contig(src, order, ndim):
*/
  }

  /* "View.MemoryView":1308
 *         src = tmp
 * 
 *     if not broadcasting:             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = (!__pyx_v_broadcasting);
  if (__pyx_t_2) {

    /* "View.MemoryView":1311
 * 
 * 
 *         if slice_is_contig(src, 'C', ndim):             # <<<<<<<<<<<<<<
 *             direct_copy = slice_is_contig(dst, 'C', ndim)
 *         elif slice_is_contig(src, 'F', ndim):
*/
    __pyx_t_2 = __pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim);
    if (__pyx_t_2) {

      /* "View.MemoryView":1312
 * 
 *         if slice_is_contig(src, 'C', ndim):
 *             direct_copy = slice_is_contig(dst, 'C', ndim)             # <<<<<<<<<<<<<<
 *         elif slice_is_contig(src, 'F', ndim):
 *             direct_copy = slice_is_contig(dst, 'F', ndim)
*/
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);

      /* "View.MemoryView":1311
 * 
 * 
 *         if slice_is_contig(src, 'C', ndim):             # <<<<<<<<<<<<<<
 *             direct_copy = slice_is_contig(dst, 'C', ndim)
 *         elif slice_is_contig(src, 'F', ndim):
*/
      goto __pyx_L12;
    }

    /* "View.MemoryView":1313
 *         if slice_is_contig(src, 'C', ndim):
 *             direct_copy = slice_is_contig(dst, 'C', ndim)
 *         elif slice_is_contig(src, 'F', ndim):             # <<<<<<<<<<<<<<
 *             direct_copy = slice_is_contig(dst, 'F', ndim)
 * 
*/
    __pyx_t_2 = __pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim);
    if (__pyx_t_2) {

      /* "View.MemoryView":1314
 *             direct_copy = slice_is_contig(dst, 'C', ndim)
 *         elif slice_is_contig(src, 'F', ndim):
 *             direct_copy = slice_is_contig(dst, 'F', ndim)             # <<<<<<<<<<<<<<
 * 
 *         if direct_copy:
*/
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);

      /* "View.MemoryView":1313
 *         if slice_is_contig(src, 'C', ndim):
 *             direct_copy = slice_is_contig(dst, 'C', ndim)
 *         elif slice_is_contig(src, 'F', ndim):             # <<<<<<<<<<<<<<
 *             direct_copy = slice_is_contig(dst, 'F', ndim)
 * 
*/
    }
    __pyx_L12:;

    /* "View.MemoryView":1316
 *             direct_copy = slice_is_contig(dst, 'F', ndim)
 * 
 *         if direct_copy:             # <<<<<<<<<<<<<<
 * 
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=False)
*/
    if (__pyx_v_direct_copy) {

      /* "View.MemoryView":1318
 *         if direct_copy:
 * 
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=False)             # <<<<<<<<<<<<<<
 *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=True)
*/
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

      /* "View.MemoryView":1319
 * 
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=False)
 *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))             # <<<<<<<<<<<<<<
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=True)
 *             free(tmpdata)
*/
      (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));

      /* "View.MemoryView":1320
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=False)
 *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=True)             # <<<<<<<<<<<<<<
 *             free(tmpdata)
 *             return 0
*/
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);

      /* "View.MemoryView":1321
 *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=True)
 *             free(tmpdata)             # <<<<<<<<<<<<<<
 *             return 0
 * 
*/
      free(__pyx_v_tmpdata);

      /* "View.MemoryView":1322
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=True)
 *             free(tmpdata)
 *             return 0             # <<<<<<<<<<<<<<
 * 
 *     if order == 'F' == get_best_order(&dst, ndim):
*/
      __pyx_r = 0;
      goto __pyx_L0;

      /* "View.MemoryView":1316
 *             direct_copy = slice_is_contig(dst, 'F', ndim)
 * 
 *         if direct_copy:             # <<<<<<<<<<<<<<
 * 
 *             refcount_copying(&dst, dtype_is_object, ndim, inc=False)
*/
    }

    /* "View.MemoryView":1308
 *         src = tmp
 * 
 *     if not broadcasting:             # <<<<<<<<<<<<<<
 * 
 * 
*/
  }

  /* "View.MemoryView":1324
 *             return 0
 * 
 *     if order == 'F' == get_best_order(&dst, ndim):             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = (__pyx_v_order == 'F');
  if (__pyx_t_2) {
    __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
  }
  if (__pyx_t_2) {

    /* "View.MemoryView":1327
 * 
 * 
 *         transpose_memslice(&src)             # <<<<<<<<<<<<<<
 *         transpose_memslice(&dst)
 * 
*/
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 1327, __pyx_L1_error)

    /* "View.MemoryView":1328
 * 
 *         transpose_memslice(&src)
 *         transpose_memslice(&dst)             # <<<<<<<<<<<<<<
 * 
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=False)
*/
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 1328, __pyx_L1_error)

    /* "View.MemoryView":1324
 *             return 0
 * 
 *     if order == 'F' == get_best_order(&dst, ndim):             # <<<<<<<<<<<<<<
 * 
 * 
*/
  }

  /* "View.MemoryView":1330
 *         transpose_memslice(&dst)
 * 
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=False)             # <<<<<<<<<<<<<<
 *     copy_strided_to_strided(&src, &dst, ndim, itemsize)
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=True)
*/
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

  /* "View.MemoryView":1331
 * 
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=False)
 *     copy_strided_to_strided(&src, &dst, ndim, itemsize)             # <<<<<<<<<<<<<<
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=True)
 * 
*/
  copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);

  /* "View.MemoryView":1332
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=False)
 *     copy_strided_to_strided(&src, &dst, ndim, itemsize)
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=True)             # <<<<<<<<<<<<<<
 * 
 *     free(tmpdata)
*/
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);

  /* "View.MemoryView":1334
 *     refcount_copying(&dst, dtype_is_object, ndim, inc=True)
 * 
 *     free(tmpdata)             # <<<<<<<<<<<<<<
 *     return 0
 * 
*/
  free(__pyx_v_tmpdata);

  /* "View.MemoryView":1335
 * 
 *     free(tmpdata)
 *     return 0             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_broadcast_leading')
*/
  __pyx_r = 0;
  goto __pyx_L0;

  /* "View.MemoryView":1265
 * 
 * 
 * @cname('__pyx_memoryview_copy_contents')             # <<<<<<<<<<<<<<
 * cdef int memoryview_copy_contents(__Pyx_memviewslice src,
 *                                   __Pyx_memviewslice dst,
*/

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1337
 *     return 0
 * 
 * @cname('__pyx_memoryview_broadcast_leading')             # <<<<<<<<<<<<<<
 * cdef void broadcast_leading(__Pyx_memviewslice *mslice,
 *                             int ndim,
*/

/* Broadcast a memoryview slice against a higher-dimensional one by
 * prepending (ndim_other - ndim) leading axes of extent 1.
 *
 * Existing axes are shifted toward the tail of the shape/strides/
 * suboffsets arrays; the freed leading slots become broadcast axes.
 * Corresponds to View.MemoryView's broadcast_leading() (noexcept nogil).
 */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int offset = __pyx_v_ndim_other - __pyx_v_ndim;  /* number of new leading axes */
  int i;

  /* Shift existing axes up by `offset`, walking backwards so that a
   * destination slot is never overwritten before it has been read. */
  for (i = __pyx_v_ndim - 1; i >= 0; i--) {
    __pyx_v_mslice->shape[i + offset] = __pyx_v_mslice->shape[i];
    __pyx_v_mslice->strides[i + offset] = __pyx_v_mslice->strides[i];
    __pyx_v_mslice->suboffsets[i + offset] = __pyx_v_mslice->suboffsets[i];
  }

  /* Fill the new leading axes: extent 1 (broadcast), a copy of the
   * first stride (its value is irrelevant for extent-1 axes), and -1
   * meaning "no indirect/suboffset dereference" for this dimension. */
  for (i = 0; i < offset; i++) {
    __pyx_v_mslice->shape[i] = 1;
    __pyx_v_mslice->strides[i] = __pyx_v_mslice->strides[0];
    __pyx_v_mslice->suboffsets[i] = -1L;
  }
}

/* "View.MemoryView":1359
 * 
 * 
 * @cname('__pyx_memoryview_refcount_copying')             # <<<<<<<<<<<<<<
 * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil:
 * 
*/

/* Adjust Python reference counts for every element of *dst before or
 * after a bulk copy.
 *
 * For non-object dtypes this is a no-op.  For object dtypes it
 * delegates to the with-gil helper, which increfs (inc != 0) or
 * decrefs (inc == 0) each PyObject* stored in the strided slice.
 * Callable from nogil code; the helper acquires the GIL itself.
 */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
  /* Plain (non-object) element types carry no refcounts. */
  if (!__pyx_v_dtype_is_object)
    return;

  __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
}

/* "View.MemoryView":1365
 *         refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc)
 * 
 * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')             # <<<<<<<<<<<<<<
 * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape,
 *                                              Py_ssize_t *strides, int ndim,
*/

/* GIL-acquiring shim around refcount_objects_in_slice().
 * Exists so that nogil code (e.g. refcount_copying above) can trigger
 * refcount adjustments: Py_INCREF/Py_DECREF performed by the callee
 * require the GIL, which this wrapper acquires and releases itself. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  /* Acquire the GIL for the duration of the refcount walk. */
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();

  /* "View.MemoryView":1369
 *                                              Py_ssize_t *strides, int ndim,
 *                                              bint inc) noexcept with gil:
 *     refcount_objects_in_slice(data, shape, strides, ndim, inc)             # <<<<<<<<<<<<<<
 * 
 * @cname('__pyx_memoryview_refcount_objects_in_slice')
 */
  __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);

  /* "View.MemoryView":1365
 *         refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc)
 * 
 * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')             # <<<<<<<<<<<<<<
 * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape,
 *                                              Py_ssize_t *strides, int ndim,
 */

  /* function exit code */
  /* Release the GIL acquired on entry. */
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
}

/* "View.MemoryView":1371
 *     refcount_objects_in_slice(data, shape, strides, ndim, inc)
 * 
 * @cname('__pyx_memoryview_refcount_objects_in_slice')             # <<<<<<<<<<<<<<
 * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape,
 *                                     Py_ssize_t *strides, int ndim, bint inc) noexcept:
*/

/* Recursively INCREF (inc != 0) or DECREF (inc == 0) every PyObject*
 * stored in the strided slice described by (data, shape, strides, ndim).
 *
 * At ndim == 1 each element is reinterpreted as a PyObject*; for higher
 * ndim the function recurses into the next dimension with the leading
 * shape/stride entry peeled off.  Caller must hold the GIL.
 */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  Py_ssize_t leading_stride = __pyx_v_strides[0];
  Py_ssize_t extent = __pyx_v_shape[0];
  Py_ssize_t idx;

  for (idx = 0; idx < extent; idx++) {
    if (__pyx_v_ndim == 1) {
      /* Innermost dimension: the bytes at `data` hold a PyObject*. */
      PyObject *obj = ((PyObject **)__pyx_v_data)[0];
      if (__pyx_v_inc) {
        Py_INCREF(obj);
      } else {
        Py_DECREF(obj);
      }
    } else {
      /* Recurse over the remaining (ndim - 1) trailing dimensions. */
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
    }

    /* Step to the next element along the leading dimension. */
    __pyx_v_data += leading_stride;
  }
}

/* "View.MemoryView":1391
 * 
 * 
 * @cname('__pyx_memoryview_slice_assign_scalar')             # <<<<<<<<<<<<<<
 * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim,
 *                               size_t itemsize, void *item,
*/

/* Assign one scalar value (itemsize bytes at `item`) to every element
 * of the strided slice *dst.
 *
 * For object dtypes the old references are dropped before the raw blit
 * and new references are taken afterwards, so the slice's refcounts
 * stay consistent across the overwrite.  noexcept nogil.
 */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
  /* Drop references held by the elements about to be overwritten. */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

  /* Blit the scalar into every slot of the strided slice. */
  __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);

  /* Take references for the newly written object values. */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
}

/* "View.MemoryView":1400
 * 
 * 
 * @cname('__pyx_memoryview__slice_assign_scalar')             # <<<<<<<<<<<<<<
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,
 *                               Py_ssize_t *strides, int ndim,
*/

/* Worker for slice_assign_scalar(): memcpy the itemsize-byte scalar at
 * `item` into every element of the strided slice (data, shape, strides,
 * ndim).
 *
 * Base case ndim == 1 copies along the innermost run; otherwise the
 * function recurses per leading-dimension element with the first
 * shape/stride entry peeled off.  noexcept nogil.
 */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
  Py_ssize_t leading_stride = __pyx_v_strides[0];
  Py_ssize_t extent = __pyx_v_shape[0];
  Py_ssize_t idx;

  if (__pyx_v_ndim == 1) {
    /* Innermost dimension: write the scalar into each element. */
    for (idx = 0; idx < extent; idx++) {
      memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);
      __pyx_v_data += leading_stride;
    }
  } else {
    /* Recurse into the trailing (ndim - 1) dimensions for each
     * position along the leading axis. */
    for (idx = 0; idx < extent; idx++) {
      __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
      __pyx_v_data += leading_stride;
    }
  }
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')
*/

/* Python wrapper */
/* CPython-visible wrapper for View.MemoryView.__pyx_unpickle_Enum.
 * Unpacks the (__pyx_type, __pyx_checksum, __pyx_state) arguments from
 * the METH_FASTCALL / tuple calling convention, converts __pyx_checksum
 * to a C long, type-checks __pyx_state as a tuple, and forwards to the
 * implementation function __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum.
 *
 * Fix (review): the keyword-count error check read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the GCC/Clang definition unlikely(x) == __builtin_expect(!!(x), 0)
 * the !! collapses the operand to 0/1, so "< 0" was always false and a
 * negative result from __Pyx_NumKwargs_FASTCALL() was silently ignored.
 * The comparison now sits inside unlikely(), as intended.
 */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
  /* Determine the positional-argument count for the tuple convention. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIXED: comparison moved inside unlikely() so a negative keyword
     * count actually reaches the error path (see header comment). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect whatever positionals were passed,
       * then let __Pyx_ParseKeywords fill in the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_Enum", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three parameters are required; report any still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* __pyx_state must be a tuple (or None, allowed by the 1 flag). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* View.MemoryView.__pyx_unpickle_Enum: module-level unpickle helper that
 * Cython generates for its internal memoryview Enum type.
 *
 * Steps:
 *   1. Verify the pickled checksum against the known field layouts of Enum.
 *   2. Allocate an empty instance via Enum.__new__(__pyx_type).
 *   3. If a state tuple was pickled (not None), restore it with
 *      __pyx_unpickle_Enum__set_state.
 *   4. Return the reconstructed object.
 *
 * Returns: a new reference to the rebuilt Enum, or NULL with an exception
 * set (checksum mismatch, allocation failure, or bad state tuple).
 *
 * NOTE(review): Cython-generated code — the refcount bookkeeping and the
 * goto-based cleanup labels are load-bearing; do not hand-edit statements. */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')             # <<<<<<<<<<<<<<
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the accepted layouts;
   * raises (returns -1) inside the helper on mismatch. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, __pyx_k_name); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')
 *     __pyx_result = Enum.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 */
  /* Vectorcall of Enum.__new__(__pyx_type) — builds an uninitialized instance. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_MemviewEnum_type);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, __pyx_state: tuple):
 */
    /* Redundant with the branch condition above, but kept by the code
     * generator to enforce the 'not None' contract of the cdef callee. */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, __pyx_state: tuple):
 *     __pyx_result.name = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result.name = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* Restore the pickled state of a memoryview Enum instance.
 *
 * __pyx_state is the tuple produced at pickling time: element 0 is assigned
 * to the instance's C-level `name` slot; __Pyx_UpdateUnpickledDict(…, 1) then
 * merges any remaining state — presumably the instance __dict__ stored at
 * index 1; handled entirely inside that helper (TODO confirm against the
 * Cython runtime) — into the object.
 *
 * Returns: Py_None on success, or 0 (NULL) with an exception set.
 *
 * NOTE(review): Cython-generated code — the INCREF-before-DECREF swap of
 * `->name` below is the canonical safe attribute replacement; keep order. */
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, __pyx_state: tuple):
 *     __pyx_result.name = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Swap the new value into ->name: take ownership of the new reference,
   * then release the old one. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->name);
  __Pyx_DECREF(__pyx_v___pyx_result->name);
  __pyx_v___pyx_result->name = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, __pyx_state: tuple):
 *     __pyx_result.name = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result.name = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cpython/complex.pxd":20
 * 
 *         # unavailable in limited API
 *         @property             # <<<<<<<<<<<<<<
 *         @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")
 *         cdef inline double real(self) noexcept:
*/

#if !CYTHON_COMPILING_IN_LIMITED_API
/* Accessor for the `real` property of cpython.complex.complex: reads the
 * real component straight out of PyComplexObject's C-level `cval` field.
 * Compiled out under the Limited API, where that struct is opaque. */
static CYTHON_INLINE double __pyx_f_7cpython_7complex_7complex_4real_real(PyComplexObject *__pyx_v_self) {
  return __pyx_v_self->cval.real;
}
#endif /* !CYTHON_COMPILING_IN_LIMITED_API */

/* "cpython/complex.pxd":26
 * 
 *         # unavailable in limited API
 *         @property             # <<<<<<<<<<<<<<
 *         @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")
 *         cdef inline double imag(self) noexcept:
*/

#if !CYTHON_COMPILING_IN_LIMITED_API
/* Accessor for the `imag` property of cpython.complex.complex: reads the
 * imaginary component straight out of PyComplexObject's C-level `cval`
 * field.  Compiled out under the Limited API, where that struct is opaque. */
static CYTHON_INLINE double __pyx_f_7cpython_7complex_7complex_4imag_imag(PyComplexObject *__pyx_v_self) {
  return __pyx_v_self->cval.imag;
}
#endif /* !CYTHON_COMPILING_IN_LIMITED_API */

/* "cpython/contextvars.pxd":115
 * 
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")             # <<<<<<<<<<<<<<
 * cdef inline object get_value(var, default_value=None):
 *     """Return a new reference to the value of the context variable,
*/

#if !CYTHON_COMPILING_IN_LIMITED_API
/* cpython.contextvars.get_value(var, default_value=None):
 * return a new reference to the current value of context variable `var`,
 * falling back to the variable's own default (PyContextVar_Get with a NULL
 * fallback), then to `default_value`, then to None.
 *
 * PyContextVar_Get hands back an owned PyObject* in `value`; the code below
 * converts it to an owned Python reference and releases the raw one.
 * Returns NULL with an exception set if PyContextVar_Get fails. */
static CYTHON_INLINE PyObject *__pyx_f_7cpython_11contextvars_get_value(PyObject *__pyx_v_var, struct __pyx_opt_args_7cpython_11contextvars_get_value *__pyx_optional_args) {

  /* "cpython/contextvars.pxd":116
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")
 * cdef inline object get_value(var, default_value=None):             # <<<<<<<<<<<<<<
 *     """Return a new reference to the value of the context variable,
 *     or the default value of the context variable,
 */
  PyObject *__pyx_v_default_value = ((PyObject *)Py_None);
  PyObject *__pyx_v_value;
  PyObject *__pyx_v_pyvalue = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_value", 0);
  /* Unpack the optional `default_value` argument (Cython's calling
   * convention for cdef functions with defaults). */
  if (__pyx_optional_args) {
    if (__pyx_optional_args->__pyx_n > 0) {
      __pyx_v_default_value = __pyx_optional_args->default_value;
    }
  }

  /* "cpython/contextvars.pxd":121
 *     or None if no such value or default was found.
 *     """
 *     cdef PyObject *value = NULL             # <<<<<<<<<<<<<<
 *     PyContextVar_Get(var, NULL, &value)
 *     if value is NULL:
 */
  __pyx_v_value = NULL;

  /* "cpython/contextvars.pxd":122
 *     """
 *     cdef PyObject *value = NULL
 *     PyContextVar_Get(var, NULL, &value)             # <<<<<<<<<<<<<<
 *     if value is NULL:
 *         # context variable does not have a default
 */
  __pyx_t_1 = PyContextVar_Get(__pyx_v_var, NULL, (&__pyx_v_value)); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(2, 122, __pyx_L1_error)

  /* "cpython/contextvars.pxd":123
 *     cdef PyObject *value = NULL
 *     PyContextVar_Get(var, NULL, &value)
 *     if value is NULL:             # <<<<<<<<<<<<<<
 *         # context variable does not have a default
 *         pyvalue = default_value
 */
  __pyx_t_2 = (__pyx_v_value == NULL);
  if (__pyx_t_2) {

    /* "cpython/contextvars.pxd":125
 *     if value is NULL:
 *         # context variable does not have a default
 *         pyvalue = default_value             # <<<<<<<<<<<<<<
 *     else:
 *         # value or default value of context variable
 */
    __Pyx_INCREF(__pyx_v_default_value);
    __pyx_v_pyvalue = __pyx_v_default_value;

    /* "cpython/contextvars.pxd":123
 *     cdef PyObject *value = NULL
 *     PyContextVar_Get(var, NULL, &value)
 *     if value is NULL:             # <<<<<<<<<<<<<<
 *         # context variable does not have a default
 *         pyvalue = default_value
 */
    goto __pyx_L3;
  }

  /* "cpython/contextvars.pxd":128
 *     else:
 *         # value or default value of context variable
 *         pyvalue = <object>value             # <<<<<<<<<<<<<<
 *         Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'
 *     return pyvalue
 */
  /*else*/ {
    __pyx_t_3 = ((PyObject *)__pyx_v_value);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_v_pyvalue = __pyx_t_3;
    __pyx_t_3 = 0;

    /* "cpython/contextvars.pxd":129
 *         # value or default value of context variable
 *         pyvalue = <object>value
 *         Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'             # <<<<<<<<<<<<<<
 *     return pyvalue
 * 
 */
    Py_XDECREF(__pyx_v_value);
  }
  __pyx_L3:;

  /* "cpython/contextvars.pxd":130
 *         pyvalue = <object>value
 *         Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'
 *     return pyvalue             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_pyvalue);
  __pyx_r = __pyx_v_pyvalue;
  goto __pyx_L0;

  /* "cpython/contextvars.pxd":115
 * 
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")             # <<<<<<<<<<<<<<
 * cdef inline object get_value(var, default_value=None):
 *     """Return a new reference to the value of the context variable,
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cpython.contextvars.get_value", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_pyvalue);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
#endif /*!(#if !CYTHON_COMPILING_IN_LIMITED_API)*/

/* "cpython/contextvars.pxd":133
 * 
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")             # <<<<<<<<<<<<<<
 * cdef inline object get_value_no_default(var, default_value=None):
 *     """Return a new reference to the value of the context variable,
*/

#if !CYTHON_COMPILING_IN_LIMITED_API
/* cpython.contextvars.get_value_no_default(var, default_value=None):
 * like get_value(), but the variable's own default is bypassed — the caller's
 * `default_value` is passed directly as PyContextVar_Get's fallback, so the
 * out-parameter is always set on success and no NULL check is needed.
 *
 * PyContextVar_Get hands back an owned PyObject* in `value`; the code below
 * converts it to an owned Python reference and releases the raw one.
 * Returns NULL with an exception set if PyContextVar_Get fails. */
static CYTHON_INLINE PyObject *__pyx_f_7cpython_11contextvars_get_value_no_default(PyObject *__pyx_v_var, struct __pyx_opt_args_7cpython_11contextvars_get_value_no_default *__pyx_optional_args) {

  /* "cpython/contextvars.pxd":134
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")
 * cdef inline object get_value_no_default(var, default_value=None):             # <<<<<<<<<<<<<<
 *     """Return a new reference to the value of the context variable,
 *     or the provided default value if no such value was found.
 */
  PyObject *__pyx_v_default_value = ((PyObject *)Py_None);
  PyObject *__pyx_v_value;
  PyObject *__pyx_v_pyvalue = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_value_no_default", 0);
  /* Unpack the optional `default_value` argument (Cython's calling
   * convention for cdef functions with defaults). */
  if (__pyx_optional_args) {
    if (__pyx_optional_args->__pyx_n > 0) {
      __pyx_v_default_value = __pyx_optional_args->default_value;
    }
  }

  /* "cpython/contextvars.pxd":140
 *     Ignores the default value of the context variable, if any.
 *     """
 *     cdef PyObject *value = NULL             # <<<<<<<<<<<<<<
 *     PyContextVar_Get(var, <PyObject*>default_value, &value)
 *     # value of context variable or 'default_value'
 */
  __pyx_v_value = NULL;

  /* "cpython/contextvars.pxd":141
 *     """
 *     cdef PyObject *value = NULL
 *     PyContextVar_Get(var, <PyObject*>default_value, &value)             # <<<<<<<<<<<<<<
 *     # value of context variable or 'default_value'
 *     pyvalue = <object>value
 */
  __pyx_t_1 = PyContextVar_Get(__pyx_v_var, ((PyObject *)__pyx_v_default_value), (&__pyx_v_value)); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(2, 141, __pyx_L1_error)

  /* "cpython/contextvars.pxd":143
 *     PyContextVar_Get(var, <PyObject*>default_value, &value)
 *     # value of context variable or 'default_value'
 *     pyvalue = <object>value             # <<<<<<<<<<<<<<
 *     Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'
 *     return pyvalue
 */
  __pyx_t_2 = ((PyObject *)__pyx_v_value);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_v_pyvalue = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cpython/contextvars.pxd":144
 *     # value of context variable or 'default_value'
 *     pyvalue = <object>value
 *     Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'             # <<<<<<<<<<<<<<
 *     return pyvalue
 */
  Py_XDECREF(__pyx_v_value);

  /* "cpython/contextvars.pxd":145
 *     pyvalue = <object>value
 *     Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'
 *     return pyvalue             # <<<<<<<<<<<<<<
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_pyvalue);
  __pyx_r = __pyx_v_pyvalue;
  goto __pyx_L0;

  /* "cpython/contextvars.pxd":133
 * 
 * 
 * @_cython.c_compile_guard("!CYTHON_COMPILING_IN_LIMITED_API")             # <<<<<<<<<<<<<<
 * cdef inline object get_value_no_default(var, default_value=None):
 *     """Return a new reference to the value of the context variable,
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cpython.contextvars.get_value_no_default", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_pyvalue);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
#endif /*!(#if !CYTHON_COMPILING_IN_LIMITED_API)*/

/* "cuda/bindings/_nvml.pyx":25
 * 
 * 
 * cdef __from_data(data, dtype_name, expected_dtype, lowpp_type):             # <<<<<<<<<<<<<<
 *     # _numpy.recarray is a subclass of _numpy.ndarray, so implicitly handled here.
 *     if isinstance(data, lowpp_type):
*/

/* cuda.bindings._nvml.__from_data (cdef helper):
 * coerce `data` into an instance of `lowpp_type`.
 *
 *   - If `data` is already a `lowpp_type` (covers recarray via ndarray
 *     subclassing, per the .pyx comment), return it unchanged.
 *   - Otherwise `data` must be a numpy.ndarray of size 1 whose dtype equals
 *     `expected_dtype` (TypeError / ValueError raised otherwise;
 *     `dtype_name` is only used to format the dtype error message).
 *   - On success, return
 *     lowpp_type.from_ptr(data.ctypes.data, not data.flags.writeable, data).
 *     NOTE(review): from_ptr is defined elsewhere; presumably it wraps the
 *     array's buffer address and keeps `data` alive as the owner — confirm
 *     against the lowpp_type implementation.
 *
 * Returns a new reference, or 0 (NULL) with an exception set.
 *
 * NOTE(review): Cython-generated code — the vectorcall scaffolding and
 * error-path XDECREFs are load-bearing; do not hand-edit statements. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___from_data(PyObject *__pyx_v_data, PyObject *__pyx_v_dtype_name, PyObject *__pyx_v_expected_dtype, PyObject *__pyx_v_lowpp_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__from_data", 0);

  /* "cuda/bindings/_nvml.pyx":27
 * cdef __from_data(data, dtype_name, expected_dtype, lowpp_type):
 *     # _numpy.recarray is a subclass of _numpy.ndarray, so implicitly handled here.
 *     if isinstance(data, lowpp_type):             # <<<<<<<<<<<<<<
 *         return data
 *     if not isinstance(data, _numpy.ndarray):
 */
  /* Fast path: already the target low-level type — pass through untouched. */
  __pyx_t_1 = PyObject_IsInstance(__pyx_v_data, __pyx_v_lowpp_type); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 27, __pyx_L1_error)
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":28
 *     # _numpy.recarray is a subclass of _numpy.ndarray, so implicitly handled here.
 *     if isinstance(data, lowpp_type):
 *         return data             # <<<<<<<<<<<<<<
 *     if not isinstance(data, _numpy.ndarray):
 *         raise TypeError("data argument must be a NumPy ndarray")
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(__pyx_v_data);
    __pyx_r = __pyx_v_data;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":27
 * cdef __from_data(data, dtype_name, expected_dtype, lowpp_type):
 *     # _numpy.recarray is a subclass of _numpy.ndarray, so implicitly handled here.
 *     if isinstance(data, lowpp_type):             # <<<<<<<<<<<<<<
 *         return data
 *     if not isinstance(data, _numpy.ndarray):
 */
  }

  /* "cuda/bindings/_nvml.pyx":29
 *     if isinstance(data, lowpp_type):
 *         return data
 *     if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *         raise TypeError("data argument must be a NumPy ndarray")
 *     if data.size != 1:
 */
  /* Validation: look up the numpy module global and check ndarray-ness. */
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_1 = PyObject_IsInstance(__pyx_v_data, __pyx_t_3); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 29, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = (!__pyx_t_1);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":30
 *         return data
 *     if not isinstance(data, _numpy.ndarray):
 *         raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *     if data.size != 1:
 *         raise ValueError("data array must have a size of 1")
 */
    __pyx_t_2 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 30, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(0, 30, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":29
 *     if isinstance(data, lowpp_type):
 *         return data
 *     if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *         raise TypeError("data argument must be a NumPy ndarray")
 *     if data.size != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":31
 *     if not isinstance(data, _numpy.ndarray):
 *         raise TypeError("data argument must be a NumPy ndarray")
 *     if data.size != 1:             # <<<<<<<<<<<<<<
 *         raise ValueError("data array must have a size of 1")
 *     if data.dtype != expected_dtype:
 */
  /* Validation: exactly one element (size == 1). */
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_3, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 31, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":32
 *         raise TypeError("data argument must be a NumPy ndarray")
 *     if data.size != 1:
 *         raise ValueError("data array must have a size of 1")             # <<<<<<<<<<<<<<
 *     if data.dtype != expected_dtype:
 *         raise ValueError(f"data array must be of dtype {dtype_name}")
 */
    __pyx_t_2 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_data_array_must_have_a_size_of_1};
      __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(0, 32, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":31
 *     if not isinstance(data, _numpy.ndarray):
 *         raise TypeError("data argument must be a NumPy ndarray")
 *     if data.size != 1:             # <<<<<<<<<<<<<<
 *         raise ValueError("data array must have a size of 1")
 *     if data.dtype != expected_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":33
 *     if data.size != 1:
 *         raise ValueError("data array must have a size of 1")
 *     if data.dtype != expected_dtype:             # <<<<<<<<<<<<<<
 *         raise ValueError(f"data array must be of dtype {dtype_name}")
 *     return lowpp_type.from_ptr(data.ctypes.data, not data.flags.writeable, data)
 */
  /* Validation: dtype equality via rich comparison (dtype defines __ne__). */
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_v_expected_dtype, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":34
 *         raise ValueError("data array must have a size of 1")
 *     if data.dtype != expected_dtype:
 *         raise ValueError(f"data array must be of dtype {dtype_name}")             # <<<<<<<<<<<<<<
 *     return lowpp_type.from_ptr(data.ctypes.data, not data.flags.writeable, data)
 * 
 */
    /* Build the f-string message by concatenating the static prefix with
     * str(dtype_name). */
    __pyx_t_3 = NULL;
    __pyx_t_6 = __Pyx_PyObject_FormatSimple(__pyx_v_dtype_name, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 34, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 34, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_7};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 34, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":33
 *     if data.size != 1:
 *         raise ValueError("data array must have a size of 1")
 *     if data.dtype != expected_dtype:             # <<<<<<<<<<<<<<
 *         raise ValueError(f"data array must be of dtype {dtype_name}")
 *     return lowpp_type.from_ptr(data.ctypes.data, not data.flags.writeable, data)
 */
  }

  /* "cuda/bindings/_nvml.pyx":35
 *     if data.dtype != expected_dtype:
 *         raise ValueError(f"data array must be of dtype {dtype_name}")
 *     return lowpp_type.from_ptr(data.ctypes.data, not data.flags.writeable, data)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Gather the three arguments for lowpp_type.from_ptr(...):
   * the buffer address, a read-only flag, and the owning array. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_7 = __pyx_v_lowpp_type;
  __Pyx_INCREF(__pyx_t_7);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  __pyx_t_8 = __Pyx_PyBool_FromLong((!__pyx_t_4)); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_7, __pyx_t_6, __pyx_t_8, __pyx_v_data};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25
 * 
 * 
 * cdef __from_data(data, dtype_name, expected_dtype, lowpp_type):             # <<<<<<<<<<<<<<
 *     # _numpy.recarray is a subclass of _numpy.ndarray, so implicitly handled here.
 *     if isinstance(data, lowpp_type):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml.__from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1143
 * 
 * class NvmlError(Exception):
 *     def __init__(self, status):             # <<<<<<<<<<<<<<
 *         self.status = status
 *         s = error_string(status)
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9NvmlError_1__init__(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9NvmlError___init__, "NvmlError.__init__(self, status)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9NvmlError_1__init__ = {"__init__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9NvmlError_1__init__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9NvmlError___init__};
/*
 * Cython-generated Python wrapper for NvmlError.__init__(self, status)
 * (cuda/bindings/_nvml.pyx:1143).  Unpacks exactly two positional-or-keyword
 * arguments ("self", "status") from either the METH_FASTCALL vectorcall
 * convention or the classic tuple/dict convention, then delegates to the
 * implementation __pyx_pf_4cuda_8bindings_5_nvml_9NvmlError___init__.
 * NOTE(review): machine-generated by Cython 3.2.2 -- fix the .pyx source,
 * not this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9NvmlError_1__init__(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_self = 0;
  PyObject *__pyx_v_status = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Slots for the unpacked "self" and "status" arguments (owned references,
   * released on both the error and the success paths below). */
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_self,&__pyx_mstate_global->__pyx_n_u_status,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): the "< 0" sits outside unlikely(); unlikely(x) reduces to
     * !!(x), so this condition can never be true and a negative (error) return
     * from __Pyx_NumKwargs_FASTCALL would go unnoticed here.  This is upstream
     * Cython 3.2.2 codegen -- confirm/report against Cython, do not hand-edit. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1143, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: grab up to 2 positionals, then let
       * __Pyx_ParseKeywords fill the remaining slots from keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1143, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1143, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 1143, __pyx_L3_error)
      /* Any slot still empty after keyword parsing is a missing argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, i); __PYX_ERR(0, 1143, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1143, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1143, __pyx_L3_error)
    }
    __pyx_v_self = values[0];
    __pyx_v_status = values[1];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  /* Wrong number of positional arguments: raise TypeError. */
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 1143, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument refs already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvmlError.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the actual __init__ implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9NvmlError___init__(__pyx_self, __pyx_v_self, __pyx_v_status);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvmlError.__init__ (cuda/bindings/_nvml.pyx:1143-1146):
 *     self.status = status
 *     s = error_string(status)
 *     super(NvmlError, self).__init__(s)
 * Returns None on success; NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9NvmlError___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_status) {
  PyObject *__pyx_v_s = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":1144
 * class NvmlError(Exception):
 *     def __init__(self, status):
 *         self.status = status             # <<<<<<<<<<<<<<
 *         s = error_string(status)
 *         super(NvmlError, self).__init__(s)
 */
  /* Store the raw status object on the instance as "status". */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_mstate_global->__pyx_n_u_status, __pyx_v_status) < (0)) __PYX_ERR(0, 1144, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1145
 *     def __init__(self, status):
 *         self.status = status
 *         s = error_string(status)             # <<<<<<<<<<<<<<
 *         super(NvmlError, self).__init__(s)
 * 
 */
  /* Coerce status to a C int (raises on non-int / out-of-range values),
   * then call the cdef error_string() helper to get the message text. */
  __pyx_t_1 = __Pyx_PyLong_As_int(__pyx_v_status); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1145, __pyx_L1_error)
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_error_string(__pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_s = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":1146
 *         self.status = status
 *         s = error_string(status)
 *         super(NvmlError, self).__init__(s)             # <<<<<<<<<<<<<<
 * 
 *     def __reduce__(self):
 */
  /* First vectorcall: super(NvmlError, self) -> bound super object. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_t_6, __pyx_v_self};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_super, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1146, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
  }
  /* Second call: invoke .__init__(s) on the super object; the returned
   * None is discarded immediately below. */
  __pyx_t_3 = __pyx_t_4;
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_7 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_s};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_init, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1146, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":1143
 * 
 * class NvmlError(Exception):
 *     def __init__(self, status):             # <<<<<<<<<<<<<<
 *         self.status = status
 *         s = error_string(status)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release all temporaries and record a traceback entry. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvmlError.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_s);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1148
 *         super(NvmlError, self).__init__(s)
 * 
 *     def __reduce__(self):             # <<<<<<<<<<<<<<
 *         return (type(self), (self.status,))
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9NvmlError_3__reduce__(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9NvmlError_2__reduce__, "NvmlError.__reduce__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9NvmlError_3__reduce__ = {"__reduce__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9NvmlError_3__reduce__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9NvmlError_2__reduce__};
/*
 * Cython-generated Python wrapper for NvmlError.__reduce__(self)
 * (cuda/bindings/_nvml.pyx:1148).  Unpacks the single "self" argument from
 * either the METH_FASTCALL vectorcall convention or the classic tuple/dict
 * convention, then delegates to __pyx_pf_4cuda_8bindings_5_nvml_9NvmlError_2__reduce__.
 * NOTE(review): machine-generated by Cython 3.2.2 -- fix the .pyx source,
 * not this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9NvmlError_3__reduce__(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_self = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Slot for the unpacked "self" argument (owned reference). */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover the positional count from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_self,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): "< 0" outside unlikely() makes this check always false
     * (unlikely(x) is !!(x)); same upstream Cython codegen quirk as in the
     * __init__ wrapper above -- confirm/report against Cython. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1148, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call path: accept "self" positionally or by keyword. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1148, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__reduce__", 0) < (0)) __PYX_ERR(0, 1148, __pyx_L3_error)
      /* Any slot still empty after keyword parsing is a missing argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__reduce__", 1, 1, 1, i); __PYX_ERR(0, 1148, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1148, __pyx_L3_error)
    }
    __pyx_v_self = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  /* Wrong number of positional arguments: raise TypeError. */
  __Pyx_RaiseArgtupleInvalid("__reduce__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1148, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument refs already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvmlError.__reduce__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the actual __reduce__ implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9NvmlError_2__reduce__(__pyx_self, __pyx_v_self);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvmlError.__reduce__ (cuda/bindings/_nvml.pyx:1148-1149):
 *     return (type(self), (self.status,))
 * Supports pickling: the returned pair tells pickle to rebuild the exception
 * by calling its concrete type with the saved status.  Returns NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9NvmlError_2__reduce__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce__", 0);

  /* "cuda/bindings/_nvml.pyx":1149
 * 
 *     def __reduce__(self):
 *         return (type(self), (self.status,))             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* Build the inner 1-tuple (self.status,). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_mstate_global->__pyx_n_u_status); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_1);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 1149, __pyx_L1_error);
  __pyx_t_1 = 0;
  /* Build the outer 2-tuple (type(self), args_tuple). */
  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_self)));
  __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_self)));
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(__pyx_v_self))) != (0)) __PYX_ERR(0, 1149, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_2);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 1149, __pyx_L1_error);
  __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1148
 *         super(NvmlError, self).__init__(s)
 * 
 *     def __reduce__(self):             # <<<<<<<<<<<<<<
 *         return (type(self), (self.status,))
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record a traceback entry. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvmlError.__reduce__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1245
 *     pass
 * 
 * cdef object _nvml_error_factory(int status):             # <<<<<<<<<<<<<<
 *     cdef object pystatus = status
 *     if status == 1:
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__nvml_error_factory(int __pyx_v_status) {
  PyObject *__pyx_v_pystatus = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_nvml_error_factory", 0);

  /* "cuda/bindings/_nvml.pyx":1246
 * 
 * cdef object _nvml_error_factory(int status):
 *     cdef object pystatus = status             # <<<<<<<<<<<<<<
 *     if status == 1:
 *         return UninitializedError(pystatus)
*/
  __pyx_t_1 = __Pyx_PyLong_From_int(__pyx_v_status); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_pystatus = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":1247
 * cdef object _nvml_error_factory(int status):
 *     cdef object pystatus = status
 *     if status == 1:             # <<<<<<<<<<<<<<
 *         return UninitializedError(pystatus)
 *     elif status == 2:
*/
  switch (__pyx_v_status) {
    case 1:

    /* "cuda/bindings/_nvml.pyx":1248
 *     cdef object pystatus = status
 *     if status == 1:
 *         return UninitializedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 2:
 *         return InvalidArgumentError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_UninitializedError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1248, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1247
 * cdef object _nvml_error_factory(int status):
 *     cdef object pystatus = status
 *     if status == 1:             # <<<<<<<<<<<<<<
 *         return UninitializedError(pystatus)
 *     elif status == 2:
*/
    break;
    case 2:

    /* "cuda/bindings/_nvml.pyx":1250
 *         return UninitializedError(pystatus)
 *     elif status == 2:
 *         return InvalidArgumentError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 3:
 *         return NotSupportedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_InvalidArgumentError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1250, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1250, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1249
 *     if status == 1:
 *         return UninitializedError(pystatus)
 *     elif status == 2:             # <<<<<<<<<<<<<<
 *         return InvalidArgumentError(pystatus)
 *     elif status == 3:
*/
    break;
    case 3:

    /* "cuda/bindings/_nvml.pyx":1252
 *         return InvalidArgumentError(pystatus)
 *     elif status == 3:
 *         return NotSupportedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 4:
 *         return NoPermissionError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_NotSupportedError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1252, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1252, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1251
 *     elif status == 2:
 *         return InvalidArgumentError(pystatus)
 *     elif status == 3:             # <<<<<<<<<<<<<<
 *         return NotSupportedError(pystatus)
 *     elif status == 4:
*/
    break;
    case 4:

    /* "cuda/bindings/_nvml.pyx":1254
 *         return NotSupportedError(pystatus)
 *     elif status == 4:
 *         return NoPermissionError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 5:
 *         return AlreadyInitializedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_NoPermissionError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1254, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1254, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1253
 *     elif status == 3:
 *         return NotSupportedError(pystatus)
 *     elif status == 4:             # <<<<<<<<<<<<<<
 *         return NoPermissionError(pystatus)
 *     elif status == 5:
*/
    break;
    case 5:

    /* "cuda/bindings/_nvml.pyx":1256
 *         return NoPermissionError(pystatus)
 *     elif status == 5:
 *         return AlreadyInitializedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 6:
 *         return NotFoundError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_AlreadyInitializedError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1256, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1256, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1255
 *     elif status == 4:
 *         return NoPermissionError(pystatus)
 *     elif status == 5:             # <<<<<<<<<<<<<<
 *         return AlreadyInitializedError(pystatus)
 *     elif status == 6:
*/
    break;
    case 6:

    /* "cuda/bindings/_nvml.pyx":1258
 *         return AlreadyInitializedError(pystatus)
 *     elif status == 6:
 *         return NotFoundError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 7:
 *         return InsufficientSizeError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_NotFoundError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1258, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1258, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1257
 *     elif status == 5:
 *         return AlreadyInitializedError(pystatus)
 *     elif status == 6:             # <<<<<<<<<<<<<<
 *         return NotFoundError(pystatus)
 *     elif status == 7:
*/
    break;
    case 7:

    /* "cuda/bindings/_nvml.pyx":1260
 *         return NotFoundError(pystatus)
 *     elif status == 7:
 *         return InsufficientSizeError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 8:
 *         return InsufficientPowerError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_InsufficientSizeError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1260, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1260, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1259
 *     elif status == 6:
 *         return NotFoundError(pystatus)
 *     elif status == 7:             # <<<<<<<<<<<<<<
 *         return InsufficientSizeError(pystatus)
 *     elif status == 8:
*/
    break;
    case 8:

    /* "cuda/bindings/_nvml.pyx":1262
 *         return InsufficientSizeError(pystatus)
 *     elif status == 8:
 *         return InsufficientPowerError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 9:
 *         return DriverNotLoadedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_InsufficientPowerError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1262, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1262, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1261
 *     elif status == 7:
 *         return InsufficientSizeError(pystatus)
 *     elif status == 8:             # <<<<<<<<<<<<<<
 *         return InsufficientPowerError(pystatus)
 *     elif status == 9:
*/
    break;
    case 9:

    /* "cuda/bindings/_nvml.pyx":1264
 *         return InsufficientPowerError(pystatus)
 *     elif status == 9:
 *         return DriverNotLoadedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 10:
 *         return TimeoutError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_DriverNotLoadedError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1264, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1264, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1263
 *     elif status == 8:
 *         return InsufficientPowerError(pystatus)
 *     elif status == 9:             # <<<<<<<<<<<<<<
 *         return DriverNotLoadedError(pystatus)
 *     elif status == 10:
*/
    break;
    case 10:

    /* "cuda/bindings/_nvml.pyx":1266
 *         return DriverNotLoadedError(pystatus)
 *     elif status == 10:
 *         return TimeoutError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 11:
 *         return IrqIssueError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_TimeoutError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1266, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1266, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1265
 *     elif status == 9:
 *         return DriverNotLoadedError(pystatus)
 *     elif status == 10:             # <<<<<<<<<<<<<<
 *         return TimeoutError(pystatus)
 *     elif status == 11:
*/
    break;
    case 11:

    /* "cuda/bindings/_nvml.pyx":1268
 *         return TimeoutError(pystatus)
 *     elif status == 11:
 *         return IrqIssueError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 12:
 *         return LibraryNotFoundError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_IrqIssueError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1268, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1268, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1267
 *     elif status == 10:
 *         return TimeoutError(pystatus)
 *     elif status == 11:             # <<<<<<<<<<<<<<
 *         return IrqIssueError(pystatus)
 *     elif status == 12:
*/
    break;
    case 12:

    /* "cuda/bindings/_nvml.pyx":1270
 *         return IrqIssueError(pystatus)
 *     elif status == 12:
 *         return LibraryNotFoundError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 13:
 *         return FunctionNotFoundError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_LibraryNotFoundError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1270, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1270, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1269
 *     elif status == 11:
 *         return IrqIssueError(pystatus)
 *     elif status == 12:             # <<<<<<<<<<<<<<
 *         return LibraryNotFoundError(pystatus)
 *     elif status == 13:
*/
    break;
    case 13:

    /* "cuda/bindings/_nvml.pyx":1272
 *         return LibraryNotFoundError(pystatus)
 *     elif status == 13:
 *         return FunctionNotFoundError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 14:
 *         return CorruptedInforomError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_FunctionNotFoundError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1272, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1272, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1271
 *     elif status == 12:
 *         return LibraryNotFoundError(pystatus)
 *     elif status == 13:             # <<<<<<<<<<<<<<
 *         return FunctionNotFoundError(pystatus)
 *     elif status == 14:
*/
    break;
    case 14:

    /* "cuda/bindings/_nvml.pyx":1274
 *         return FunctionNotFoundError(pystatus)
 *     elif status == 14:
 *         return CorruptedInforomError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 15:
 *         return GpuIsLostError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_CorruptedInforomError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1274, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1274, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1273
 *     elif status == 13:
 *         return FunctionNotFoundError(pystatus)
 *     elif status == 14:             # <<<<<<<<<<<<<<
 *         return CorruptedInforomError(pystatus)
 *     elif status == 15:
*/
    break;
    case 15:

    /* "cuda/bindings/_nvml.pyx":1276
 *         return CorruptedInforomError(pystatus)
 *     elif status == 15:
 *         return GpuIsLostError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 16:
 *         return ResetRequiredError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_GpuIsLostError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1276, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1276, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1275
 *     elif status == 14:
 *         return CorruptedInforomError(pystatus)
 *     elif status == 15:             # <<<<<<<<<<<<<<
 *         return GpuIsLostError(pystatus)
 *     elif status == 16:
*/
    break;
    case 16:

    /* "cuda/bindings/_nvml.pyx":1278
 *         return GpuIsLostError(pystatus)
 *     elif status == 16:
 *         return ResetRequiredError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 17:
 *         return OperatingSystemError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_ResetRequiredError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1278, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1278, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1277
 *     elif status == 15:
 *         return GpuIsLostError(pystatus)
 *     elif status == 16:             # <<<<<<<<<<<<<<
 *         return ResetRequiredError(pystatus)
 *     elif status == 17:
*/
    break;
    case 17:

    /* "cuda/bindings/_nvml.pyx":1280
 *         return ResetRequiredError(pystatus)
 *     elif status == 17:
 *         return OperatingSystemError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 18:
 *         return LibRmVersionMismatchError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_OperatingSystemError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1280, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1280, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1279
 *     elif status == 16:
 *         return ResetRequiredError(pystatus)
 *     elif status == 17:             # <<<<<<<<<<<<<<
 *         return OperatingSystemError(pystatus)
 *     elif status == 18:
*/
    break;
    case 18:

    /* "cuda/bindings/_nvml.pyx":1282
 *         return OperatingSystemError(pystatus)
 *     elif status == 18:
 *         return LibRmVersionMismatchError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 19:
 *         return InUseError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_LibRmVersionMismatchError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1282, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1282, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1281
 *     elif status == 17:
 *         return OperatingSystemError(pystatus)
 *     elif status == 18:             # <<<<<<<<<<<<<<
 *         return LibRmVersionMismatchError(pystatus)
 *     elif status == 19:
*/
    break;
    case 19:

    /* "cuda/bindings/_nvml.pyx":1284
 *         return LibRmVersionMismatchError(pystatus)
 *     elif status == 19:
 *         return InUseError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 20:
 *         return MemoryError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_InUseError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1284, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1284, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1283
 *     elif status == 18:
 *         return LibRmVersionMismatchError(pystatus)
 *     elif status == 19:             # <<<<<<<<<<<<<<
 *         return InUseError(pystatus)
 *     elif status == 20:
*/
    break;
    case 20:

    /* "cuda/bindings/_nvml.pyx":1286
 *         return InUseError(pystatus)
 *     elif status == 20:
 *         return MemoryError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 21:
 *         return NoDataError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1286, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1286, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1285
 *     elif status == 19:
 *         return InUseError(pystatus)
 *     elif status == 20:             # <<<<<<<<<<<<<<
 *         return MemoryError(pystatus)
 *     elif status == 21:
*/
    break;
    case 21:

    /* "cuda/bindings/_nvml.pyx":1288
 *         return MemoryError(pystatus)
 *     elif status == 21:
 *         return NoDataError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 22:
 *         return VgpuEccNotSupportedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_NoDataError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1288, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1288, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1287
 *     elif status == 20:
 *         return MemoryError(pystatus)
 *     elif status == 21:             # <<<<<<<<<<<<<<
 *         return NoDataError(pystatus)
 *     elif status == 22:
*/
    break;
    case 22:

    /* "cuda/bindings/_nvml.pyx":1290
 *         return NoDataError(pystatus)
 *     elif status == 22:
 *         return VgpuEccNotSupportedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 23:
 *         return InsufficientResourcesError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_VgpuEccNotSupportedError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1290, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1290, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1289
 *     elif status == 21:
 *         return NoDataError(pystatus)
 *     elif status == 22:             # <<<<<<<<<<<<<<
 *         return VgpuEccNotSupportedError(pystatus)
 *     elif status == 23:
*/
    break;
    case 23:

    /* "cuda/bindings/_nvml.pyx":1292
 *         return VgpuEccNotSupportedError(pystatus)
 *     elif status == 23:
 *         return InsufficientResourcesError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 24:
 *         return FreqNotSupportedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_InsufficientResourcesError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1292, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1292, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1291
 *     elif status == 22:
 *         return VgpuEccNotSupportedError(pystatus)
 *     elif status == 23:             # <<<<<<<<<<<<<<
 *         return InsufficientResourcesError(pystatus)
 *     elif status == 24:
*/
    break;
    case 24:

    /* "cuda/bindings/_nvml.pyx":1294
 *         return InsufficientResourcesError(pystatus)
 *     elif status == 24:
 *         return FreqNotSupportedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 25:
 *         return ArgumentVersionMismatchError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_FreqNotSupportedError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1294, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1294, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1293
 *     elif status == 23:
 *         return InsufficientResourcesError(pystatus)
 *     elif status == 24:             # <<<<<<<<<<<<<<
 *         return FreqNotSupportedError(pystatus)
 *     elif status == 25:
*/
    break;
    case 25:

    /* "cuda/bindings/_nvml.pyx":1296
 *         return FreqNotSupportedError(pystatus)
 *     elif status == 25:
 *         return ArgumentVersionMismatchError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 26:
 *         return DeprecatedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ArgumentVersionMismatchError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1296, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1296, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1295
 *     elif status == 24:
 *         return FreqNotSupportedError(pystatus)
 *     elif status == 25:             # <<<<<<<<<<<<<<
 *         return ArgumentVersionMismatchError(pystatus)
 *     elif status == 26:
*/
    break;
    case 26:

    /* "cuda/bindings/_nvml.pyx":1298
 *         return ArgumentVersionMismatchError(pystatus)
 *     elif status == 26:
 *         return DeprecatedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 27:
 *         return NotReadyError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_DeprecatedError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1298, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1298, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1297
 *     elif status == 25:
 *         return ArgumentVersionMismatchError(pystatus)
 *     elif status == 26:             # <<<<<<<<<<<<<<
 *         return DeprecatedError(pystatus)
 *     elif status == 27:
*/
    break;
    case 27:

    /* "cuda/bindings/_nvml.pyx":1300
 *         return DeprecatedError(pystatus)
 *     elif status == 27:
 *         return NotReadyError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 28:
 *         return GpuNotFoundError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_NotReadyError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1300, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1300, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1299
 *     elif status == 26:
 *         return DeprecatedError(pystatus)
 *     elif status == 27:             # <<<<<<<<<<<<<<
 *         return NotReadyError(pystatus)
 *     elif status == 28:
*/
    break;
    case 28:

    /* "cuda/bindings/_nvml.pyx":1302
 *         return NotReadyError(pystatus)
 *     elif status == 28:
 *         return GpuNotFoundError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 29:
 *         return InvalidStateError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_GpuNotFoundError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1302, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1301
 *     elif status == 27:
 *         return NotReadyError(pystatus)
 *     elif status == 28:             # <<<<<<<<<<<<<<
 *         return GpuNotFoundError(pystatus)
 *     elif status == 29:
*/
    break;
    case 29:

    /* "cuda/bindings/_nvml.pyx":1304
 *         return GpuNotFoundError(pystatus)
 *     elif status == 29:
 *         return InvalidStateError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 30:
 *         return ResetTypeNotSupportedError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_InvalidStateError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1304, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1304, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1303
 *     elif status == 28:
 *         return GpuNotFoundError(pystatus)
 *     elif status == 29:             # <<<<<<<<<<<<<<
 *         return InvalidStateError(pystatus)
 *     elif status == 30:
*/
    break;
    case 30:

    /* "cuda/bindings/_nvml.pyx":1306
 *         return InvalidStateError(pystatus)
 *     elif status == 30:
 *         return ResetTypeNotSupportedError(pystatus)             # <<<<<<<<<<<<<<
 *     elif status == 999:
 *         return UnknownError(pystatus)
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_ResetTypeNotSupportedError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1306, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1306, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1305
 *     elif status == 29:
 *         return InvalidStateError(pystatus)
 *     elif status == 30:             # <<<<<<<<<<<<<<
 *         return ResetTypeNotSupportedError(pystatus)
 *     elif status == 999:
*/
    break;
    case 0x3E7:

    /* "cuda/bindings/_nvml.pyx":1308
 *         return ResetTypeNotSupportedError(pystatus)
 *     elif status == 999:
 *         return UnknownError(pystatus)             # <<<<<<<<<<<<<<
 *     return NvmlError(status)
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_UnknownError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1308, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_3))) {
      __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
      assert(__pyx_t_2);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_3, __pyx__function);
      __pyx_t_4 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pystatus};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1308, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1307
 *     elif status == 30:
 *         return ResetTypeNotSupportedError(pystatus)
 *     elif status == 999:             # <<<<<<<<<<<<<<
 *         return UnknownError(pystatus)
 *     return NvmlError(status)
*/
    break;
    default: break;
  }

  /* "cuda/bindings/_nvml.pyx":1309
 *     elif status == 999:
 *         return UnknownError(pystatus)
 *     return NvmlError(status)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_status); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1309, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1245
 *     pass
 * 
 * cdef object _nvml_error_factory(int status):             # <<<<<<<<<<<<<<
 *     cdef object pystatus = status
 *     if status == 1:
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._nvml_error_factory", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_pystatus);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1313
 * 
 * 
 * @cython.profile(False)             # <<<<<<<<<<<<<<
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:
*/

/* Forward declaration of the Python-level wrapper for check_status (defined below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_1check_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of `cpdef int check_status(int status) except 1 nogil`
 * (cuda/bindings/_nvml.pyx:1313).
 *
 * Callable without holding the GIL.  If `status` is non-zero it acquires
 * the GIL, builds an exception object via _nvml_error_factory(status) and
 * raises it; the `except 1` convention means a return value of 1 signals
 * "a Python exception is pending" to C-level callers.  On success
 * (status == 0) it returns 0.
 *
 * __pyx_skip_dispatch is unused here: check_status is a plain cpdef
 * function, not an overridable method, so no Python-level dispatch check
 * is needed. */
static int __pyx_f_4cuda_8bindings_5_nvml_check_status(int __pyx_v_status, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;
  __Pyx_RefNannySetupContext("check_status", 1);

  /* "cuda/bindings/_nvml.pyx":1315
 * @cython.profile(False)
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:             # <<<<<<<<<<<<<<
 *         with gil:
 *             raise _nvml_error_factory(status)
 */
  __pyx_t_1 = (__pyx_v_status != 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1316
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:
 *         with gil:             # <<<<<<<<<<<<<<
 *             raise _nvml_error_factory(status)
 *     return status != 0
 */
    {
        /* `with gil:` block — the GIL must be held before touching any
         * Python objects (building and raising the exception below). */
        PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
        /*try:*/ {

          /* "cuda/bindings/_nvml.pyx":1317
 *     if status != 0:
 *         with gil:
 *             raise _nvml_error_factory(status)             # <<<<<<<<<<<<<<
 *     return status != 0
 * 
 */
          __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml__nvml_error_factory(__pyx_v_status); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1317, __pyx_L5_error)
          __Pyx_GOTREF(__pyx_t_2);
          __Pyx_Raise(__pyx_t_2, 0, 0, 0);
          __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
          __PYX_ERR(0, 1317, __pyx_L5_error)
        }

        /* "cuda/bindings/_nvml.pyx":1316
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:
 *         with gil:             # <<<<<<<<<<<<<<
 *             raise _nvml_error_factory(status)
 *     return status != 0
 */
        /* `finally` of the `with gil:` block: release the GIL taken above
         * before propagating the (always-raised) exception outward. */
        /*finally:*/ {
          __pyx_L5_error: {
            __Pyx_PyGILState_Release(__pyx_gilstate_save);
            goto __pyx_L1_error;
          }
        }
    }

    /* "cuda/bindings/_nvml.pyx":1315
 * @cython.profile(False)
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:             # <<<<<<<<<<<<<<
 *         with gil:
 *             raise _nvml_error_factory(status)
 */
  }

  /* "cuda/bindings/_nvml.pyx":1318
 *         with gil:
 *             raise _nvml_error_factory(status)
 *     return status != 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Only reachable when status == 0, so this always returns 0 (success). */
  __pyx_r = (__pyx_v_status != 0);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1313
 * 
 * 
 * @cython.profile(False)             # <<<<<<<<<<<<<<
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path of a nogil function: re-acquire the GIL to record the
   * traceback, then release it again.  Return 1 = `except 1` sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.check_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 1;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  /* Nogil variant of the refnanny teardown; the macro handles any GIL
   * acquisition it needs internally. */
  __Pyx_RefNannyFinishContextNogil()
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_1check_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_check_status, "check_status(int status) -> int");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_1check_status = {"check_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_1check_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_check_status};
/* Python-callable entry point for check_status.
 *
 * Parses exactly one argument, `status` (positional or keyword), converts
 * it to a C int, and delegates to __pyx_pf_..._check_status.  Raises
 * TypeError via __Pyx_RaiseArgtupleInvalid on wrong arity; OverflowError/
 * TypeError from __Pyx_PyLong_As_int on a bad value.  Borrowed/owned refs
 * collected in values[] are released on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_1check_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  int __pyx_v_status;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("check_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_status,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1313, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: slot positional args first, then fill the
       * remainder from keywords and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1313, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "check_status", 0) < (0)) __PYX_ERR(0, 1313, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("check_status", 1, 1, 1, i); __PYX_ERR(0, 1313, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1313, __pyx_L3_error)
    }
    __pyx_v_status = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_status == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1314, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("check_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1313, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.check_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_check_status(__pyx_self, __pyx_v_status);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body for check_status: invokes the C-level implementation
 * with __pyx_skip_dispatch=1 (call came from Python, so no re-dispatch) and
 * boxes the resulting C int into a Python int.  A C return value of 1 means
 * a Python exception is already set (`except 1` convention), which is
 * propagated as NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_check_status(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_status) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("check_status", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v_status, 1); if (unlikely(__pyx_t_1 == ((int)1))) __PYX_ERR(0, 1313, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.check_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1321
 * 
 * 
 * @cython.profile(False)             # <<<<<<<<<<<<<<
 * cpdef int check_status_size(int status) except 1 nogil:
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:
*/

/* Forward declaration of the Python-level wrapper for check_status_size
 * (defined below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_3check_status_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of `cpdef int check_status_size(int status) except 1 nogil`
 * (cuda/bindings/_nvml.pyx:1321).
 *
 * Size-query variant of check_status: NVML_ERROR_INSUFFICIENT_SIZE is
 * treated as success (returns 0) because it is the expected reply when
 * probing a required buffer size; every other status is forwarded to
 * check_status, which raises for non-zero values.  Returns 1 only via the
 * error path (`except 1` pending-exception sentinel). */
static int __pyx_f_4cuda_8bindings_5_nvml_check_status_size(int __pyx_v_status, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/_nvml.pyx":1323
 * @cython.profile(False)
 * cpdef int check_status_size(int status) except 1 nogil:
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:             # <<<<<<<<<<<<<<
 *         return 0
 *     return check_status(status)
 */
  __pyx_t_1 = (__pyx_v_status == NVML_ERROR_INSUFFICIENT_SIZE);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1324
 * cpdef int check_status_size(int status) except 1 nogil:
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:
 *         return 0             # <<<<<<<<<<<<<<
 *     return check_status(status)
 * 
 */
    __pyx_r = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1323
 * @cython.profile(False)
 * cpdef int check_status_size(int status) except 1 nogil:
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:             # <<<<<<<<<<<<<<
 *         return 0
 *     return check_status(status)
 */
  }

  /* "cuda/bindings/_nvml.pyx":1325
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:
 *         return 0
 *     return check_status(status)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Direct C-to-C call (skip_dispatch=0); a return of 1 means check_status
   * raised, so propagate through the error path below. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v_status, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 1325, __pyx_L1_error)
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1321
 * 
 * 
 * @cython.profile(False)             # <<<<<<<<<<<<<<
 * cpdef int check_status_size(int status) except 1 nogil:
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path of a nogil function: take the GIL just long enough to
   * record the traceback, then hand back the `except 1` sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings._nvml.check_status_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 1;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_3check_status_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_2check_status_size, "check_status_size(int status) -> int");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_3check_status_size = {"check_status_size", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_3check_status_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_2check_status_size};
/* Python-callable entry point for check_status_size.
 *
 * Structurally identical to the check_status wrapper above: parses exactly
 * one `status` argument (positional or keyword), converts it to a C int,
 * and delegates to __pyx_pf_..._2check_status_size.  values[] references
 * are released on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_3check_status_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  int __pyx_v_status;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("check_status_size (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_status,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1321, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: slot positional args first, then fill the
       * remainder from keywords and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1321, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "check_status_size", 0) < (0)) __PYX_ERR(0, 1321, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("check_status_size", 1, 1, 1, i); __PYX_ERR(0, 1321, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1321, __pyx_L3_error)
    }
    __pyx_v_status = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_status == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1322, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("check_status_size", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1321, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.check_status_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_2check_status_size(__pyx_self, __pyx_v_status);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body for check_status_size: invokes the C-level
 * implementation with __pyx_skip_dispatch=1 (call came from Python) and
 * boxes the resulting C int into a Python int.  A C return value of 1
 * means a Python exception is already set (`except 1` convention), which
 * is propagated as NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_2check_status_size(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_status) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("check_status_size", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v_status, 1); if (unlikely(__pyx_t_1 == ((int)1))) __PYX_ERR(0, 1321, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1321, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.check_status_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1333
 * 
 * 
 * cdef _get_pci_info_ext_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfoExt_v1_t pod = nvmlPciInfoExt_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_pci_info_ext_v1_dtype_offsets(void) {
  nvmlPciInfoExt_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlPciInfoExt_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  size_t __pyx_t_16;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_pci_info_ext_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":1334
 * 
 * cdef _get_pci_info_ext_v1_dtype_offsets():
 *     cdef nvmlPciInfoExt_v1_t pod = nvmlPciInfoExt_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'base_class', 'sub_class', 'bus_id'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":1335
 * cdef _get_pci_info_ext_v1_dtype_offsets():
 *     cdef nvmlPciInfoExt_v1_t pod = nvmlPciInfoExt_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'base_class', 'sub_class', 'bus_id'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1336
 *     cdef nvmlPciInfoExt_v1_t pod = nvmlPciInfoExt_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'base_class', 'sub_class', 'bus_id'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_domain);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_domain);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_domain) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bus);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bus);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_bus) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_device);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_device);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_device) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pci_device_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pci_device_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_pci_device_id) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pci_sub_system_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pci_sub_system_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_pci_sub_system_id) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_base_class);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_base_class);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_base_class) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sub_class);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sub_class);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_sub_class) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bus_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bus_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_bus_id) != (0)) __PYX_ERR(0, 1336, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 1336, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":1337
 *     return _numpy.dtype({
 *         'names': ['version', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'base_class', 'sub_class', 'bus_id'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 1337, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 1336, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":1339
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":1340
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
*/
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.domain)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 1340, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":1341
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bus)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":1342
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.device)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":1343
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.baseClass)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pciDeviceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 1343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":1344
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.baseClass)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.subClass)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pciSubSystemId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":1345
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.baseClass)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.subClass)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.baseClass)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":1346
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.baseClass)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.subClass)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.subClass)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1346, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":1347
 *             (<intptr_t>&(pod.baseClass)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.subClass)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlPciInfoExt_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.busId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":1338
 *         'names': ['version', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'base_class', 'sub_class', 'bus_id'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_15) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_14) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_13) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_12) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_11) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_10) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_9) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_8) != (0)) __PYX_ERR(0, 1338, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 1336, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":1349
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlPciInfoExt_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlPciInfoExt_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1349, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 1336, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_16 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_16 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_16, (2-__pyx_t_16) | (__pyx_t_16*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1335, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1333
 * 
 * 
 * cdef _get_pci_info_ext_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfoExt_v1_t pod = nvmlPciInfoExt_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_pci_info_ext_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1366
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPciInfoExt_v1_t *>calloc(1, sizeof(nvmlPciInfoExt_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Entry point for Python-level `PciInfoExt_v1()` construction.
 * The .pyx __init__ takes only `self`, so any positional or keyword
 * argument is rejected with a TypeError before delegating to the typed
 * implementation. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-SAFE_SIZE branch also propagates a
   * possible error return from PyTuple_Size. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Zero positional and zero keyword arguments are accepted. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfoExt_v1.__init__ implementation: allocate zero-initialized storage
 * for one nvmlPciInfoExt_v1_t that this wrapper object owns, raising
 * MemoryError if allocation fails, then initialize the ownership flags
 * (_owner=None, _owned=True, _readonly=False).
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":1367
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlPciInfoExt_v1_t *>calloc(1, sizeof(nvmlPciInfoExt_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PciInfoExt_v1")
 */
  /* calloc (not malloc) so the POD struct starts out fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlPciInfoExt_v1_t *)calloc(1, (sizeof(nvmlPciInfoExt_v1_t))));

  /* "cuda/bindings/_nvml.pyx":1368
 *     def __init__(self):
 *         self._ptr = <nvmlPciInfoExt_v1_t *>calloc(1, sizeof(nvmlPciInfoExt_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PciInfoExt_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1369
 *         self._ptr = <nvmlPciInfoExt_v1_t *>calloc(1, sizeof(nvmlPciInfoExt_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PciInfoExt_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError per Cython name-resolution rules (module global
     * falling back to the builtin), then call it with the message via the
     * vectorcall helper, unpacking a bound method if one was found. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1369, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PciInfoExt_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1369, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    /* Raise the constructed exception instance and jump to cleanup. */
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1369, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1368
 *     def __init__(self):
 *         self._ptr = <nvmlPciInfoExt_v1_t *>calloc(1, sizeof(nvmlPciInfoExt_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PciInfoExt_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":1370
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PciInfoExt_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No owner object: this instance holds the allocation itself. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":1371
 *             raise MemoryError("Error allocating PciInfoExt_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned=True makes __dealloc__ free() the pointer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":1372
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":1366
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPciInfoExt_v1_t *>calloc(1, sizeof(nvmlPciInfoExt_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1374
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPciInfoExt_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc entry point: forwards to the typed __dealloc__ implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is invoked on identifiers
 * (__pyx_args/__pyx_nargs) that are not declared in this function; this
 * presumably compiles only because the macro discards its arguments in
 * this configuration — confirm against the Cython utility-code macros. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  nvmlPciInfoExt_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlPciInfoExt_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":1376
 *     def __dealloc__(self):
 *         cdef nvmlPciInfoExt_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1377
 *         cdef nvmlPciInfoExt_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":1378
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":1379
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":1376
 *     def __dealloc__(self):
 *         cdef nvmlPciInfoExt_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":1374
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPciInfoExt_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":1381
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PciInfoExt_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr entry point: casts self to the extension-type struct and
 * delegates to the typed __repr__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfoExt_v1.__repr__ implementation: builds the unicode string
 * f"<{__name__}.PciInfoExt_v1 object at {hex(id(self))}>" by formatting
 * the module's __name__, computing hex(id(self)), and joining five pieces
 * (two interpolations and three literal fragments) in one pass.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":1382
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.PciInfoExt_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module __name__ with an empty format spec (str()-like). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)), then str() of the result for interpolation. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal fragments and the two formatted values; the length and
   * max-char arguments are precomputed hints for the join helper. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_PciInfoExt_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1381
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PciInfoExt_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1384
 *         return f"<{__name__}.PciInfoExt_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter entry point for the read-only `ptr` property: delegates to the
 * typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property getter: box the wrapped struct's address as a Python int.
 * NOTE(review): the address is funneled through PyLong_FromSsize_t, which
 * assumes intptr_t and Py_ssize_t have the same width — true on common
 * platforms, but worth confirming for exotic targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1387
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1387, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1384
 *         return f"<{__name__}.PciInfoExt_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1389
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level `_get_ptr` helper: expose the wrapped struct's address as an
 * integer, mirroring the Python-visible `ptr` property without boxing.
 * No Python objects are touched, so no refcounting is needed here. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13PciInfoExt_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":1392
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int entry point: delegates int(self) to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfoExt_v1.__int__ implementation: int(self) yields the wrapped
 * struct's address, identical to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":1393
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the address; PyLong_FromSsize_t assumes intptr_t fits Py_ssize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1392
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1395
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PciInfoExt_v1 other_
 *         if not isinstance(other, PciInfoExt_v1):
*/

/* Python wrapper */
/* __eq__ entry point: forwards self and the comparand to the typed
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfoExt_v1.__eq__ implementation: two wrappers compare equal when the
 * full nvmlPciInfoExt_v1_t payloads they point at are byte-identical
 * (memcmp over sizeof the struct). Non-PciInfoExt_v1 comparands return
 * False (not NotImplemented, per the .pyx source). Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":1397
 *     def __eq__(self, other):
 *         cdef PciInfoExt_v1 other_
 *         if not isinstance(other, PciInfoExt_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance check: accepts PciInfoExt_v1 and subclasses. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":1398
 *         cdef PciInfoExt_v1 other_
 *         if not isinstance(other, PciInfoExt_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfoExt_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1397
 *     def __eq__(self, other):
 *         cdef PciInfoExt_v1 other_
 *         if not isinstance(other, PciInfoExt_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":1399
 *         if not isinstance(other, PciInfoExt_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfoExt_v1_t)) == 0)
 * 
 */
  /* Typed re-assignment; the TypeTest permits None as well, but None
   * cannot reach here because the isinstance branch above returned False. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1))))) __PYX_ERR(0, 1399, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":1400
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfoExt_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise payload comparison over the whole struct. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlPciInfoExt_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1395
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PciInfoExt_v1 other_
 *         if not isinstance(other, PciInfoExt_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1402
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfoExt_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript entry point for `self[key] = val`: forwards key and
 * value to the typed __setitem__ implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PciInfoExt_v1.__setitem__ (generated from
   _nvml.pyx:1402-1412).
   Behavior, per the interleaved .pyx source:
     - If key == 0 and val is a numpy.ndarray: malloc a fresh
       nvmlPciInfoExt_v1_t, memcpy the array's raw buffer
       (val.ctypes.data) into it, mark the instance as owning the
       buffer (_owned = True, _owner = None), and mirror the array's
       writeability into _readonly.
     - Otherwise: fall back to setattr(self, key, val).
   NOTE(review): on the key == 0 path, self->_ptr is overwritten
   without freeing any previously owned allocation — confirm the
   surrounding class (dealloc/callers, not visible here) guarantees
   _ptr is unset or unowned before this assignment. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":1403
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit "and": test key == 0 first, then isinstance. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 1403, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Look up numpy.ndarray dynamically from module globals each call. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 1403, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1404
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 */
    __pyx_v_self->_ptr = ((nvmlPciInfoExt_v1_t *)malloc((sizeof(nvmlPciInfoExt_v1_t))));

    /* "cuda/bindings/_nvml.pyx":1405
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfoExt_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":1406
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfoExt_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfoExt_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating PciInfoExt_v1")
         via Cython's vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1406, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PciInfoExt_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1406, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 1406, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":1405
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfoExt_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":1407
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfoExt_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int;
       convert to intptr_t and copy sizeof(nvmlPciInfoExt_v1_t) bytes.
       No size/dtype check is performed here — the ndarray is assumed
       large enough (enforced by the .pyx caller contract). */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1407, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1407, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1407, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlPciInfoExt_v1_t))));

    /* "cuda/bindings/_nvml.pyx":1408
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfoExt_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The data was copied, so no Python object keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":1409
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfoExt_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":1410
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* A non-writeable source array makes this struct read-only too. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1410, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1410, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 1410, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":1403
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":1412
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat key as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 1412, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":1402
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfoExt_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1414
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.version property getter:
   downcasts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for PciInfoExt_v1.version (_nvml.pyx:1414-1417): boxes the
   struct's `version` field (C unsigned int) into a Python int.
   NOTE(review): self->_ptr is dereferenced unchecked — assumes the
   instance was initialized with a valid pointer. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1417
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1414
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1419
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.version property setter:
   downcasts self and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for PciInfoExt_v1.version (_nvml.pyx:1419-1423): raises
   ValueError when the instance is flagged read-only, otherwise
   converts val to unsigned int and stores it in the struct field.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1421
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1422
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct and raise ValueError(...) via the vectorcall path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1422, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1422, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1421
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1423
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion raises OverflowError/TypeError for out-of-range or
     non-integer values; the sentinel (unsigned int)-1 plus
     PyErr_Occurred() disambiguates a legitimate -1 result. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1423, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1419
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1425
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def domain(self):
 *         """int: The PCI domain on which the device's bus resides, 0 to 0xffffffff."""
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.domain property getter:
   downcasts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for PciInfoExt_v1.domain (_nvml.pyx:1425-1428): boxes the
   struct's `domain` field (C unsigned int) into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1428
 *     def domain(self):
 *         """int: The PCI domain on which the device's bus resides, 0 to 0xffffffff."""
 *         return self._ptr[0].domain             # <<<<<<<<<<<<<<
 * 
 *     @domain.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).domain); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1425
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def domain(self):
 *         """int: The PCI domain on which the device's bus resides, 0 to 0xffffffff."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.domain.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1430
 *         return self._ptr[0].domain
 * 
 *     @domain.setter             # <<<<<<<<<<<<<<
 *     def domain(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.domain property setter:
   downcasts self and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for PciInfoExt_v1.domain (_nvml.pyx:1430-1434): raises
   ValueError when the instance is flagged read-only, otherwise
   converts val to unsigned int and stores it in the struct field.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1432
 *     @domain.setter
 *     def domain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].domain = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1433
 *     def domain(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].domain = val
 * 
 */
    /* Construct and raise ValueError(...) via the vectorcall path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1433, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1433, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1432
 *     @domain.setter
 *     def domain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].domain = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1434
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].domain = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion failure (overflow, wrong type) propagates as an error. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1434, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).domain = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1430
 *         return self._ptr[0].domain
 * 
 *     @domain.setter             # <<<<<<<<<<<<<<
 *     def domain(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.domain.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1436
 *         self._ptr[0].domain = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus(self):
 *         """int: The bus on which the device resides, 0 to 0xff."""
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.bus property getter:
   downcasts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for PciInfoExt_v1.bus (_nvml.pyx:1436-1439): boxes the
   struct's `bus` field (C unsigned int) into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1439
 *     def bus(self):
 *         """int: The bus on which the device resides, 0 to 0xff."""
 *         return self._ptr[0].bus             # <<<<<<<<<<<<<<
 * 
 *     @bus.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bus); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1439, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1436
 *         self._ptr[0].domain = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus(self):
 *         """int: The bus on which the device resides, 0 to 0xff."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.bus.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1441
 *         return self._ptr[0].bus
 * 
 *     @bus.setter             # <<<<<<<<<<<<<<
 *     def bus(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.bus property setter:
   downcasts self and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for PciInfoExt_v1.bus (_nvml.pyx:1441-1445): raises
   ValueError when the instance is flagged read-only, otherwise
   converts val to unsigned int and stores it in the struct field.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1443
 *     @bus.setter
 *     def bus(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].bus = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1444
 *     def bus(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bus = val
 * 
 */
    /* Construct and raise ValueError(...) via the vectorcall path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1444, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1444, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1443
 *     @bus.setter
 *     def bus(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].bus = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1445
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].bus = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion failure (overflow, wrong type) propagates as an error. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1445, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bus = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1441
 *         return self._ptr[0].bus
 * 
 *     @bus.setter             # <<<<<<<<<<<<<<
 *     def bus(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.bus.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1447
 *         self._ptr[0].bus = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: The device's id on the bus, 0 to 31."""
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.device_ property getter
   (trailing underscore in the Python name avoids clashing with the
   struct field name `device` in the .pyx source): downcasts self and
   delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for PciInfoExt_v1.device_ (_nvml.pyx:1447-1450): boxes the
   struct's `device` field (C unsigned int) into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1450
 *     def device_(self):
 *         """int: The device's id on the bus, 0 to 31."""
 *         return self._ptr[0].device             # <<<<<<<<<<<<<<
 * 
 *     @device_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).device); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1447
 *         self._ptr[0].bus = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: The device's id on the bus, 0 to 31."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.device_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1452
 *         return self._ptr[0].device
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python wrapper for the PciInfoExt_v1.device_ property setter:
   downcasts self and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1454
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].device = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1455
 *     def device_(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].device = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1455, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1455, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1454
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].device = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":1456
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].device = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1456, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).device = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1452
 *         return self._ptr[0].device
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.device_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for PciInfoExt_v1.pci_device_id: returns the
 * pciDeviceId field of the wrapped nvmlPciInfoExt_v1 struct as a Python int.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1458
 *         self._ptr[0].device = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_device_id(self):
 *         """int: The combined 16-bit device id and 16-bit vendor id."""
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1461
 *     def pci_device_id(self):
 *         """int: The combined 16-bit device id and 16-bit vendor id."""
 *         return self._ptr[0].pciDeviceId             # <<<<<<<<<<<<<<
 * 
 *     @pci_device_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).pciDeviceId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1458
 *         self._ptr[0].device = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_device_id(self):
 *         """int: The combined 16-bit device id and 16-bit vendor id."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.pci_device_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated setter for PciInfoExt_v1.pci_device_id: raises ValueError
 * when the instance is read-only, otherwise converts the Python value to
 * unsigned int and stores it in self->_ptr[0].pciDeviceId.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1463
 *         return self._ptr[0].pciDeviceId
 * 
 *     @pci_device_id.setter             # <<<<<<<<<<<<<<
 *     def pci_device_id(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1465
 *     @pci_device_id.setter
 *     def pci_device_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].pciDeviceId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1466
 *     def pci_device_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].pciDeviceId = val
 * 
 */
    /* Raise ValueError via the vectorcall fast path (see __Pyx_PyObject_FastCall). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1466, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1466, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1465
 *     @pci_device_id.setter
 *     def pci_device_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].pciDeviceId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1467
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].pciDeviceId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1467, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).pciDeviceId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1463
 *         return self._ptr[0].pciDeviceId
 * 
 *     @pci_device_id.setter             # <<<<<<<<<<<<<<
 *     def pci_device_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.pci_device_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for PciInfoExt_v1.pci_sub_system_id: returns the
 * pciSubSystemId field of the wrapped nvmlPciInfoExt_v1 struct as a Python int.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1469
 *         self._ptr[0].pciDeviceId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self):
 *         """int: The 32-bit Sub System Device ID."""
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1472
 *     def pci_sub_system_id(self):
 *         """int: The 32-bit Sub System Device ID."""
 *         return self._ptr[0].pciSubSystemId             # <<<<<<<<<<<<<<
 * 
 *     @pci_sub_system_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).pciSubSystemId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1469
 *         self._ptr[0].pciDeviceId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self):
 *         """int: The 32-bit Sub System Device ID."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.pci_sub_system_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated setter for PciInfoExt_v1.pci_sub_system_id: raises
 * ValueError when the instance is read-only, otherwise converts the Python
 * value to unsigned int and stores it in self->_ptr[0].pciSubSystemId.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1474
 *         return self._ptr[0].pciSubSystemId
 * 
 *     @pci_sub_system_id.setter             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1476
 *     @pci_sub_system_id.setter
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].pciSubSystemId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1477
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].pciSubSystemId = val
 * 
 */
    /* Raise ValueError via the vectorcall fast path (see __Pyx_PyObject_FastCall). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1477, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1477, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1476
 *     @pci_sub_system_id.setter
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].pciSubSystemId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1478
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].pciSubSystemId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1478, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).pciSubSystemId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1474
 *         return self._ptr[0].pciSubSystemId
 * 
 *     @pci_sub_system_id.setter             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.pci_sub_system_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for PciInfoExt_v1.base_class: returns the baseClass
 * field of the wrapped nvmlPciInfoExt_v1 struct as a Python int.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1480
 *         self._ptr[0].pciSubSystemId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def base_class(self):
 *         """int: The 8-bit PCI base class code."""
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1483
 *     def base_class(self):
 *         """int: The 8-bit PCI base class code."""
 *         return self._ptr[0].baseClass             # <<<<<<<<<<<<<<
 * 
 *     @base_class.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).baseClass); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1480
 *         self._ptr[0].pciSubSystemId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def base_class(self):
 *         """int: The 8-bit PCI base class code."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.base_class.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated setter for PciInfoExt_v1.base_class: raises ValueError
 * when the instance is read-only, otherwise converts the Python value to
 * unsigned int and stores it in self->_ptr[0].baseClass.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1485
 *         return self._ptr[0].baseClass
 * 
 *     @base_class.setter             # <<<<<<<<<<<<<<
 *     def base_class(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1487
 *     @base_class.setter
 *     def base_class(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].baseClass = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1488
 *     def base_class(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].baseClass = val
 * 
 */
    /* Raise ValueError via the vectorcall fast path (see __Pyx_PyObject_FastCall). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1488, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1488, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1487
 *     @base_class.setter
 *     def base_class(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].baseClass = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1489
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].baseClass = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1489, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).baseClass = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1485
 *         return self._ptr[0].baseClass
 * 
 *     @base_class.setter             # <<<<<<<<<<<<<<
 *     def base_class(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.base_class.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for PciInfoExt_v1.sub_class: returns the subClass
 * field of the wrapped nvmlPciInfoExt_v1 struct as a Python int.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1491
 *         self._ptr[0].baseClass = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sub_class(self):
 *         """int: The 8-bit PCI sub class code."""
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1494
 *     def sub_class(self):
 *         """int: The 8-bit PCI sub class code."""
 *         return self._ptr[0].subClass             # <<<<<<<<<<<<<<
 * 
 *     @sub_class.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).subClass); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1491
 *         self._ptr[0].baseClass = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sub_class(self):
 *         """int: The 8-bit PCI sub class code."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.sub_class.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated setter for PciInfoExt_v1.sub_class: raises ValueError
 * when the instance is read-only, otherwise converts the Python value to
 * unsigned int and stores it in self->_ptr[0].subClass.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1496
 *         return self._ptr[0].subClass
 * 
 *     @sub_class.setter             # <<<<<<<<<<<<<<
 *     def sub_class(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1498
 *     @sub_class.setter
 *     def sub_class(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].subClass = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1499
 *     def sub_class(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].subClass = val
 * 
 */
    /* Raise ValueError via the vectorcall fast path (see __Pyx_PyObject_FastCall). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1499, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1499, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1498
 *     @sub_class.setter
 *     def sub_class(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].subClass = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1500
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         self._ptr[0].subClass = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1500, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).subClass = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1496
 *         return self._ptr[0].subClass
 * 
 *     @sub_class.setter             # <<<<<<<<<<<<<<
 *     def sub_class(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.sub_class.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for PciInfoExt_v1.bus_id: decodes the NUL-terminated
 * busId char array of the wrapped nvmlPciInfoExt_v1 struct into a Python str
 * via PyUnicode_FromString.
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1502
 *         self._ptr[0].subClass = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus_id(self):
 *         """~_numpy.int8: (array of length 32).The tuple domain:bus:device.function PCI identifier (& NULL terminator)"""
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1505
 *     def bus_id(self):
 *         """~_numpy.int8: (array of length 32).The tuple domain:bus:device.function PCI identifier (& NULL terminator)"""
 *         return cpython.PyUnicode_FromString(self._ptr[0].busId)             # <<<<<<<<<<<<<<
 * 
 *     @bus_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).busId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1505, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1502
 *         self._ptr[0].subClass = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus_id(self):
 *         """~_numpy.int8: (array of length 32).The tuple domain:bus:device.function PCI identifier (& NULL terminator)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.bus_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python wrapper for the PciInfoExt_v1.bus_id setter: casts
 * the PyObject* self to the extension-type struct and delegates to the impl
 * function (defined below; it encodes the str value and copies it into the
 * fixed-size busId array).
 * Do not hand-edit: regenerate from cuda/bindings/_nvml.pyx instead. */

/* "cuda/bindings/_nvml.pyx":1507
 *         return cpython.PyUnicode_FromString(self._ptr[0].busId)
 * 
 *     @bus_id.setter             # <<<<<<<<<<<<<<
 *     def bus_id(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python-level `bus_id` setter (.pyx lines 1507-1515):
 * rejects writes when the wrapper is read-only, encodes `val` via
 * val.encode(), enforces an encoded length < 32 (31 chars + NUL fit the
 * 32-byte busId field), then copies the data into the wrapped
 * nvmlPciInfoExt_v1_t. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1509
 *     @bus_id.setter
 *     def bus_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  /* Guard: instances created with readonly=True must not be mutated. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1510
 *     def bus_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfoExt_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1510, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1510, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1509
 *     @bus_id.setter
 *     def bus_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":1511
 *         if self._readonly:
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 32:
 *             raise ValueError("String too long for field bus_id, max length is 31")
 */
  /* buf = val.encode(): result must be exactly `bytes` (or None, rejected
   * below), enforced by the type check after the call. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1511, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 1511, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":1512
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 1512, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 1512, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 32);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":1513
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:
 *             raise ValueError("String too long for field bus_id, max length is 31")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_bus_id};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1513, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1513, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1512
 *             raise ValueError("This PciInfoExt_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":1514
 *         if len(buf) >= 32:
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 1514, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 1514, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":1515
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* NOTE(review): a fixed 32 bytes are copied from the bytes object's buffer
   * even when len(buf) < 32, so up to 31 bytes past the end of the encoded
   * payload are read. This is generated faithfully from the .pyx source
   * (memcpy(..., 32)); consider fixing upstream to copy len(buf)+1 bytes. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).busId), ((void *)__pyx_v_ptr), 32));

  /* "cuda/bindings/_nvml.pyx":1507
 *         return cpython.PyUnicode_FromString(self._ptr[0].busId)
 * 
 *     @bus_id.setter             # <<<<<<<<<<<<<<
 *     def bus_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.bus_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1517
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PciInfoExt_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method PciInfoExt_v1.from_data(data):
 * unpacks the single positional/keyword argument `data` from the METH_FASTCALL
 * calling convention and forwards it to the implementation
 * (__pyx_pf_..._12from_data).
 *
 * Fix: the keyword-count error check was written as
 *   if (unlikely(__pyx_kwds_len) < 0) ...
 * which is always false, because unlikely(x) expands to
 * __builtin_expect(!!(x), 0) and therefore normalizes its operand to 0/1
 * before the `< 0` comparison. A -1 error return from
 * __Pyx_NumKwargs_FASTCALL (with an exception already set) was silently
 * ignored, letting parsing continue with a pending exception. The comparison
 * now sits inside unlikely(), matching the equivalent check elsewhere in the
 * generated code (e.g. __pyx_kwds_len < 0 tests in other wrappers). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_12from_data, "PciInfoExt_v1.from_data(data)\n\nCreate an PciInfoExt_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `pci_info_ext_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison must be inside unlikely(); see function header. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 1517, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1517, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 1517, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 1517, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1517, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1517, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method PciInfoExt_v1.from_data (.pyx line 1524):
 * looks up the module-level global `pci_info_ext_v1_dtype` and delegates to
 * the module-internal helper __from_data(), which returns the wrapping
 * PciInfoExt_v1 instance (or NULL with an exception set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":1524
 *             data (_numpy.ndarray): a single-element array of dtype `pci_info_ext_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "pci_info_ext_v1_dtype", pci_info_ext_v1_dtype, PciInfoExt_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_pci_info_ext_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1524, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_pci_info_ext_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1524, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1517
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PciInfoExt_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1526
 *         return __from_data(data, "pci_info_ext_v1_dtype", pci_info_ext_v1_dtype, PciInfoExt_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfoExt_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * PciInfoExt_v1.from_ptr(ptr, readonly=False, owner=None): unpacks
 * positional/keyword arguments from the METH_FASTCALL calling convention,
 * converts `ptr` to intptr_t and `readonly` to a C int (default 0), defaults
 * `owner` to None, then forwards to the implementation
 * (__pyx_pf_..._14from_ptr).
 *
 * Fix: the keyword-count error check was written as
 *   if (unlikely(__pyx_kwds_len) < 0) ...
 * which is always false, because unlikely(x) expands to
 * __builtin_expect(!!(x), 0) and normalizes its operand to 0/1 before the
 * `< 0` comparison. A -1 error return from __Pyx_NumKwargs_FASTCALL (with an
 * exception already set) was therefore silently ignored. The comparison now
 * sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_14from_ptr, "PciInfoExt_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an PciInfoExt_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison must be inside unlikely(); see function header. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 1526, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 1526, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1526, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1526, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 1526, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":1527
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an PciInfoExt_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 1526, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 1526, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1526, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1526, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): conversion relies on Py_ssize_t and intptr_t having the
     * same width — true on mainstream platforms, but confirm for all
     * supported targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1527, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1527, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 1526, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":1526
 *         return __from_data(data, "pci_info_ext_v1_dtype", pci_info_ext_v1_dtype, PciInfoExt_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfoExt_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method PciInfoExt_v1.from_ptr (.pyx 1526-1550):
 * rejects a null ptr, allocates a new PciInfoExt_v1 instance, and either
 * (owner is None) mallocs a private nvmlPciInfoExt_v1_t and copies the data
 * pointed to by `ptr` into it (obj owns the memory), or (owner given) aliases
 * the caller's memory directly and keeps a reference to `owner` to pin its
 * lifetime. Finally records the readonly flag and returns the instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":1535
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1536
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1536, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1536, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1535
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":1537
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 */
  /* Direct tp_new call: bypasses __init__, fields start zero/None-initialized. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfoExt_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1537, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":1538
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1539
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 */
    /* Copy branch: no owner supplied, so obj allocates and owns its own
     * struct and snapshots the caller's data. */
    __pyx_v_obj->_ptr = ((nvmlPciInfoExt_v1_t *)malloc((sizeof(nvmlPciInfoExt_v1_t))));

    /* "cuda/bindings/_nvml.pyx":1540
 *         if owner is None:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPciInfoExt_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":1541
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfoExt_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPciInfoExt_v1_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1541, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PciInfoExt_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1541, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 1541, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":1540
 *         if owner is None:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPciInfoExt_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":1542
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPciInfoExt_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* NOTE(review): `ptr` is a caller-supplied raw address; the copy assumes
     * it points at a valid, readable nvmlPciInfoExt_v1_t — nothing here can
     * verify that. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlPciInfoExt_v1_t))));

    /* "cuda/bindings/_nvml.pyx":1543
 *                 raise MemoryError("Error allocating PciInfoExt_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPciInfoExt_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":1544
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPciInfoExt_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":1538
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PciInfoExt_v1 obj = PciInfoExt_v1.__new__(PciInfoExt_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>malloc(sizeof(nvmlPciInfoExt_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":1546
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Alias branch: wrap the caller's memory directly; `owner` keeps it alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlPciInfoExt_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":1547
 *         else:
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":1548
 *             obj._ptr = <nvmlPciInfoExt_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":1549
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":1550
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1526
 *         return __from_data(data, "pci_info_ext_v1_dtype", pci_info_ext_v1_dtype, PciInfoExt_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfoExt_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for PciInfoExt_v1.__reduce_cython__ (pickling stub): verifies the
 * call carries zero positional and zero keyword arguments, then forwards to
 * the implementation, which unconditionally raises TypeError. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_16__reduce_cython__, "PciInfoExt_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Method takes no arguments beyond self: reject any positional or keyword. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PciInfoExt_v1.__reduce_cython__: unconditionally
 * raises TypeError because the wrapped C pointer (self._ptr) cannot be
 * pickled.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for PciInfoExt_v1.__setstate_cython__: parses the single
 * required positional/keyword argument `__pyx_state` and delegates to the
 * __pyx_pf_* implementation.
 *
 * Fix: the negative-keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With the usual GCC/Clang
 * definition `unlikely(x) == __builtin_expect(!!(x), 0)`, the `!!`
 * normalizes the operand to 0/1, so the comparison could never be true
 * and a failing __Pyx_NumKwargs_FASTCALL went unreported.  The comparison
 * now sits inside unlikely(), matching the form used elsewhere in this
 * file (see the __reduce_cython__ wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_18__setstate_cython__, "PciInfoExt_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so the error path is live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: take any positional args first, then
       * fill the remaining slots from keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any argument references taken so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PciInfoExt_v1.__setstate_cython__: unconditionally
 * raises TypeError because the wrapped C pointer (self._ptr) cannot be
 * restored from pickled state.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13PciInfoExt_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfoExt_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1553
 * 
 * 
 * cdef _get_pci_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfo_t pod = nvmlPciInfo_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_pci_info_dtype_offsets(void) {
  nvmlPciInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlPciInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_pci_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":1554
 * 
 * cdef _get_pci_info_dtype_offsets():
 *     cdef nvmlPciInfo_t pod = nvmlPciInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['bus_id_legacy', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'bus_id'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":1555
 * cdef _get_pci_info_dtype_offsets():
 *     cdef nvmlPciInfo_t pod = nvmlPciInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['bus_id_legacy', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'bus_id'],
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1556
 *     cdef nvmlPciInfo_t pod = nvmlPciInfo_t()
 *     return _numpy.dtype({
 *         'names': ['bus_id_legacy', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'bus_id'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bus_id_legacy);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bus_id_legacy);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_bus_id_legacy) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_domain);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_domain);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_domain) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bus);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bus);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_bus) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_device);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_device);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_device) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pci_device_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pci_device_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_pci_device_id) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pci_sub_system_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pci_sub_system_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_pci_sub_system_id) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bus_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bus_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_bus_id) != (0)) __PYX_ERR(0, 1556, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 1556, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":1557
 *     return _numpy.dtype({
 *         'names': ['bus_id_legacy', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'bus_id'],
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.busIdLegacy)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 1557, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 1556, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":1559
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.busIdLegacy)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.busIdLegacy)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":1560
 *         'offsets': [
 *             (<intptr_t>&(pod.busIdLegacy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.domain)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":1561
 *             (<intptr_t>&(pod.busIdLegacy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bus)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 1561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":1562
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.device)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1562, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":1563
 *             (<intptr_t>&(pod.bus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pciDeviceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":1564
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pciSubSystemId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":1565
 *             (<intptr_t>&(pod.pciDeviceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pciSubSystemId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlPciInfo_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.busId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":1558
 *         'names': ['bus_id_legacy', 'domain', 'bus', 'device_', 'pci_device_id', 'pci_sub_system_id', 'bus_id'],
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.busIdLegacy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.domain)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 1558, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 1556, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":1567
 *             (<intptr_t>&(pod.busId)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlPciInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlPciInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 1556, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1555, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1553
 * 
 * 
 * cdef _get_pci_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfo_t pod = nvmlPciInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_pci_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1584
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPciInfo_t *>calloc(1, sizeof(nvmlPciInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for PciInfo.__init__ (tp_init slot, VARARGS
 * convention): rejects all positional and keyword arguments, then
 * delegates to the __pyx_pf_* implementation.  Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count the positional arguments (PyTuple_Size can fail). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PciInfo.__init__: allocates a zeroed nvmlPciInfo_t
 * with calloc, raises MemoryError on allocation failure, and marks the
 * instance as the owner of the allocation (freed in __dealloc__).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":1585
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlPciInfo_t *>calloc(1, sizeof(nvmlPciInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PciInfo")
*/
  __pyx_v_self->_ptr = ((nvmlPciInfo_t *)calloc(1, (sizeof(nvmlPciInfo_t))));

  /* "cuda/bindings/_nvml.pyx":1586
 *     def __init__(self):
 *         self._ptr = <nvmlPciInfo_t *>calloc(1, sizeof(nvmlPciInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PciInfo")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1587
 *         self._ptr = <nvmlPciInfo_t *>calloc(1, sizeof(nvmlPciInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PciInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating PciInfo"); the
     * builtin is looked up by name, then called via the vectorcall
     * fast path (unwrapping a bound method when possible). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1587, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PciInfo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1587, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1587, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1586
 *     def __init__(self):
 *         self._ptr = <nvmlPciInfo_t *>calloc(1, sizeof(nvmlPciInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PciInfo")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":1588
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PciInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":1589
 *             raise MemoryError("Error allocating PciInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":1590
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":1584
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPciInfo_t *>calloc(1, sizeof(nvmlPciInfo_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1592
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPciInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for PciInfo.__dealloc__ (tp_dealloc helper).
 * The slot receives only `self` and takes no arguments.
 *
 * Fix: the original body declared `__pyx_kwvalues` and initialized it
 * with __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs), but this
 * function has no `__pyx_args`/`__pyx_nargs` parameters or locals —
 * those identifiers are undeclared here, so the lines could not
 * compile.  The unused variable and its initializer are removed;
 * the wrapper simply delegates to the implementation. */
static void __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3__dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":1592-1597
   *
   * Release the nvmlPciInfo_t buffer held by this instance, but only when
   * the instance owns the allocation (_owned) and the pointer is non-NULL.
   * _ptr is cleared before free() so no dangling pointer remains visible
   * on the object afterwards. */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlPciInfo_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":1599
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PciInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython tp_repr slot wrapper for PciInfo.__repr__: downcasts self and
 * forwards to the impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of PciInfo.__repr__: builds the f-string
 * f"<{__name__}.PciInfo object at {hex(id(self))}>" by formatting the module
 * __name__ global and hex(id(self)) into unicode pieces, then joining them
 * with the constant fragments in one pass. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];  /* the 5 unicode pieces handed to the join */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":1600
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.PciInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__} -> str(__name__) */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))} -> str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<" + name + ".PciInfo object at " + hex + ">"; the size hint is
   * 2 constant chars + 19 constant chars + the two dynamic lengths. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_PciInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 19 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1599
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PciInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1602
 *         return f"<{__name__}.PciInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter slot wrapper for PciInfo.ptr: downcasts self and forwards
 * to the impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the PciInfo.ptr property getter: returns the raw nvmlPciInfo_t*
 * address as a Python int (via intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1605
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* PyLong_FromSsize_t is used for the intptr_t value; Py_ssize_t and
   * intptr_t have the same width on the supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1602
 *         return f"<{__name__}.PciInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1607
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_7PciInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":1607-1608 — cdef-level fast path that returns
   * the raw nvmlPciInfo_t* address as an integer, without boxing. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":1610
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython nb_int slot wrapper for PciInfo.__int__: downcasts self and
 * forwards to the impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of PciInfo.__int__: int(obj) yields the raw nvmlPciInfo_t* address,
 * identical to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":1611
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1610
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1613
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PciInfo other_
 *         if not isinstance(other, PciInfo):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of PciInfo.__eq__: False unless `other` is a PciInfo, otherwise a
 * byte-wise memcmp of the two underlying nvmlPciInfo_t structs.
 *
 * NOTE(review): memcmp dereferences both _ptr values without a NULL check;
 * presumably both instances always hold a valid allocation here (set in
 * __init__) — confirm that a NULL _ptr cannot reach this comparison. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":1615
 *     def __eq__(self, other):
 *         cdef PciInfo other_
 *         if not isinstance(other, PciInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":1616
 *         cdef PciInfo other_
 *         if not isinstance(other, PciInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1615
 *     def __eq__(self, other):
 *         cdef PciInfo other_
 *         if not isinstance(other, PciInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":1617
 *         if not isinstance(other, PciInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfo_t)) == 0)
 * 
 */
  /* Re-checks the type (cdef assignment typecheck) — redundant with the
   * isinstance guard above, but harmless. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo))))) __PYX_ERR(0, 1617, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":1618
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlPciInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1613
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PciInfo other_
 *         if not isinstance(other, PciInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1620
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
*/

/* Python wrapper */
/* mp_ass_subscript slot wrapper for PciInfo.__setitem__: downcasts self and
 * forwards key/val to the impl; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of PciInfo.__setitem__.
 *
 * obj[0] = ndarray  -> allocate a fresh nvmlPciInfo_t, memcpy the array's
 *                      buffer into it, mark self as owner; _readonly mirrors
 *                      (not val.flags.writeable).
 * any other key     -> setattr(self, key, val), i.e. route to a property.
 *
 * NOTE(review): when key == 0, a previously owned self->_ptr is overwritten
 * by malloc without being freed — looks like a memory leak on repeated
 * assignment; confirm against the .pyx generator/template. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":1621
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
 *             if self._ptr == NULL:
 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 1621, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Short-circuit: only look up _numpy.ndarray when key == 0. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 1621, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1622
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfo")
 */
    __pyx_v_self->_ptr = ((nvmlPciInfo_t *)malloc((sizeof(nvmlPciInfo_t))));

    /* "cuda/bindings/_nvml.pyx":1623
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PciInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":1624
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfo_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1624, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PciInfo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1624, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 1624, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":1623
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PciInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":1625
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PciInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; it is
     * round-tripped through intptr_t into a raw pointer for memcpy.
     * NOTE(review): assumes the ndarray buffer holds at least
     * sizeof(nvmlPciInfo_t) bytes — confirm callers validate the size. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1625, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1625, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1625, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlPciInfo_t))));

    /* "cuda/bindings/_nvml.pyx":1626
 *                 raise MemoryError("Error allocating PciInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":1627
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPciInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":1628
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1628, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1628, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 1628, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":1621
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":1630
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 1630, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":1620
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPciInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPciInfo_t *>malloc(sizeof(nvmlPciInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1632
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus_id_legacy(self):
 *         """~_numpy.int8: (array of length 16)."""
*/

/* Python wrapper */
/* Property getter slot wrapper for PciInfo.bus_id_legacy: downcasts self and
 * forwards to the impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the PciInfo.bus_id_legacy getter: decodes the NUL-terminated char
 * array self._ptr->busIdLegacy into a Python str via PyUnicode_FromString. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1635
 *     def bus_id_legacy(self):
 *         """~_numpy.int8: (array of length 16)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].busIdLegacy)             # <<<<<<<<<<<<<<
 * 
 *     @bus_id_legacy.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): reads self._ptr without a NULL check; presumably __init__
   * guarantees a valid allocation — confirm. */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).busIdLegacy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1632
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus_id_legacy(self):
 *         """~_numpy.int8: (array of length 16)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.bus_id_legacy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1637
 *         return cpython.PyUnicode_FromString(self._ptr[0].busIdLegacy)
 * 
 *     @bus_id_legacy.setter             # <<<<<<<<<<<<<<
 *     def bus_id_legacy(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for PciInfo.bus_id_legacy: downcasts self and
 * forwards the value to the impl; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the PciInfo.bus_id_legacy setter.
 *
 * Rejects writes on read-only instances, encodes `val` to bytes, enforces
 * len(buf) <= 15 so the 16-byte busIdLegacy field stays NUL-terminated,
 * then copies the encoded bytes into the field.
 *
 * Fix vs. generated original: the final memcpy copied a fixed 16 bytes from
 * `buf`, reading past the end of the bytes object whenever len(buf) < 15
 * (heap over-read, undefined behavior). The field is now zero-filled and
 * only len(buf) bytes are copied; the string observed by the getter
 * (PyUnicode_FromString stops at the NUL) is unchanged.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1639
 *     @bus_id_legacy.setter
 *     def bus_id_legacy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1640
 *     def bus_id_legacy(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 16:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1640, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1640, __pyx_L1_error)
  }

  /* "cuda/bindings/_nvml.pyx":1641
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 16:
 *             raise ValueError("String too long for field bus_id_legacy, max length is 15")
 */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1641, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 1641, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":1642
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 16:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field bus_id_legacy, max length is 15")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 1642, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 1642, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 16);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":1643
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 16:
 *             raise ValueError("String too long for field bus_id_legacy, max length is 15")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].busIdLegacy), <void *>ptr, 16)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_bus_id_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1643, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1643, __pyx_L1_error)
  }

  /* "cuda/bindings/_nvml.pyx":1644
 *         if len(buf) >= 16:
 *             raise ValueError("String too long for field bus_id_legacy, max length is 15")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].busIdLegacy), <void *>ptr, 16)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 1644, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 1644, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":1645
 *             raise ValueError("String too long for field bus_id_legacy, max length is 15")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].busIdLegacy), <void *>ptr, 16)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Zero the whole 16-byte field, then copy only the len(buf) encoded bytes
   * (__pyx_t_4 still holds len(buf), guaranteed <= 15 above). This avoids
   * the original fixed-size memcpy's over-read past the end of `buf`. */
  memset(((void *)(__pyx_v_self->_ptr[0]).busIdLegacy), 0, 16);
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).busIdLegacy), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4)));

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.bus_id_legacy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1647
 *         memcpy(<void *>(self._ptr[0].busIdLegacy), <void *>ptr, 16)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def domain(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.domain property getter:
 * casts the generic PyObject* self to the PciInfo extension-type struct and
 * delegates to the typed implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6domain_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6domain_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6domain___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.domain.__get__ implementation: boxes the C `unsigned int` field
 * self._ptr[0].domain into a Python int.  Returns a new reference, or NULL
 * with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6domain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1650
 *     def domain(self):
 *         """int: """
 *         return self._ptr[0].domain             # <<<<<<<<<<<<<<
 * 
 *     @domain.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* unsigned int -> new Python int reference */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).domain); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1647
 *         memcpy(<void *>(self._ptr[0].busIdLegacy), <void *>ptr, 16)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def domain(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.domain.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1652
 *         return self._ptr[0].domain
 * 
 *     @domain.setter             # <<<<<<<<<<<<<<
 *     def domain(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.domain property setter:
 * casts self to the PciInfo extension-type struct and delegates to the typed
 * implementation.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6domain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6domain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6domain_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.domain.__set__ implementation: raises ValueError when the instance
 * is flagged read-only, otherwise unboxes `val` to `unsigned int` and stores
 * it in self._ptr[0].domain.  Returns 0 on success, -1 with an exception set
 * (bad type/overflow of val, or the read-only ValueError) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6domain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1654
 *     @domain.setter
 *     def domain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].domain = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1655
 *     def domain(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].domain = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1655, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1655, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1654
 *     @domain.setter
 *     def domain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].domain = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1656
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].domain = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Python int -> unsigned int; -1 doubles as an error sentinel, so confirm
     with PyErr_Occurred() before treating it as a failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1656, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).domain = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1652
 *         return self._ptr[0].domain
 * 
 *     @domain.setter             # <<<<<<<<<<<<<<
 *     def domain(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.domain.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1658
 *         self._ptr[0].domain = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.bus property getter:
 * casts self to the PciInfo extension-type struct and delegates to the typed
 * implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3bus_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3bus_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3bus___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.bus.__get__ implementation: boxes the C `unsigned int` field
 * self._ptr[0].bus into a Python int.  Returns a new reference, or NULL
 * with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3bus___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1661
 *     def bus(self):
 *         """int: """
 *         return self._ptr[0].bus             # <<<<<<<<<<<<<<
 * 
 *     @bus.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* unsigned int -> new Python int reference */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bus); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1658
 *         self._ptr[0].domain = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.bus.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1663
 *         return self._ptr[0].bus
 * 
 *     @bus.setter             # <<<<<<<<<<<<<<
 *     def bus(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.bus property setter:
 * casts self to the PciInfo extension-type struct and delegates to the typed
 * implementation.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3bus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3bus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3bus_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.bus.__set__ implementation: raises ValueError when the instance
 * is flagged read-only, otherwise unboxes `val` to `unsigned int` and stores
 * it in self._ptr[0].bus.  Returns 0 on success, -1 with an exception set
 * on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_3bus_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1665
 *     @bus.setter
 *     def bus(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].bus = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1666
 *     def bus(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bus = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1666, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1666, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1665
 *     @bus.setter
 *     def bus(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].bus = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1667
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].bus = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Python int -> unsigned int; -1 is the error sentinel, confirmed via
     PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1667, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bus = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1663
 *         return self._ptr[0].bus
 * 
 *     @bus.setter             # <<<<<<<<<<<<<<
 *     def bus(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.bus.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1669
 *         self._ptr[0].bus = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.device_ property getter
 * (trailing underscore in the Python name avoids clashing with `device`):
 * casts self and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7device__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7device__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_7device____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.device_.__get__ implementation: boxes the C `unsigned int` field
 * self._ptr[0].device into a Python int.  Returns a new reference, or NULL
 * with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1672
 *     def device_(self):
 *         """int: """
 *         return self._ptr[0].device             # <<<<<<<<<<<<<<
 * 
 *     @device_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* unsigned int -> new Python int reference */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).device); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1672, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1669
 *         self._ptr[0].bus = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.device_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1674
 *         return self._ptr[0].device
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.device_ property setter:
 * casts self to the PciInfo extension-type struct and delegates to the typed
 * implementation.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_7device__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.device_.__set__ implementation: raises ValueError when the
 * instance is flagged read-only, otherwise unboxes `val` to `unsigned int`
 * and stores it in self._ptr[0].device.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1676
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].device = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1677
 *     def device_(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].device = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1677, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1677, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1676
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].device = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1678
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].device = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Python int -> unsigned int; -1 is the error sentinel, confirmed via
     PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1678, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).device = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1674
 *         return self._ptr[0].device
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.device_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1680
 *         self._ptr[0].device = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_device_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.pci_device_id property
 * getter: casts self to the PciInfo extension-type struct and delegates to
 * the typed implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.pci_device_id.__get__ implementation: boxes the C `unsigned int`
 * field self._ptr[0].pciDeviceId into a Python int.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1683
 *     def pci_device_id(self):
 *         """int: """
 *         return self._ptr[0].pciDeviceId             # <<<<<<<<<<<<<<
 * 
 *     @pci_device_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* unsigned int -> new Python int reference */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).pciDeviceId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1683, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1680
 *         self._ptr[0].device = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_device_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.pci_device_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1685
 *         return self._ptr[0].pciDeviceId
 * 
 *     @pci_device_id.setter             # <<<<<<<<<<<<<<
 *     def pci_device_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.pci_device_id property
 * setter: casts self to the PciInfo extension-type struct and delegates to
 * the typed implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.pci_device_id.__set__ implementation: raises ValueError when the
 * instance is flagged read-only, otherwise unboxes `val` to `unsigned int`
 * and stores it in self._ptr[0].pciDeviceId.  Returns 0 on success, -1 with
 * an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1687
 *     @pci_device_id.setter
 *     def pci_device_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].pciDeviceId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1688
 *     def pci_device_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].pciDeviceId = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1688, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1688, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1687
 *     @pci_device_id.setter
 *     def pci_device_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].pciDeviceId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1689
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].pciDeviceId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Python int -> unsigned int; -1 is the error sentinel, confirmed via
     PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1689, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).pciDeviceId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1685
 *         return self._ptr[0].pciDeviceId
 * 
 *     @pci_device_id.setter             # <<<<<<<<<<<<<<
 *     def pci_device_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.pci_device_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1691
 *         self._ptr[0].pciDeviceId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.pci_sub_system_id property
 * getter: casts self to the PciInfo extension-type struct and delegates to
 * the typed implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.pci_sub_system_id.__get__ implementation: boxes the C
 * `unsigned int` field self._ptr[0].pciSubSystemId into a Python int.
 * Returns a new reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1694
 *     def pci_sub_system_id(self):
 *         """int: """
 *         return self._ptr[0].pciSubSystemId             # <<<<<<<<<<<<<<
 * 
 *     @pci_sub_system_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* unsigned int -> new Python int reference */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).pciSubSystemId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1694, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1691
 *         self._ptr[0].pciDeviceId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.pci_sub_system_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1696
 *         return self._ptr[0].pciSubSystemId
 * 
 *     @pci_sub_system_id.setter             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.pci_sub_system_id property
 * setter: casts self to the PciInfo extension-type struct and delegates to
 * the typed implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.pci_sub_system_id.__set__ implementation: raises ValueError when
 * the instance is flagged read-only, otherwise unboxes `val` to
 * `unsigned int` and stores it in self._ptr[0].pciSubSystemId.  Returns 0
 * on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1698
 *     @pci_sub_system_id.setter
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].pciSubSystemId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1699
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].pciSubSystemId = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1699, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1699, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1698
 *     @pci_sub_system_id.setter
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].pciSubSystemId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1700
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         self._ptr[0].pciSubSystemId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Python int -> unsigned int; -1 is the error sentinel, confirmed via
     PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1700, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).pciSubSystemId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1696
 *         return self._ptr[0].pciSubSystemId
 * 
 *     @pci_sub_system_id.setter             # <<<<<<<<<<<<<<
 *     def pci_sub_system_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.pci_sub_system_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1702
 *         self._ptr[0].pciSubSystemId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus_id(self):
 *         """~_numpy.int8: (array of length 32)."""
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the PciInfo.bus_id property getter:
 * casts self to the PciInfo extension-type struct and delegates to the typed
 * implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6bus_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* PciInfo.bus_id.__get__ implementation: decodes the NUL-terminated char
 * array self._ptr[0].busId into a Python str via PyUnicode_FromString.
 * Returns a new reference, or NULL with a Python exception set on failure
 * (e.g. if the buffer content is not valid UTF-8). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6bus_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1705
 *     def bus_id(self):
 *         """~_numpy.int8: (array of length 32)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].busId)             # <<<<<<<<<<<<<<
 * 
 *     @bus_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* char[] -> new Python str reference (stops at the first NUL) */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).busId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1705, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1702
 *         self._ptr[0].pciSubSystemId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bus_id(self):
 *         """~_numpy.int8: (array of length 32)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.bus_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1707
 *         return cpython.PyUnicode_FromString(self._ptr[0].busId)
 * 
 *     @bus_id.setter             # <<<<<<<<<<<<<<
 *     def bus_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Thin CPython setter slot for PciInfo.bus_id: casts `self` to the concrete
 * extension-type struct and forwards to the Cython-level implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1709
 *     @bus_id.setter
 *     def bus_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1710
 *     def bus_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PciInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1710, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1710, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1709
 *     @bus_id.setter
 *     def bus_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()
*/
  }

  /* "cuda/bindings/_nvml.pyx":1711
 *         if self._readonly:
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 32:
 *             raise ValueError("String too long for field bus_id, max length is 31")
*/
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1711, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 1711, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":1712
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf
*/
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 1712, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 1712, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 32);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":1713
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:
 *             raise ValueError("String too long for field bus_id, max length is 31")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_bus_id};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1713, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1713, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1712
 *             raise ValueError("This PciInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 32:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf
*/
  }

  /* "cuda/bindings/_nvml.pyx":1714
 *         if len(buf) >= 32:
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
*/
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 1714, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 1714, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":1715
 *             raise ValueError("String too long for field bus_id, max length is 31")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).busId), ((void *)__pyx_v_ptr), 32));

  /* "cuda/bindings/_nvml.pyx":1707
 *         return cpython.PyUnicode_FromString(self._ptr[0].busId)
 * 
 *     @bus_id.setter             # <<<<<<<<<<<<<<
 *     def bus_id(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.bus_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1717
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PciInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* CPython METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * PciInfo.from_data(data): unpacks exactly one positional-or-keyword
 * argument named "data" and forwards to the implementation.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_12from_data, "PciInfo.from_data(data)\n\nCreate an PciInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `pci_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE: paren placement `unlikely(x) < 0` is equivalent to `x < 0`. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1717, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 1717, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 1717, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1717, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1717, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method PciInfo.from_data(data).
 *
 * Looks up the module-level `pci_info_dtype` and delegates to the shared
 * helper __from_data, which wraps the given single-element NumPy array in a
 * new PciInfo instance. Returns a new reference, or NULL with an exception
 * set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":1724
 *             data (_numpy.ndarray): a single-element array of dtype `pci_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "pci_info_dtype", pci_info_dtype, PciInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_pci_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_pci_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1717
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PciInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1726
 *         return __from_data(data, "pci_info_dtype", pci_info_dtype, PciInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * PciInfo.from_ptr(ptr, readonly=False, owner=None): unpacks 1..3
 * positional-or-keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C bool, defaults `owner` to None, then forwards to the
 * implementation. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_14from_ptr, "PciInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an PciInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE: paren placement `unlikely(x) < 0` is equivalent to `x < 0`. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1726, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 1726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 1726, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":1727
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an PciInfo instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 1726, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1 to 3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 1726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1726, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr (int) -> intptr_t; -1 doubles as the error sentinel, so
     * PyErr_Occurred() disambiguates a genuine -1 value from failure. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1727, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1727, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 1726, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":1726
 *         return __from_data(data, "pci_info_dtype", pci_info_dtype, PciInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method PciInfo.from_ptr(ptr, readonly, owner).
 *
 * Wraps an existing nvmlPciInfo_t located at address `ptr`:
 *   - owner is None: the data is copied into a freshly malloc'd struct that
 *     the new object owns (obj._owned = True, freed by the deallocator);
 *   - otherwise: the object aliases the caller's memory (obj._owned = False)
 *     and keeps a reference to `owner` to pin the memory's lifetime.
 * Raises ValueError for a NULL ptr and MemoryError on allocation failure.
 * Returns a new PciInfo reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* if ptr == 0: raise ValueError(...) -- never wrap a null pointer. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1736, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1736, __pyx_L1_error)
  }

  /* cdef PciInfo obj = PciInfo.__new__(PciInfo) -- allocate without
   * running __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1737, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* if owner is None: copy the data so the object owns its storage. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {
    __pyx_v_obj->_ptr = ((nvmlPciInfo_t *)malloc((sizeof(nvmlPciInfo_t))));

    /* MemoryError on allocation failure; obj itself is released via the
     * normal error path below. */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1741, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PciInfo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1741, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 1741, __pyx_L1_error)
    }

    /* Copy the caller's struct into the owned allocation. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlPciInfo_t))));

    /* obj._owner = None */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* obj._owned = True -- deallocator will free obj._ptr. */
    __pyx_v_obj->_owned = 1;

    goto __pyx_L4;
  }

  /* else: alias the caller's memory; `owner` keeps it alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlPciInfo_t *)__pyx_v_ptr);

    /* obj._owner = owner */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* obj._owned = False -- deallocator must not free obj._ptr. */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* obj._readonly = readonly */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* return obj */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython wrapper for PciInfo.__reduce_cython__(self): rejects any
 * positional or keyword arguments, then forwards to the implementation
 * (which always raises TypeError -- the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_16__reduce_cython__, "PciInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PciInfo.__reduce_cython__.
 * PciInfo wraps a raw C pointer (self._ptr), so instances cannot be
 * pickled; this function unconditionally raises TypeError and always
 * returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError; control always jumps to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Forward declaration of the PciInfo.__setstate_cython__ wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and METH_FASTCALL method-table entry for PciInfo.__setstate_cython__. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_18__setstate_cython__, "PciInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_18__setstate_cython__};
/* Python wrapper for PciInfo.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one argument (positional, or the keyword "__pyx_state")
 * and forwards it to the implementation function, which always raises
 * TypeError (pickling is disallowed for pointer-backed objects).
 * Returns NULL with an exception set on argument errors. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    /* BUGFIX: this check originally read
     *   if (unlikely(__pyx_kwds_len) < 0) ...
     * unlikely() normalizes its operand to 0/1, so "< 0" could never be
     * true and a negative (error) length was silently ignored. The
     * parenthesization is corrected to match the equivalent check in the
     * __reduce_cython__ wrapper above; the "unlikely" hint on __pyx_kwds
     * is also added for consistency with that wrapper. */
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positionals first, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Any slot still empty after keyword parsing is a missing argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PciInfo.__setstate_cython__.
 * The __pyx_state argument is ignored: unpickling is disallowed because
 * the object wraps a raw C pointer. Always raises TypeError and returns
 * NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PciInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError; control always jumps to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PciInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1753
 * 
 * 
 * cdef _get_utilization_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUtilization_t pod = nvmlUtilization_t()
 *     return _numpy.dtype({
*/

/* Build a numpy structured dtype describing nvmlUtilization_t:
 *   names:   ['gpu', 'memory']
 *   formats: [numpy.uint32, numpy.uint32]
 *   offsets: byte offsets of each field inside the C struct
 *   itemsize: sizeof(nvmlUtilization_t)
 * A local struct instance ("pod") is used purely for address arithmetic;
 * its field values are never read. Returns a new reference to the dtype,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_utilization_dtype_offsets(void) {
  nvmlUtilization_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlUtilization_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_utilization_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":1754
 * 
 * cdef _get_utilization_dtype_offsets():
 *     cdef nvmlUtilization_t pod = nvmlUtilization_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['gpu', 'memory'],
 */
  /* BUGFIX: __pyx_t_1 was previously copied into pod without ever being
   * initialized, copying an indeterminate value (undefined behavior).
   * Value-initialize it first, matching the pyx source's
   * "nvmlUtilization_t()" (this file is compiled as C++). The contents are
   * irrelevant to the offset computation below, but the copy must be of a
   * well-defined object. */
  __pyx_t_1 = nvmlUtilization_t();
  __pyx_v_pod = __pyx_t_1;

  /* Look up _numpy.dtype (the callable used to build the result). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* Build the dtype-spec dict: 'names' -> ['gpu', 'memory']. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_gpu) != (0)) __PYX_ERR(0, 1756, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_memory);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_memory);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_memory) != (0)) __PYX_ERR(0, 1756, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 1756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* 'formats' -> [numpy.uint32, numpy.uint32]. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 1757, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 1757, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 1756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* 'offsets' -> byte offsets of pod.gpu and pod.memory within the struct,
   * computed by pointer arithmetic on the local instance. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpu)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1759, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memory)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 1758, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 1758, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 1756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* 'itemsize' -> sizeof(nvmlUtilization_t). */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlUtilization_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1762, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 1756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec), unpacking a bound method if possible. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1755, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_utilization_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1779
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlUtilization_t *>calloc(1, sizeof(nvmlUtilization_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for Utilization.__init__.
 * tp_init-style entry point (tuple/dict calling convention): rejects any
 * positional or keyword arguments, then forwards to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Utilization.__init__.
 * Zero-allocates an owned nvmlUtilization_t buffer (calloc) and stores it
 * in self._ptr; raises MemoryError if allocation fails. Marks the object
 * as owning the buffer (_owned = True, _owner = None, _readonly = False).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":1780
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlUtilization_t *>calloc(1, sizeof(nvmlUtilization_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Utilization")
 */
  /* calloc zero-initializes the struct fields. */
  __pyx_v_self->_ptr = ((nvmlUtilization_t *)calloc(1, (sizeof(nvmlUtilization_t))));

  /* "cuda/bindings/_nvml.pyx":1781
 *     def __init__(self):
 *         self._ptr = <nvmlUtilization_t *>calloc(1, sizeof(nvmlUtilization_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Utilization")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1782
 *         self._ptr = <nvmlUtilization_t *>calloc(1, sizeof(nvmlUtilization_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Utilization")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating Utilization"). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1782, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Utilization};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1782, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1782, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1781
 *     def __init__(self):
 *         self._ptr = <nvmlUtilization_t *>calloc(1, sizeof(nvmlUtilization_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Utilization")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":1783
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Utilization")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (no external owner). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":1784
 *             raise MemoryError("Error allocating Utilization")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":1785
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":1779
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlUtilization_t *>calloc(1, sizeof(nvmlUtilization_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1787
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlUtilization_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for Utilization.__dealloc__ (tp_dealloc helper).
 * NOTE(review): __pyx_args and __pyx_nargs are not declared in this
 * function; the __Pyx_KwValues_VARARGS line presumably compiles only
 * because the macro does not evaluate its arguments — confirm against the
 * macro definition earlier in this file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  nvmlUtilization_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlUtilization_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":1789
 *     def __dealloc__(self):
 *         cdef nvmlUtilization_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1790
 *         cdef nvmlUtilization_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":1791
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":1792
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":1789
 *     def __dealloc__(self):
 *         cdef nvmlUtilization_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":1787
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlUtilization_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":1794
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Utilization object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for Utilization.__repr__ (tp_repr slot).
 * NOTE(review): __pyx_args and __pyx_nargs are not declared in this
 * function; the __Pyx_KwValues_VARARGS line presumably compiles only
 * because the macro does not evaluate its arguments — confirm against the
 * macro definition earlier in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Utilization.__repr__.
 * Builds the f-string f"<{__name__}.Utilization object at {hex(id(self))}>"
 * by formatting the module's __name__, hex(id(self)), and three constant
 * fragments, then joining the five pieces into one unicode object.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":1795
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.Utilization object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module __name__ as unicode. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) as a unicode string. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five fragments: "<", __name__, ".Utilization object at ", hex-id, ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_Utilization_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 23 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1794
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Utilization object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1797
 *         return f"<{__name__}.Utilization object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper for the Utilization.ptr property getter.
 * NOTE(review): __pyx_args and __pyx_nargs are not declared in this
 * function; the __Pyx_KwValues_VARARGS line presumably compiles only
 * because the macro does not evaluate its arguments — confirm against the
 * macro definition earlier in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Utilization.ptr property getter.
 * Returns self._ptr as a Python int (the raw address), or NULL with an
 * exception set if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1800
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer to an integer address, then to a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1800, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1797
 *         return f"<{__name__}.Utilization object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1802
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (cdef _get_ptr): return the raw nvmlUtilization_t
 * pointer held in self._ptr as an integer address. No Python API calls,
 * cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11Utilization__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":1805
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper for Utilization.__int__ (nb_int slot).
 * NOTE(review): __pyx_args and __pyx_nargs are not declared in this
 * function; the __Pyx_KwValues_VARARGS line presumably compiles only
 * because the macro does not evaluate its arguments — confirm against the
 * macro definition earlier in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Utilization.__int__: return the wrapped
 * nvmlUtilization_t pointer's address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":1806
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): the intptr_t address is routed through Py_ssize_t;
   * assumes intptr_t fits in Py_ssize_t on supported platforms -- TODO
   * confirm. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1805
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1808
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Utilization other_
 *         if not isinstance(other, Utilization):
*/

/* Python wrapper */
/* Rich-compare wrapper for Utilization.__eq__: casts self and forwards
 * both operands to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Utilization.__eq__: returns False for non-Utilization
 * operands; otherwise compares the two wrapped nvmlUtilization_t structs
 * byte-for-byte with memcmp. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":1810
 *     def __eq__(self, other):
 *         cdef Utilization other_
 *         if not isinstance(other, Utilization):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":1811
 *         cdef Utilization other_
 *         if not isinstance(other, Utilization):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUtilization_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1810
 *     def __eq__(self, other):
 *         cdef Utilization other_
 *         if not isinstance(other, Utilization):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":1812
 *         if not isinstance(other, Utilization):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUtilization_t)) == 0)
 * 
 */
  /* The TypeTest cast below accepts Py_None, but None can never reach here:
   * the isinstance guard above already returned False for it. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization))))) __PYX_ERR(0, 1812, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":1813
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUtilization_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): both _ptr values are dereferenced without NULL checks --
   * assumes both instances wrap valid storage; confirm against how
   * instances are constructed in the .pyx. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlUtilization_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1808
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Utilization other_
 *         if not isinstance(other, Utilization):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1815
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUtilization_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
*/

/* Python wrapper */
/* mp_ass_subscript-style wrapper for Utilization.__setitem__: casts self
 * and forwards key/val to the implementation below. Returns 0 on success,
 * -1 on error (per the CPython slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Utilization.__setitem__:
 *   obj[0] = ndarray  -> allocate a fresh nvmlUtilization_t, copy the
 *                        array's bytes into it, and mark self as owner;
 *                        read-only state mirrors the array's writeable flag;
 *   anything else     -> fall back to setattr(self, key, val).
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":1816
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if self._ptr == NULL:
 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 1816, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 1816, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1817
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Utilization")
 */
    /* NOTE(review): any previously held self->_ptr is overwritten here
     * without being freed -- looks like a leak if the instance already
     * owned memory; confirm against the .pyx generator / dealloc path. */
    __pyx_v_self->_ptr = ((nvmlUtilization_t *)malloc((sizeof(nvmlUtilization_t))));

    /* "cuda/bindings/_nvml.pyx":1818
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUtilization_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":1819
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Utilization")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUtilization_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1819, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Utilization};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1819, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 1819, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":1818
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUtilization_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":1820
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUtilization_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(nvmlUtilization_t) bytes from the array's buffer.
     * Assumes val is a single-element array of the matching dtype --
     * TODO confirm callers enforce this (no size/dtype check here). */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1820, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1820, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1820, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlUtilization_t))));

    /* "cuda/bindings/_nvml.pyx":1821
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUtilization_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":1822
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUtilization_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":1823
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1823, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1823, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 1823, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":1816
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":1825
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 1825, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":1815
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUtilization_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1827
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for Utilization.gpu: casts self and delegates
 * to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3gpu_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3gpu_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3gpu___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Utilization.gpu getter: returns the wrapped
 * struct's `gpu` field (unsigned int) as a Python int. Dereferences
 * self->_ptr unconditionally -- assumes it is non-NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3gpu___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1830
 *     def gpu(self):
 *         """int: """
 *         return self._ptr[0].gpu             # <<<<<<<<<<<<<<
 * 
 *     @gpu.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).gpu); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1830, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1827
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.gpu.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1832
 *         return self._ptr[0].gpu
 * 
 *     @gpu.setter             # <<<<<<<<<<<<<<
 *     def gpu(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for Utilization.gpu: casts self and forwards
 * the new value to the implementation below. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3gpu_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3gpu_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3gpu_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Utilization.gpu setter: raises ValueError if the
 * instance is read-only, otherwise converts val to unsigned int (with
 * range/type checking) and stores it in the wrapped struct's `gpu` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_3gpu_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1834
 *     @gpu.setter
 *     def gpu(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Utilization instance is read-only")
 *         self._ptr[0].gpu = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1835
 *     def gpu(self, val):
 *         if self._readonly:
 *             raise ValueError("This Utilization instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].gpu = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Utilization_instance_is_rea};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1835, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1835, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1834
 *     @gpu.setter
 *     def gpu(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Utilization instance is read-only")
 *         self._ptr[0].gpu = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1836
 *         if self._readonly:
 *             raise ValueError("This Utilization instance is read-only")
 *         self._ptr[0].gpu = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* OverflowError/TypeError from the conversion propagates via L1_error. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1836, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).gpu = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1832
 *         return self._ptr[0].gpu
 * 
 *     @gpu.setter             # <<<<<<<<<<<<<<
 *     def gpu(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.gpu.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1838
 *         self._ptr[0].gpu = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for Utilization.memory: casts self and delegates
 * to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_6memory_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_6memory_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6memory___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Utilization.memory getter: returns the wrapped
 * struct's `memory` field (unsigned int) as a Python int. Dereferences
 * self->_ptr unconditionally -- assumes it is non-NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1841
 *     def memory(self):
 *         """int: """
 *         return self._ptr[0].memory             # <<<<<<<<<<<<<<
 * 
 *     @memory.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1841, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1838
 *         self._ptr[0].gpu = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.memory.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1843
 *         return self._ptr[0].memory
 * 
 *     @memory.setter             # <<<<<<<<<<<<<<
 *     def memory(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for Utilization.memory: casts self and forwards
 * the new value to the implementation below. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_6memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_6memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6memory_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Utilization.memory setter: raises ValueError if
 * the instance is read-only, otherwise converts val to unsigned int (with
 * range/type checking) and stores it in the wrapped struct's `memory`
 * field. Mirrors the gpu setter above. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_6memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1845
 *     @memory.setter
 *     def memory(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Utilization instance is read-only")
 *         self._ptr[0].memory = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1846
 *     def memory(self, val):
 *         if self._readonly:
 *             raise ValueError("This Utilization instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].memory = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Utilization_instance_is_rea};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1846, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1846, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1845
 *     @memory.setter
 *     def memory(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Utilization instance is read-only")
 *         self._ptr[0].memory = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1847
 *         if self._readonly:
 *             raise ValueError("This Utilization instance is read-only")
 *         self._ptr[0].memory = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* OverflowError/TypeError from the conversion propagates via L1_error. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1847, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).memory = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1843
 *         return self._ptr[0].memory
 * 
 *     @memory.setter             # <<<<<<<<<<<<<<
 *     def memory(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.memory.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1849
 *         self._ptr[0].memory = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Utilization instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod Utilization.from_data(data):
 * accepts exactly one argument, positionally or as the keyword `data`,
 * then dispatches to the implementation function.
 *
 * Fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which normalizes
 * its operand to 0/1, so `< 0` could never be true and a negative (error)
 * return from __Pyx_NumKwargs_FASTCALL was silently ignored. The comparison
 * now lives inside the macro argument: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11Utilization_12from_data, "Utilization.from_data(data)\n\nCreate an Utilization instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `utilization_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: compare the length itself, not the 0/1 result of unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 1849, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 1849, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 1849, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1849, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1849, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references taken so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Utilization.from_data(data)` (staticmethod).
 * Looks up the module-level `utilization_dtype` global and delegates to the
 * internal `__from_data` helper together with the Utilization extension type.
 * Returns a new reference on success, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;  /* temporary: the `utilization_dtype` module global */
  PyObject *__pyx_t_2 = NULL;  /* temporary: result of __from_data */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":1856
 *             data (_numpy.ndarray): a single-element array of dtype `utilization_dtype` holding the data.
 *         """
 *         return __from_data(data, "utilization_dtype", utilization_dtype, Utilization)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the `utilization_dtype` global from the module state. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_utilization_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Delegate to the shared helper: (array, dtype name, dtype object, wrapper type). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_utilization_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1849
 *         self._ptr[0].memory = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Utilization instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and add this frame to the traceback. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1858
 *         return __from_data(data, "utilization_dtype", utilization_dtype, Utilization)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Utilization instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for `Utilization.from_ptr(intptr_t ptr, bint readonly=False,
 * object owner=None)` (staticmethod).
 *
 * Unpacks positional/keyword arguments under either the METH_FASTCALL or the
 * tuple-based calling convention (selected at compile time), converts them to
 * C types, and forwards to the implementation function.
 *
 * Fix: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which is dead code — `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`,
 * normalizing the value to 0/1, so `< 0` could never be true and a negative
 * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored.  The
 * comparison now sits inside the `unlikely(...)`, matching the correctly
 * parenthesized form used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11Utilization_14from_ptr, "Utilization.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an Utilization instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};  /* owned refs to (ptr, readonly, owner), filled below */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); previously
     * `unlikely(__pyx_kwds_len) < 0` was always false (dead check). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 1858, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 1858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 1858, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":1859
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an Utilization instance wrapping the given pointer.
 * 
*/
      /* Apply default `owner=None`, then verify required arg `ptr` is present. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 1858, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 1858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 1858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1858, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert to C types: ptr -> intptr_t, readonly -> truth value (default 0). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1859, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1859, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 1858, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any collected argument refs and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":1858
 *         return __from_data(data, "utilization_dtype", utilization_dtype, Utilization)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Utilization instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Utilization.from_ptr(ptr, readonly=False, owner=None)`.
 *
 * Validates that `ptr` is non-null, allocates a new Utilization object, and
 * either (owner is None) takes an owned deep copy of the pointed-to
 * nvmlUtilization_t via malloc+memcpy, or (owner given) borrows the pointer
 * and keeps a reference to `owner` to keep the memory alive.  The `_owned`
 * flag records which case applies so the destructor knows whether to free.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":1867
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Utilization obj = Utilization.__new__(Utilization)
*/
  /* Reject null pointers up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1868
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef Utilization obj = Utilization.__new__(Utilization)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1868, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1868, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1867
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Utilization obj = Utilization.__new__(Utilization)
*/
  }

  /* "cuda/bindings/_nvml.pyx":1869
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Utilization obj = Utilization.__new__(Utilization)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
*/
  /* Allocate the wrapper object (tp_new; __init__ is not invoked). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Utilization(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1869, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":1870
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Utilization obj = Utilization.__new__(Utilization)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1871
 *         cdef Utilization obj = Utilization.__new__(Utilization)
 *         if owner is None:
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Utilization")
*/
    /* No owner: copy the struct so the object owns its own storage. */
    __pyx_v_obj->_ptr = ((nvmlUtilization_t *)malloc((sizeof(nvmlUtilization_t))));

    /* "cuda/bindings/_nvml.pyx":1872
 *         if owner is None:
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUtilization_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":1873
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Utilization")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUtilization_t))
 *             obj._owner = None
*/
      /* malloc failed: raise MemoryError (obj itself is released via __pyx_L0). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1873, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Utilization};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1873, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 1873, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":1872
 *         if owner is None:
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUtilization_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":1874
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUtilization_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlUtilization_t))));

    /* "cuda/bindings/_nvml.pyx":1875
 *                 raise MemoryError("Error allocating Utilization")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUtilization_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":1876
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUtilization_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlUtilization_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":1870
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Utilization obj = Utilization.__new__(Utilization)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlUtilization_t *>malloc(sizeof(nvmlUtilization_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":1878
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlUtilization_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner given: borrow the pointer and hold a reference to the owner. */
    __pyx_v_obj->_ptr = ((nvmlUtilization_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":1879
 *         else:
 *             obj._ptr = <nvmlUtilization_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":1880
 *             obj._ptr = <nvmlUtilization_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":1881
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":1882
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1858
 *         return __from_data(data, "utilization_dtype", utilization_dtype, Utilization)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Utilization instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Shared epilogue: drops the local `obj` reference on both success
   * (result already holds its own ref) and error paths. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for `Utilization.__reduce_cython__(self)`.
 * Rejects any positional or keyword arguments (the method takes only `self`)
 * and forwards to the implementation, which always raises TypeError to block
 * pickling. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11Utilization_16__reduce_cython__, "Utilization.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple-based calling convention: derive nargs from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* No arguments beyond `self` are accepted. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Utilization.__reduce_cython__`: unconditionally raises
 * TypeError because the wrapped C pointer (`self._ptr`) cannot be pickled.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for `Utilization.__setstate_cython__(self, __pyx_state)`.
 * Unpacks the single required `__pyx_state` argument (positionally or by
 * keyword) and forwards to the implementation, which always raises TypeError
 * to block unpickling.
 *
 * Fix: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which is dead code — `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`,
 * normalizing the value to 0/1, so `< 0` could never be true and a negative
 * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored.  The
 * comparison now sits inside the `unlikely(...)`, matching the correctly
 * parenthesized form used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11Utilization_18__setstate_cython__, "Utilization.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned ref to the __pyx_state argument */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); previously
     * `unlikely(__pyx_kwds_len) < 0` was always false (dead check). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify the required `__pyx_state` argument was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any collected argument refs and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11Utilization_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Utilization.__setstate_cython__`: unconditionally raises
 * TypeError because the wrapped C pointer (`self._ptr`) cannot be restored
 * from pickled state.  The `__pyx_state` argument is accepted but unused.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11Utilization_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Utilization.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1885
 * 
 * 
 * cdef _get_memory_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlMemory_t pod = nvmlMemory_t()
 *     return _numpy.dtype({
*/

/* Implementation of the module-level cdef function
 * `_get_memory_dtype_offsets()`: constructs and returns a numpy structured
 * dtype describing the C layout of nvmlMemory_t — field names
 * ('total', 'free', 'used'), their formats (all numpy.uint64), their byte
 * offsets computed from a local struct instance via pointer arithmetic,
 * and the overall itemsize (sizeof(nvmlMemory_t)).  Returns NULL with a
 * Python exception set on failure.
 * NOTE(review): machine-generated by Cython; regenerate from _nvml.pyx
 * rather than editing by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_memory_dtype_offsets(void) {
  nvmlMemory_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlMemory_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_memory_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":1886
 * 
 * cdef _get_memory_dtype_offsets():
 *     cdef nvmlMemory_t pod = nvmlMemory_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['total', 'free', 'used'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy.  `pod` is
   * only used for address arithmetic (&pod.field - &pod) below, so its
   * *value* is never read — but copying an indeterminate value is
   * questionable; confirm against the Cython code generator output. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":1887
 * cdef _get_memory_dtype_offsets():
 *     cdef nvmlMemory_t pod = nvmlMemory_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['total', 'free', 'used'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 */
  /* Look up the `numpy.dtype` callable (t5) from the module-global `numpy`. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1888
 *     cdef nvmlMemory_t pod = nvmlMemory_t()
 *     return _numpy.dtype({
 *         'names': ['total', 'free', 'used'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 */
  /* Build the 4-entry spec dict (t4); first the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_total);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_total);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_total) != (0)) __PYX_ERR(0, 1888, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_free);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_free);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_free) != (0)) __PYX_ERR(0, 1888, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_used);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_used);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_used) != (0)) __PYX_ERR(0, 1888, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 1888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":1889
 *     return _numpy.dtype({
 *         'names': ['total', 'free', 'used'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 */
  /* 'formats': fetch numpy.uint64 three times (one per field) into t7/t8/t9. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 1889, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 1889, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 1889, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 1888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":1891
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),
 */
  /* 'offsets': each entry is the byte offset of a field inside the struct,
   * computed as (address of field) - (address of struct). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.total)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":1892
 *         'offsets': [
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.free)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":1893
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlMemory_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.used)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":1890
 *         'names': ['total', 'free', 'used'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 1890, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 1890, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 1890, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 1888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":1895
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlMemory_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': full struct size so the dtype matches the C layout exactly. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlMemory_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1895, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 1888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via Cython's vectorcall helper; when dtype is a
   * bound method its self is unpacked so the call avoids tuple packing. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1887, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1885
 * 
 * 
 * cdef _get_memory_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlMemory_t pod = nvmlMemory_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release every live temporary, record the traceback, and
   * return NULL (exception already set). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_memory_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1912
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlMemory_t *>calloc(1, sizeof(nvmlMemory_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython tp_init wrapper for `Memory.__init__`: validates that no
 * positional or keyword arguments were passed (the .pyx __init__ takes
 * only self), then dispatches to the generated implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe fast path is gated on the
   * CYTHON_ASSUME_SAFE_SIZE build option. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments: reject any positionals or keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.__init__`: calloc-allocates one zeroed
 * nvmlMemory_t (so all fields start at 0), raises MemoryError if the
 * allocation fails, and initializes ownership flags:
 *   _owner = None   (no other Python object keeps the memory alive)
 *   _owned = True   (this wrapper frees the struct in __dealloc__)
 *   _readonly = False
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":1913
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlMemory_t *>calloc(1, sizeof(nvmlMemory_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Memory")
 */
  __pyx_v_self->_ptr = ((nvmlMemory_t *)calloc(1, (sizeof(nvmlMemory_t))));

  /* "cuda/bindings/_nvml.pyx":1914
 *     def __init__(self):
 *         self._ptr = <nvmlMemory_t *>calloc(1, sizeof(nvmlMemory_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Memory")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":1915
 *         self._ptr = <nvmlMemory_t *>calloc(1, sizeof(nvmlMemory_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Memory")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating Memory").  The builtin
     * is resolved by name at module scope, then called via the same
     * method-unpacking vectorcall pattern Cython uses everywhere. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1915, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Memory};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1915, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 1915, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1914
 *     def __init__(self):
 *         self._ptr = <nvmlMemory_t *>calloc(1, sizeof(nvmlMemory_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Memory")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":1916
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Memory")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None with correct refcounting. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":1917
 *             raise MemoryError("Error allocating Memory")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":1918
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":1912
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlMemory_t *>calloc(1, sizeof(nvmlMemory_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1920
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlMemory_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-side wrapper for `Memory.__dealloc__`: downcasts self and
 * forwards to the generated implementation.  Cannot raise.
 * NOTE(review): `__pyx_args` and `__pyx_nargs` are not declared in this
 * function, so __Pyx_KwValues_VARARGS must be a macro that does not
 * expand its arguments in this configuration — confirm against the
 * Cython utility-code definitions if this ever fails to compile. */
static void __pyx_pw_4cuda_8bindings_5_nvml_6Memory_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_6Memory_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_6Memory_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of `Memory.__dealloc__` (from _nvml.pyx:1920-1925):
 * frees the heap-allocated nvmlMemory_t, but only when this wrapper owns
 * it (_owned) and the pointer is still live.  The pointer is detached
 * from the object before free() so no dangling pointer remains visible. */
static void __pyx_pf_4cuda_8bindings_5_nvml_6Memory_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  nvmlMemory_t *owned_block;

  /* Nothing to do for borrowed or already-released pointers. */
  if (!__pyx_v_self->_owned) {
    return;
  }
  if (__pyx_v_self->_ptr == NULL) {
    return;
  }

  /* Detach first, then release — mirrors `ptr = self._ptr; self._ptr =
   * NULL; free(ptr)` in the .pyx source. */
  owned_block = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(owned_block);
}

/* "cuda/bindings/_nvml.pyx":1927
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Memory object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for `Memory.__repr__`: downcasts self and forwards to
 * the generated implementation; returns a new unicode reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here, so the
   * macro presumably ignores its arguments — verify in Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.__repr__`: evaluates the f-string
 * f"<{__name__}.Memory object at {hex(id(self))}>" by formatting the
 * module's __name__, hex(id(self)), and joining the five pieces with
 * Cython's optimized unicode join.  Returns a new unicode object, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":1928
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.Memory object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t2 = format(__name__) — the module name piece of the f-string. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t1 = str(hex(id(self))) — the address piece of the f-string. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".Memory object at " + hexaddr + ">", with the
   * total length and max char value precomputed for a single allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_Memory_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 18 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1927
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Memory object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1930
 *         return f"<{__name__}.Memory object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-descriptor getter wrapper for `Memory.ptr`: downcasts self
 * and forwards to the generated implementation; returns a new reference
 * or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here, so the
   * macro presumably ignores its arguments — verify in Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `Memory.ptr` property getter: returns the raw
 * address of the wrapped nvmlMemory_t as a Python int (via an intptr_t
 * cast).  Returns NULL with an exception set only if the int allocation
 * fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1933
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1930
 *         return f"<{__name__}.Memory object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1935
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Implementation of the cdef method `Memory._get_ptr`
 * (_nvml.pyx:1935-1936): C-level fast path returning the raw address of
 * the wrapped nvmlMemory_t as an intptr_t.  Same value as the Python
 * `.ptr` property, without building a Python int.  Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_6Memory__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":1938
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for `Memory.__int__`: downcasts self and forwards to
 * the generated implementation; returns a new reference or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here, so the
   * macro presumably ignores its arguments — verify in Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.__int__`: int(memory_obj) yields the raw
 * address of the wrapped nvmlMemory_t — identical to the `.ptr`
 * property.  Returns NULL with an exception set only if the Python int
 * allocation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":1939
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1938
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1941
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Memory other_
 *         if not isinstance(other, Memory):
*/

/* Python wrapper */
/* Rich-comparison (==) wrapper for `Memory.__eq__`: downcasts self and
 * forwards both operands to the generated implementation; returns a new
 * reference or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here, so the
   * macro presumably ignores its arguments — verify in Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.__eq__`: returns False when `other` is not a
 * Memory instance; otherwise compares the two wrapped nvmlMemory_t
 * structs bytewise with memcmp and returns the resulting bool.
 * NOTE(review): memcmp compares every byte, including any struct padding,
 * and neither _ptr is NULL-checked here — both pointers are presumed
 * valid (allocated in __init__); confirm no code path leaves _ptr NULL
 * while the object is still comparable. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":1943
 *     def __eq__(self, other):
 *         cdef Memory other_
 *         if not isinstance(other, Memory):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":1944
 *         cdef Memory other_
 *         if not isinstance(other, Memory):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":1943
 *     def __eq__(self, other):
 *         cdef Memory other_
 *         if not isinstance(other, Memory):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":1945
 *         if not isinstance(other, Memory):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_t)) == 0)
 * 
 */
  /* Downcast `other` to Memory (None is also accepted by the type test,
   * matching Cython's cdef-assignment semantics). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory))))) __PYX_ERR(0, 1945, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":1946
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlMemory_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1941
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Memory other_
 *         if not isinstance(other, Memory):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1948
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for `Memory.__setitem__`: downcasts self and
 * forwards key/value to the generated implementation.  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here, so the
   * macro presumably ignores its arguments — verify in Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Memory.__setitem__(key, val).
 * If key == 0 and val is a numpy.ndarray: allocate a fresh nvmlMemory_t,
 * memcpy sizeof(nvmlMemory_t) bytes from the array's buffer into it, and
 * mark the instance as owning the allocation (readonly mirrors the array's
 * writeable flag).  Any other key falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":1949
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 1949, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 1949, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":1950
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory")
 */
    /* NOTE(review): the previous value of self->_ptr is overwritten without
     * being freed here; presumably ownership is managed at the .pyx level —
     * verify there is no leak when an owned pointer is replaced. */
    __pyx_v_self->_ptr = ((nvmlMemory_t *)malloc((sizeof(nvmlMemory_t))));

    /* "cuda/bindings/_nvml.pyx":1951
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":1952
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called with the message and raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1952, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Memory};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1952, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 1952, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":1951
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":1953
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; it is
     * converted to intptr_t and used as the memcpy source.  Assumes the
     * array buffer holds at least sizeof(nvmlMemory_t) bytes — the caller
     * contract is enforced at the .pyx level, TODO confirm. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1953, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1953, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1953, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlMemory_t))));

    /* "cuda/bindings/_nvml.pyx":1954
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is independent of the source array, so no owner is kept. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":1955
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":1956
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate read-only-ness from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 1956, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":1949
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":1958
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-(0, ndarray) assignments delegate to normal attribute setting. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 1958, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":1948
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1960
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def total(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-getter slot wrapper for Memory.total: casts `self` and delegates.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_5total_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_5total_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_5total___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for Memory.total: returns self._ptr[0].total (an unsigned long long
 * struct field) boxed as a Python int.  Assumes self._ptr is non-NULL —
 * established elsewhere in the .pyx, not checked here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_5total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1963
 *     def total(self):
 *         """int: """
 *         return self._ptr[0].total             # <<<<<<<<<<<<<<
 * 
 *     @total.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).total); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1963, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1960
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def total(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.total.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1965
 *         return self._ptr[0].total
 * 
 *     @total.setter             # <<<<<<<<<<<<<<
 *     def total(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter slot wrapper for Memory.total: casts `self` and delegates.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_5total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_5total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_5total_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for Memory.total: raises ValueError if the instance is read-only,
 * otherwise converts `val` to unsigned long long and stores it into
 * self._ptr[0].total.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_5total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1967
 *     @total.setter
 *     def total(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].total = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1968
 *     def total(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].total = val
 * 
 */
    /* Build and raise ValueError via the builtin exception type directly. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_instance_is_read_onl};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1968, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1968, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1967
 *     @total.setter
 *     def total(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].total = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1969
 *         if self._readonly:
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].total = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned long long)-1 is a legal value, so a conversion error is only
   * signalled when PyErr_Occurred() is also set. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 1969, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).total = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1965
 *         return self._ptr[0].total
 * 
 *     @total.setter             # <<<<<<<<<<<<<<
 *     def total(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.total.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1971
 *         self._ptr[0].total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def free(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-getter slot wrapper for Memory.free: casts `self` and delegates.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_4free_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_4free_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4free___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for Memory.free: returns self._ptr[0].free (unsigned long long)
 * boxed as a Python int.  Assumes self._ptr is non-NULL — established
 * elsewhere in the .pyx, not checked here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_4free___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1974
 *     def free(self):
 *         """int: """
 *         return self._ptr[0].free             # <<<<<<<<<<<<<<
 * 
 *     @free.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).free); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1974, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1971
 *         self._ptr[0].total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def free(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.free.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1976
 *         return self._ptr[0].free
 * 
 *     @free.setter             # <<<<<<<<<<<<<<
 *     def free(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter slot wrapper for Memory.free: casts `self` and delegates.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4free_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4free_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4free_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for Memory.free: raises ValueError if the instance is read-only,
 * otherwise converts `val` to unsigned long long and stores it into
 * self._ptr[0].free.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4free_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1978
 *     @free.setter
 *     def free(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].free = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1979
 *     def free(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].free = val
 * 
 */
    /* Build and raise ValueError via the builtin exception type directly. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_instance_is_read_onl};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1979, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1979, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1978
 *     @free.setter
 *     def free(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].free = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1980
 *         if self._readonly:
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].free = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned long long)-1 is a legal value, so a conversion error is only
   * signalled when PyErr_Occurred() is also set. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 1980, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).free = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1976
 *         return self._ptr[0].free
 * 
 *     @free.setter             # <<<<<<<<<<<<<<
 *     def free(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.free.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1982
 *         self._ptr[0].free = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-getter slot wrapper for Memory.used: casts `self` and delegates.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_4used_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_4used_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4used___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for Memory.used: returns self._ptr[0].used (unsigned long long)
 * boxed as a Python int.  Assumes self._ptr is non-NULL — established
 * elsewhere in the .pyx, not checked here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_4used___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":1985
 *     def used(self):
 *         """int: """
 *         return self._ptr[0].used             # <<<<<<<<<<<<<<
 * 
 *     @used.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).used); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1985, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1982
 *         self._ptr[0].free = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.used.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1987
 *         return self._ptr[0].used
 * 
 *     @used.setter             # <<<<<<<<<<<<<<
 *     def used(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter slot wrapper for Memory.used: casts `self` and delegates.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4used_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4used_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4used_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for Memory.used: raises ValueError if the instance is read-only,
 * otherwise converts `val` to unsigned long long and stores it into
 * self._ptr[0].used.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Memory_4used_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":1989
 *     @used.setter
 *     def used(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].used = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":1990
 *     def used(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].used = val
 * 
 */
    /* Build and raise ValueError via the builtin exception type directly. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_instance_is_read_onl};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1990, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 1990, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":1989
 *     @used.setter
 *     def used(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].used = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":1991
 *         if self._readonly:
 *             raise ValueError("This Memory instance is read-only")
 *         self._ptr[0].used = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned long long)-1 is a legal value, so a conversion error is only
   * signalled when PyErr_Occurred() is also set. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 1991, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).used = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":1987
 *         return self._ptr[0].used
 * 
 *     @used.setter             # <<<<<<<<<<<<<<
 *     def used(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.used.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":1993
 *         self._ptr[0].used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Memory instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Fastcall argument-parsing wrapper for the static method Memory.from_data.
 * Accepts exactly one argument, `data`, positionally or by keyword, then
 * delegates to the implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Memory_12from_data, "Memory.from_data(data)\n\nCreate an Memory instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `memory_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Memory_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 1993, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords and
       * verify the single required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1993, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 1993, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 1993, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 1993, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 1993, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `Memory.from_data(data)`.
 * Delegates to the module-level C helper __from_data(), passing the
 * module-global `memory_dtype` NumPy dtype object and the Memory extension
 * type, so the helper can validate `data` (a single-element ndarray of that
 * dtype) and wrap it in a Memory instance.
 * Returns: new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2000
 *             data (_numpy.ndarray): a single-element array of dtype `memory_dtype` holding the data.
 *         """
 *         return __from_data(data, "memory_dtype", memory_dtype, Memory)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Look up the module-global `memory_dtype` by name (owned reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_memory_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2000, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_memory_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2000, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":1993
 *         self._ptr[0].used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Memory instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2002
 *         return __from_data(data, "memory_dtype", memory_dtype, Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory instance wrapping the given pointer.
*/

/* Python wrapper for the static method
 * `Memory.from_ptr(intptr_t ptr, bint readonly=False, object owner=None)`.
 * Unpacks positional and keyword arguments, converts them to C types, and
 * forwards to the C-level implementation __pyx_pf_..._14from_ptr.
 * Returns: new reference, or NULL with a Python exception set.
 *
 * Fix: the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`, which compares the 0/1 result of
 * __builtin_expect against 0 and is therefore always false (dead code).
 * The parenthesis is moved so a negative count from
 * __Pyx_NumKwargs_FASTCALL is actually detected, matching the correctly
 * generated form used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Memory_14from_ptr, "Memory.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an Memory instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Memory_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: check the count itself for a negative
     * (error) value; `unlikely(x) < 0` was always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2002, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2002, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2002, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2002, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2002, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":2003
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an Memory instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default, then verify `ptr` was supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2002, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2002, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2002, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2002, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert to C types: ptr -> intptr_t, readonly -> truthiness. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2003, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2003, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2002, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":2002
 *         return __from_data(data, "memory_dtype", memory_dtype, Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.from_ptr(ptr, readonly=False, owner=None)`.
 * Rejects a null pointer with ValueError.  When `owner` is None, a private
 * nvmlMemory_t is malloc'd, the pointee is copied into it, and the new
 * Memory instance owns that copy (_owned = True); otherwise the instance
 * aliases the caller's memory directly and holds a reference to `owner` to
 * keep it alive (_owned = False).  `_readonly` records the readonly flag.
 * Returns: new reference to the Memory instance, or NULL with an exception
 * set (ValueError for null ptr, MemoryError on allocation failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2011
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory obj = Memory.__new__(Memory)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2012
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef Memory obj = Memory.__new__(Memory)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2012, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2012, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2011
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory obj = Memory.__new__(Memory)
*/
  }

  /* "cuda/bindings/_nvml.pyx":2013
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory obj = Memory.__new__(Memory)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
*/
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Memory(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2013, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2014
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory obj = Memory.__new__(Memory)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2015
 *         cdef Memory obj = Memory.__new__(Memory)
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory")
*/
    /* No owner: take a private copy of the struct behind `ptr`. */
    __pyx_v_obj->_ptr = ((nvmlMemory_t *)malloc((sizeof(nvmlMemory_t))));

    /* "cuda/bindings/_nvml.pyx":2016
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2017
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2017, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Memory};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2017, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 2017, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2016
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":2018
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlMemory_t))));

    /* "cuda/bindings/_nvml.pyx":2019
 *                 raise MemoryError("Error allocating Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2020
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlMemory_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2014
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory obj = Memory.__new__(Memory)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlMemory_t *>malloc(sizeof(nvmlMemory_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":2022
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlMemory_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner given: alias the caller's memory, keep owner alive. */
    __pyx_v_obj->_ptr = ((nvmlMemory_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2023
 *         else:
 *             obj._ptr = <nvmlMemory_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":2024
 *             obj._ptr = <nvmlMemory_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":2025
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":2026
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2002
 *         return __from_data(data, "memory_dtype", memory_dtype, Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for `Memory.__reduce_cython__(self)`.
 * Rejects any positional or keyword arguments, then forwards `self` to the
 * C-level implementation (which always raises TypeError). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Memory_16__reduce_cython__, "Memory.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Memory_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Method takes no arguments beyond `self`: reject extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.__reduce_cython__`.
 * Pickling is unsupported: unconditionally raises TypeError because the
 * wrapped C pointer (self._ptr) cannot be serialized.  Always returns NULL
 * with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for `Memory.__setstate_cython__(self, __pyx_state)`.
 * Parses exactly one positional-or-keyword argument (`__pyx_state`) and
 * forwards to the C-level implementation, which unconditionally raises
 * TypeError (Memory instances are not picklable).
 *
 * Fix: the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`, comparing the 0/1 result of
 * __builtin_expect against 0 — always false, making the check dead code.
 * The parenthesis is moved so a negative count from
 * __Pyx_NumKwargs_FASTCALL is actually detected, matching the correctly
 * generated form used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Memory_18__setstate_cython__, "Memory.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Memory_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Memory_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: check the count itself for a negative
     * (error) value; `unlikely(x) < 0` was always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Memory_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory.__setstate_cython__`.
 * Unpickling is unsupported: unconditionally raises TypeError because the
 * wrapped C pointer (self._ptr) cannot be restored from pickled state.
 * The `__pyx_state` argument is accepted but never used.  Always returns
 * NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Memory_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2029
 * 
 * 
 * cdef _get_memory_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlMemory_v2_t pod = nvmlMemory_v2_t()
 *     return _numpy.dtype({
*/

/* _get_memory_v2_dtype_offsets — builds and returns a numpy structured dtype
 * that mirrors the C layout of nvmlMemory_v2_t: field names
 * ['version','total','reserved','free','used'], formats
 * [uint32, uint64 x4], per-field byte offsets computed from the addresses of
 * a stack instance `pod`, and itemsize == sizeof(nvmlMemory_v2_t).
 * Returns a new reference to the dtype object, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_memory_v2_dtype_offsets(void) {
  nvmlMemory_v2_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlMemory_v2_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_memory_v2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":2030
 * 
 * cdef _get_memory_v2_dtype_offsets():
 *     cdef nvmlMemory_v2_t pod = nvmlMemory_v2_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'total', 'reserved', 'free', 'used'],
 */
  /* NOTE(review): __pyx_t_1 has no visible initialization in this chunk, so
   * pod's *contents* are indeterminate here.  This is harmless for this
   * function — only the addresses of pod's members are used below, never
   * their values — but confirm the generator's intent if the pattern recurs. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":2031
 * cdef _get_memory_v2_dtype_offsets():
 *     cdef nvmlMemory_v2_t pod = nvmlMemory_v2_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'total', 'reserved', 'free', 'used'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64],
 */
  /* Look up the module-global `_numpy` and fetch its `dtype` attribute;
   * __pyx_t_5 ends up holding the callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2031, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2031, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":2032
 *     cdef nvmlMemory_v2_t pod = nvmlMemory_v2_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'total', 'reserved', 'free', 'used'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 */
  /* Build the spec dict (__pyx_t_4) and its 'names' list of interned field
   * names; each SET_ITEM consumes a reference given by GIVEREF. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2032, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2032, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 2032, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_total);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_total);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_total) != (0)) __PYX_ERR(0, 2032, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_reserved);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_reserved);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_reserved) != (0)) __PYX_ERR(0, 2032, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_free);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_free);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_free) != (0)) __PYX_ERR(0, 2032, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_used);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_used);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_used) != (0)) __PYX_ERR(0, 2032, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 2032, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2033
 *     return _numpy.dtype({
 *         'names': ['version', 'total', 'reserved', 'free', 'used'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' list: numpy.uint32 for `version`, numpy.uint64 for the four
   * 64-bit counters.  Each attribute is fetched fresh from the module. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 2033, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 2033, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 2033, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 2033, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 2033, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 2032, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2035
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 */
  /* Per-field byte offsets: member address minus the struct's base address
   * (equivalent to offsetof on nvmlMemory_v2_t), boxed as Python ints. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2035, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":2036
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.total)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2036, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":2037
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.reserved)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":2038
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.free)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":2039
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.free)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlMemory_v2_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.used)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":2034
 *         'names': ['version', 'total', 'reserved', 'free', 'used'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.total)) - (<intptr_t>&pod),
 */
  /* Collect the five offset ints into the 'offsets' list, in field order. */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2034, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 2034, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 2034, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 2034, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 2034, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 2034, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 2032, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":2041
 *             (<intptr_t>&(pod.used)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlMemory_v2_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype's total size (including padding) to the
   * actual C struct size, so numpy views match the ABI layout exactly. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlMemory_v2_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2041, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 2032, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall fast path; the bound-method
   * unpacking below shifts the argument window when dtype is a method. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2031, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2029
 * 
 * 
 * cdef _get_memory_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlMemory_v2_t pod = nvmlMemory_v2_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_memory_v2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2058
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlMemory_v2_t *>calloc(1, sizeof(nvmlMemory_v2_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper for Memory_v2.__init__: validates that the call carries
 * zero positional and zero keyword arguments (the pyx signature is
 * `def __init__(self)`), then delegates to the _pf_ implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-fast path also checks for a size error. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments at all: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Memory_v2.__init__ implementation: allocates a zero-initialized
 * nvmlMemory_v2_t on the C heap (calloc), raises MemoryError if the
 * allocation fails, and marks the instance as the owner of the buffer
 * (_owner=None, _owned=True, _readonly=False).  Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":2059
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlMemory_v2_t *>calloc(1, sizeof(nvmlMemory_v2_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Memory_v2")
 */
  /* calloc gives a zeroed struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlMemory_v2_t *)calloc(1, (sizeof(nvmlMemory_v2_t))));

  /* "cuda/bindings/_nvml.pyx":2060
 *     def __init__(self):
 *         self._ptr = <nvmlMemory_v2_t *>calloc(1, sizeof(nvmlMemory_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Memory_v2")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2061
 *         self._ptr = <nvmlMemory_v2_t *>calloc(1, sizeof(nvmlMemory_v2_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Memory_v2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError (may be shadowed at module level) and call it with
     * the interned message via the vectorcall fast path, then raise. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2061, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Memory_v2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2061, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2061, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2060
 *     def __init__(self):
 *         self._ptr = <nvmlMemory_v2_t *>calloc(1, sizeof(nvmlMemory_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Memory_v2")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":2062
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Memory_v2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None with proper refcounting. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":2063
 *             raise MemoryError("Error allocating Memory_v2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This instance owns _ptr; __dealloc__ will free it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":2064
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":2058
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlMemory_v2_t *>calloc(1, sizeof(nvmlMemory_v2_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2066
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlMemory_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level tp_dealloc hook wrapper for Memory_v2.__dealloc__: casts
 * `self` and delegates to the _pf_ implementation.  Cannot fail. */
static void __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
   * this line compiles only if __Pyx_KwValues_VARARGS is a macro that
   * discards its arguments — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Memory_v2.__dealloc__ implementation — generated from
 * "cuda/bindings/_nvml.pyx":2066-2071:
 *
 *     def __dealloc__(self):
 *         cdef nvmlMemory_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)
 *
 * Frees the heap-allocated nvmlMemory_v2_t only when this instance owns it
 * (non-owning views created over foreign memory must not free it). */
static void __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  /* Short-circuit evaluation matches the original's two-step test:
   * _owned is checked first, _ptr only when _owned is true. */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlMemory_v2_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    /* Null the member before freeing so nothing can observe a dangling
     * pointer through self during teardown. */
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":2073
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Memory_v2 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for Memory_v2.__repr__: casts `self` and delegates to the
 * _pf_ implementation; returns a new str reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Memory_v2.__repr__ implementation: builds the f-string
 * f"<{__name__}.Memory_v2 object at {hex(id(self))}>" by formatting the
 * module's __name__, computing hex(id(self)), and joining five unicode
 * pieces in one pass.  Returns a new str reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":2074
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.Memory_v2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__): fetch the module global and format it with
   * an empty format spec (plain str conversion). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))): builtin id, then hex, then unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five pieces "<", name, ".Memory_v2 object at ", hexaddr, ">"
   * with a precomputed total length and max char width. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_Memory_v2_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 21 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2073
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Memory_v2 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2076
 *         return f"<{__name__}.Memory_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper for the Memory_v2.ptr property getter: casts `self` and
 * delegates to the _pf_ implementation; returns a new int reference. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Memory_v2.ptr property getter implementation: exposes the raw struct
 * pointer address as a Python int (the pyx docstring reads "Get the pointer
 * address to the data as Python :class:`int`.").  Returns a new reference,
 * or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2079
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a signed Python int via intptr_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2079, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2076
 *         return f"<{__name__}.Memory_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2081
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Memory_v2._get_ptr — generated from "cuda/bindings/_nvml.pyx":2081-2082:
 *
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)
 *
 * C-level fast path returning the raw struct pointer as an integer address;
 * never touches the Python API and cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_9Memory_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":2084
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper for Memory_v2.__int__: casts `self` and delegates to the
 * _pf_ implementation; returns a new int reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Memory_v2.__int__ implementation: int(obj) yields the raw struct pointer
 * address, mirroring the `ptr` property getter above.  Returns a new
 * reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":2085
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a signed Python int via intptr_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2085, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2084
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2087
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Memory_v2 other_
 *         if not isinstance(other, Memory_v2):
*/

/* Python wrapper */
/* Python wrapper for Memory_v2.__eq__: casts `self`, forwards `other`, and
 * delegates to the _pf_ implementation; returns a new bool reference or
 * NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Memory_v2.__eq__ (from _nvml.pyx:2087-2092).
 * Returns Py_False when `other` is not a Memory_v2 instance; otherwise
 * compares the two wrapped nvmlMemory_v2_t structs bytewise via memcmp
 * on the objects' _ptr members. Returns a new reference, NULL on error.
 * NOTE(review): neither _ptr is NULL-checked here — assumed non-NULL by
 * construction of Memory_v2 instances; confirm against the .pyx ctor. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":2089
 * 
 *     def __eq__(self, other):
 *         cdef Memory_v2 other_
 *         if not isinstance(other, Memory_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Exact-or-subclass type check; mismatch short-circuits to False below. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2090
 *         cdef Memory_v2 other_
 *         if not isinstance(other, Memory_v2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_v2_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2089
 *     def __eq__(self, other):
 *         cdef Memory_v2 other_
 *         if not isinstance(other, Memory_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":2091
 *         if not isinstance(other, Memory_v2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_v2_t)) == 0)
 * 
 */
  /* Re-cast `other` to the typed local; the TypeTest duplicates the check
   * above (also admits None) because the Cython assignment is independently
   * guarded — it raises TypeError via __PYX_ERR on failure. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2))))) __PYX_ERR(0, 2091, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":2092
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_v2_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Bytewise struct equality: equal iff all sizeof(nvmlMemory_v2_t) bytes match. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlMemory_v2_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2092, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2087
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Memory_v2 other_
 *         if not isinstance(other, Memory_v2):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2094
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
*/

/* Python wrapper */
/* CPython-level wrapper for Memory_v2.__setitem__ (mp_ass_subscript entry).
 * Downcasts `self` and forwards key/val unchanged to the typed impl.
 * Returns 0 on success, -1 on error (propagated from the impl). */
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Memory_v2.__setitem__ (from _nvml.pyx:2094-2104).
 * Two behaviors:
 *   obj[0] = <numpy.ndarray>  — malloc a fresh nvmlMemory_v2_t, memcpy the
 *     array's buffer into it, take ownership (_owned = True, _owner = None),
 *     and inherit read-only-ness from the array's writeable flag.
 *   any other key/val        — falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray branch copies sizeof(nvmlMemory_v2_t) bytes from
 * val.ctypes.data without checking val.nbytes, and overwrites self->_ptr
 * without freeing a previously owned buffer — assumed validated/managed by
 * the generating .pyx template; confirm there, not here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":2095
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the ndarray isinstance check only runs when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2095, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Late-bound lookup of numpy.ndarray via the module global `_numpy`. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2095, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2095, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 2095, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2096
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory_v2")
 */
    __pyx_v_self->_ptr = ((nvmlMemory_v2_t *)malloc((sizeof(nvmlMemory_v2_t))));

    /* "cuda/bindings/_nvml.pyx":2097
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_v2_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2098
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_v2_t))
 *             self._owner = None
 */
      /* raise MemoryError(...) — calls the (possibly shadowed) builtin by name. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2098, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Memory_v2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2098, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 2098, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2097
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":2099
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_v2_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is a Python int holding the array's base address;
     * convert it back to a pointer and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2099, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2099, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2099, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlMemory_v2_t))));

    /* "cuda/bindings/_nvml.pyx":2100
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_v2_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is independent of `val` from here on; drop any previous owner. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2101
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMemory_v2_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    /* _owned marks _ptr as ours — presumably freed by the type's dealloc;
     * confirm against the Memory_v2 __dealloc__ in the .pyx. */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2102
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2102, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2102, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 2102, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":2095
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":2104
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat the subscript as attribute assignment (obj[name] = val). */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 2104, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":2094
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMemory_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2106
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.version property getter.
 * Downcasts `self` and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for Memory_v2.version (from _nvml.pyx:2106-2109): boxes the
 * struct's unsigned-int `version` field as a Python int. Returns a new
 * reference, NULL on error. _ptr is dereferenced without a NULL check —
 * assumed non-NULL by construction; confirm against the .pyx ctor. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2109
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2106
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2111
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.version property setter.
 * Downcasts `self` and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for Memory_v2.version (from _nvml.pyx:2111-2115): raises
 * ValueError when the instance is read-only, otherwise converts `val` to
 * unsigned int (OverflowError/TypeError on bad input) and stores it in the
 * struct. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2113
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2114
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct and raise ValueError via vectorcall on the exception type. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_v2_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2114, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2114, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2113
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2115
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2115, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2111
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2117
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def total(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.total property getter.
 * Downcasts `self` and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5total_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5total_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_5total___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for Memory_v2.total (from _nvml.pyx:2117-2120): boxes the
 * struct's unsigned-long-long `total` field as a Python int. Returns a
 * new reference, NULL on error. _ptr dereferenced without a NULL check —
 * assumed non-NULL by construction; confirm against the .pyx ctor. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_5total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2120
 *     def total(self):
 *         """int: """
 *         return self._ptr[0].total             # <<<<<<<<<<<<<<
 * 
 *     @total.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).total); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2120, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2117
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def total(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.total.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2122
 *         return self._ptr[0].total
 * 
 *     @total.setter             # <<<<<<<<<<<<<<
 *     def total(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.total property setter.
 * Downcasts `self` and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_5total_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for Memory_v2.total (from _nvml.pyx:2122-2126): raises ValueError
 * when the instance is read-only, otherwise converts `val` to unsigned
 * long long (OverflowError/TypeError on bad input) and stores it in the
 * struct. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_5total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2124
 *     @total.setter
 *     def total(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].total = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2125
 *     def total(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].total = val
 * 
 */
    /* Construct and raise ValueError via vectorcall on the exception type. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_v2_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2125, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2125, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2124
 *     @total.setter
 *     def total(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].total = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2126
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].total = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned PY_LONG_LONG)-1 doubles as error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2126, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).total = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2122
 *         return self._ptr[0].total
 * 
 *     @total.setter             # <<<<<<<<<<<<<<
 *     def total(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.total.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2128
 *         self._ptr[0].total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def free(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.free property getter.
 * Downcasts `self` and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4free_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4free_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4free___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for Memory_v2.free (from _nvml.pyx:2128-2131): boxes the struct's
 * unsigned-long-long `free` field as a Python int. Returns a new reference,
 * NULL on error. (The property is named `free`; it does not release memory —
 * it reads the nvmlMemory_v2_t member of the same name.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4free___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2131
 *     def free(self):
 *         """int: """
 *         return self._ptr[0].free             # <<<<<<<<<<<<<<
 * 
 *     @free.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).free); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2128
 *         self._ptr[0].total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def free(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.free.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2133
 *         return self._ptr[0].free
 * 
 *     @free.setter             # <<<<<<<<<<<<<<
 *     def free(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.free property setter.
 * Downcasts `self` and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4free_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4free_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4free_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for Memory_v2.free (from _nvml.pyx:2133-2137): raises ValueError
 * when the instance is read-only, otherwise converts `val` to unsigned
 * long long (OverflowError/TypeError on bad input) and stores it in the
 * struct. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4free_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2135
 *     @free.setter
 *     def free(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].free = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2136
 *     def free(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].free = val
 * 
 */
    /* Construct and raise ValueError via vectorcall on the exception type. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_v2_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2136, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2136, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2135
 *     @free.setter
 *     def free(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].free = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2137
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].free = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned PY_LONG_LONG)-1 doubles as error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2137, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).free = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2133
 *         return self._ptr[0].free
 * 
 *     @free.setter             # <<<<<<<<<<<<<<
 *     def free(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.free.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2139
 *         self._ptr[0].free = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the Memory_v2.used property getter.
 * Downcasts `self` and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4used_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4used_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4used___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory_v2.used.__get__`: boxes the C field
 * `self._ptr[0].used` (unsigned PY_LONG_LONG) into a Python int.
 * Returns a new reference, or NULL with an exception set if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4used___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2142
 *     def used(self):
 *         """int: """
 *         return self._ptr[0].used             # <<<<<<<<<<<<<<
 * 
 *     @used.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned 64-bit field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).used); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2139
 *         self._ptr[0].free = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.used.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2144
 *         return self._ptr[0].used
 * 
 *     @used.setter             # <<<<<<<<<<<<<<
 *     def used(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for the `Memory_v2.used` property: casts self to the
 * extension-type struct and forwards `val` to the impl function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4used_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4used_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4used_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory_v2.used.__set__`: raises ValueError when the
 * instance is read-only, otherwise converts `val` to unsigned PY_LONG_LONG
 * and stores it into `self._ptr[0].used`.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_4used_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2146
 *     @used.setter
 *     def used(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].used = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2147
 *     def used(self, val):
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].used = val
 * 
 */
    /* Build and raise ValueError via the vectorcall helper; __pyx_t_3 == 1
     * signals "no self slot occupied" in the callargs array. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Memory_v2_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2147, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2147, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2146
 *     @used.setter
 *     def used(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].used = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2148
 *         if self._readonly:
 *             raise ValueError("This Memory_v2 instance is read-only")
 *         self._ptr[0].used = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox the Python int; (unsigned PY_LONG_LONG)-1 plus a pending
   * exception signals conversion failure (e.g. overflow or wrong type). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2148, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).used = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2144
 *         return self._ptr[0].used
 * 
 *     @used.setter             # <<<<<<<<<<<<<<
 *     def used(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.used.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2150
 *         self._ptr[0].used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Memory_v2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method `Memory_v2.from_data(data)`:
 * unpacks exactly one positional-or-keyword argument "data" from the
 * fastcall/tuple calling convention, then delegates to the impl function.
 * Returns a new reference, or NULL with an exception set.
 * FIX(review): the keyword-count guard read `unlikely(__pyx_kwds_len) < 0`;
 * since unlikely(x) expands to __builtin_expect(!!(x), 0) its value is 0 or 1,
 * so the comparison was always false and a negative count from
 * __Pyx_NumKwargs_FASTCALL was never diagnosed. The paren now encloses the
 * whole comparison, matching the `unlikely(__pyx_nargs < 0)` form used above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_12from_data, "Memory_v2.from_data(data)\n\nCreate an Memory_v2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `memory_v2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: if (unlikely(__pyx_kwds_len) < 0) — dead check, see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2150, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect up to 1 positional, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 2150, __pyx_L3_error)
      /* Verify the one required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 2150, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2150, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 2150, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory_v2.from_data(data)`: looks up the module-level
 * `memory_v2_dtype` object and forwards everything to the shared
 * __from_data helper together with the Memory_v2 type object.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2157
 *             data (_numpy.ndarray): a single-element array of dtype `memory_v2_dtype` holding the data.
 *         """
 *         return __from_data(data, "memory_v2_dtype", memory_v2_dtype, Memory_v2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the module global `memory_v2_dtype` at call time (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_memory_v2_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_memory_v2_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2150
 *         self._ptr[0].used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Memory_v2 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2159
 *         return __from_data(data, "memory_v2_dtype", memory_v2_dtype, Memory_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory_v2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for `Memory_v2.from_ptr(ptr, readonly=False,
 * owner=None)`: unpacks 1-3 positional/keyword arguments, applies defaults,
 * converts `ptr` to intptr_t and `readonly` to a C truth value, then
 * delegates to the impl function. Returns a new reference, or NULL.
 * NOTE(review): `ptr` is converted with PyLong_AsSsize_t, which assumes
 * intptr_t and Py_ssize_t are interchangeable on supported platforms —
 * confirm for any new target.
 * FIX(review): the keyword-count guard read `unlikely(__pyx_kwds_len) < 0`;
 * unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is 0 or 1,
 * so the comparison was always false and a negative count from
 * __Pyx_NumKwargs_FASTCALL was never diagnosed. The paren now encloses the
 * whole comparison, matching the `unlikely(__pyx_nargs < 0)` form above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_14from_ptr, "Memory_v2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an Memory_v2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: if (unlikely(__pyx_kwds_len) < 0) — dead check, see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2159, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect up to 3 positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2159, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":2160
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an Memory_v2 instance wrapping the given pointer.
 * 
 */
      /* Default owner=None; then verify the one required argument (ptr). */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2159, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1-3 arguments, defaults filled below. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2159, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2160, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2160, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2159, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":2159
 *         return __from_data(data, "memory_v2_dtype", memory_v2_dtype, Memory_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory_v2.from_ptr`: rejects a null pointer, allocates
 * a new Memory_v2 instance, and either copies the pointed-to struct into a
 * freshly malloc'd buffer (owner is None → obj owns/frees the copy) or
 * aliases the caller's pointer and keeps `owner` alive to guarantee its
 * lifetime. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2168
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2169
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall helper. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2169, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2169, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2168
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)
 */
  }

  /* "cuda/bindings/_nvml.pyx":2170
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 */
  /* Allocate via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Memory_v2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2170, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2171
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2172
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory_v2")
 */
    /* No owner: take a private copy so the caller's buffer may go away. */
    __pyx_v_obj->_ptr = ((nvmlMemory_v2_t *)malloc((sizeof(nvmlMemory_v2_t))));

    /* "cuda/bindings/_nvml.pyx":2173
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_v2_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2174
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_v2_t))
 *             obj._owner = None
 */
      /* `MemoryError` is resolved as a module global (it may be shadowed),
       * then called; the method-unpack branch handles a bound method. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2174, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Memory_v2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2174, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 2174, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2173
 *         if owner is None:
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":2175
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_v2_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy one struct from the caller-supplied address; `ptr` was validated
     * non-zero above but is otherwise trusted to be valid and readable. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlMemory_v2_t))));

    /* "cuda/bindings/_nvml.pyx":2176
 *                 raise MemoryError("Error allocating Memory_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_v2_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2177
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMemory_v2_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlMemory_v2_t *>ptr
 */
    /* _owned marks that this instance must free _ptr on dealloc. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2171
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Memory_v2 obj = Memory_v2.__new__(Memory_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlMemory_v2_t *>malloc(sizeof(nvmlMemory_v2_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":2179
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlMemory_v2_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the pointer and hold a reference to `owner`
     * so the underlying storage stays alive as long as this object. */
    __pyx_v_obj->_ptr = ((nvmlMemory_v2_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2180
 *         else:
 *             obj._ptr = <nvmlMemory_v2_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":2181
 *             obj._ptr = <nvmlMemory_v2_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":2182
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":2183
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2159
 *         return __from_data(data, "memory_v2_dtype", memory_v2_dtype, Memory_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* On success this drops the local ref (the result holds its own);
   * on error it releases the partially-initialized instance. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for `Memory_v2.__reduce_cython__`: rejects any positional or
 * keyword arguments, then delegates to the impl (which always raises). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_16__reduce_cython__, "Memory_v2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Method takes no arguments beyond self: reject extras up front. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Memory_v2.__reduce_cython__`: pickling is unsupported
 * for this extension type (it wraps a raw C pointer), so this always raises
 * TypeError and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for ``Memory_v2.__setstate_cython__``.
 * Unpacks exactly one argument (``__pyx_state``), supporting both the
 * METH_FASTCALL and the classic tuple/dict calling conventions, then
 * forwards to the implementation function below (which always raises
 * TypeError — this type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_18__setstate_cython__, "Memory_v2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Slot for the single positional/keyword argument. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): the negative-result test must be inside unlikely().
     * ``unlikely(x)`` yields 0/1, so the previous form
     * ``unlikely(__pyx_kwds_len) < 0`` was always false and an error
     * return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ``Memory_v2.__setstate_cython__``: unconditionally
 * raises TypeError, because the wrapped ``_ptr`` cannot be pickled.
 * Always returns NULL (exception set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9Memory_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the fixed message; control always transfers to
   * the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Memory_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2186
 * 
 * 
 * cdef _get_ba_r1memory_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlBAR1Memory_t pod = nvmlBAR1Memory_t()
 *     return _numpy.dtype({
*/

/* Builds and returns the numpy structured dtype describing nvmlBAR1Memory_t:
 * field names, uint64 formats, byte offsets of bar1Total/bar1Free/bar1Used
 * (computed via pointer arithmetic on a local instance), and total itemsize.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ba_r1memory_dtype_offsets(void) {
  nvmlBAR1Memory_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlBAR1Memory_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ba_r1memory_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":2187
 * 
 * cdef _get_ba_r1memory_dtype_offsets():
 *     cdef nvmlBAR1Memory_t pod = nvmlBAR1Memory_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['bar1total', 'bar1free', 'bar1_used'],
 */
  /* FIX(review): value-initialize the temporary before copying it into
   * ``pod``.  Previously ``__pyx_t_1`` was read without ever being
   * assigned (undefined behavior: copy of an indeterminate value); the
   * Cython source line explicitly requests ``nvmlBAR1Memory_t()``. */
  __pyx_t_1 = nvmlBAR1Memory_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":2188
 * cdef _get_ba_r1memory_dtype_offsets():
 *     cdef nvmlBAR1Memory_t pod = nvmlBAR1Memory_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['bar1total', 'bar1free', 'bar1_used'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up numpy.dtype once; __pyx_t_5 holds the callable. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":2189
 *     cdef nvmlBAR1Memory_t pod = nvmlBAR1Memory_t()
 *     return _numpy.dtype({
 *         'names': ['bar1total', 'bar1free', 'bar1_used'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 */
  /* dtype spec dict with 4 keys: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bar1total);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bar1total);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_bar1total) != (0)) __PYX_ERR(0, 2189, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bar1free);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bar1free);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_bar1free) != (0)) __PYX_ERR(0, 2189, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bar1_used);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bar1_used);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_bar1_used) != (0)) __PYX_ERR(0, 2189, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 2189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2190
 *     return _numpy.dtype({
 *         'names': ['bar1total', 'bar1free', 'bar1_used'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.bar1Total)) - (<intptr_t>&pod),
 */
  /* All three fields are uint64. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 2190, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 2190, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 2190, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 2189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2192
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.bar1Total)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bar1Free)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Used)) - (<intptr_t>&pod),
 */
  /* Field offsets derived from member addresses within ``pod``;
   * only addresses are taken, never the (zeroed) contents. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bar1Total)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":2193
 *         'offsets': [
 *             (<intptr_t>&(pod.bar1Total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Free)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bar1Used)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bar1Free)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":2194
 *             (<intptr_t>&(pod.bar1Total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Free)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Used)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlBAR1Memory_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bar1Used)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":2191
 *         'names': ['bar1total', 'bar1free', 'bar1_used'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bar1Total)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Free)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 2191, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 2191, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 2191, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 2189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":2196
 *             (<intptr_t>&(pod.bar1Used)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlBAR1Memory_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlBAR1Memory_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 2189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict), unpacking a bound method if needed. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2188, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2186
 * 
 * 
 * cdef _get_ba_r1memory_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlBAR1Memory_t pod = nvmlBAR1Memory_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_ba_r1memory_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2213
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlBAR1Memory_t *>calloc(1, sizeof(nvmlBAR1Memory_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init entry point for ``BAR1Memory.__init__``.  Rejects any
 * positional or keyword arguments, then delegates to the implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: fail fast on any positionals/keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ``BAR1Memory.__init__``: zero-allocates one
 * nvmlBAR1Memory_t with calloc, raises MemoryError on allocation failure,
 * and initializes ownership flags (_owner=None, _owned=True,
 * _readonly=False).  The struct is freed in __dealloc__ when _owned is
 * still set.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":2214
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlBAR1Memory_t *>calloc(1, sizeof(nvmlBAR1Memory_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating BAR1Memory")
 */
  /* calloc zero-initializes the struct fields. */
  __pyx_v_self->_ptr = ((nvmlBAR1Memory_t *)calloc(1, (sizeof(nvmlBAR1Memory_t))));

  /* "cuda/bindings/_nvml.pyx":2215
 *     def __init__(self):
 *         self._ptr = <nvmlBAR1Memory_t *>calloc(1, sizeof(nvmlBAR1Memory_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating BAR1Memory")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2216
 *         self._ptr = <nvmlBAR1Memory_t *>calloc(1, sizeof(nvmlBAR1Memory_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating BAR1Memory")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating BAR1Memory"). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_BAR1Memory};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2216, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2216, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2215
 *     def __init__(self):
 *         self._ptr = <nvmlBAR1Memory_t *>calloc(1, sizeof(nvmlBAR1Memory_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating BAR1Memory")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":2217
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating BAR1Memory")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":2218
 *             raise MemoryError("Error allocating BAR1Memory")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":2219
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":2213
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlBAR1Memory_t *>calloc(1, sizeof(nvmlBAR1Memory_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2221
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlBAR1Memory_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time entry point for ``BAR1Memory.__dealloc__``; forwards
 * to the implementation below.  NOTE(review): ``__pyx_args`` and
 * ``__pyx_nargs`` are not parameters here; this presumably compiles only
 * because __Pyx_KwValues_VARARGS is a macro that discards its arguments —
 * confirm against the generated preamble. */
static void __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of ``BAR1Memory.__dealloc__``: frees the calloc'd
 * nvmlBAR1Memory_t only when this wrapper owns it (_owned) and the
 * pointer is non-NULL.  The pointer is nulled before free() so the
 * object never holds a dangling _ptr. */
static void __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  nvmlBAR1Memory_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlBAR1Memory_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":2223
 *     def __dealloc__(self):
 *         cdef nvmlBAR1Memory_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit AND: skip the NULL test when not owned. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2224
 *         cdef nvmlBAR1Memory_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":2225
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the member before freeing to avoid a dangling pointer. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":2226
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2223
 *     def __dealloc__(self):
 *         cdef nvmlBAR1Memory_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":2221
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlBAR1Memory_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":2228
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.BAR1Memory object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr entry point for ``BAR1Memory.__repr__``; forwards to the
 * implementation below and returns its new reference (or NULL on error).
 * NOTE(review): __Pyx_KwValues_VARARGS receives names that are not
 * parameters here — presumably the macro discards its arguments; confirm
 * against the generated preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ``BAR1Memory.__repr__``: builds the f-string
 * ``<{__name__}.BAR1Memory object at {hex(id(self))}>`` by formatting the
 * module name and the hex of id(self), then joining five unicode pieces.
 * Returns a new str reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":2229
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.BAR1Memory object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__) — the formatted module name. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".BAR1Memory object at " + hexid + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_BAR1Memory_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 22 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2228
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.BAR1Memory object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2231
 *         return f"<{__name__}.BAR1Memory object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter entry point for ``BAR1Memory.ptr``; forwards to the
 * implementation below and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ``BAR1Memory.ptr`` property: returns the raw
 * nvmlBAR1Memory_t* address as a Python int (via intptr_t).  Returns a
 * new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2234
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2231
 *         return f"<{__name__}.BAR1Memory object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2236
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for ``BAR1Memory._get_ptr``: returns the wrapped
 * nvmlBAR1Memory_t* as an intptr_t.  No Python objects are touched, so no
 * refnanny/error machinery is required. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_10BAR1Memory__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":2236-2237 — ``return <intptr_t>(self._ptr)`` */
  return (intptr_t)(__pyx_v_self->_ptr);
}

/* "cuda/bindings/_nvml.pyx":2239
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.__int__ (_nvml.pyx:2239-2240): converts the
 * wrapped struct pointer (self->_ptr cast to intptr_t) into a Python int
 * via PyLong_FromSsize_t. Returns a new reference, or NULL with an
 * exception set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":2240
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t is converted through Py_ssize_t here; the two are the same
   * width on the platforms this generated code targets. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2240, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2239
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2242
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef BAR1Memory other_
 *         if not isinstance(other, BAR1Memory):
*/

/* Python wrapper */
/* Cython-generated Python wrapper for BAR1Memory.__eq__: downcasts self to
 * the extension struct and forwards (self, other) to the __pyx_pf_...
 * implementation below. No argument validation happens here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.__eq__ (_nvml.pyx:2242-2247):
 *   - returns Py_False when `other` is not a BAR1Memory instance;
 *   - otherwise compares the two underlying nvmlBAR1Memory_t structs
 *     byte-for-byte with memcmp and returns the resulting Python bool.
 * NOTE(review): the cast at pyx line 2246 accepts None, so `other_._ptr`
 * would dereference NULL for other=None -- but the isinstance guard above
 * already returned False for None, so that path is unreachable here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":2244
 *     def __eq__(self, other):
 *         cdef BAR1Memory other_
 *         if not isinstance(other, BAR1Memory):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2245
 *         cdef BAR1Memory other_
 *         if not isinstance(other, BAR1Memory):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBAR1Memory_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2244
 *     def __eq__(self, other):
 *         cdef BAR1Memory other_
 *         if not isinstance(other, BAR1Memory):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":2246
 *         if not isinstance(other, BAR1Memory):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBAR1Memory_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory))))) __PYX_ERR(0, 2246, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":2247
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBAR1Memory_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlBAR1Memory_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2247, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2242
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef BAR1Memory other_
 *         if not isinstance(other, BAR1Memory):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2249
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBAR1Memory_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
*/

/* Python wrapper */
/* Cython-generated Python wrapper for BAR1Memory.__setitem__: downcasts
 * self to the extension struct and forwards (key, val) to the __pyx_pf_...
 * implementation below. Returns 0 on success, -1 with an exception set
 * on failure (standard mp_ass_subscript convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.__setitem__ (_nvml.pyx:2249-2259).
 * Two behaviors, selected by the arguments:
 *   - key == 0 and val is a numpy.ndarray: malloc a fresh
 *     nvmlBAR1Memory_t, memcpy the array's buffer (via val.ctypes.data)
 *     into it, set _owner=None, _owned=True, and mirror the array's
 *     writeability into _readonly. Raises MemoryError if malloc fails.
 *   - anything else: falls through to setattr(self, key, val).
 * NOTE(review): the ndarray branch overwrites self->_ptr without freeing
 * a previously owned pointer -- whether that can leak depends on the
 * constructor/dealloc logic outside this view; the .pyx source is the
 * place to confirm/fix. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":2250
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the isinstance check only runs when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2250, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 2250, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2251
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating BAR1Memory")
 */
    __pyx_v_self->_ptr = ((nvmlBAR1Memory_t *)malloc((sizeof(nvmlBAR1Memory_t))));

    /* "cuda/bindings/_nvml.pyx":2252
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBAR1Memory_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2253
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating BAR1Memory")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBAR1Memory_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called with the message via Cython's vectorcall helper. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2253, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_BAR1Memory};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2253, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 2253, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2252
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBAR1Memory_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":2254
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBAR1Memory_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's buffer address as a Python int;
     * assumes the buffer holds at least sizeof(nvmlBAR1Memory_t) bytes --
     * not checked here, enforced (if at all) by the caller. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2254, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2254, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2254, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlBAR1Memory_t))));

    /* "cuda/bindings/_nvml.pyx":2255
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBAR1Memory_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2256
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBAR1Memory_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2257
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2257, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2257, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 2257, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":2250
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":2259
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 2259, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":2249
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBAR1Memory_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2261
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1total(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter wrapper for the BAR1Memory.bar1total property:
 * downcasts self and forwards to the __pyx_pf_... implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for BAR1Memory.bar1total (_nvml.pyx:2261-2264): reads the
 * bar1Total field (unsigned long long) from the wrapped struct and boxes
 * it as a Python int. Assumes self->_ptr is non-NULL -- not checked here;
 * presumably guaranteed by the constructor (outside this view). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2264
 *     def bar1total(self):
 *         """int: """
 *         return self._ptr[0].bar1Total             # <<<<<<<<<<<<<<
 * 
 *     @bar1total.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).bar1Total); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2261
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1total(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.bar1total.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2266
 *         return self._ptr[0].bar1Total
 * 
 *     @bar1total.setter             # <<<<<<<<<<<<<<
 *     def bar1total(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter wrapper for the BAR1Memory.bar1total property:
 * downcasts self and forwards val to the __pyx_pf_... implementation
 * below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for BAR1Memory.bar1total (_nvml.pyx:2266-2270): raises
 * ValueError when the instance was marked read-only (self->_readonly),
 * otherwise converts val to unsigned long long (OverflowError/TypeError
 * propagate from the conversion helper) and stores it into the wrapped
 * struct's bar1Total field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2268
 *     @bar1total.setter
 *     def bar1total(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Total = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2269
 *     def bar1total(self, val):
 *         if self._readonly:
 *             raise ValueError("This BAR1Memory instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bar1Total = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_BAR1Memory_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2269, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2269, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2268
 *     @bar1total.setter
 *     def bar1total(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Total = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2270
 *         if self._readonly:
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Total = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2270, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bar1Total = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2266
 *         return self._ptr[0].bar1Total
 * 
 *     @bar1total.setter             # <<<<<<<<<<<<<<
 *     def bar1total(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.bar1total.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2272
 *         self._ptr[0].bar1Total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1free(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter wrapper for the BAR1Memory.bar1free property:
 * downcasts self and forwards to the __pyx_pf_... implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for BAR1Memory.bar1free (_nvml.pyx:2272-2275): reads the
 * bar1Free field (unsigned long long) from the wrapped struct and boxes
 * it as a Python int. Same pattern as bar1total.__get__; assumes
 * self->_ptr is non-NULL (not checked here). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2275
 *     def bar1free(self):
 *         """int: """
 *         return self._ptr[0].bar1Free             # <<<<<<<<<<<<<<
 * 
 *     @bar1free.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).bar1Free); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2275, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2272
 *         self._ptr[0].bar1Total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1free(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.bar1free.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2277
 *         return self._ptr[0].bar1Free
 * 
 *     @bar1free.setter             # <<<<<<<<<<<<<<
 *     def bar1free(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter wrapper for the BAR1Memory.bar1free property:
 * downcasts self and forwards val to the __pyx_pf_... implementation
 * below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for BAR1Memory.bar1free (_nvml.pyx:2277-2281): raises ValueError
 * when the instance is read-only, otherwise converts val to unsigned
 * long long and stores it into the wrapped struct's bar1Free field.
 * Mirrors bar1total.__set__. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2279
 *     @bar1free.setter
 *     def bar1free(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Free = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2280
 *     def bar1free(self, val):
 *         if self._readonly:
 *             raise ValueError("This BAR1Memory instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bar1Free = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_BAR1Memory_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2280, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2280, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2279
 *     @bar1free.setter
 *     def bar1free(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Free = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2281
 *         if self._readonly:
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Free = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2281, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bar1Free = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2277
 *         return self._ptr[0].bar1Free
 * 
 *     @bar1free.setter             # <<<<<<<<<<<<<<
 *     def bar1free(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.bar1free.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2283
 *         self._ptr[0].bar1Free = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1_used(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter wrapper for the BAR1Memory.bar1_used property:
 * downcasts self and forwards to the __pyx_pf_... implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for BAR1Memory.bar1_used (_nvml.pyx:2283-2286): reads the
 * bar1Used field (unsigned long long) from the wrapped struct and boxes
 * it as a Python int. Same pattern as the bar1total/bar1free getters;
 * assumes self->_ptr is non-NULL (not checked here). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2286
 *     def bar1_used(self):
 *         """int: """
 *         return self._ptr[0].bar1Used             # <<<<<<<<<<<<<<
 * 
 *     @bar1_used.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).bar1Used); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2283
 *         self._ptr[0].bar1Free = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1_used(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.bar1_used.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2288
 *         return self._ptr[0].bar1Used
 * 
 *     @bar1_used.setter             # <<<<<<<<<<<<<<
 *     def bar1_used(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter wrapper for the BAR1Memory.bar1_used property:
 * downcasts self and forwards val to the __pyx_pf_... implementation
 * below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.bar1_used.__set__.
 * Raises ValueError if the instance was created read-only; otherwise
 * converts `val` to unsigned long long and stores it into the wrapped
 * nvmlBAR1Memory_t struct's bar1Used field.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2290
 *     @bar1_used.setter
 *     def bar1_used(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Used = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2291
 *     def bar1_used(self, val):
 *         if self._readonly:
 *             raise ValueError("This BAR1Memory instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bar1Used = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_BAR1Memory_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2291, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2291, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2290
 *     @bar1_used.setter
 *     def bar1_used(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Used = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2292
 *         if self._readonly:
 *             raise ValueError("This BAR1Memory instance is read-only")
 *         self._ptr[0].bar1Used = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python int to unsigned long long; -1 plus a pending
   * exception signals conversion failure (overflow / wrong type). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2292, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bar1Used = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2288
 *         return self._ptr[0].bar1Used
 * 
 *     @bar1_used.setter             # <<<<<<<<<<<<<<
 *     def bar1_used(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.bar1_used.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2294
 *         self._ptr[0].bar1Used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BAR1Memory instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method BAR1Memory.from_data(data).
 * Parses exactly one positional-or-keyword argument ("data") from the
 * fastcall/vectorcall argument array and forwards it to the
 * implementation. Returns a new reference, or NULL with an exception set.
 *
 * Fix: the keyword-count error check was misparenthesized as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely() expands to
 * __builtin_expect(!!(x), 0) its value is 0 or 1, so the comparison was
 * always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. Corrected to
 * `unlikely(__pyx_kwds_len < 0)`, matching the form used elsewhere in
 * this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_12from_data, "BAR1Memory.from_data(data)\n\nCreate an BAR1Memory instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `ba_r1memory_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: detect a negative (error) keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2294, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2294, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 2294, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 2294, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2294, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 2294, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.from_data(data): looks up the module-level
 * `ba_r1memory_dtype` object and delegates to the shared __from_data
 * helper, which wraps the NumPy array in a BAR1Memory instance.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2301
 *             data (_numpy.ndarray): a single-element array of dtype `ba_r1memory_dtype` holding the data.
 *         """
 *         return __from_data(data, "ba_r1memory_dtype", ba_r1memory_dtype, BAR1Memory)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the module global `ba_r1memory_dtype` at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ba_r1memory_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2301, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ba_r1memory_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2301, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2294
 *         self._ptr[0].bar1Used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BAR1Memory instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2303
 *         return __from_data(data, "ba_r1memory_dtype", ba_r1memory_dtype, BAR1Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BAR1Memory instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for the static method
 * BAR1Memory.from_ptr(ptr, readonly=False, owner=None).
 * Parses one required argument ("ptr") plus two optional ones
 * ("readonly", "owner") from the fastcall/vectorcall argument array,
 * converts ptr to intptr_t and readonly to a C int, then forwards to the
 * implementation. Returns a new reference, or NULL with an exception set.
 *
 * Fix: the keyword-count error check was misparenthesized as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely() expands to
 * __builtin_expect(!!(x), 0) its value is 0 or 1, so the comparison was
 * always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. Corrected to
 * `unlikely(__pyx_kwds_len < 0)`, matching the form used elsewhere in
 * this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_14from_ptr, "BAR1Memory.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an BAR1Memory instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: detect a negative (error) keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2303, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2303, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2303, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2303, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2303, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":2304
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an BAR1Memory instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2303, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2303, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2303, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2303, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2304, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2304, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2303, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":2303
 *         return __from_data(data, "ba_r1memory_dtype", ba_r1memory_dtype, BAR1Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BAR1Memory instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.from_ptr(ptr, readonly, owner).
 * Validates that ptr is non-null, allocates a new BAR1Memory instance,
 * then either copies the pointed-to struct into freshly malloc'd storage
 * (owner is None -> instance owns and will free the copy) or borrows the
 * caller's pointer and keeps `owner` alive to guarantee its validity.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2312
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2313
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2313, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2313, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2312
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)
 */
  }

  /* "cuda/bindings/_nvml.pyx":2314
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_BAR1Memory(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2314, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2315
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* No owner: copy the data so the instance is self-contained. The
     * malloc'd block is released by the type's deallocator (_owned=True). */

    /* "cuda/bindings/_nvml.pyx":2316
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)
 *         if owner is None:
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating BAR1Memory")
 */
    __pyx_v_obj->_ptr = ((nvmlBAR1Memory_t *)malloc((sizeof(nvmlBAR1Memory_t))));

    /* "cuda/bindings/_nvml.pyx":2317
 *         if owner is None:
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBAR1Memory_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2318
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating BAR1Memory")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBAR1Memory_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved as a module global here (it may be
       * shadowed), hence the generic-callable call sequence below. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2318, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_BAR1Memory};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2318, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 2318, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2317
 *         if owner is None:
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBAR1Memory_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":2319
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBAR1Memory_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlBAR1Memory_t))));

    /* "cuda/bindings/_nvml.pyx":2320
 *                 raise MemoryError("Error allocating BAR1Memory")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBAR1Memory_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2321
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBAR1Memory_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlBAR1Memory_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2315
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BAR1Memory obj = BAR1Memory.__new__(BAR1Memory)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlBAR1Memory_t *>malloc(sizeof(nvmlBAR1Memory_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":2323
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlBAR1Memory_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: borrow the pointer and hold a reference to owner
     * so the underlying memory outlives this instance (_owned=False). */
    __pyx_v_obj->_ptr = ((nvmlBAR1Memory_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2324
 *         else:
 *             obj._ptr = <nvmlBAR1Memory_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":2325
 *             obj._ptr = <nvmlBAR1Memory_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":2326
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":2327
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2303
 *         return __from_data(data, "ba_r1memory_dtype", ba_r1memory_dtype, BAR1Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BAR1Memory instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for BAR1Memory.__reduce_cython__(): rejects any
 * positional or keyword arguments, then forwards to the implementation
 * (which always raises TypeError — the type is not picklable).
 * Returns NULL with an exception set on any call. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_16__reduce_cython__, "BAR1Memory.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positional or keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BAR1Memory.__reduce_cython__(): unconditionally
 * raises TypeError because the wrapped C pointer (_ptr) cannot be
 * pickled. Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for BAR1Memory.__setstate_cython__(__pyx_state):
 * parses the single required argument and forwards to the implementation
 * (which always raises TypeError — the type is not picklable).
 * Returns NULL with an exception set on any call.
 *
 * Fix: the keyword-count error check was misparenthesized as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely() expands to
 * __builtin_expect(!!(x), 0) its value is 0 or 1, so the comparison was
 * always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. Corrected to
 * `unlikely(__pyx_kwds_len < 0)`, matching __reduce_cython__'s wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_18__setstate_cython__, "BAR1Memory.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: detect a negative (error) keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated impl of BAR1Memory.__setstate_cython__(self, __pyx_state).
 * Pickling is deliberately disabled for this type: the body unconditionally
 * raises TypeError ("self._ptr cannot be converted to a Python object for
 * pickling"), so the only reachable exit is the error path and the function
 * always returns NULL with an exception set. Both parameters are unused. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10BAR1Memory_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned "cannot be pickled" message, then
   * jump unconditionally to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Record a synthetic traceback frame for the raised exception, then
   * return NULL (the only possible return value of this function). */
  __Pyx_AddTraceback("cuda.bindings._nvml.BAR1Memory.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2330
 * 
 * 
 * cdef _get_process_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessInfo_t pod = nvmlProcessInfo_t()
 *     return _numpy.dtype({
*/

/* Generated impl of the module-private cdef helper
 * _get_process_info_dtype_offsets().
 *
 * Builds and returns a numpy structured dtype that mirrors the C layout of
 * nvmlProcessInfo_t, equivalent to:
 *   numpy.dtype({'names':   ['pid', 'used_gpu_memory', 'gpu_instance_id',
 *                            'compute_instance_id'],
 *                'formats': [numpy.uint32, numpy.uint64, numpy.uint32,
 *                            numpy.uint32],
 *                'offsets': [<byte offset of each field in the struct>],
 *                'itemsize': sizeof(nvmlProcessInfo_t)})
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_info_dtype_offsets(void) {
  nvmlProcessInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlProcessInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_process_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":2331
 * 
 * cdef _get_process_info_dtype_offsets():
 *     cdef nvmlProcessInfo_t pod = nvmlProcessInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id'],
 */
  /* NOTE(review): pod is copied from the temporary __pyx_t_1, which is never
   * assigned here (presumably value-initialized by Cython's C++ lowering of
   * `nvmlProcessInfo_t()` -- confirm against the generated decl semantics).
   * This is harmless either way: only the *addresses* of pod's fields are
   * taken below (offsetof-style arithmetic); no field value is ever read. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":2332
 * cdef _get_process_info_dtype_offsets():
 *     cdef nvmlProcessInfo_t pod = nvmlProcessInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 */
  /* Look up the callable numpy.dtype (t5); t4 is a scratch ref. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":2333
 *     cdef nvmlProcessInfo_t pod = nvmlProcessInfo_t()
 *     return _numpy.dtype({
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the spec dict (t4) and its 'names' list of four field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 2333, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_used_gpu_memory);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_used_gpu_memory);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_used_gpu_memory) != (0)) __PYX_ERR(0, 2333, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_gpu_instance_id) != (0)) __PYX_ERR(0, 2333, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_compute_instance_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_compute_instance_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_compute_instance_id) != (0)) __PYX_ERR(0, 2333, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 2333, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2334
 *     return _numpy.dtype({
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 */
  /* 'formats': [numpy.uint32, numpy.uint64, numpy.uint32, numpy.uint32]
   * (t7..t10 hold the four looked-up scalar types). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 2334, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 2334, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 2334, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 2334, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 2333, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2336
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 */
  /* 'offsets': each entry is the byte offset of a field within the struct,
   * computed as (address of field) - (address of struct). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":2337
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.usedGpuMemory)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":2338
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpuInstanceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":2339
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlProcessInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.computeInstanceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":2335
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 */
  /* Pack the four offset ints into the 'offsets' list, same field order as
   * 'names' and 'formats'. */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 2335, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 2335, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 2335, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 2335, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 2333, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":2341
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlProcessInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': full struct size, so the resulting dtype strides exactly
   * match an array of nvmlProcessInfo_t. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 2333, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) -- with Cython's optional bound-method
   * unpacking fast path (self prepended as the first vectorcall arg). */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2330
 * 
 * 
 * cdef _get_process_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessInfo_t pod = nvmlProcessInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release every live temporary, record a traceback frame,
   * and return NULL with the exception set. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_process_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2363
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Generated Python wrapper for ProcessInfo.__init__(self, size=1).
 * Unpacks the positional/keyword arguments into values[0] (the optional
 * `size`, defaulting to the interned int 1), then delegates to the impl
 * function __pyx_pf_..._ProcessInfo___init__. Returns 0 on success, -1 with
 * an exception set on any argument error or impl failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count the positional args (macro fast path vs. checked call). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 2363, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: take at most one positional arg, then merge in
       * keyword args (only 'size' is accepted). */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2363, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 2363, __pyx_L3_error)
      /* Fill in the default: size = 1. */
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: 0 or 1 args, default size = 1. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2363, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  /* Too many positional args: raise TypeError via the shared helper. */
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 2363, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected refs and return -1. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated impl of ProcessInfo.__init__(self, size=1):
 *   arr = numpy.empty(size, dtype=process_info_dtype)
 *   self._data = arr.view(numpy.recarray)
 *   assert self._data.itemsize == sizeof(nvmlProcessInfo_t), f"..."
 * Returns 0 on success, -1 with an exception set on failure (including the
 * AssertionError raised when the dtype's itemsize mismatches the struct). */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":2364
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_info_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessInfo_t), \
 */
  /* Look up numpy.empty (t4) and the module-level process_info_dtype (t3),
   * then vectorcall numpy.empty(size, dtype=process_info_dtype) -> arr. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2364, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 2364, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2364, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2365
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_info_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlProcessInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessInfo_t) }"
 */
  /* self._data = arr.view(numpy.recarray), via the fast method-call helper;
   * the old self._data reference is released after the new one is stored. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2365, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2366
 *         arr = _numpy.empty(size, dtype=process_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessInfo_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessInfo_t) }"
 * 
 */
  /* Sanity check: the dtype's itemsize must equal sizeof(nvmlProcessInfo_t).
   * Skipped entirely when Python assertions are disabled (-O). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessInfo_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2366, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 2366, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":2367
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessInfo_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Assertion failed: build the f-string message by joining the literal
       * fragments with the formatted itemsize and struct size, then raise
       * AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2367, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2367, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlProcessInfo_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2367, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2367, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 2366, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 2366, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":2363
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release all temporaries, record a traceback frame, -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2369
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Generated Python wrapper for ProcessInfo.__repr__: no arguments to
 * unpack, so it simply casts self and delegates to the impl function.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
 * __Pyx_KwValues_VARARGS is presumably a macro that ignores its arguments
 * in this build configuration -- confirm against the macro definition. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated impl of ProcessInfo.__repr__:
 *   if self._data.size > 1:
 *       return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *   else:
 *       return f"<{__name__}.ProcessInfo object at {hex(id(self))}>"
 * The f-strings are lowered to __Pyx_PyUnicode_Join over arrays of literal
 * fragments and formatted values. Returns a new str, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":2370
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate self._data.size > 1 via rich comparison on the Python int. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2370, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 2370, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":2371
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.ProcessInfo object at {hex(id(self))}>"
 */
    /* Array branch: format __name__ (t1), self._data.size (t4) and
     * hex(id(self)) (t2), then join 7 fragments into the repr string. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_ProcessInfo_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 19 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2371, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2370
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":2373
 *             return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.ProcessInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar branch: same scheme with 5 fragments (no size component). */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_ProcessInfo_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 23 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":2369
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessInfo_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries, record a traceback frame, NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2375
 *             return f"<{__name__}.ProcessInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for the `ProcessInfo.ptr` property getter.
 * Casts the opaque PyObject* `self` to the ProcessInfo extension-type
 * struct and delegates to the implementation function; any error is
 * signalled by the NULL return propagated from the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.ptr.__get__` (pyx line 2378):
 * returns `self._data.ctypes.data` — the address of the backing buffer
 * as a Python int.  Performs two attribute lookups (`ctypes`, then
 * `data`) with goto-based cleanup on failure; returns a new reference
 * or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2378
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* self._data.ctypes */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* (...).data — returned to the caller as the result */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2375
 *             return f"<{__name__}.ProcessInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code: on error, drop temporaries, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2380
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (`cdef intptr_t`) fast accessor `ProcessInfo._get_ptr` (pyx
 * line 2380): evaluates `self._data.ctypes.data` and converts the
 * resulting Python int to a native intptr_t.  Returns 0 after recording
 * a traceback when an error occurs (0 is also a legal pointer value, so
 * callers rely on the Python exception state to detect failure). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11ProcessInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2381
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Python int -> intptr_t; -1 with an exception set marks failure */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2381, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2380
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2383
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5__int__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for `ProcessInfo.__int__`: casts `self` to the
 * extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.__int__` (pyx line 2383):
 * raises TypeError if `self._data.size > 1` (arrays must use `.ptr`),
 * otherwise returns `self._data.ctypes.data` — the buffer address as a
 * Python int.  Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":2384
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate `self._data.size > 1` via rich comparison (size is a
   * Python object attribute, not a C int here). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2384, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2384, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 2384, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":2385
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Construct and raise TypeError(...) via a vectorcall on the
     * exception type with the interned message string. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2385, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2385, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2384
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":2387
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2387, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2387, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2383
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2389
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_7__len__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for `ProcessInfo.__len__`: casts `self` and
 * delegates; returns the length or -1 on error (CPython sq_length
 * convention). */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.__len__` (pyx line 2389): returns
 * `self._data.size` converted to Py_ssize_t, or -1 with an exception
 * set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":2390
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2390, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* __index__-based conversion; -1 with pending exception marks failure */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2390, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2389
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2392
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* Python-level wrapper for `ProcessInfo.__eq__`: casts `self` and
 * forwards `other` unchanged to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.__eq__` (pyx line 2392).
 * Python source:
 *     if (not isinstance(other, ProcessInfo)) or self._data.size != other._data.size
 *             or self._data.dtype != other._data.dtype:
 *         return False
 *     return bool((self._data == other._data).all())
 * Short-circuits the three-way `or` via goto __pyx_L4_bool_binop_done;
 * on the slow path it element-compares the two `_data` objects and
 * calls `.all()` on the result.  Returns a new reference to a bool, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":2393
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Take an owned local reference to self._data for repeated use below. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2394
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Condition 1: not isinstance(other, ProcessInfo) */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 2: self_data.size != other._data.size */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 3: self_data.dtype != other._data.dtype */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2394, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2395
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2394
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":2396
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (self_data == other._data).all() — elementwise compare, then
   * vectorcall of the bound `.all()` method on the comparison result. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2396, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* bool(...) — truth-test then materialize a Python bool */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2392
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2398
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for the `ProcessInfo.pid` property getter:
 * casts `self` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.pid.__get__` (pyx line 2398).
 * If `self._data.size == 1` returns `int(self._data.pid[0])` (a plain
 * Python int); otherwise returns the `self._data.pid` object itself
 * (per the pyx docstring: Union[~_numpy.uint32, int]).  Returns a new
 * reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2401
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
 */
  /* Optimized `size == 1` comparison against the cached int constant. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2401, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2402
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
 */
    /* self._data.pid[0] then int(...) for the scalar case */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2402, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2402, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2402, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2401
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
 */
  }

  /* "cuda/bindings/_nvml.pyx":2403
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2398
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2405
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level wrapper for the `ProcessInfo.pid` property setter:
 * casts `self` and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.pid.__set__` (pyx line 2405):
 * performs `self._data.pid = val` via setattr.  Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2407
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 2407, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2405
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2409
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for the `ProcessInfo.used_gpu_memory` property
 * getter: casts `self` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.used_gpu_memory.__get__` (pyx line
 * 2409).  Mirrors the `pid` getter: if `self._data.size == 1` returns
 * `int(self._data.used_gpu_memory[0])`, otherwise returns
 * `self._data.used_gpu_memory` itself (pyx docstring:
 * Union[~_numpy.uint64, int]).  New reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2412
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.used_gpu_memory[0])
 *         return self._data.used_gpu_memory
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2413
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.used_gpu_memory[0])             # <<<<<<<<<<<<<<
 *         return self._data.used_gpu_memory
 * 
 */
    /* Scalar case: index element 0 and coerce to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2413, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2413, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2413, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2412
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.used_gpu_memory[0])
 *         return self._data.used_gpu_memory
 */
  }

  /* "cuda/bindings/_nvml.pyx":2414
 *         if self._data.size == 1:
 *             return int(self._data.used_gpu_memory[0])
 *         return self._data.used_gpu_memory             # <<<<<<<<<<<<<<
 * 
 *     @used_gpu_memory.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2409
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.used_gpu_memory.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2416
 *         return self._data.used_gpu_memory
 * 
 *     @used_gpu_memory.setter             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self, val):
 *         self._data.used_gpu_memory = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level wrapper for the `ProcessInfo.used_gpu_memory` property
 * setter: casts `self` and delegates; 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.used_gpu_memory.__set__` (pyx line
 * 2416): performs `self._data.used_gpu_memory = val` via setattr.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2418
 *     @used_gpu_memory.setter
 *     def used_gpu_memory(self, val):
 *         self._data.used_gpu_memory = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_memory, __pyx_v_val) < (0)) __PYX_ERR(0, 2418, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2416
 *         return self._data.used_gpu_memory
 * 
 *     @used_gpu_memory.setter             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self, val):
 *         self._data.used_gpu_memory = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.used_gpu_memory.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2420
 *         self._data.used_gpu_memory = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python descriptor-slot wrapper for ProcessInfo.gpu_instance_id.__get__.
 * Casts self to the concrete ProcessInfo struct and forwards to the
 * ___get__ implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope as parameters here;
   * presumably __Pyx_KwValues_VARARGS discards them -- confirm in Cython. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the gpu_instance_id property getter
 * (cuda/bindings/_nvml.pyx:2420-2425).  Mirrors the Cython source:
 *   if self._data.size == 1: return int(self._data.gpu_instance_id[0])
 *   return self._data.gpu_instance_id
 * i.e. a single-element wrapped array is unboxed to a plain Python int,
 * otherwise the underlying attribute object is returned as-is. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2423
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.gpu_instance_id[0])
 *         return self._data.gpu_instance_id
*/
  /* Fetch self._data.size and compare it to the cached int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2423, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2423, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2424
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.gpu_instance_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.gpu_instance_id
 * 
*/
    /* Single element: index [0] then coerce via int(...). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2424, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2424, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2424, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2423
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.gpu_instance_id[0])
 *         return self._data.gpu_instance_id
*/
  }

  /* "cuda/bindings/_nvml.pyx":2425
 *         if self._data.size == 1:
 *             return int(self._data.gpu_instance_id[0])
 *         return self._data.gpu_instance_id             # <<<<<<<<<<<<<<
 * 
 *     @gpu_instance_id.setter
*/
  /* General case: return the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2420
 *         self._data.used_gpu_memory = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.gpu_instance_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2427
 *         return self._data.gpu_instance_id
 * 
 *     @gpu_instance_id.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self, val):
 *         self._data.gpu_instance_id = val
*/

/* Python wrapper */
/* Python descriptor-slot wrapper for ProcessInfo.gpu_instance_id.__set__.
 * Casts self and delegates to the _2__set__ implementation below;
 * returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
   * __Pyx_KwValues_VARARGS presumably discards them -- confirm in Cython. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the gpu_instance_id property setter
 * (cuda/bindings/_nvml.pyx:2429): performs the Python-level assignment
 * `self._data.gpu_instance_id = val`.  Returns 0 on success; on failure
 * records a traceback and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2429
 *     @gpu_instance_id.setter
 *     def gpu_instance_id(self, val):
 *         self._data.gpu_instance_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Generic setattr on self->_data; error propagates via the L1 label. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_id, __pyx_v_val) < (0)) __PYX_ERR(0, 2429, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2427
 *         return self._data.gpu_instance_id
 * 
 *     @gpu_instance_id.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self, val):
 *         self._data.gpu_instance_id = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.gpu_instance_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2431
 *         self._data.gpu_instance_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python descriptor-slot wrapper for ProcessInfo.compute_instance_id.__get__.
 * Casts self and forwards to the ___get__ implementation; returns a new
 * reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
   * __Pyx_KwValues_VARARGS presumably discards them -- confirm in Cython. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the compute_instance_id property getter
 * (cuda/bindings/_nvml.pyx:2431-2436).  Mirrors the Cython source:
 *   if self._data.size == 1: return int(self._data.compute_instance_id[0])
 *   return self._data.compute_instance_id
 * i.e. a single-element wrapped array is unboxed to a plain Python int,
 * otherwise the underlying attribute object is returned as-is. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2434
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.compute_instance_id[0])
 *         return self._data.compute_instance_id
*/
  /* Fetch self._data.size and compare it to the cached int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2434, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2435
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.compute_instance_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.compute_instance_id
 * 
*/
    /* Single element: index [0] then coerce via int(...). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_compute_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2435, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2435, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2435, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2434
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.compute_instance_id[0])
 *         return self._data.compute_instance_id
*/
  }

  /* "cuda/bindings/_nvml.pyx":2436
 *         if self._data.size == 1:
 *             return int(self._data.compute_instance_id[0])
 *         return self._data.compute_instance_id             # <<<<<<<<<<<<<<
 * 
 *     @compute_instance_id.setter
*/
  /* General case: return the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_compute_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2431
 *         self._data.gpu_instance_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.compute_instance_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2438
 *         return self._data.compute_instance_id
 * 
 *     @compute_instance_id.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self, val):
 *         self._data.compute_instance_id = val
*/

/* Python wrapper */
/* Python descriptor-slot wrapper for ProcessInfo.compute_instance_id.__set__.
 * Casts self and delegates to the _2__set__ implementation below;
 * returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
   * __Pyx_KwValues_VARARGS presumably discards them -- confirm in Cython. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the compute_instance_id property setter
 * (cuda/bindings/_nvml.pyx:2440): performs the Python-level assignment
 * `self._data.compute_instance_id = val`.  Returns 0 on success; on
 * failure records a traceback and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2440
 *     @compute_instance_id.setter
 *     def compute_instance_id(self, val):
 *         self._data.compute_instance_id = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
*/
  /* Generic setattr on self->_data; error propagates via the L1 label. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_compute_instance_id, __pyx_v_val) < (0)) __PYX_ERR(0, 2440, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2438
 *         return self._data.compute_instance_id
 * 
 *     @compute_instance_id.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self, val):
 *         self._data.compute_instance_id = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.compute_instance_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2442
 *         self._data.compute_instance_id = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python mp_subscript slot wrapper for ProcessInfo.__getitem__.
 * Casts self and forwards (self, key) to the _10__getitem__ implementation;
 * returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
   * __Pyx_KwValues_VARARGS presumably discards them -- confirm in Cython. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessInfo.__getitem__ (cuda/bindings/_nvml.pyx:2442-2456).
 * Mirrors the Cython source:
 *   - integer key: bounds-check against self._data.size (raising IndexError),
 *     normalize a negative index, and return
 *     ProcessInfo.from_data(self._data[key_:key_+1]) so a single element is
 *     still wrapped in a ProcessInfo;
 *   - any other key: out = self._data[key]; if the result is a
 *     numpy.recarray with dtype process_info_dtype it is wrapped via
 *     ProcessInfo.from_data(out), otherwise out is returned unchanged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":2445
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  /* isinstance(key, int) compiled down to an exact PyLong check. */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2446
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
*/
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2446, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":2447
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
*/
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2447, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2447, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":2448
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    /* Short-circuit `or`: key_ >= size first, then key_ <= -(size+1). */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2449
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
*/
      /* Construct and raise IndexError("index is out of bounds"). */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2449, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 2449, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2448
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    }

    /* "cuda/bindings/_nvml.pyx":2450
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessInfo.from_data(self._data[key_:key_+1])
*/
    /* Normalize a negative index into [0, size). */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":2451
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return ProcessInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
*/
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":2450
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessInfo.from_data(self._data[key_:key_+1])
*/
    }

    /* "cuda/bindings/_nvml.pyx":2452
 *             if key_ < 0:
 *                 key_ += size
 *             return ProcessInfo.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_info_dtype:
*/
    /* Slice [key_:key_+1] keeps a 1-element array, then wrap it via the
     * ProcessInfo.from_data static method (vectorcall of a bound method). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2452, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2452, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2445
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  }

  /* "cuda/bindings/_nvml.pyx":2453
 *                 key_ += size
 *             return ProcessInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_info_dtype:
 *             return ProcessInfo.from_data(out)
*/
  /* Non-integer key (e.g. slice or field name): generic subscript. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":2454
 *             return ProcessInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_info_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessInfo.from_data(out)
 *         return out
*/
  /* Short-circuit `and`: recarray isinstance check, then dtype equality
   * against the module-global process_info_dtype. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2454, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2455
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_info_dtype:
 *             return ProcessInfo.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
*/
    /* Matching recarray: re-wrap it in a ProcessInfo. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2455, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2454
 *             return ProcessInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_info_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessInfo.from_data(out)
 *         return out
*/
  }

  /* "cuda/bindings/_nvml.pyx":2456
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_info_dtype:
 *             return ProcessInfo.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  /* Anything else is returned unwrapped. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2442
 *         self._data.compute_instance_id = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2458
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python mp_ass_subscript slot wrapper for ProcessInfo.__setitem__.
 * Casts self and forwards (self, key, val) to the _12__setitem__
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
   * __Pyx_KwValues_VARARGS presumably discards them -- confirm in Cython. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessInfo.__setitem__ (cuda/bindings/_nvml.pyx:2459):
 * delegates the assignment `self._data[key] = val` straight to the wrapped
 * data object via PyObject_SetItem.  Returns 0 on success; on failure
 * records a traceback and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2459
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 2459, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2458
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2461
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the ProcessInfo.from_data
 * staticmethod.  Unpacks exactly one argument, `data` (positional or
 * keyword), then calls the _14from_data implementation.  The proto,
 * docstring, and PyMethodDef entry for the method are declared first. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_14from_data, "ProcessInfo.from_data(data)\n\nCreate an ProcessInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `process_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): the parentheses place `< 0` outside unlikely(); if
     * unlikely(x) is __builtin_expect(!!(x), 0) this condition can never be
     * true, making the error check inert -- confirm against Cython's
     * definition of unlikely() and upstream generated-code templates. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 2461, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args first, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2461, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 2461, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 2461, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2461, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 2461, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python static method `ProcessInfo.from_data(data)`
 * (Cython-generated from cuda/bindings/_nvml.pyx:2461-2477).
 *
 * Validates that `data` is a 1-D NumPy ndarray whose dtype equals the
 * module-level `process_info_dtype`, then stores `data.view(numpy.recarray)`
 * on a freshly allocated ProcessInfo instance and returns that instance.
 * Raises TypeError for a non-ndarray argument and ValueError for a wrong
 * ndim or dtype.
 *
 * `__pyx_v_data` is a borrowed reference supplied by the wrapper; the
 * temporaries __pyx_t_1/2/6/7 hold owned references and are released on
 * both the error path (__pyx_L1_error) and the normal path. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2468
 *             data (_numpy.ndarray): a 1D array of dtype `process_info_dtype` holding the data.
 *         """
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
*/
  /* Allocate the instance directly via tp_new, bypassing __init__. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2468, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2469
 *         """
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  /* isinstance(data, numpy.ndarray) — looks up `_numpy.ndarray` dynamically
   * from module globals each call. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2469, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2469, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 2469, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":2470
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2470, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2470, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2469
 *         """
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  }

  /* "cuda/bindings/_nvml.pyx":2471
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_info_dtype:
*/
  /* data.ndim != 1 — optimized int comparison against the cached constant 1. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2471, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":2472
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != process_info_dtype:
 *             raise ValueError("data array must be of dtype process_info_dtype")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2472, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2472, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2471
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_info_dtype:
*/
  }

  /* "cuda/bindings/_nvml.pyx":2473
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  /* data.dtype != process_info_dtype — full rich comparison (Py_NE) since
   * dtype equality is defined by NumPy, then truth-tested. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_process_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2473, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2473, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":2474
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_info_dtype:
 *             raise ValueError("data array must be of dtype process_info_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_proc};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2474, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 2474, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2473
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  }

  /* "cuda/bindings/_nvml.pyx":2475
 *         if data.dtype != process_info_dtype:
 *             raise ValueError("data array must be of dtype process_info_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2475, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Transfer ownership of the recarray view into obj->_data, dropping the
   * previous value (tp_new initializes _data to None, never NULL). */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2477
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2461
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessInfo instance wrapping the given NumPy array.
*/

  /* function exit code */
  /* Error path: release any live temporaries, record a traceback frame,
   * and fall through to the common cleanup below with __pyx_r == NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2479
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython entry point (METH_FASTCALL|METH_KEYWORDS) for
 * ProcessInfo.from_ptr(ptr, size=1, readonly=False).  Unpacks up to three
 * positional arguments and any keywords into values[0..2], converts them to
 * the C types (intptr_t, size_t, int), applies the defaults, and dispatches
 * to the __pyx_pf_* implementation.  Every exit path Py_XDECREFs the
 * entries of values[], which hold the parsed argument references. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_16from_ptr, "ProcessInfo.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an ProcessInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 2479, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first (fallthrough switch), then
       * merge keyword arguments; duplicates/unknown names error out. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2479, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2479, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2479, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2479, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; report it if still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2479, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1 to 3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2479, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2479, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2479, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width; holds on mainstream
     * flat-address platforms, confirm for exotic targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2480, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2480, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2480, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":2480
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an ProcessInfo instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2479, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":2479
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessInfo instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessInfo.from_ptr(ptr, size=1, readonly=False)`
 * (Cython-generated from cuda/bindings/_nvml.pyx:2479-2497).
 *
 * Rejects a null `ptr`, then wraps the raw memory at `ptr`
 * (size * sizeof(nvmlProcessInfo_t) bytes) in a memoryview created with
 * PyBUF_READ when `readonly` else PyBUF_WRITE, builds a NumPy ndarray of
 * dtype `process_info_dtype` over that buffer, and stores a recarray view
 * of it on a new ProcessInfo instance.
 *
 * NOTE(review): the memory is wrapped, not copied — the caller's buffer
 * must presumably outlive the returned object; confirm against callers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2488
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2489
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2489, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2489, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2488
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
*/
  }

  /* "cuda/bindings/_nvml.pyx":2490
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the instance directly via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2490, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2491
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessInfo_t) * size, flag)
*/
  /* `flag` is kept as a Python int (cdef object in the .pyx source) and
   * converted back to C int just before PyMemoryView_FromMemory below. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2491, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2491, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2493
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessInfo_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_info_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2493, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2492
 *         cdef ProcessInfo obj = ProcessInfo.__new__(ProcessInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlProcessInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_info_dtype)
*/
  /* Zero-copy wrap of the caller's memory; the memoryview does not own it. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlProcessInfo_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2494
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_info_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* Vectorcall of numpy.ndarray(size, buffer=buf, dtype=process_info_dtype)
   * using a keyword builder; all borrowed/owned temporaries are released
   * immediately after the call. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_process_info_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2494, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 2494, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 2494, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2494, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2495
 *             <char*>ptr, sizeof(nvmlProcessInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_info_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2495, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Transfer ownership of the recarray view into obj->_data, dropping the
   * previous value (tp_new initializes _data to None, never NULL). */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2497
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2479
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessInfo instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release any live temporaries, record a traceback frame,
   * then fall through to the common local-variable cleanup with r == NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2359
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
 */

/* Python wrapper */
/* Getter descriptor entry point for the readonly attribute
 * `ProcessInfo._data`; delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
   * this only compiles if __Pyx_KwValues_VARARGS expands without evaluating
   * its arguments — confirm against the macro definition in the preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for `ProcessInfo._data`: returns a new reference to the
 * wrapped recarray stored on the instance.  Behaviorally identical to the
 * generated original; the dead XDECREF of the still-NULL result and the
 * fall-through goto/label pair have been folded into straight-line code. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __pyx_result = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_result);
  __Pyx_XGIVEREF(__pyx_result);
  __Pyx_RefNannyFinishContext();
  return __pyx_result;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

/* Python wrapper */
/* CPython entry point (METH_FASTCALL|METH_KEYWORDS) for the auto-generated
 * pickling helper ProcessInfo.__reduce_cython__.  Accepts no arguments
 * beyond self: any positional or keyword argument is rejected with
 * TypeError before dispatching to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_18__reduce_cython__, "ProcessInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positionals, then any keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, state)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessInfo); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessInfo__set_state(self, __pyx_state)
*/
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessInfo); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessInfo__set_state(self, __pyx_state)
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_20__setstate_cython__, "ProcessInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11ProcessInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessInfo__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
*/
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessInfo__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessInfo__set_state(self, __pyx_state)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2500
 * 
 * 
 * cdef _get_process_detail_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessDetail_v1_t pod = nvmlProcessDetail_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_detail_v1_dtype_offsets(void) {
  nvmlProcessDetail_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlProcessDetail_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_process_detail_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":2501
 * 
 * cdef _get_process_detail_v1_dtype_offsets():
 *     cdef nvmlProcessDetail_v1_t pod = nvmlProcessDetail_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id', 'used_gpu_cc_protected_memory'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":2502
 * cdef _get_process_detail_v1_dtype_offsets():
 *     cdef nvmlProcessDetail_v1_t pod = nvmlProcessDetail_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id', 'used_gpu_cc_protected_memory'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint64],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2502, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2502, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":2503
 *     cdef nvmlProcessDetail_v1_t pod = nvmlProcessDetail_v1_t()
 *     return _numpy.dtype({
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id', 'used_gpu_cc_protected_memory'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint64],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2503, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2503, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 2503, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_used_gpu_memory);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_used_gpu_memory);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_used_gpu_memory) != (0)) __PYX_ERR(0, 2503, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_gpu_instance_id) != (0)) __PYX_ERR(0, 2503, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_compute_instance_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_compute_instance_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_compute_instance_id) != (0)) __PYX_ERR(0, 2503, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_used_gpu_cc_protected_memory);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_used_gpu_cc_protected_memory);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_used_gpu_cc_protected_memory) != (0)) __PYX_ERR(0, 2503, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 2503, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2504
 *     return _numpy.dtype({
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id', 'used_gpu_cc_protected_memory'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 2504, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 2504, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 2504, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 2504, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 2504, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 2503, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2506
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2506, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":2507
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.usedGpuMemory)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2507, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":2508
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuCcProtectedMemory)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpuInstanceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":2509
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.usedGpuCcProtectedMemory)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.computeInstanceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2509, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":2510
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuCcProtectedMemory)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlProcessDetail_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.usedGpuCcProtectedMemory)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":2505
 *         'names': ['pid', 'used_gpu_memory', 'gpu_instance_id', 'compute_instance_id', 'used_gpu_cc_protected_memory'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.usedGpuMemory)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2505, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 2505, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 2505, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 2505, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 2505, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 2505, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 2503, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":2512
 *             (<intptr_t>&(pod.usedGpuCcProtectedMemory)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlProcessDetail_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessDetail_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2512, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 2503, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2502, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2500
 * 
 * 
 * cdef _get_process_detail_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessDetail_v1_t pod = nvmlProcessDetail_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_process_detail_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2534
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_detail_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 2534, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2534, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 2534, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2534, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 2534, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated implementation of `ProcessDetail_v1.__init__(self, size=1)`
 * (see cuda/bindings/_nvml.pyx:2534-2538).  It:
 *   1. calls `_numpy.empty(size, dtype=process_detail_v1_dtype)` via the
 *      vectorcall protocol,
 *   2. stores `arr.view(_numpy.recarray)` into `self->_data`, and
 *   3. (unless assertions are disabled) asserts that the dtype itemsize
 *      equals sizeof(nvmlProcessDetail_v1_t), raising AssertionError with an
 *      f-string message on mismatch.
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE: generated code — refcount/temporary ordering is intentional.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  /* __pyx_t_1..__pyx_t_8: owned temporaries; cleared on the error path below. */
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":2535
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_detail_v1_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessDetail_v1_t), \
 */
  /* Look up `numpy.empty` and the module-level `process_detail_v1_dtype`. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_detail_v1_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `empty` resolved to a bound method, unpack it so self is passed as the
     first positional argument (avoids allocating a new args tuple). */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    /* Vectorcall: positional `size` plus keyword `dtype=process_detail_v1_dtype`. */
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2535, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 2535, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2535, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* `arr` takes ownership of the freshly-created array. */
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2536
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_detail_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlProcessDetail_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessDetail_v1_t) }"
 */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2536, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2536, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2536, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Swap the recarray view into self->_data, releasing the previous value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2537
 *         arr = _numpy.empty(size, dtype=process_detail_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessDetail_v1_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessDetail_v1_t) }"
 * 
 */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  /* Assert only when Python is not running with -O (assertions enabled). */
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2537, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessDetail_v1_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2537, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2537, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 2537, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":2538
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessDetail_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessDetail_v1_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string message by joining 4 fragments, then raise
         AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2538, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2538, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlProcessDetail_v1_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2538, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2538, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 2537, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 2537, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":2534
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_detail_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: drop any live temporaries, record traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2540
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessDetail_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* tp_repr slot wrapper: downcasts self and forwards to the __repr__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated implementation of `ProcessDetail_v1.__repr__`
 * (cuda/bindings/_nvml.pyx:2540-2544).  When the wrapped recarray holds more
 * than one element it returns
 *   f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"
 * otherwise
 *   f"<{__name__}.ProcessDetail_v1 object at {hex(id(self))}>".
 * Returns a new unicode reference, or NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  /* Fragment arrays for the two f-string joins (7 and 5 pieces). */
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":2541
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2541, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2541, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 2541, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":2542
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.ProcessDetail_v1 object at {hex(id(self))}>"
 */
    /* Array case: format __name__, the element count, and hex(id(self)),
       then join the 7 fragments into the final repr string. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_ProcessDetail_v1_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2541
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":2544
 *             return f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.ProcessDetail_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar case: same pattern without the element count (5 fragments). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_ProcessDetail_v1_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 28 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":2540
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessDetail_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessDetail_v1_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2546
 *             return f"<{__name__}.ProcessDetail_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter slot wrapper for `ptr`: downcasts self and forwards to the
   __get__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated implementation of the `ptr` property getter
 * (cuda/bindings/_nvml.pyx:2546-2549): returns `self._data.ctypes.data`, the
 * buffer address of the underlying NumPy array as a Python int.
 * Returns a new reference, or NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2549
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Two attribute lookups: self._data.ctypes, then .data (the address). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2546
 *             return f"<{__name__}.ProcessDetail_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2551
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/*
 * Cython-generated implementation of the cdef method `_get_ptr`
 * (cuda/bindings/_nvml.pyx:2551-2552): returns `self._data.ctypes.data`
 * converted to a C intptr_t.
 * NOTE(review): on a Python-level error this returns 0 with an exception set;
 * a 0 return is therefore only distinguishable from a genuine null/zero
 * address by checking PyErr_Occurred() at the call site.
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16ProcessDetail_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2552
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2552, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2552, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Convert the Python int address to intptr_t (-1 + pending error = failure). */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2552, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2551
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2554
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* nb_int slot wrapper: downcasts self and forwards to the __int__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated implementation of `ProcessDetail_v1.__int__`
 * (cuda/bindings/_nvml.pyx:2554-2558).  Raises TypeError when the wrapped
 * array holds more than one element (directing callers to `.ptr`); otherwise
 * returns `self._data.ctypes.data` (the buffer address) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":2555
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Compare self._data.size > 1 via rich comparison, then truth-test. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2555, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 2555, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":2556
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Build the TypeError instance with the interned message, then raise. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2556, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2556, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2555
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":2558
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2554
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2560
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* sq_length/mp_length slot wrapper: downcasts self and forwards to the
   __len__ impl. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated implementation of `ProcessDetail_v1.__len__`
 * (cuda/bindings/_nvml.pyx:2560-2561): returns `self._data.size` (the NumPy
 * element count) converted to Py_ssize_t, or -1 with an exception set on
 * failure (the CPython length-slot error convention).
 */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":2561
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Convert the Python int via __index__ (-1 + pending error = failure). */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2561, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2560
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2563
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* __eq__ wrapper: downcasts self and forwards self/other to the __eq__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":2564
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
*/
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2565
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2565, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2566
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2565
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  }

  /* "cuda/bindings/_nvml.pyx":2567
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2563
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessDetail_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2569
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: Process ID."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2572
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: Process ID."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2572, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2573
 *         """Union[~_numpy.uint32, int]: Process ID."""
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2573, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2573, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2573, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2572
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: Process ID."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  }

  /* "cuda/bindings/_nvml.pyx":2574
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2569
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: Process ID."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2576
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2578
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 2578, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2576
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2580
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU memory in bytes. Under WDDM, NVML_VALUE_NOT_AVAILABLE is always reported because Windows KMD manages all the memory and not the NVIDIA driver"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2583
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU memory in bytes. Under WDDM, NVML_VALUE_NOT_AVAILABLE is always reported because Windows KMD manages all the memory and not the NVIDIA driver"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.used_gpu_memory[0])
 *         return self._data.used_gpu_memory
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2583, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2584
 *         """Union[~_numpy.uint64, int]: Amount of used GPU memory in bytes. Under WDDM, NVML_VALUE_NOT_AVAILABLE is always reported because Windows KMD manages all the memory and not the NVIDIA driver"""
 *         if self._data.size == 1:
 *             return int(self._data.used_gpu_memory[0])             # <<<<<<<<<<<<<<
 *         return self._data.used_gpu_memory
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2584, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2584, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2584, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2583
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU memory in bytes. Under WDDM, NVML_VALUE_NOT_AVAILABLE is always reported because Windows KMD manages all the memory and not the NVIDIA driver"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.used_gpu_memory[0])
 *         return self._data.used_gpu_memory
*/
  }

  /* "cuda/bindings/_nvml.pyx":2585
 *         if self._data.size == 1:
 *             return int(self._data.used_gpu_memory[0])
 *         return self._data.used_gpu_memory             # <<<<<<<<<<<<<<
 * 
 *     @used_gpu_memory.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2580
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU memory in bytes. Under WDDM, NVML_VALUE_NOT_AVAILABLE is always reported because Windows KMD manages all the memory and not the NVIDIA driver"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.used_gpu_memory.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2587
 *         return self._data.used_gpu_memory
 * 
 *     @used_gpu_memory.setter             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self, val):
 *         self._data.used_gpu_memory = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2589
 *     @used_gpu_memory.setter
 *     def used_gpu_memory(self, val):
 *         self._data.used_gpu_memory = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_memory, __pyx_v_val) < (0)) __PYX_ERR(0, 2589, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2587
 *         return self._data.used_gpu_memory
 * 
 *     @used_gpu_memory.setter             # <<<<<<<<<<<<<<
 *     def used_gpu_memory(self, val):
 *         self._data.used_gpu_memory = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.used_gpu_memory.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2591
 *         self._data.used_gpu_memory = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2594
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.gpu_instance_id[0])
 *         return self._data.gpu_instance_id
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2594, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2594, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2595
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is."""
 *         if self._data.size == 1:
 *             return int(self._data.gpu_instance_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.gpu_instance_id
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2595, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2595, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2595, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2594
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.gpu_instance_id[0])
 *         return self._data.gpu_instance_id
*/
  }

  /* "cuda/bindings/_nvml.pyx":2596
 *         if self._data.size == 1:
 *             return int(self._data.gpu_instance_id[0])
 *         return self._data.gpu_instance_id             # <<<<<<<<<<<<<<
 * 
 *     @gpu_instance_id.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2596, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2591
 *         self._data.used_gpu_memory = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.gpu_instance_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2598
 *         return self._data.gpu_instance_id
 * 
 *     @gpu_instance_id.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self, val):
 *         self._data.gpu_instance_id = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2600
 *     @gpu_instance_id.setter
 *     def gpu_instance_id(self, val):
 *         self._data.gpu_instance_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_id, __pyx_v_val) < (0)) __PYX_ERR(0, 2600, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2598
 *         return self._data.gpu_instance_id
 * 
 *     @gpu_instance_id.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self, val):
 *         self._data.gpu_instance_id = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.gpu_instance_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2602
 *         self._data.gpu_instance_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid compute instance ID. computeInstanceId."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2605
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid compute instance ID. computeInstanceId."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.compute_instance_id[0])
 *         return self._data.compute_instance_id
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2605, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2606
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid compute instance ID. computeInstanceId."""
 *         if self._data.size == 1:
 *             return int(self._data.compute_instance_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.compute_instance_id
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_compute_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2605
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid compute instance ID. computeInstanceId."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.compute_instance_id[0])
 *         return self._data.compute_instance_id
*/
  }

  /* "cuda/bindings/_nvml.pyx":2607
 *         if self._data.size == 1:
 *             return int(self._data.compute_instance_id[0])
 *         return self._data.compute_instance_id             # <<<<<<<<<<<<<<
 * 
 *     @compute_instance_id.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_compute_instance_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2602
 *         self._data.gpu_instance_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self):
 *         """Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid compute instance ID. computeInstanceId."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.compute_instance_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2609
 *         return self._data.compute_instance_id
 * 
 *     @compute_instance_id.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self, val):
 *         self._data.compute_instance_id = val
*/

/* Python wrapper */
/* Python-level __set__ wrapper for ProcessDetail_v1.compute_instance_id.
 * Casts the generic PyObject* self to the concrete extension-type struct and
 * forwards to the implementation function; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast self and delegate; the impl does the actual attribute store. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.compute_instance_id = val` (pyx line 2611):
 * a plain setattr on the wrapped `_data` object. Returns 0 on success, -1
 * with an exception set (and a traceback entry added) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2611
 *     @compute_instance_id.setter
 *     def compute_instance_id(self, val):
 *         self._data.compute_instance_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* setattr(self._data, "compute_instance_id", val); < 0 signals an error. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_compute_instance_id, __pyx_v_val) < (0)) __PYX_ERR(0, 2611, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2609
 *         return self._data.compute_instance_id
 * 
 *     @compute_instance_id.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self, val):
 *         self._data.compute_instance_id = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.compute_instance_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2613
 *         self._data.compute_instance_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used_gpu_cc_protected_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU conf compute protected memory in bytes."""
*/

/* Python wrapper */
/* Python-level __get__ wrapper for ProcessDetail_v1.used_gpu_cc_protected_memory.
 * Casts self to the extension-type struct and forwards to the implementation;
 * returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast self and delegate to the property-getter implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property getter (pyx lines 2616-2618):
 *     if self._data.size == 1: return int(self._data.used_gpu_cc_protected_memory[0])
 *     return self._data.used_gpu_cc_protected_memory
 * i.e. a single-element `_data` yields a plain Python int, otherwise the raw
 * attribute (per the .pyx docstring, a NumPy value) is returned unwrapped. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2616
 *     def used_gpu_cc_protected_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU conf compute protected memory in bytes."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.used_gpu_cc_protected_memory[0])
 *         return self._data.used_gpu_cc_protected_memory
*/
  /* Evaluate `self._data.size == 1` (optimized int comparison helper). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2616, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2616, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2617
 *         """Union[~_numpy.uint64, int]: Amount of used GPU conf compute protected memory in bytes."""
 *         if self._data.size == 1:
 *             return int(self._data.used_gpu_cc_protected_memory[0])             # <<<<<<<<<<<<<<
 *         return self._data.used_gpu_cc_protected_memory
 * 
*/
    /* Fetch attr, index element 0, coerce to int, and return it. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_cc_protected_memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2617, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2617, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2617, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2616
 *     def used_gpu_cc_protected_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU conf compute protected memory in bytes."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.used_gpu_cc_protected_memory[0])
 *         return self._data.used_gpu_cc_protected_memory
*/
  }

  /* "cuda/bindings/_nvml.pyx":2618
 *         if self._data.size == 1:
 *             return int(self._data.used_gpu_cc_protected_memory[0])
 *         return self._data.used_gpu_cc_protected_memory             # <<<<<<<<<<<<<<
 * 
 *     @used_gpu_cc_protected_memory.setter
*/
  /* size != 1: return the attribute as-is (new reference from getattr). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_cc_protected_memory); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2613
 *         self._data.compute_instance_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def used_gpu_cc_protected_memory(self):
 *         """Union[~_numpy.uint64, int]: Amount of used GPU conf compute protected memory in bytes."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.used_gpu_cc_protected_memory.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2620
 *         return self._data.used_gpu_cc_protected_memory
 * 
 *     @used_gpu_cc_protected_memory.setter             # <<<<<<<<<<<<<<
 *     def used_gpu_cc_protected_memory(self, val):
 *         self._data.used_gpu_cc_protected_memory = val
*/

/* Python wrapper */
/* Python-level __set__ wrapper for ProcessDetail_v1.used_gpu_cc_protected_memory.
 * Casts self to the extension-type struct and forwards to the implementation;
 * returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast self and delegate; the impl does the actual attribute store. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.used_gpu_cc_protected_memory = val` (pyx line
 * 2622): a plain setattr on the wrapped `_data` object. Returns 0 on success,
 * -1 with an exception set (and a traceback entry added) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2622
 *     @used_gpu_cc_protected_memory.setter
 *     def used_gpu_cc_protected_memory(self, val):
 *         self._data.used_gpu_cc_protected_memory = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
*/
  /* setattr(self._data, "used_gpu_cc_protected_memory", val); < 0 = error. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_used_gpu_cc_protected_memory, __pyx_v_val) < (0)) __PYX_ERR(0, 2622, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2620
 *         return self._data.used_gpu_cc_protected_memory
 * 
 *     @used_gpu_cc_protected_memory.setter             # <<<<<<<<<<<<<<
 *     def used_gpu_cc_protected_memory(self, val):
 *         self._data.used_gpu_cc_protected_memory = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.used_gpu_cc_protected_memory.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2624
 *         self._data.used_gpu_cc_protected_memory = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python-level __getitem__ wrapper (mp_subscript slot) for ProcessDetail_v1.
 * Casts self to the extension-type struct and forwards to the implementation;
 * returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast self and delegate to the __getitem__ implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetail_v1.__getitem__ (pyx lines 2624-2638).
 * For an int key: bounds-check against self._data.size, normalize a negative
 * index, and return ProcessDetail_v1.from_data(self._data[key_:key_+1]) — a
 * one-element view wrapped in a new ProcessDetail_v1. For any other key:
 * evaluate out = self._data[key]; if `out` is a numpy.recarray whose dtype
 * equals process_detail_v1_dtype, wrap it via from_data, else return it raw. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":2627
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  /* isinstance(key, int) compiled to an exact PyLong check. */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2628
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
*/
    /* Coerce the Python int key to ssize_t (OverflowError propagates). */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2628, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":2629
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
*/
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2629, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2629, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":2630
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    /* Short-circuit `or`: reject key_ outside [-size, size-1]. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2631
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
*/
      /* Construct and raise IndexError("index is out of bounds"). */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2631, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 2631, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2630
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    }

    /* "cuda/bindings/_nvml.pyx":2632
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])
*/
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":2633
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
*/
      /* Normalize a negative index into [0, size). */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":2632
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])
*/
    }

    /* "cuda/bindings/_nvml.pyx":2634
 *             if key_ < 0:
 *                 key_ += size
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_detail_v1_dtype:
*/
    /* Slice _data[key_:key_+1] and wrap it via ProcessDetail_v1.from_data. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2634, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2634, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2627
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  }

  /* "cuda/bindings/_nvml.pyx":2635
 *                 key_ += size
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_detail_v1_dtype:
 *             return ProcessDetail_v1.from_data(out)
*/
  /* Non-int key (e.g. slice or field name): generic subscript on _data. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":2636
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_detail_v1_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessDetail_v1.from_data(out)
 *         return out
*/
  /* Short-circuit `and`: recarray instance check, then dtype equality. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_detail_v1_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2636, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2637
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_detail_v1_dtype:
 *             return ProcessDetail_v1.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
*/
    /* Matching recarray: wrap `out` in a new ProcessDetail_v1. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2637, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2636
 *             return ProcessDetail_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_detail_v1_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessDetail_v1.from_data(out)
 *         return out
*/
  }

  /* "cuda/bindings/_nvml.pyx":2638
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_detail_v1_dtype:
 *             return ProcessDetail_v1.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  /* Fallback: return the raw subscription result. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2624
 *         self._data.used_gpu_cc_protected_memory = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2640
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python-level __setitem__ wrapper (mp_ass_subscript slot) for
 * ProcessDetail_v1. Casts self to the extension-type struct and forwards to
 * the implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast self and delegate to the __setitem__ implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data[key] = val` (pyx line 2641): delegates the
 * store to the wrapped `_data` object's own __setitem__. Returns 0 on
 * success, -1 with an exception set (traceback entry added) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":2641
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 2641, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2640
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2643
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessDetail_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method ProcessDetail_v1.from_data(data).
 * Unpacks exactly one argument ("data", positional or keyword) under the
 * METH_FASTCALL/vectorcall calling convention and forwards it to the
 * implementation. Returns a new reference, or NULL with TypeError set on
 * bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_14from_data, "ProcessDetail_v1.from_data(data)\n\nCreate an ProcessDetail_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `process_detail_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall build: recover the positional-arg count from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: this test was `unlikely(__pyx_kwds_len) < 0`. Cython's
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only
     * 0 or 1, so the old comparison was always false and a negative (error)
     * result from __Pyx_NumKwargs_FASTCALL was silently ignored. The `< 0`
     * belongs inside the unlikely() argument. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2643, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: accept at most one positional arg, then let the
       * keyword parser fill/validate the remaining slot. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2643, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 2643, __pyx_L3_error)
      /* Ensure every required slot was filled by either route. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 2643, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2643, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 2643, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessDetail_v1.from_data(data)` (static method).
 * Validates that `data` is a 1-D numpy.ndarray of dtype
 * `process_detail_v1_dtype`, then stores a recarray *view* of it (no copy)
 * on a freshly allocated ProcessDetail_v1 instance.
 * Returns a new reference to the instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;  /* general-purpose owned temporaries; XDECREF'ed on the error path */
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2650
 *             data (_numpy.ndarray): a 1D array of dtype `process_detail_v1_dtype` holding the data.
 *         """
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
*/
  /* Allocate the instance via tp_new directly (bypasses any Python-level __init__). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetail_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2650, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":2651
 *         """
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  /* Look up numpy.ndarray from module globals on every call and type-check `data`. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 2651, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":2652
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2652, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2652, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2651
 *         """
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  }

  /* "cuda/bindings/_nvml.pyx":2653
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_detail_v1_dtype:
*/
  /* Require exactly 1 dimension: data.ndim != 1 -> ValueError. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2653, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2653, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":2654
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != process_detail_v1_dtype:
 *             raise ValueError("data array must be of dtype process_detail_v1_dtype")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2654, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2654, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2653
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_detail_v1_dtype:
*/
  }

  /* "cuda/bindings/_nvml.pyx":2655
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_detail_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_detail_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  /* Compare data.dtype against the module-level `process_detail_v1_dtype`
   * with a generic rich-compare (dtype defines its own __ne__). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2655, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_process_detail_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2655, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2655, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 2655, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":2656
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_detail_v1_dtype:
 *             raise ValueError("data array must be of dtype process_detail_v1_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_proc_2};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2656, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 2656, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2655
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_detail_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_detail_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  }

  /* "cuda/bindings/_nvml.pyx":2657
 *         if data.dtype != process_detail_v1_dtype:
 *             raise ValueError("data array must be of dtype process_detail_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray): a recarray view shares the
   * caller's buffer, so no data is copied. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2657, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2657, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2657, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Swap the view into obj->_data, releasing whatever reference was there. */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2659
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2643
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessDetail_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  /* Error path: release all live temporaries, record a traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2661
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessDetail_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for `ProcessDetail_v1.from_ptr(ptr, size=1, readonly=False)`.
 * Accepts both positional (fastcall) and keyword arguments, applies defaults,
 * converts to C types (intptr_t, size_t, int) and delegates to the
 * implementation function. Returns NULL with an exception set on bad args. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_16from_ptr, "ProcessDetail_v1.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an ProcessDetail_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};  /* borrowed-or-owned slots for ptr, size, readonly */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 2661, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals (fallthrough switch), then merge kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2661, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2661, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2661, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2661, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; raise if it is still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2661, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1..3 args accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2661, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2661, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2661, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): `ptr` is converted via PyLong_AsSsize_t and stored as
     * intptr_t — same width on supported platforms; confirm this matches the
     * converter used by other intptr_t parameters in this module. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2662, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 2662, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);  /* default: size=1 */
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2662, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":2662
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an ProcessDetail_v1 instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);  /* default: readonly=False */
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2661, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":2661
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessDetail_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessDetail_v1.from_ptr(ptr, size=1, readonly=False)`.
 * Rejects a null pointer, wraps `ptr` in a memoryview of
 * sizeof(nvmlProcessDetail_v1_t) * size bytes (read-only or writable per
 * `readonly`), builds a NumPy ndarray over that buffer with
 * `process_detail_v1_dtype`, and stores a recarray view of it on a new
 * ProcessDetail_v1 instance. The caller's memory is NOT copied and must
 * outlive the returned object. Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2670
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2671
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2671, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2671, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2670
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":2672
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the instance via tp_new directly (bypasses any Python-level __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetail_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2672, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2673
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessDetail_v1_t) * size, flag)
*/
  /* flag = PyBUF_READ if readonly else PyBUF_WRITE (boxed as a Python int,
   * since `flag` is an untyped cdef variable in the .pyx source). */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2673, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2673, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2675
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessDetail_v1_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_detail_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  /* Unbox `flag` back to C int for the PyMemoryView_FromMemory call. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2675, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":2674
 *         cdef ProcessDetail_v1 obj = ProcessDetail_v1.__new__(ProcessDetail_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlProcessDetail_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_detail_v1_dtype)
*/
  /* Zero-copy memoryview over the caller-supplied memory; the view does not
   * own the memory, so `ptr` must stay valid while the object lives. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlProcessDetail_v1_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2674, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2676
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessDetail_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_detail_v1_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* data = numpy.ndarray(size, buffer=buf, dtype=process_detail_v1_dtype)
   * via a vectorcall with two keyword arguments. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_process_detail_v1_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2676, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 2676, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 2676, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2676, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2677
 *             <char*>ptr, sizeof(nvmlProcessDetail_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_detail_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray): store a recarray view of the array. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2677, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2677, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2677, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Swap the view into obj->_data, releasing whatever reference was there. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2679
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2661
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessDetail_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release all live temporaries, record a traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2530
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Property-getter wrapper for the readonly `_data` attribute: casts `self`
 * to the extension-type struct and delegates to the getter implementation.
 *
 * BUG FIX: the original body contained
 *   __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
 * but `__pyx_args` / `__pyx_nargs` are not parameters of a getter (a
 * tp_getset getter receives only `self`), so the line referenced undeclared
 * identifiers and could not compile. Getter wrappers take no arguments, so
 * the kwvalues extraction (and the now-unused `__pyx_kwvalues` declaration)
 * is removed entirely. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5_data_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `_data`: returns a new reference to the NumPy
 * recarray stored on the instance. Straight-line form of the generated
 * getter — the XDECREF of the still-NULL result and the goto into the very
 * next label were no-ops and are omitted; the execution trace is identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* Take a new reference for the caller before handing out the attribute. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Argument-checking wrapper for `ProcessDetail_v1.__reduce_cython__(self)`:
 * rejects any positional or keyword arguments, then delegates to the
 * implementation. Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_18__reduce_cython__, "ProcessDetail_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject both
   * positionals and keywords explicitly. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetail_v1.__reduce_cython__ (pickle support).
 * Builds state = (self._data,), appends the instance __dict__ when one is
 * present and non-empty, and returns a reduce tuple that reconstructs the
 * object through the module-level __pyx_unpickle_ProcessDetail_v1 helper
 * (0xa75e18a is the generated layout checksum).  When use_setstate is true
 * the state travels in the third element (applied via __setstate__);
 * otherwise it is embedded directly in the reconstructor arguments. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self._data is not None
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self._data is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* Fixed: the canonical generated form tests whether _data is not None.
     * Previously this evaluated the truth of a stored constant tuple,
     * which is always non-empty and therefore always true, forcing the
     * setstate pickle path unconditionally. */
    __pyx_t_2 = (__pyx_v_self->_data != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self._data is not None
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, state)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessDetail_v1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessDetail_v1__set_state(self, __pyx_state)
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessDetail_v1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessDetail_v1__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry for
 * ProcessDetail_v1.__setstate_cython__ (pickle support); the wrapper
 * itself is defined immediately below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_20__setstate_cython__, "ProcessDetail_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_20__setstate_cython__};
/* METH_FASTCALL wrapper for ProcessDetail_v1.__setstate_cython__.
 * Unpacks exactly one argument (__pyx_state), accepted positionally or by
 * keyword, then delegates to the implementation function.  The `values`
 * array owns temporary references that are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Without METH_FASTCALL the positional count comes from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: the `< 0` comparison belongs inside unlikely().  As written
     * before — unlikely(__pyx_kwds_len) < 0 — __builtin_expect's 0/1
     * result was compared, so the error check could never fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      /* Any slot still empty after keyword parsing is a missing argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetail_v1.__setstate_cython__ (pickle support).
 * Validates that __pyx_state is exactly a tuple (rejecting None and any
 * other type), applies it via the generated
 * __pyx_unpickle_ProcessDetail_v1__set_state helper, and returns None. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16ProcessDetail_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessDetail_v1__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  /* Enforce the helper's `tuple state not None` contract before the call. */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessDetail_v1__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessDetail_v1__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetail_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2682
 * 
 * 
 * cdef _get_device_attributes_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceAttributes_t pod = nvmlDeviceAttributes_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_attributes_dtype_offsets(void) {
  nvmlDeviceAttributes_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlDeviceAttributes_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  size_t __pyx_t_16;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_device_attributes_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":2683
 * 
 * cdef _get_device_attributes_dtype_offsets():
 *     cdef nvmlDeviceAttributes_t pod = nvmlDeviceAttributes_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'gpu_instance_slice_count', 'compute_instance_slice_count', 'memory_size_mb'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":2684
 * cdef _get_device_attributes_dtype_offsets():
 *     cdef nvmlDeviceAttributes_t pod = nvmlDeviceAttributes_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'gpu_instance_slice_count', 'compute_instance_slice_count', 'memory_size_mb'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":2685
 *     cdef nvmlDeviceAttributes_t pod = nvmlDeviceAttributes_t()
 *     return _numpy.dtype({
 *         'names': ['multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'gpu_instance_slice_count', 'compute_instance_slice_count', 'memory_size_mb'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_multiprocessor_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_copy_engine_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_copy_engine_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_shared_copy_engine_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_decoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_decoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_shared_decoder_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_encoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_encoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_shared_encoder_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_jpeg_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_jpeg_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_shared_jpeg_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_ofa_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_ofa_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_shared_ofa_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_slice_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_slice_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_gpu_instance_slice_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_compute_instance_slice_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_compute_instance_slice_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_compute_instance_slice_count) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_memory_size_mb);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_memory_size_mb);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_memory_size_mb) != (0)) __PYX_ERR(0, 2685, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 2685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2686
 *     return _numpy.dtype({
 *         'names': ['multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'gpu_instance_slice_count', 'compute_instance_slice_count', 'memory_size_mb'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 2686, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 2685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2688
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.multiprocessorCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":2689
 *         'offsets': [
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
*/
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedCopyEngineCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 2689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":2690
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedDecoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 2690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":2691
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedEncoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 2691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":2692
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceSliceCount)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedJpegCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 2692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":2693
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpuInstanceSliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceSliceCount)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedOfaCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2693, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":2694
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceSliceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.computeInstanceSliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpuInstanceSliceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 2694, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":2695
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceSliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceSliceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.computeInstanceSliceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":2696
 *             (<intptr_t>&(pod.gpuInstanceSliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceSliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlDeviceAttributes_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memorySizeMB)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2696, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":2687
 *         'names': ['multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'gpu_instance_slice_count', 'compute_instance_slice_count', 'memory_size_mb'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_15) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_14) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_13) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_12) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_11) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_10) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_9) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_8) != (0)) __PYX_ERR(0, 2687, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 2685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":2698
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlDeviceAttributes_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlDeviceAttributes_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 2685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_16 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_16 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_16, (2-__pyx_t_16) | (__pyx_t_16*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2684, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2682
 * 
 * 
 * cdef _get_device_attributes_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceAttributes_t pod = nvmlDeviceAttributes_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_device_attributes_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2715
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceAttributes_t *>calloc(1, sizeof(nvmlDeviceAttributes_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython entry point for DeviceAttributes.__init__ (tp_init slot).
 * Validates that no positional or keyword arguments were passed, then
 * forwards to the argument-free implementation below.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments; PyTuple_Size can signal failure (< 0)
   * when tuple sizes are not assumed safe. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: any positional or keyword argument is a
   * TypeError (raised by the helper; -1 propagates the exception). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__init__: calloc()s a zero-filled
 * nvmlDeviceAttributes_t owned by this instance (released in
 * __dealloc__), raising MemoryError on allocation failure, then
 * initializes _owner=None, _owned=True, _readonly=False.
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":2716
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceAttributes_t *>calloc(1, sizeof(nvmlDeviceAttributes_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceAttributes")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlDeviceAttributes_t *)calloc(1, (sizeof(nvmlDeviceAttributes_t))));

  /* "cuda/bindings/_nvml.pyx":2717
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceAttributes_t *>calloc(1, sizeof(nvmlDeviceAttributes_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceAttributes")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2718
 *         self._ptr = <nvmlDeviceAttributes_t *>calloc(1, sizeof(nvmlDeviceAttributes_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceAttributes")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up the name "MemoryError" (module globals, falling back per
     * __Pyx_GetModuleGlobalName semantics), call it with the message,
     * and raise the resulting exception. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2718, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    /* Fast-call: if the callable is a bound method, unpack it into
     * (self, function) to avoid building an argument tuple. */
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceAttribute};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2718, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2718, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2717
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceAttributes_t *>calloc(1, sizeof(nvmlDeviceAttributes_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceAttributes")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":2719
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceAttributes")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace whatever _owner held with None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":2720
 *             raise MemoryError("Error allocating DeviceAttributes")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":2721
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":2715
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceAttributes_t *>calloc(1, sizeof(nvmlDeviceAttributes_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2723
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceAttributes_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Wrapper invoked at object destruction for DeviceAttributes.__dealloc__
 * (no arguments).  NOTE(review): __pyx_args/__pyx_nargs are not declared
 * in this scope; __Pyx_KwValues_VARARGS is presumably a macro that
 * ignores its arguments — confirm against the Cython utility preamble.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static void __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of DeviceAttributes.__dealloc__: frees the underlying
 * nvmlDeviceAttributes_t only when this instance owns it (_owned) and
 * the pointer is non-NULL.  The pointer is copied to a local and nulled
 * on the instance BEFORE free() so the struct never holds a dangling
 * pointer.  Cannot raise (dealloc context).
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static void __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  nvmlDeviceAttributes_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlDeviceAttributes_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":2725
 *     def __dealloc__(self):
 *         cdef nvmlDeviceAttributes_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit "and": _ptr is only inspected when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2726
 *         cdef nvmlDeviceAttributes_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":2727
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":2728
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2725
 *     def __dealloc__(self):
 *         cdef nvmlDeviceAttributes_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":2723
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceAttributes_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":2730
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceAttributes object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot wrapper for DeviceAttributes.__repr__ (no arguments).
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here;
 * __Pyx_KwValues_VARARGS presumably ignores its arguments — confirm.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__repr__: evaluates the f-string
 * f"<{__name__}.DeviceAttributes object at {hex(id(self))}>" by
 * formatting __name__, computing hex(id(self)), and joining five
 * segments (literals + formatted values) into one unicode object.
 * Returns a new reference, or NULL with an exception set.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":2731
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.DeviceAttributes object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: fetch the module global and format it with no spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id(self) -> hex() -> ensure exact unicode str. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join segments: "<", __name__, ".DeviceAttributes object at ",
   * hex-id, ">".  The precomputed length presumably sums the constant
   * segment lengths (1*2 for the brackets, 28 for the middle literal)
   * plus the two dynamic segments — generated invariant, do not tune. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_DeviceAttributes_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 28 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2730
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceAttributes object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2733
 *         return f"<{__name__}.DeviceAttributes object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for DeviceAttributes.ptr (no arguments).
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here;
 * __Pyx_KwValues_VARARGS presumably ignores its arguments — confirm.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the DeviceAttributes.ptr property getter: returns
 * the raw struct pointer address as a Python int.  Uses
 * PyLong_FromSsize_t on the intptr_t value (Cython assumes intptr_t
 * fits in Py_ssize_t on supported platforms — generated invariant).
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2736
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2733
 *         return f"<{__name__}.DeviceAttributes object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2738
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) accessor DeviceAttributes._get_ptr: returns the raw
 * struct pointer as intptr_t with no Python-object overhead.  Callable
 * only from other C/Cython code; cannot raise.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16DeviceAttributes__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":2739
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2738
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2741
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot wrapper for DeviceAttributes.__int__ (no arguments).
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here;
 * __Pyx_KwValues_VARARGS presumably ignores its arguments — confirm.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__int__: int(obj) yields the raw
 * struct pointer address (same value as the .ptr property).
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":2742
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2742, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2741
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2744
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceAttributes other_
 *         if not isinstance(other, DeviceAttributes):
*/

/* Python wrapper */
/* Wrapper for DeviceAttributes.__eq__ (binary: self, other).
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here;
 * __Pyx_KwValues_VARARGS presumably ignores its arguments — confirm.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__eq__: returns False when `other`
 * is not a DeviceAttributes; otherwise compares the two underlying
 * nvmlDeviceAttributes_t structs byte-for-byte with memcmp.
 * NOTE(review): the pointers are dereferenced without a NULL check —
 * relies on both instances having been initialized; confirm no code
 * path produces a live instance with _ptr == NULL.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":2746
 *     def __eq__(self, other):
 *         cdef DeviceAttributes other_
 *         if not isinstance(other, DeviceAttributes):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2747
 *         cdef DeviceAttributes other_
 *         if not isinstance(other, DeviceAttributes):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAttributes_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2746
 *     def __eq__(self, other):
 *         cdef DeviceAttributes other_
 *         if not isinstance(other, DeviceAttributes):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":2748
 *         if not isinstance(other, DeviceAttributes):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAttributes_t)) == 0)
 * 
 */
  /* Checked cast of `other` to the typed local (the earlier isinstance
   * test already returned False for non-matching types). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes))))) __PYX_ERR(0, 2748, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":2749
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAttributes_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlDeviceAttributes_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2744
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceAttributes other_
 *         if not isinstance(other, DeviceAttributes):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2751
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAttributes_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
*/

/* Python wrapper */
/* mp_ass_subscript slot wrapper for DeviceAttributes.__setitem__
 * (self, key, val).  NOTE(review): __pyx_args/__pyx_nargs are not
 * declared here; __Pyx_KwValues_VARARGS presumably ignores its
 * arguments — confirm against the Cython utility preamble.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__setitem__(key, val).
   Special case: obj[0] = <numpy.ndarray> deep-copies the ndarray's buffer
   into a freshly malloc'd nvmlDeviceAttributes_t owned by this instance
   (_owned = True, _owner = None, _readonly mirrors the array's writeable
   flag).  Any other key falls back to setattr(self, key, val), i.e. the
   named-property setters below.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":2752
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: compare key == 0 first, then the isinstance check
     (which has to look up `numpy.ndarray` from module globals each call). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2752, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2752, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2752, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 2752, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2753
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAttributes")
 */
    /* NOTE(review): _ptr is overwritten without freeing any previously owned
       allocation — repeated obj[0] = arr assignments would leak; presumably
       callers only assign once after construction — confirm in the .pyx. */
    __pyx_v_self->_ptr = ((nvmlDeviceAttributes_t *)malloc((sizeof(nvmlDeviceAttributes_t))));

    /* "cuda/bindings/_nvml.pyx":2754
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAttributes_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2755
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAttributes")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAttributes_t))
 *             self._owner = None
 */
      /* Build MemoryError(msg) via the vectorcall fast path; __pyx_t_6 is the
         offset into the args array (0 when a bound-method self slot is used,
         1 otherwise). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2755, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceAttribute};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2755, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 2755, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2754
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *                 memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAttributes_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":2756
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAttributes_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the ndarray buffer address as a Python int; convert
       it to intptr_t and copy sizeof(nvmlDeviceAttributes_t) bytes from it.
       Assumes the caller supplied an array at least that large — TODO confirm
       the caller guarantees the buffer size. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2756, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2756, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2756, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlDeviceAttributes_t))));

    /* "cuda/bindings/_nvml.pyx":2757
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAttributes_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* Standard Cython attribute-store dance: incref the new value, then
       decref and replace the old _owner reference. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2758
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAttributes_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2759
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability: a read-only ndarray yields a
       read-only DeviceAttributes (the property setters below enforce this). */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 2759, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":2752
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":2761
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback for any other key: delegate to normal attribute assignment. */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 2761, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":2751
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAttributes_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2763
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

/* Python wrapper: property __get__ slot for DeviceAttributes.multiprocessor_count;
   downcasts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for DeviceAttributes.multiprocessor_count: boxes the C struct field
   _ptr[0].multiprocessorCount (unsigned int) as a Python int.
   NOTE(review): _ptr is dereferenced without a NULL check — assumes the
   instance was fully initialized before the property is read. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2766
 *     def multiprocessor_count(self):
 *         """int: """
 *         return self._ptr[0].multiprocessorCount             # <<<<<<<<<<<<<<
 * 
 *     @multiprocessor_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).multiprocessorCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2763
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.multiprocessor_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2768
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper: property __set__ slot for DeviceAttributes.multiprocessor_count;
   downcasts self and delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for DeviceAttributes.multiprocessor_count: raises ValueError when the
   instance is read-only (see __setitem__, which mirrors the source ndarray's
   writeable flag into _readonly); otherwise converts val to unsigned int and
   stores it into _ptr[0].multiprocessorCount.  Returns 0 on success, -1 with
   an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2770
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2771
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].multiprocessorCount = val
 * 
 */
    /* Construct ValueError(msg) via the vectorcall fast path and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2771, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2771, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2770
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2772
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].multiprocessorCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion raises OverflowError/TypeError for out-of-range or non-int val. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2772, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).multiprocessorCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2768
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.multiprocessor_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2774
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self):
 *         """int: """
*/

/* Python wrapper: property __get__ slot for DeviceAttributes.shared_copy_engine_count;
   downcasts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for DeviceAttributes.shared_copy_engine_count: boxes the C struct
   field _ptr[0].sharedCopyEngineCount (unsigned int) as a Python int.
   NOTE(review): _ptr is dereferenced without a NULL check — assumes the
   instance was fully initialized before the property is read. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2777
 *     def shared_copy_engine_count(self):
 *         """int: """
 *         return self._ptr[0].sharedCopyEngineCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_copy_engine_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedCopyEngineCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2777, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2774
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_copy_engine_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2779
 *         return self._ptr[0].sharedCopyEngineCount
 * 
 *     @shared_copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper: property __set__ slot for DeviceAttributes.shared_copy_engine_count;
   downcasts self and delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for DeviceAttributes.shared_copy_engine_count: raises ValueError when
   the instance is read-only; otherwise converts val to unsigned int and stores
   it into _ptr[0].sharedCopyEngineCount.  Returns 0 on success, -1 with an
   exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2781
 *     @shared_copy_engine_count.setter
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2782
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 */
    /* Construct ValueError(msg) via the vectorcall fast path and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2782, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2782, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2781
 *     @shared_copy_engine_count.setter
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2783
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion raises OverflowError/TypeError for out-of-range or non-int val. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2783, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedCopyEngineCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2779
 *         return self._ptr[0].sharedCopyEngineCount
 * 
 *     @shared_copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_copy_engine_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2785
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self):
 *         """int: """
*/

/* Python wrapper: property __get__ slot for DeviceAttributes.shared_decoder_count;
   downcasts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for DeviceAttributes.shared_decoder_count: boxes the C struct field
   _ptr[0].sharedDecoderCount (unsigned int) as a Python int.
   NOTE(review): _ptr is dereferenced without a NULL check — assumes the
   instance was fully initialized before the property is read. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2788
 *     def shared_decoder_count(self):
 *         """int: """
 *         return self._ptr[0].sharedDecoderCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_decoder_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedDecoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2788, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2785
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_decoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2790
 *         return self._ptr[0].sharedDecoderCount
 * 
 *     @shared_decoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper: property __set__ slot for DeviceAttributes.shared_decoder_count;
   downcasts self and delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for DeviceAttributes.shared_decoder_count: raises ValueError when the
   instance is read-only; otherwise converts val to unsigned int and stores it
   into _ptr[0].sharedDecoderCount.  Returns 0 on success, -1 with an exception
   set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2792
 *     @shared_decoder_count.setter
 *     def shared_decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2793
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedDecoderCount = val
 * 
 */
    /* Construct ValueError(msg) via the vectorcall fast path and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2793, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2793, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2792
 *     @shared_decoder_count.setter
 *     def shared_decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2794
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion raises OverflowError/TypeError for out-of-range or non-int val. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2794, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedDecoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2790
 *         return self._ptr[0].sharedDecoderCount
 * 
 *     @shared_decoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_decoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2796
 *         self._ptr[0].sharedDecoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self):
 *         """int: """
*/

/* Python wrapper: property __get__ slot for DeviceAttributes.shared_encoder_count;
   downcasts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for DeviceAttributes.shared_encoder_count: boxes the C struct field
   _ptr[0].sharedEncoderCount (unsigned int) as a Python int.
   NOTE(review): _ptr is dereferenced without a NULL check — assumes the
   instance was fully initialized before the property is read. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2799
 *     def shared_encoder_count(self):
 *         """int: """
 *         return self._ptr[0].sharedEncoderCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_encoder_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedEncoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2796
 *         self._ptr[0].sharedDecoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_encoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2801
 *         return self._ptr[0].sharedEncoderCount
 * 
 *     @shared_encoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper: property __set__ slot for DeviceAttributes.shared_encoder_count;
   downcasts self and delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.shared_encoder_count.__set__:
 * raises ValueError if the instance is flagged read-only (_readonly),
 * otherwise converts `val` to unsigned int and stores it into
 * self._ptr[0].sharedEncoderCount. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2803
 *     @shared_encoder_count.setter
 *     def shared_encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2804
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedEncoderCount = val
 * 
 */
    /* Build and raise ValueError via a vectorcall-style invocation of the
     * exception type with the interned message string. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2804, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2804, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2803
 *     @shared_encoder_count.setter
 *     def shared_encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2805
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python integer to C unsigned int (may raise OverflowError
   * or TypeError) and write it through to the underlying NVML struct. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2805, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedEncoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2801
 *         return self._ptr[0].sharedEncoderCount
 * 
 *     @shared_encoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_encoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2807
 *         self._ptr[0].sharedEncoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Slot-level __get__ wrapper for DeviceAttributes.shared_jpeg_count:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.shared_jpeg_count.__get__:
 * reads self._ptr[0].sharedJpegCount (C unsigned int) and returns it
 * boxed as a new Python int, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2810
 *     def shared_jpeg_count(self):
 *         """int: """
 *         return self._ptr[0].sharedJpegCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_jpeg_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedJpegCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2810, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2807
 *         self._ptr[0].sharedEncoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_jpeg_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2812
 *         return self._ptr[0].sharedJpegCount
 * 
 *     @shared_jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Slot-level __set__ wrapper for DeviceAttributes.shared_jpeg_count:
 * casts self to the extension-type struct and delegates to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.shared_jpeg_count.__set__:
 * raises ValueError if _readonly is set, otherwise converts `val` to
 * unsigned int and stores it into self._ptr[0].sharedJpegCount.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2814
 *     @shared_jpeg_count.setter
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedJpegCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2815
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedJpegCount = val
 * 
 */
    /* Build and raise ValueError with the interned read-only message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2815, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2815, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2814
 *     @shared_jpeg_count.setter
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedJpegCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2816
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedJpegCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned int (may raise) and write through. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2816, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedJpegCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2812
 *         return self._ptr[0].sharedJpegCount
 * 
 *     @shared_jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_jpeg_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2818
 *         self._ptr[0].sharedJpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Slot-level __get__ wrapper for DeviceAttributes.shared_ofa_count:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.shared_ofa_count.__get__:
 * reads self._ptr[0].sharedOfaCount (C unsigned int) and returns it
 * boxed as a new Python int, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2821
 *     def shared_ofa_count(self):
 *         """int: """
 *         return self._ptr[0].sharedOfaCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_ofa_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedOfaCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2818
 *         self._ptr[0].sharedJpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_ofa_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2823
 *         return self._ptr[0].sharedOfaCount
 * 
 *     @shared_ofa_count.setter             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Slot-level __set__ wrapper for DeviceAttributes.shared_ofa_count:
 * casts self to the extension-type struct and delegates to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.shared_ofa_count.__set__:
 * raises ValueError if _readonly is set, otherwise converts `val` to
 * unsigned int and stores it into self._ptr[0].sharedOfaCount.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2825
 *     @shared_ofa_count.setter
 *     def shared_ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedOfaCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2826
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedOfaCount = val
 * 
 */
    /* Build and raise ValueError with the interned read-only message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2826, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2826, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2825
 *     @shared_ofa_count.setter
 *     def shared_ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedOfaCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2827
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].sharedOfaCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned int (may raise) and write through. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2827, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedOfaCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2823
 *         return self._ptr[0].sharedOfaCount
 * 
 *     @shared_ofa_count.setter             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.shared_ofa_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2829
 *         self._ptr[0].sharedOfaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_slice_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Slot-level __get__ wrapper for DeviceAttributes.gpu_instance_slice_count:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.gpu_instance_slice_count.__get__:
 * reads self._ptr[0].gpuInstanceSliceCount (C unsigned int) and returns
 * it boxed as a new Python int, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2832
 *     def gpu_instance_slice_count(self):
 *         """int: """
 *         return self._ptr[0].gpuInstanceSliceCount             # <<<<<<<<<<<<<<
 * 
 *     @gpu_instance_slice_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).gpuInstanceSliceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2832, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2829
 *         self._ptr[0].sharedOfaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_slice_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.gpu_instance_slice_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2834
 *         return self._ptr[0].gpuInstanceSliceCount
 * 
 *     @gpu_instance_slice_count.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_slice_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Slot-level __set__ wrapper for DeviceAttributes.gpu_instance_slice_count:
 * casts self to the extension-type struct and delegates to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.gpu_instance_slice_count.__set__:
 * raises ValueError if _readonly is set, otherwise converts `val` to
 * unsigned int and stores it into self._ptr[0].gpuInstanceSliceCount.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2836
 *     @gpu_instance_slice_count.setter
 *     def gpu_instance_slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].gpuInstanceSliceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2837
 *     def gpu_instance_slice_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].gpuInstanceSliceCount = val
 * 
 */
    /* Build and raise ValueError with the interned read-only message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2837, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2837, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2836
 *     @gpu_instance_slice_count.setter
 *     def gpu_instance_slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].gpuInstanceSliceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2838
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].gpuInstanceSliceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned int (may raise) and write through. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2838, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).gpuInstanceSliceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2834
 *         return self._ptr[0].gpuInstanceSliceCount
 * 
 *     @gpu_instance_slice_count.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_slice_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.gpu_instance_slice_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2840
 *         self._ptr[0].gpuInstanceSliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_slice_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Slot-level __get__ wrapper for DeviceAttributes.compute_instance_slice_count:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.compute_instance_slice_count.__get__:
 * reads self._ptr[0].computeInstanceSliceCount (C unsigned int) and
 * returns it boxed as a new Python int, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2843
 *     def compute_instance_slice_count(self):
 *         """int: """
 *         return self._ptr[0].computeInstanceSliceCount             # <<<<<<<<<<<<<<
 * 
 *     @compute_instance_slice_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).computeInstanceSliceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2840
 *         self._ptr[0].gpuInstanceSliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_slice_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.compute_instance_slice_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2845
 *         return self._ptr[0].computeInstanceSliceCount
 * 
 *     @compute_instance_slice_count.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_slice_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Slot-level __set__ wrapper for DeviceAttributes.compute_instance_slice_count:
 * casts self to the extension-type struct and delegates to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.compute_instance_slice_count.__set__:
 * raises ValueError if _readonly is set, otherwise converts `val` to
 * unsigned int and stores it into self._ptr[0].computeInstanceSliceCount.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2847
 *     @compute_instance_slice_count.setter
 *     def compute_instance_slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].computeInstanceSliceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2848
 *     def compute_instance_slice_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].computeInstanceSliceCount = val
 * 
 */
    /* Build and raise ValueError with the interned read-only message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2848, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2848, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2847
 *     @compute_instance_slice_count.setter
 *     def compute_instance_slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].computeInstanceSliceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2849
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].computeInstanceSliceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned int (may raise) and write through. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2849, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).computeInstanceSliceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2845
 *         return self._ptr[0].computeInstanceSliceCount
 * 
 *     @compute_instance_slice_count.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_slice_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.compute_instance_slice_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2851
 *         self._ptr[0].computeInstanceSliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self):
 *         """int: """
*/

/* Python wrapper */
/* Slot-level __get__ wrapper for DeviceAttributes.memory_size_mb:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.memory_size_mb.__get__:
 * reads self._ptr[0].memorySizeMB — note this field is a C
 * unsigned long long (PY_LONG_LONG), unlike the unsigned-int count
 * fields above — and returns it boxed as a new Python int, or NULL
 * on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2854
 *     def memory_size_mb(self):
 *         """int: """
 *         return self._ptr[0].memorySizeMB             # <<<<<<<<<<<<<<
 * 
 *     @memory_size_mb.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).memorySizeMB); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2854, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2851
 *         self._ptr[0].computeInstanceSliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.memory_size_mb.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2856
 *         return self._ptr[0].memorySizeMB
 * 
 *     @memory_size_mb.setter             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Slot-level __set__ wrapper for DeviceAttributes.memory_size_mb:
 * casts self to the extension-type struct and delegates to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for DeviceAttributes.memory_size_mb.
 * Raises ValueError if the instance was created read-only; otherwise converts
 * `val` to unsigned long long and stores it into the wrapped struct's
 * `memorySizeMB` field. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2858
 *     @memory_size_mb.setter
 *     def memory_size_mb(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].memorySizeMB = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2859
 *     def memory_size_mb(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].memorySizeMB = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAttributes_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2859, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2859, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2858
 *     @memory_size_mb.setter
 *     def memory_size_mb(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].memorySizeMB = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":2860
 *         if self._readonly:
 *             raise ValueError("This DeviceAttributes instance is read-only")
 *         self._ptr[0].memorySizeMB = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* (unsigned PY_LONG_LONG)-1 doubles as the error sentinel, so a PyErr check
   * is required to disambiguate a genuine value of ULLONG_MAX from failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 2860, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).memorySizeMB = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2856
 *         return self._ptr[0].memorySizeMB
 * 
 *     @memory_size_mb.setter             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.memory_size_mb.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2862
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceAttributes instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod DeviceAttributes.from_data(data).
 * Accepts exactly one argument, positionally or by the keyword `data`, then
 * forwards to the implementation function. Returns NULL with an exception set
 * on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_12from_data, "DeviceAttributes.from_data(data)\n\nCreate an DeviceAttributes instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `device_attributes_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must be inside unlikely(). `unlikely(x)` expands
     * to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the previous
     * form `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2862, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2862, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 2862, __pyx_L3_error)
      /* Verify the required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 2862, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2862, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 2862, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.from_data(data).
 * Delegates to the module-level helper __from_data, passing the module-global
 * `device_attributes_dtype` and the DeviceAttributes extension type so the
 * helper can validate the array and construct the wrapper instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2869
 *             data (_numpy.ndarray): a single-element array of dtype `device_attributes_dtype` holding the data.
 *         """
 *         return __from_data(data, "device_attributes_dtype", device_attributes_dtype, DeviceAttributes)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Look up the dtype object from module globals (it is defined in Python code). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_device_attributes_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2869, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_device_attributes_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2869, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2862
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceAttributes instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2871
 *         return __from_data(data, "device_attributes_dtype", device_attributes_dtype, DeviceAttributes)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAttributes instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod
 * DeviceAttributes.from_ptr(ptr, readonly=False, owner=None).
 * Parses 1-3 positional/keyword arguments, converts them to C types
 * (intptr_t, int/bool, PyObject*) and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_14from_ptr, "DeviceAttributes.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an DeviceAttributes instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must be inside unlikely(). `unlikely(x)` expands
     * to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the previous
     * form `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2871, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2871, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2871, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2871, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2871, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":2872
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an DeviceAttributes instance wrapping the given pointer.
 * 
*/
      /* Fill in the default for `owner` and verify the required `ptr` argument. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2871, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2871, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2871, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2871, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): PyLong_AsSsize_t is used for the intptr_t conversion; this
     * assumes Py_ssize_t and intptr_t have the same width — true on common
     * platforms, but worth confirming against the Cython-generated helpers. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2872, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2872, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2871, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":2871
 *         return __from_data(data, "device_attributes_dtype", device_attributes_dtype, DeviceAttributes)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAttributes instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer, allocates a new DeviceAttributes wrapper object, and
 * then either (owner is None) malloc's its own copy of the struct and memcpy's
 * the data from `ptr` (the wrapper owns and will free the copy), or
 * (owner given) aliases `ptr` directly and keeps a reference to `owner` to
 * keep the underlying memory alive. Finally records the `readonly` flag.
 * Returns the new instance, or NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":2880
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2881
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2881, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2881, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2880
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)
*/
  }

  /* "cuda/bindings/_nvml.pyx":2882
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
*/
  /* Allocate the wrapper via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAttributes(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2882, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":2883
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2884
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAttributes")
*/
    /* No owner: take a private heap copy of the struct (freed by the wrapper). */
    __pyx_v_obj->_ptr = ((nvmlDeviceAttributes_t *)malloc((sizeof(nvmlDeviceAttributes_t))));

    /* "cuda/bindings/_nvml.pyx":2885
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAttributes_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2886
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAttributes")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAttributes_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (it may be shadowed),
       * hence the GetModuleGlobalName + possible bound-method unpacking. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2886, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceAttribute};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2886, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 2886, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2885
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAttributes_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":2887
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAttributes_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlDeviceAttributes_t))));

    /* "cuda/bindings/_nvml.pyx":2888
 *                 raise MemoryError("Error allocating DeviceAttributes")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAttributes_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2889
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAttributes_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlDeviceAttributes_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2883
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAttributes obj = DeviceAttributes.__new__(DeviceAttributes)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceAttributes_t *>malloc(sizeof(nvmlDeviceAttributes_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":2891
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlDeviceAttributes_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's memory and hold a reference to owner. */
    __pyx_v_obj->_ptr = ((nvmlDeviceAttributes_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2892
 *         else:
 *             obj._ptr = <nvmlDeviceAttributes_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":2893
 *             obj._ptr = <nvmlDeviceAttributes_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":2894
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":2895
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2871
 *         return __from_data(data, "device_attributes_dtype", device_attributes_dtype, DeviceAttributes)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAttributes instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for DeviceAttributes.__reduce_cython__ (pickling hook).
 * Rejects any positional or keyword arguments, then forwards to the
 * implementation, which always raises TypeError (this type is unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_16__reduce_cython__, "DeviceAttributes.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__reduce_cython__.
 * Unconditionally raises TypeError: the wrapped `_ptr` is a raw C pointer
 * and cannot be pickled. Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for DeviceAttributes.__setstate_cython__.
 * Declares the METH_FASTCALL (or tuple-based) entry point, its docstring
 * and its PyMethodDef slot, parses the single required argument
 * `__pyx_state` (positional or keyword), then delegates to the
 * implementation function, which always raises TypeError because this
 * extension type is not picklable. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_18__setstate_cython__, "DeviceAttributes.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Unpack exactly one argument (`__pyx_state`), accepting it either
   * positionally or as a keyword. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must sit inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` normalizes the value to 0/1 via
     * __builtin_expect(!!(x), 0) before comparing, so a negative (error)
     * keyword count could never be detected.  The sibling __init__
     * wrapper below uses the correct form. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: release any argument refs collected in
   * values[] and record the traceback before returning NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAttributes.__setstate_cython__.
 * Unconditionally raises TypeError: the wrapped C pointer (_ptr) cannot be
 * reconstructed from pickled state, so unpickling is forbidden.  Both
 * parameters are unused (CYTHON_UNUSED). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16DeviceAttributes_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the pickling-refusal message; control always
   * falls through to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAttributes.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2898
 * 
 * 
 * cdef _get_c2c_mode_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlC2cModeInfo_v1_t pod = nvmlC2cModeInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Implementation of the module-level cdef helper
 * _get_c2c_mode_info_v1_dtype_offsets() (_nvml.pyx:2898).
 * Builds and returns a numpy.dtype describing the in-memory layout of
 * nvmlC2cModeInfo_v1_t: field names, formats, byte offsets computed from a
 * stack instance's field addresses, and the total itemsize. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_c2c_mode_info_v1_dtype_offsets(void) {
  nvmlC2cModeInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlC2cModeInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_c2c_mode_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":2899
 * 
 * cdef _get_c2c_mode_info_v1_dtype_offsets():
 *     cdef nvmlC2cModeInfo_v1_t pod = nvmlC2cModeInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['is_c2c_enabled'],
*/
  /* NOTE(review): __pyx_t_1 is not visibly assigned before this copy in
   * this chunk; pod's contents are indeterminate here.  This is harmless
   * in practice because only the ADDRESSES of pod and its fields are used
   * below (offset arithmetic) — pod's values are never read.  Verify
   * against the full generated file. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":2900
 * cdef _get_c2c_mode_info_v1_dtype_offsets():
 *     cdef nvmlC2cModeInfo_v1_t pod = nvmlC2cModeInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['is_c2c_enabled'],
 *         'formats': [_numpy.uint32],
*/
  /* Look up numpy.dtype once; the dict argument is assembled below. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":2901
 *     cdef nvmlC2cModeInfo_v1_t pod = nvmlC2cModeInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['is_c2c_enabled'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2901, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2901, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_c2c_enabled);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_c2c_enabled);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_is_c2c_enabled) != (0)) __PYX_ERR(0, 2901, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 2901, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2902
 *     return _numpy.dtype({
 *         'names': ['is_c2c_enabled'],
 *         'formats': [_numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.isC2cEnabled)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2902, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2902, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2902, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 2902, __pyx_L1_error);
  __pyx_t_7 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 2901, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":2904
 *         'formats': [_numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.isC2cEnabled)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlC2cModeInfo_v1_t),
*/
  /* Field offset = address-of-field minus address-of-struct. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isC2cEnabled)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2904, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":2903
 *         'names': ['is_c2c_enabled'],
 *         'formats': [_numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isC2cEnabled)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2903, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 2903, __pyx_L1_error);
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 2901, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":2906
 *             (<intptr_t>&(pod.isC2cEnabled)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlC2cModeInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlC2cModeInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2906, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 2901, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(<dict>) through the fastcall/vectorcall path,
   * unpacking a bound method into (self, function) when applicable. */
  __pyx_t_8 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_8 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2900, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2898
 * 
 * 
 * cdef _get_c2c_mode_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlC2cModeInfo_v1_t pod = nvmlC2cModeInfo_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_c2c_mode_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2923
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlC2cModeInfo_v1_t *>calloc(1, sizeof(nvmlC2cModeInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper for C2cModeInfo_v1.__init__.
 * Rejects any positional or keyword arguments (the .pyx signature takes
 * only self), then delegates to the implementation.  Returns 0 on success,
 * -1 on error, per the tp_init protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* No positional args allowed ... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ... and no keyword args either. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of C2cModeInfo_v1.__init__ (_nvml.pyx:2923).
 * Allocates one zero-initialized nvmlC2cModeInfo_v1_t with calloc and
 * stores it in self->_ptr; raises MemoryError if allocation fails.
 * Marks the instance as owning the allocation (_owned = True, _owner =
 * None, _readonly = False) so __dealloc__ will free it. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":2924
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlC2cModeInfo_v1_t *>calloc(1, sizeof(nvmlC2cModeInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating C2cModeInfo_v1")
*/
  __pyx_v_self->_ptr = ((nvmlC2cModeInfo_v1_t *)calloc(1, (sizeof(nvmlC2cModeInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":2925
 *     def __init__(self):
 *         self._ptr = <nvmlC2cModeInfo_v1_t *>calloc(1, sizeof(nvmlC2cModeInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating C2cModeInfo_v1")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":2926
 *         self._ptr = <nvmlC2cModeInfo_v1_t *>calloc(1, sizeof(nvmlC2cModeInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating C2cModeInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating C2cModeInfo_v1") via
     * the fastcall path, unpacking a bound method if needed. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2926, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_C2cModeInfo_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2926, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 2926, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2925
 *     def __init__(self):
 *         self._ptr = <nvmlC2cModeInfo_v1_t *>calloc(1, sizeof(nvmlC2cModeInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating C2cModeInfo_v1")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":2927
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating C2cModeInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Refcount-safe replacement of self->_owner with None: incref new value
   * first, then drop the old reference. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":2928
 *             raise MemoryError("Error allocating C2cModeInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":2929
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":2923
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlC2cModeInfo_v1_t *>calloc(1, sizeof(nvmlC2cModeInfo_v1_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2931
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlC2cModeInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper for C2cModeInfo_v1.__dealloc__; simply forwards to the
 * implementation.  (__Pyx_KwValues_VARARGS is a macro that discards its
 * arguments, so the undeclared __pyx_args/__pyx_nargs names never reach
 * the compiler.) */
static void __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of C2cModeInfo_v1.__dealloc__ (_nvml.pyx:2931).
 * Frees the wrapped struct only when this instance owns it (_owned) and
 * the pointer is non-NULL; _ptr is cleared before free() so the object
 * never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  nvmlC2cModeInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlC2cModeInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":2933
 *     def __dealloc__(self):
 *         cdef nvmlC2cModeInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2934
 *         cdef nvmlC2cModeInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":2935
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":2936
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":2933
 *     def __dealloc__(self):
 *         cdef nvmlC2cModeInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":2931
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlC2cModeInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":2938
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.C2cModeInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper for C2cModeInfo_v1.__repr__; forwards to the
 * implementation and returns its result. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of C2cModeInfo_v1.__repr__ (_nvml.pyx:2938).
 * Evaluates the f-string "<{__name__}.C2cModeInfo_v1 object at
 * {hex(id(self))}>" by formatting __name__, computing hex(id(self)), and
 * joining the five unicode fragments with a precomputed total length and
 * max char value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":2939
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.C2cModeInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ as a str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) as a unicode string. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".C2cModeInfo_v1 object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_C2cModeInfo_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 26 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2938
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.C2cModeInfo_v1 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2941
 *         return f"<{__name__}.C2cModeInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper for the C2cModeInfo_v1.ptr property getter; forwards to
 * the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the C2cModeInfo_v1.ptr property getter
 * (_nvml.pyx:2942-2944): returns the raw address held in self->_ptr as a
 * Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2944
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  /* Pointer value converted to a Python int via intptr_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2944, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2941
 *         return f"<{__name__}.C2cModeInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2946
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for C2cModeInfo_v1._get_ptr (_nvml.pyx:2946): returns
 * self->_ptr as an intptr_t without any Python-object overhead. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14C2cModeInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":2947
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2946
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2949
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper for C2cModeInfo_v1.__int__; forwards to the
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of C2cModeInfo_v1.__int__ (_nvml.pyx:2949): int(obj)
 * yields the raw address held in self->_ptr, same value as the ptr
 * property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":2950
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2950, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2949
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2952
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef C2cModeInfo_v1 other_
 *         if not isinstance(other, C2cModeInfo_v1):
*/

/* Python wrapper for C2cModeInfo_v1.__eq__; forwards self and other to the
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of C2cModeInfo_v1.__eq__ (from _nvml.pyx:2952).
 * Returns Py_False when `other` is not a C2cModeInfo_v1; otherwise
 * compares the two wrapped nvmlC2cModeInfo_v1_t structs bytewise with
 * memcmp and returns the resulting Python bool.
 * Generated by Cython — the temp-variable lifetimes and the
 * __pyx_L1_error/__pyx_L0 label order are load-bearing. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":2954
 *     def __eq__(self, other):
 *         cdef C2cModeInfo_v1 other_
 *         if not isinstance(other, C2cModeInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* exact-type/subtype check against the C2cModeInfo_v1 extension type */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":2955
 *         cdef C2cModeInfo_v1 other_
 *         if not isinstance(other, C2cModeInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlC2cModeInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":2954
 *     def __eq__(self, other):
 *         cdef C2cModeInfo_v1 other_
 *         if not isinstance(other, C2cModeInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":2956
 *         if not isinstance(other, C2cModeInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlC2cModeInfo_v1_t)) == 0)
 * 
 */
  /* second (redundant but generated) type test guards the downcast */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1))))) __PYX_ERR(0, 2956, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":2957
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlC2cModeInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* bytewise struct comparison; both _ptr members must be non-NULL
   * (not checked here — mirrors the .pyx source) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlC2cModeInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2957, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2952
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef C2cModeInfo_v1 other_
 *         if not isinstance(other, C2cModeInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2959
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlC2cModeInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
*/

/* Python wrapper for C2cModeInfo_v1.__setitem__ (mp_ass_subscript slot).
 * Returns 0 on success, -1 on error per the CPython slot convention.
 * Generated by Cython — do not hand-edit the call sequence. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of C2cModeInfo_v1.__setitem__ (from _nvml.pyx:2959).
 * Two behaviors, selected by (key == 0 && isinstance(val, numpy.ndarray)):
 *   - true:  allocate a fresh nvmlC2cModeInfo_v1_t, memcpy the ndarray's
 *            buffer into it, mark the instance as owning the memory and
 *            propagate the array's read-only flag;
 *   - false: fall back to setattr(self, key, val).
 * NOTE(review): the true branch overwrites self._ptr without freeing a
 * previously owned buffer — looks like a leak if this path runs twice on
 * the same instance; mirrors the generated .pyx, confirm upstream.
 * Generated by Cython — temp-variable lifetimes and label order matter. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":2960
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* short-circuit `and`: only look up numpy.ndarray when key == 0 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 2960, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2960, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2960, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 2960, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":2961
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 */
    __pyx_v_self->_ptr = ((nvmlC2cModeInfo_v1_t *)malloc((sizeof(nvmlC2cModeInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":2962
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlC2cModeInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":2963
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlC2cModeInfo_v1_t))
 *             self._owner = None
 */
      /* vectorcall MemoryError(msg); handles a bound-method lookup of
       * "MemoryError" from module globals (shadowable by user code) */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2963, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_C2cModeInfo_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2963, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 2963, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":2962
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlC2cModeInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":2964
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlC2cModeInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data yields the buffer address as a Python int;
     * the array is assumed to hold >= sizeof(nvmlC2cModeInfo_v1_t)
     * bytes — not validated here (mirrors the .pyx source) */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2964, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2964, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2964, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlC2cModeInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":2965
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlC2cModeInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":2966
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlC2cModeInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":2967
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2967, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2967, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 2967, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":2960
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":2969
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 2969, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":2959
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlC2cModeInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2971
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_c2c_enabled(self):
 *         """int: """
*/

/* Python wrapper for the C2cModeInfo_v1.is_c2c_enabled property getter.
 * Generated by Cython — do not hand-edit the call sequence. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for C2cModeInfo_v1.is_c2c_enabled (from _nvml.pyx:2971).
 * Reads the isC2cEnabled field (unsigned int) out of the wrapped
 * nvmlC2cModeInfo_v1_t struct and boxes it as a Python int.
 * Assumes self._ptr is non-NULL — not checked here (mirrors .pyx). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":2974
 *     def is_c2c_enabled(self):
 *         """int: """
 *         return self._ptr[0].isC2cEnabled             # <<<<<<<<<<<<<<
 * 
 *     @is_c2c_enabled.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isC2cEnabled); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2974, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2971
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_c2c_enabled(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.is_c2c_enabled.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2976
 *         return self._ptr[0].isC2cEnabled
 * 
 *     @is_c2c_enabled.setter             # <<<<<<<<<<<<<<
 *     def is_c2c_enabled(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the C2cModeInfo_v1.is_c2c_enabled property setter.
 * Returns 0 on success, -1 on error per the descriptor __set__ convention.
 * Generated by Cython — do not hand-edit the call sequence. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for C2cModeInfo_v1.is_c2c_enabled (from _nvml.pyx:2976).
 * Raises ValueError when the instance was created read-only; otherwise
 * converts `val` to unsigned int and stores it into the wrapped struct's
 * isC2cEnabled field. Assumes self._ptr is non-NULL (mirrors .pyx). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":2978
 *     @is_c2c_enabled.setter
 *     def is_c2c_enabled(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This C2cModeInfo_v1 instance is read-only")
 *         self._ptr[0].isC2cEnabled = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":2979
 *     def is_c2c_enabled(self, val):
 *         if self._readonly:
 *             raise ValueError("This C2cModeInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isC2cEnabled = val
 * 
 */
    /* vectorcall ValueError(msg) against the builtin type directly */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_C2cModeInfo_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2979, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 2979, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":2978
 *     @is_c2c_enabled.setter
 *     def is_c2c_enabled(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This C2cModeInfo_v1 instance is read-only")
 *         self._ptr[0].isC2cEnabled = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":2980
 *         if self._readonly:
 *             raise ValueError("This C2cModeInfo_v1 instance is read-only")
 *         self._ptr[0].isC2cEnabled = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* raises OverflowError/TypeError on conversion failure */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2980, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isC2cEnabled = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":2976
 *         return self._ptr[0].isC2cEnabled
 * 
 *     @is_c2c_enabled.setter             # <<<<<<<<<<<<<<
 *     def is_c2c_enabled(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.is_c2c_enabled.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2982
 *         self._ptr[0].isC2cEnabled = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an C2cModeInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper / argument parser for the static method
 * C2cModeInfo_v1.from_data(data).  Accepts one positional-or-keyword
 * argument `data`, then dispatches to the implementation.
 *
 * Fix: the keyword-count error check was parenthesized as
 * `unlikely(__pyx_kwds_len) < 0`, but unlikely(x) expands to
 * __builtin_expect(!!(x), 0) whose value is 0 or 1, so the comparison
 * was always false and a negative return from __Pyx_NumKwargs_FASTCALL
 * (error) was silently ignored.  Corrected to `unlikely(__pyx_kwds_len < 0)`
 * so the error path actually fires. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_12from_data, "C2cModeInfo_v1.from_data(data)\n\nCreate an C2cModeInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `c2c_mode_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fixed parenthesization: the whole comparison goes inside unlikely() */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2982, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2982, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 2982, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 2982, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2982, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 2982, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method C2cModeInfo_v1.from_data
 * (from _nvml.pyx:2982).  Looks up the module-global
 * `c2c_mode_info_v1_dtype` and forwards everything to the shared
 * __from_data helper, which validates `data` and wraps it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":2989
 *             data (_numpy.ndarray): a single-element array of dtype `c2c_mode_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "c2c_mode_info_v1_dtype", c2c_mode_info_v1_dtype, C2cModeInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_c2c_mode_info_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2989, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_c2c_mode_info_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2989, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2982
 *         self._ptr[0].isC2cEnabled = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an C2cModeInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":2991
 *         return __from_data(data, "c2c_mode_info_v1_dtype", c2c_mode_info_v1_dtype, C2cModeInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an C2cModeInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper / argument parser for the static method
 * C2cModeInfo_v1.from_ptr(ptr, readonly=False, owner=None).
 * Parses up to three positional-or-keyword arguments, converts `ptr`
 * to intptr_t and `readonly` to a C bool, then dispatches to the
 * implementation.
 *
 * Fix: same dead keyword-count error check as in from_data —
 * `unlikely(__pyx_kwds_len) < 0` compares unlikely()'s 0/1 result
 * against 0 and can never be true, so a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was ignored.  Corrected to
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14from_ptr, "C2cModeInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an C2cModeInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fixed parenthesization: the whole comparison goes inside unlikely() */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 2991, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2991, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2991, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2991, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 2991, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":2992
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an C2cModeInfo_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 2991, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 2991, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 2991, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 2991, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* ptr as Py_ssize_t (same width as intptr_t on supported platforms) */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 2992, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 2992, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 2991, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":2991
 *         return __from_data(data, "c2c_mode_info_v1_dtype", c2c_mode_info_v1_dtype, C2cModeInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an C2cModeInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3000
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3001
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3001, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3001, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3000
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":3002
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_C2cModeInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3002, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3003
 *             raise ValueError("ptr must not be null (0)")
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3004
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlC2cModeInfo_v1_t *)malloc((sizeof(nvmlC2cModeInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3005
 *         if owner is None:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlC2cModeInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3006
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlC2cModeInfo_v1_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3006, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_C2cModeInfo_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3006, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 3006, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3005
 *         if owner is None:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlC2cModeInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":3007
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlC2cModeInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlC2cModeInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3008
 *                 raise MemoryError("Error allocating C2cModeInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlC2cModeInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3009
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlC2cModeInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3003
 *             raise ValueError("ptr must not be null (0)")
 *         cdef C2cModeInfo_v1 obj = C2cModeInfo_v1.__new__(C2cModeInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>malloc(sizeof(nvmlC2cModeInfo_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":3011
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlC2cModeInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3012
 *         else:
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":3013
 *             obj._ptr = <nvmlC2cModeInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":3014
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":3015
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":2991
 *         return __from_data(data, "c2c_mode_info_v1_dtype", c2c_mode_info_v1_dtype, C2cModeInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an C2cModeInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * CPython-visible wrapper for C2cModeInfo_v1.__reduce_cython__ (the pickle
 * hook Cython injects). It only validates that no positional or keyword
 * arguments were passed, then delegates to the implementation function
 * (…_16__reduce_cython__), which always raises TypeError.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_16__reduce_cython__, "C2cModeInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the non-fastcall build __pyx_args is a tuple; compute its length here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of C2cModeInfo_v1.__reduce_cython__: unconditionally raises
 * TypeError because the wrapped raw pointer (self._ptr) cannot be pickled.
 * Always returns NULL (via the error path).
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * CPython-visible wrapper for C2cModeInfo_v1.__setstate_cython__ (the
 * unpickle hook Cython injects). Parses exactly one argument
 * (`__pyx_state`, positional or keyword), then delegates to the
 * implementation function (…_18__setstate_cython__), which always raises
 * TypeError.
 *
 * Fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * i.e. `unlikely()` wrapped only the operand, so the expression collapsed to
 * `__builtin_expect(!!(len), 0) < 0`, which is always false — the negative
 * (error) result of __Pyx_NumKwargs_FASTCALL was silently ignored. The
 * sibling wrappers in this file use the correct form
 * `unlikely(__pyx_kwds_len < 0)`; this wrapper now matches them.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_18__setstate_cython__, "C2cModeInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the non-fastcall build __pyx_args is a tuple; compute its length here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesization — compare the length, not the unlikely() result. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — parse them against the
       * declared parameter name table. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any borrowed/new refs collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of C2cModeInfo_v1.__setstate_cython__: unconditionally
 * raises TypeError because the wrapped raw pointer (self._ptr) cannot be
 * restored from pickled state. The __pyx_state argument is ignored.
 * Always returns NULL (via the error path).
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.C2cModeInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3018
 * 
 * 
 * cdef _get_row_remapper_histogram_values_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlRowRemapperHistogramValues_t pod = nvmlRowRemapperHistogramValues_t()
 *     return _numpy.dtype({
*/

/*
 * Module-internal (cdef) helper: builds and returns a numpy structured dtype
 * that mirrors the C layout of nvmlRowRemapperHistogramValues_t, i.e.
 *   numpy.dtype({'names':   ['max_', 'high', 'partial', 'low', 'none'],
 *                'formats': [uint32] * 5,
 *                'offsets': [offsetof(field) for each field],
 *                'itemsize': sizeof(nvmlRowRemapperHistogramValues_t)})
 * Field offsets are computed at runtime via pointer arithmetic on a local
 * `pod` instance (&pod.field - &pod), so they always match the compiled
 * struct layout. Returns NULL with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_row_remapper_histogram_values_dtype_offsets(void) {
  nvmlRowRemapperHistogramValues_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlRowRemapperHistogramValues_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_row_remapper_histogram_values_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":3019
 * 
 * cdef _get_row_remapper_histogram_values_dtype_offsets():
 *     cdef nvmlRowRemapperHistogramValues_t pod = nvmlRowRemapperHistogramValues_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['max_', 'high', 'partial', 'low', 'none'],
 */
  /* NOTE(review): __pyx_t_1 is copied here without any prior assignment, so
   * pod's field VALUES are indeterminate. This appears benign because the
   * code below only takes field ADDRESSES (for offsets) and never reads the
   * values — but confirm against the Cython code generator's intent. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":3020
 * cdef _get_row_remapper_histogram_values_dtype_offsets():
 *     cdef nvmlRowRemapperHistogramValues_t pod = nvmlRowRemapperHistogramValues_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['max_', 'high', 'partial', 'low', 'none'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up _numpy.dtype, the callable we invoke at the end. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3021
 *     cdef nvmlRowRemapperHistogramValues_t pod = nvmlRowRemapperHistogramValues_t()
 *     return _numpy.dtype({
 *         'names': ['max_', 'high', 'partial', 'low', 'none'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the spec dict; first entry: 'names' (Python-side field names;
   * 'max_' avoids shadowing the builtin max). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_max) != (0)) __PYX_ERR(0, 3021, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_high);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_high);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_high) != (0)) __PYX_ERR(0, 3021, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_partial);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_partial);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_partial) != (0)) __PYX_ERR(0, 3021, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_low);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_low);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_low) != (0)) __PYX_ERR(0, 3021, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_none);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_none);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_none) != (0)) __PYX_ERR(0, 3021, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 3021, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3022
 *     return _numpy.dtype({
 *         'names': ['max_', 'high', 'partial', 'low', 'none'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.max)) - (<intptr_t>&pod),
 */
  /* Second entry: 'formats' — five independent lookups of _numpy.uint32
   * (one per field, as written in the .pyx source). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 3022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 3022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 3022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 3022, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 3021, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3024
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.max)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.high)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.partial)) - (<intptr_t>&pod),
 */
  /* Third entry: 'offsets' — byte offset of each field, computed as
   * (address of field) - (address of struct), i.e. a runtime offsetof(). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.max)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":3025
 *         'offsets': [
 *             (<intptr_t>&(pod.max)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.high)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.partial)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.low)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.high)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 3025, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":3026
 *             (<intptr_t>&(pod.max)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.high)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.partial)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.low)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.none)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.partial)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3026, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":3027
 *             (<intptr_t>&(pod.high)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.partial)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.low)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.none)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.low)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":3028
 *             (<intptr_t>&(pod.partial)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.low)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.none)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlRowRemapperHistogramValues_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.none)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3028, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":3023
 *         'names': ['max_', 'high', 'partial', 'low', 'none'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.max)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.high)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3023, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 3023, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 3023, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 3023, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 3023, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 3023, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 3021, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":3030
 *             (<intptr_t>&(pod.none)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlRowRemapperHistogramValues_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* Fourth entry: 'itemsize' — total struct size, so the dtype stride
   * matches the C struct exactly (including any trailing padding). */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlRowRemapperHistogramValues_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 3021, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke _numpy.dtype(spec) via vectorcall and return the result. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3020, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3018
 * 
 * 
 * cdef _get_row_remapper_histogram_values_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlRowRemapperHistogramValues_t pod = nvmlRowRemapperHistogramValues_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_row_remapper_histogram_values_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3047
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlRowRemapperHistogramValues_t *>calloc(1, sizeof(nvmlRowRemapperHistogramValues_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython tp_init wrapper for RowRemapperHistogramValues.__init__.
 * Verifies that no positional or keyword arguments were supplied, then
 * dispatches to the real implementation.  Returns 0 on success, -1 on
 * error (standard tp_init convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-fast branch also propagates tuple errors. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) takes no extra arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.__init__ (from _nvml.pyx:3047).
 * Allocates a zero-initialized nvmlRowRemapperHistogramValues_t on the heap,
 * marks the object as owning it (_owned = True, _owner = None,
 * _readonly = False), and raises MemoryError if allocation fails. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":3048
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlRowRemapperHistogramValues_t *>calloc(1, sizeof(nvmlRowRemapperHistogramValues_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating RowRemapperHistogramValues")
 */
  /* calloc (not malloc) so the struct's fields start zeroed. */
  __pyx_v_self->_ptr = ((nvmlRowRemapperHistogramValues_t *)calloc(1, (sizeof(nvmlRowRemapperHistogramValues_t))));

  /* "cuda/bindings/_nvml.pyx":3049
 *     def __init__(self):
 *         self._ptr = <nvmlRowRemapperHistogramValues_t *>calloc(1, sizeof(nvmlRowRemapperHistogramValues_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating RowRemapperHistogramValues")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3050
 *         self._ptr = <nvmlRowRemapperHistogramValues_t *>calloc(1, sizeof(nvmlRowRemapperHistogramValues_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating RowRemapperHistogramValues")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* MemoryError is resolved through module globals (Cython semantics for a
       bare name), then called with the message string via FastCall. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3050, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_RowRemapperHist};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3050, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3050, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3049
 *     def __init__(self):
 *         self._ptr = <nvmlRowRemapperHistogramValues_t *>calloc(1, sizeof(nvmlRowRemapperHistogramValues_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating RowRemapperHistogramValues")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":3051
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating RowRemapperHistogramValues")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref new value, decref old. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":3052
 *             raise MemoryError("Error allocating RowRemapperHistogramValues")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":3053
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":3047
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlRowRemapperHistogramValues_t *>calloc(1, sizeof(nvmlRowRemapperHistogramValues_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3055
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlRowRemapperHistogramValues_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-path wrapper for RowRemapperHistogramValues.__dealloc__.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
 * this only compiles because __Pyx_KwValues_VARARGS discards its arguments
 * in the active configuration — confirm against the macro definition. */
static void __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__ (from _nvml.pyx:3055): frees the wrapped
 * struct only when this object owns it (_owned) and the pointer is non-NULL.
 * _ptr is cleared before free() so a re-entrant look at the object never
 * sees a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  nvmlRowRemapperHistogramValues_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlRowRemapperHistogramValues_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":3057
 *     def __dealloc__(self):
 *         cdef nvmlRowRemapperHistogramValues_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit "and": only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3058
 *         cdef nvmlRowRemapperHistogramValues_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":3059
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":3060
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3057
 *     def __dealloc__(self):
 *         cdef nvmlRowRemapperHistogramValues_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":3055
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlRowRemapperHistogramValues_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":3062
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.RowRemapperHistogramValues object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper: forwards to the __repr__ implementation.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here; compiles only
 * because __Pyx_KwValues_VARARGS discards its arguments — see macro. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__ (from _nvml.pyx:3062): builds the f-string
 * f"<{__name__}.RowRemapperHistogramValues object at {hex(id(self))}>"
 * by formatting the two interpolated parts and joining them with the
 * constant segments via __Pyx_PyUnicode_Join. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":3063
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.RowRemapperHistogramValues object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: look up the module's __name__ and str()-format it. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id() then hex(), coerced to a unicode string. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join constant segments + formatted parts; the integer arguments are the
     pre-computed length and max-char of the constant pieces (presumably
     "<", ".RowRemapperHistogramValues object at " and ">"). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_RowRemapperHistogramValues_obje;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 38 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3062
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.RowRemapperHistogramValues object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3065
 *         return f"<{__name__}.RowRemapperHistogramValues object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for `ptr`: forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property (from _nvml.pyx:3068): return the wrapped struct's address
 * as a Python int.
 * NOTE(review): conversion goes through PyLong_FromSsize_t, which assumes
 * intptr_t fits in Py_ssize_t — true on mainstream platforms; confirm for
 * any exotic target. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3068
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3068, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3065
 *         return f"<{__name__}.RowRemapperHistogramValues object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3070
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef helper (from _nvml.pyx:3070): expose the wrapped struct pointer as an
 * integer address.  Pure C, no Python objects or error paths involved, so the
 * generated goto/label scaffolding collapses to a single return. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":3073
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for __int__: forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __int__ (from _nvml.pyx:3073): same contract as the `ptr` property —
 * return the wrapped struct's address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":3074
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3073
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3076
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef RowRemapperHistogramValues other_
 *         if not isinstance(other, RowRemapperHistogramValues):
*/

/* Python wrapper */
/* __eq__ wrapper: forwards (self, other) to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __eq__ (from _nvml.pyx:3076): False for non-RowRemapperHistogramValues
 * operands, otherwise byte-wise comparison of the two wrapped structs.
 * NOTE(review): if either _ptr is NULL (allocation failed in __init__),
 * memcmp on it is undefined behavior — confirm that callers can never see
 * a half-constructed instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":3078
 *     def __eq__(self, other):
 *         cdef RowRemapperHistogramValues other_
 *         if not isinstance(other, RowRemapperHistogramValues):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3079
 *         cdef RowRemapperHistogramValues other_
 *         if not isinstance(other, RowRemapperHistogramValues):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRowRemapperHistogramValues_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3078
 *     def __eq__(self, other):
 *         cdef RowRemapperHistogramValues other_
 *         if not isinstance(other, RowRemapperHistogramValues):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":3080
 *         if not isinstance(other, RowRemapperHistogramValues):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRowRemapperHistogramValues_t)) == 0)
 * 
 */
  /* Cast to the extension type (None is also accepted by the type test). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues))))) __PYX_ERR(0, 3080, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":3081
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRowRemapperHistogramValues_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlRowRemapperHistogramValues_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3081, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3076
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef RowRemapperHistogramValues other_
 *         if not isinstance(other, RowRemapperHistogramValues):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3083
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRowRemapperHistogramValues_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for __setitem__: forwards (self, key, val). */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setitem__ (from _nvml.pyx:3083): `obj[0] = ndarray` replaces the wrapped
 * struct with a copy of the array's bytes; any other key falls back to
 * setattr(self, key, val).
 *
 * FIX: the original unconditionally overwrote self->_ptr with a fresh
 * malloc(), leaking the buffer this object already owned (allocated by
 * __init__'s calloc or a previous __setitem__).  We now free the old buffer
 * first, using the same ownership test as __dealloc__.
 * NOTE(review): the root fix belongs in _nvml.pyx — regenerating this file
 * from an unfixed .pyx will reintroduce the leak. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":3084
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit "and": key == 0 first, then isinstance(val, numpy.ndarray). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3084, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3084, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3084, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 3084, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3085
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 */
    /* FIX (leak): release the buffer this object already owns before
       overwriting _ptr; mirrors the ownership test in __dealloc__. */
    if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlRowRemapperHistogramValues_t *)malloc((sizeof(nvmlRowRemapperHistogramValues_t))));

    /* "cuda/bindings/_nvml.pyx":3086
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRowRemapperHistogramValues_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3087
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRowRemapperHistogramValues_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3087, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_RowRemapperHist};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3087, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 3087, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3086
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRowRemapperHistogramValues_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3088
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRowRemapperHistogramValues_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(struct) bytes from the ndarray's data pointer
       (val.ctypes.data, an int).  Assumes the array holds at least that many
       contiguous bytes — enforced by the caller's dtype, not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3088, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3088, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3088, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlRowRemapperHistogramValues_t))));

    /* "cuda/bindings/_nvml.pyx":3089
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRowRemapperHistogramValues_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3090
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRowRemapperHistogramValues_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3091
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's writeability to the wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3091, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3091, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 3091, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":3084
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":3093
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 3093, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":3083
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRowRemapperHistogramValues_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3095
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter entry point for RowRemapperHistogramValues.max_.
 * Casts the generic PyObject* self to the extension-type struct and delegates
 * to the __pyx_pf_ implementation; returns a new reference or NULL on error.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function, so
 * __Pyx_KwValues_VARARGS presumably expands without using its arguments — confirm
 * against the macro definition. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.max_.__get__: reads the C
 * struct field self->_ptr[0].max and boxes it as a Python int via
 * __Pyx_PyLong_From_unsigned_int. Returns a new reference, or NULL with a
 * traceback entry appended if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3098
 *     def max_(self):
 *         """int: """
 *         return self._ptr[0].max             # <<<<<<<<<<<<<<
 * 
 *     @max_.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field; on failure jump to the error path (pyx line 3098). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).max); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3098, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3095
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.max_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3100
 *         return self._ptr[0].max
 * 
 *     @max_.setter             # <<<<<<<<<<<<<<
 *     def max_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter entry point for RowRemapperHistogramValues.max_.
 * Casts self to the extension-type struct, forwards `val` to the __pyx_pf_
 * implementation, and propagates its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.max_.__set__: if the instance
 * is flagged read-only (self->_readonly), raises ValueError; otherwise converts
 * `val` to unsigned int with __Pyx_PyLong_As_unsigned_int and stores it into
 * self->_ptr[0].max. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3102
 *     @max_.setter
 *     def max_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].max = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3103
 *     def max_(self, val):
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].max = val
 * 
*/
    /* Build and raise ValueError(message) via the vectorcall fast path;
     * __pyx_t_3 == 1 offsets past the NULL self slot in __pyx_callargs. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RowRemapperHistogramValues};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3103, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3103, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3102
 *     @max_.setter
 *     def max_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].max = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3104
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].max = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* (unsigned int)-1 is a valid value, so PyErr_Occurred() disambiguates it
   * from the conversion-failure sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3104, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).max = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3100
 *         return self._ptr[0].max
 * 
 *     @max_.setter             # <<<<<<<<<<<<<<
 *     def max_(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.max_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3106
 *         self._ptr[0].max = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def high(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter entry point for RowRemapperHistogramValues.high:
 * casts self to the extension-type struct and delegates to the __pyx_pf_
 * implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.high.__get__: reads
 * self->_ptr[0].high and boxes it as a Python int via
 * __Pyx_PyLong_From_unsigned_int. Returns a new reference, or NULL with a
 * traceback entry appended if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3109
 *     def high(self):
 *         """int: """
 *         return self._ptr[0].high             # <<<<<<<<<<<<<<
 * 
 *     @high.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field; on failure jump to the error path (pyx line 3109). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).high); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3106
 *         self._ptr[0].max = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def high(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.high.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3111
 *         return self._ptr[0].high
 * 
 *     @high.setter             # <<<<<<<<<<<<<<
 *     def high(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter entry point for RowRemapperHistogramValues.high:
 * casts self to the extension-type struct, forwards `val` to the __pyx_pf_
 * implementation, and propagates its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.high.__set__: raises ValueError
 * when self->_readonly is set; otherwise converts `val` to unsigned int and
 * stores it into self->_ptr[0].high. Returns 0 on success, -1 with an
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3113
 *     @high.setter
 *     def high(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].high = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3114
 *     def high(self, val):
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].high = val
 * 
*/
    /* Build and raise ValueError(message) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RowRemapperHistogramValues};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3114, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3114, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3113
 *     @high.setter
 *     def high(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].high = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3115
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].high = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* PyErr_Occurred() disambiguates a genuine (unsigned int)-1 value from the
   * conversion-failure sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3115, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).high = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3111
 *         return self._ptr[0].high
 * 
 *     @high.setter             # <<<<<<<<<<<<<<
 *     def high(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.high.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3117
 *         self._ptr[0].high = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def partial(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter entry point for RowRemapperHistogramValues.partial:
 * casts self to the extension-type struct and delegates to the __pyx_pf_
 * implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.partial.__get__: reads
 * self->_ptr[0].partial and boxes it as a Python int via
 * __Pyx_PyLong_From_unsigned_int. Returns a new reference, or NULL with a
 * traceback entry appended if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3120
 *     def partial(self):
 *         """int: """
 *         return self._ptr[0].partial             # <<<<<<<<<<<<<<
 * 
 *     @partial.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field; on failure jump to the error path (pyx line 3120). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).partial); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3120, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3117
 *         self._ptr[0].high = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def partial(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.partial.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3122
 *         return self._ptr[0].partial
 * 
 *     @partial.setter             # <<<<<<<<<<<<<<
 *     def partial(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter entry point for RowRemapperHistogramValues.partial:
 * casts self to the extension-type struct, forwards `val` to the __pyx_pf_
 * implementation, and propagates its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.partial.__set__: raises
 * ValueError when self->_readonly is set; otherwise converts `val` to
 * unsigned int and stores it into self->_ptr[0].partial. Returns 0 on
 * success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3124
 *     @partial.setter
 *     def partial(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].partial = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3125
 *     def partial(self, val):
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].partial = val
 * 
*/
    /* Build and raise ValueError(message) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RowRemapperHistogramValues};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3125, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3125, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3124
 *     @partial.setter
 *     def partial(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].partial = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3126
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].partial = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* PyErr_Occurred() disambiguates a genuine (unsigned int)-1 value from the
   * conversion-failure sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3126, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).partial = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3122
 *         return self._ptr[0].partial
 * 
 *     @partial.setter             # <<<<<<<<<<<<<<
 *     def partial(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.partial.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3128
 *         self._ptr[0].partial = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def low(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter entry point for RowRemapperHistogramValues.low:
 * casts self to the extension-type struct and delegates to the __pyx_pf_
 * implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.low.__get__: reads
 * self->_ptr[0].low and boxes it as a Python int via
 * __Pyx_PyLong_From_unsigned_int. Returns a new reference, or NULL with a
 * traceback entry appended if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3131
 *     def low(self):
 *         """int: """
 *         return self._ptr[0].low             # <<<<<<<<<<<<<<
 * 
 *     @low.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field; on failure jump to the error path (pyx line 3131). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).low); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3128
 *         self._ptr[0].partial = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def low(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.low.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3133
 *         return self._ptr[0].low
 * 
 *     @low.setter             # <<<<<<<<<<<<<<
 *     def low(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter entry point for RowRemapperHistogramValues.low:
 * casts self to the extension-type struct, forwards `val` to the __pyx_pf_
 * implementation, and propagates its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.low.__set__: raises ValueError
 * when self->_readonly is set; otherwise converts `val` to unsigned int and
 * stores it into self->_ptr[0].low. Returns 0 on success, -1 with an
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3135
 *     @low.setter
 *     def low(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].low = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3136
 *     def low(self, val):
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].low = val
 * 
*/
    /* Build and raise ValueError(message) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RowRemapperHistogramValues};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3136, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3136, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3135
 *     @low.setter
 *     def low(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].low = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3137
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].low = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* PyErr_Occurred() disambiguates a genuine (unsigned int)-1 value from the
   * conversion-failure sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3137, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).low = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3133
 *         return self._ptr[0].low
 * 
 *     @low.setter             # <<<<<<<<<<<<<<
 *     def low(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.low.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3139
 *         self._ptr[0].low = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def none(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter entry point for RowRemapperHistogramValues.none:
 * casts self to the extension-type struct and delegates to the __pyx_pf_
 * implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.none.__get__: reads
 * self->_ptr[0].none and boxes it as a Python int via
 * __Pyx_PyLong_From_unsigned_int. Returns a new reference, or NULL with a
 * traceback entry appended if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3142
 *     def none(self):
 *         """int: """
 *         return self._ptr[0].none             # <<<<<<<<<<<<<<
 * 
 *     @none.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field; on failure jump to the error path (pyx line 3142). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).none); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3139
 *         self._ptr[0].low = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def none(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.none.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3144
 *         return self._ptr[0].none
 * 
 *     @none.setter             # <<<<<<<<<<<<<<
 *     def none(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter entry point for RowRemapperHistogramValues.none:
 * casts self to the extension-type struct, forwards `val` to the __pyx_pf_
 * implementation, and propagates its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.none.__set__: raises ValueError
 * when self->_readonly is set; otherwise converts `val` to unsigned int and
 * stores it into self->_ptr[0].none. Returns 0 on success, -1 with an
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3146
 *     @none.setter
 *     def none(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].none = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3147
 *     def none(self, val):
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].none = val
 * 
*/
    /* Build and raise ValueError(message) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RowRemapperHistogramValues};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3147, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3147, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3146
 *     @none.setter
 *     def none(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].none = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3148
 *         if self._readonly:
 *             raise ValueError("This RowRemapperHistogramValues instance is read-only")
 *         self._ptr[0].none = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* PyErr_Occurred() disambiguates a genuine (unsigned int)-1 value from the
   * conversion-failure sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3148, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).none = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3144
 *         return self._ptr[0].none
 * 
 *     @none.setter             # <<<<<<<<<<<<<<
 *     def none(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.none.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3150
 *         self._ptr[0].none = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an RowRemapperHistogramValues instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level entry point for the @staticmethod
 * RowRemapperHistogramValues.from_data(data).
 *
 * Generated by Cython. Unpacks exactly one argument ("data", accepted
 * positionally or by keyword) under either the METH_FASTCALL or the
 * classic tuple/dict calling convention, then forwards it to the
 * implementation function __pyx_pf_..._12from_data. Returns a new
 * reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_12from_data, "RowRemapperHistogramValues.from_data(data)\n\nCreate an RowRemapperHistogramValues instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `row_remapper_histogram_values_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)`, which is always
     * false because unlikely() maps its argument to 0/1 via
     * __builtin_expect(!!(x), 0). The comparison must be inside the macro so
     * that a negative (error) result from __Pyx_NumKwargs_FASTCALL is
     * actually detected (matches the correct form used elsewhere in this
     * file, e.g. the __reduce_cython__ wrapper). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3150, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 3150, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 3150, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3150, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 3150, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references collected in values[] before unwinding */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.from_data(data).
 *
 * Generated by Cython. Looks up the module-level global
 * `row_remapper_histogram_values_dtype` and delegates to the shared
 * __from_data() helper, which wraps the given NumPy array in a new
 * RowRemapperHistogramValues instance (see the pyx source echoed below).
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":3157
 *             data (_numpy.ndarray): a single-element array of dtype `row_remapper_histogram_values_dtype` holding the data.
 *         """
 *         return __from_data(data, "row_remapper_histogram_values_dtype", row_remapper_histogram_values_dtype, RowRemapperHistogramValues)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* fetch the dtype object from the module globals by (interned) name */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_row_remapper_histogram_values_dt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* delegate: __from_data(data, name, dtype, wrapping extension type) */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_row_remapper_histogram_values_dt, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3150
 *         self._ptr[0].none = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an RowRemapperHistogramValues instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3159
 *         return __from_data(data, "row_remapper_histogram_values_dtype", row_remapper_histogram_values_dtype, RowRemapperHistogramValues)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RowRemapperHistogramValues instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level entry point for the @staticmethod
 * RowRemapperHistogramValues.from_ptr(ptr, readonly=False, owner=None).
 *
 * Generated by Cython. Unpacks up to three arguments (positional or
 * keyword), converts `ptr` to intptr_t and `readonly` to a C int via
 * truth-testing, defaults `owner` to None, then forwards to the
 * implementation function __pyx_pf_..._14from_ptr. Returns a new
 * reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_14from_ptr, "RowRemapperHistogramValues.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an RowRemapperHistogramValues instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)`, which is always
     * false because unlikely() maps its argument to 0/1 via
     * __builtin_expect(!!(x), 0). The comparison must be inside the macro so
     * that a negative (error) result from __Pyx_NumKwargs_FASTCALL is
     * actually detected (matches the correct form used elsewhere in this
     * file, e.g. the __reduce_cython__ wrapper). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3159, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 3159, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":3160
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an RowRemapperHistogramValues instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 3159, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3159, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3160, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3160, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 3159, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references collected in values[] before unwinding */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":3159
 *         return __from_data(data, "row_remapper_histogram_values_dtype", row_remapper_histogram_values_dtype, RowRemapperHistogramValues)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RowRemapperHistogramValues instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.from_ptr(ptr, readonly, owner).
 *
 * Generated by Cython. Validates that `ptr` is non-null, allocates a new
 * RowRemapperHistogramValues instance, then either:
 *   - owner is None: malloc()s a private nvmlRowRemapperHistogramValues_t,
 *     copies the pointed-to struct into it, and marks the instance as owning
 *     the allocation (_owned = True; presumably freed in the type's dealloc —
 *     not visible in this chunk); or
 *   - owner given: aliases `ptr` directly and holds a reference to `owner`
 *     to keep the underlying memory alive (_owned = False).
 * Finally stores the `readonly` flag and returns the new instance.
 * Returns NULL with a Python exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3168
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3169
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)
 *         if owner is None:
 */
    /* build and raise ValueError("ptr must not be null (0)") via vectorcall */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3169, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3169, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3168
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)
 */
  }

  /* "cuda/bindings/_nvml.pyx":3170
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 */
  /* direct tp_new call: allocates the instance without running __init__ */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3170, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3171
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3172
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)
 *         if owner is None:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 */
    /* no owner to keep `ptr` alive: take a private copy instead of aliasing */
    __pyx_v_obj->_ptr = ((nvmlRowRemapperHistogramValues_t *)malloc((sizeof(nvmlRowRemapperHistogramValues_t))));

    /* "cuda/bindings/_nvml.pyx":3173
 *         if owner is None:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRowRemapperHistogramValues_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3174
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRowRemapperHistogramValues_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name (module/builtins), then raised with
       * the message; the PyMethod unpacking below is Cython's generic
       * bound-method fast path for the vectorcall. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3174, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_RowRemapperHist};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3174, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 3174, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3173
 *         if owner is None:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRowRemapperHistogramValues_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3175
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRowRemapperHistogramValues_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* copy the caller's struct into the freshly malloc'ed private buffer */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlRowRemapperHistogramValues_t))));

    /* "cuda/bindings/_nvml.pyx":3176
 *                 raise MemoryError("Error allocating RowRemapperHistogramValues")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRowRemapperHistogramValues_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3177
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRowRemapperHistogramValues_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3171
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RowRemapperHistogramValues obj = RowRemapperHistogramValues.__new__(RowRemapperHistogramValues)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>malloc(sizeof(nvmlRowRemapperHistogramValues_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":3179
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* owner provided: alias the caller's memory and pin `owner` alive */
    __pyx_v_obj->_ptr = ((nvmlRowRemapperHistogramValues_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3180
 *         else:
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":3181
 *             obj._ptr = <nvmlRowRemapperHistogramValues_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":3182
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":3183
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3159
 *         return __from_data(data, "row_remapper_histogram_values_dtype", row_remapper_histogram_values_dtype, RowRemapperHistogramValues)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RowRemapperHistogramValues instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python-level entry point for RowRemapperHistogramValues.__reduce_cython__().
 *
 * Generated by Cython. Rejects any positional or keyword arguments, then
 * forwards `self` to the implementation below, which always raises
 * TypeError (the type is not picklable). Note this wrapper uses the
 * correctly parenthesized kwargs-length check `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_16__reduce_cython__, "RowRemapperHistogramValues.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.__reduce_cython__():
 * unconditionally raises TypeError because the wrapped C pointer (_ptr)
 * cannot be pickled. Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python-level entry point for
 * RowRemapperHistogramValues.__setstate_cython__(__pyx_state).
 *
 * Generated by Cython. Unpacks the single `__pyx_state` argument and
 * forwards it with `self` to the implementation below, which always
 * raises TypeError (the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)`, which is always
     * false because unlikely() maps its argument to 0/1 via
     * __builtin_expect(!!(x), 0). The comparison must be inside the macro so
     * that a negative (error) result from __Pyx_NumKwargs_FASTCALL is
     * actually detected (matches the correct form used elsewhere in this
     * file, e.g. the __reduce_cython__ wrapper). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references collected in values[] before unwinding */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RowRemapperHistogramValues.__setstate_cython__, generated
 * from a Cython "(tree fragment)".  It unconditionally raises TypeError
 * ("self._ptr cannot be converted to a Python object for pickling") and
 * therefore always returns NULL with an exception set; both parameters are
 * deliberately unused (CYTHON_UNUSED). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message, then jump to the error label;
   * there is no success path out of this function. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.RowRemapperHistogramValues.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3186
 * 
 * 
 * cdef _get_bridge_chip_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlBridgeChipInfo_t pod = nvmlBridgeChipInfo_t()
 *     return _numpy.dtype({
*/

/* Builds and returns the structured numpy dtype describing nvmlBridgeChipInfo_t:
 * names ['type', 'fw_version'], formats [int32, uint32], C field offsets computed
 * from the addresses of a stack-allocated `pod`, and itemsize sizeof(struct).
 * Returns a new reference to the numpy.dtype object, or NULL with an exception
 * set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_bridge_chip_info_dtype_offsets(void) {
  nvmlBridgeChipInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlBridgeChipInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_bridge_chip_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":3187
 * 
 * cdef _get_bridge_chip_info_dtype_offsets():
 *     cdef nvmlBridgeChipInfo_t pod = nvmlBridgeChipInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['type', 'fw_version'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy, so pod holds
   * indeterminate bytes.  That looks harmless here because only the
   * *addresses* of pod's members are taken below (offset arithmetic); the
   * member values are never read.  TODO confirm this matches the intended
   * Cython codegen for C++ default construction of a POD struct. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":3188
 * cdef _get_bridge_chip_info_dtype_offsets():
 *     cdef nvmlBridgeChipInfo_t pod = nvmlBridgeChipInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['type', 'fw_version'],
 *         'formats': [_numpy.int32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3189
 *     cdef nvmlBridgeChipInfo_t pod = nvmlBridgeChipInfo_t()
 *     return _numpy.dtype({
 *         'names': ['type', 'fw_version'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the 4-entry spec dict: 'names', 'formats', 'offsets', 'itemsize'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_type) != (0)) __PYX_ERR(0, 3189, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_fw_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_fw_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_fw_version) != (0)) __PYX_ERR(0, 3189, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 3189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3190
 *     return _numpy.dtype({
 *         'names': ['type', 'fw_version'],
 *         'formats': [_numpy.int32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 3190, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3190, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 3189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3192
 *         'formats': [_numpy.int32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.fwVersion)) - (<intptr_t>&pod),
 *         ],
 */
  /* Field offsets are derived from address differences against &pod
   * (equivalent to offsetof), so they track the real C layout. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.type)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":3193
 *         'offsets': [
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.fwVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlBridgeChipInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.fwVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":3191
 *         'names': ['type', 'fw_version'],
 *         'formats': [_numpy.int32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.fwVersion)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 3191, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3191, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 3189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":3195
 *             (<intptr_t>&(pod.fwVersion)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlBridgeChipInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlBridgeChipInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3195, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 3189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); the method-unpacking branch shifts the argument
   * window by one slot when the callable is a bound method. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3188, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3186
 * 
 * 
 * cdef _get_bridge_chip_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlBridgeChipInfo_t pod = nvmlBridgeChipInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_bridge_chip_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3217
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=bridge_chip_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper for BridgeChipInfo.__init__(self, size=1).
 *
 * Unpacks positional/keyword arguments into `values`, supplying the default
 * `1` for `size` when omitted, then forwards to the implementation
 * __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo___init__.
 * Returns 0 on success, -1 with a Python exception set on failure; all
 * argument references collected in `values` are released on every exit path. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`.  The unlikely() macro
     * expands to __builtin_expect(!!(x), 0), which yields only 0 or 1, so
     * comparing its result with `< 0` can never be true and a negative
     * (error) keyword count was silently ignored.  The comparison belongs
     * inside the macro, matching the other checks in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3217, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: stash any positional arg, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3217, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 3217, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: at most one argument; default size = 1. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3217, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 3217, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipInfo.__init__(self, size=1).
 * Allocates a numpy array of `size` elements using bridge_chip_info_dtype,
 * stores a recarray view of it in self._data, and (when assertions are
 * enabled) verifies that the dtype itemsize matches sizeof(nvmlBridgeChipInfo_t).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":3218
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=bridge_chip_info_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlBridgeChipInfo_t), \
 */
  /* Vectorcall numpy.empty(size, dtype=bridge_chip_info_dtype); the keyword
   * argument is passed through a vectorcall kwnames builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_bridge_chip_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 3218, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":3219
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=bridge_chip_info_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlBridgeChipInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlBridgeChipInfo_t) }"
 */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3219, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3219, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3219, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Standard Cython attribute-store sequence: hand the new reference to
   * self->_data after releasing the previous value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":3220
 *         arr = _numpy.empty(size, dtype=bridge_chip_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlBridgeChipInfo_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlBridgeChipInfo_t) }"
 * 
 */
  /* Entire check is compiled out under CYTHON_WITHOUT_ASSERTIONS and skipped
   * at runtime when Python runs with -O (assertions disabled). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3220, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlBridgeChipInfo_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3220, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3220, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 3220, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":3221
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlBridgeChipInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlBridgeChipInfo_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string message for AssertionError from four pieces
       * (literal, itemsize, literal, struct size). */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3221, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3221, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlBridgeChipInfo_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3221, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3221, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 3220, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 3220, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":3217
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=bridge_chip_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3223
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlBridgeChipInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.BridgeChipInfo_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper for BridgeChipInfo.__repr__: no arguments to unpack, so it
 * forwards straight to the implementation function.
 * NOTE(review): __pyx_args / __pyx_nargs are not parameters of this unary
 * wrapper; they appear only inside the __Pyx_KwValues_VARARGS(...) macro call,
 * which presumably expands to a constant that discards its arguments (the file
 * would not compile otherwise) — confirm against the Cython utility-code
 * definition if this macro is ever reconfigured. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipInfo.__repr__.
 * When self._data.size > 1 it formats
 *   "<{__name__}.BridgeChipInfo_Array_{size} object at {hex(id(self))}>",
 * otherwise "<{__name__}.BridgeChipInfo object at {hex(id(self))}>".
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":3224
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.BridgeChipInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3224, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 3224, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":3225
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.BridgeChipInfo_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.BridgeChipInfo object at {hex(id(self))}>"
 */
    /* Array form: join 7 pieces (3 formatted values interleaved with literal
     * fragments); the length/max-char arguments are precomputed from the
     * literal widths plus the formatted parts. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_BridgeChipInfo_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 22 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3225, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3224
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.BridgeChipInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":3227
 *             return f"<{__name__}.BridgeChipInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.BridgeChipInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar form: same structure with 5 pieces (no size segment). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3227, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3227, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3227, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3227, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3227, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_BridgeChipInfo_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 26 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3227, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":3223
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlBridgeChipInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.BridgeChipInfo_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3229
 *             return f"<{__name__}.BridgeChipInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper for the BridgeChipInfo.ptr property getter: forwards
 * directly to the implementation.  As in the other unary wrappers,
 * __pyx_args / __pyx_nargs exist only inside the __Pyx_KwValues_VARARGS
 * macro call, which presumably discards them at expansion time. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the BridgeChipInfo.ptr property: returns
 * self._data.ctypes.data (the numpy buffer address) as a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3232
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3232, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3232, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3229
 *             return f"<{__name__}.BridgeChipInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3234
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level `cdef intptr_t _get_ptr(self)` (_nvml.pyx:3234-3235).
 * Fetches `self._data.ctypes.data` (a Python int holding the buffer address)
 * and converts it to intptr_t via PyLong_AsSsize_t.
 * On error: returns 0 with a Python exception set (Cython's convention for a
 * cdef function returning an integer type).
 * NOTE(review): generated code — keep refcounting order byte-identical. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14BridgeChipInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3235
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* -1 is the ambiguous error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3234
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3237
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5__int__(PyObject *__pyx_v_self); /*proto*/
/* CPython slot wrapper for BridgeChipInfo.__int__: casts `self` to the
 * extension-type struct and delegates to the _4__int__ implementation.
 * NOTE(review): `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)` references
 * names that are not parameters here; the macro is defined in the Cython
 * preamble (not visible in this chunk) and is expected to discard its
 * arguments for this calling convention — confirm against the preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipInfo.__int__ (_nvml.pyx:3237-3241).
 * Raises TypeError when `self._data.size > 1` (int() only makes sense for a
 * single element); otherwise returns `self._data.ctypes.data`, i.e. the same
 * pointer address the `.ptr` property returns (per the authored .pyx source).
 * NOTE(review): generated code — exact refcount/label ordering is load-bearing. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":3238
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate `self._data.size > 1` via rich comparison (size may be a numpy int). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3238, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 3238, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":3239
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Build and raise TypeError(msg) via the vectorcall fast path. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3239, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3239, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3238
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":3241
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single element: return the buffer address, same as the `.ptr` property. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3237
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3243
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_7__len__(PyObject *__pyx_v_self); /*proto*/
/* CPython sq_length slot wrapper for BridgeChipInfo.__len__: casts `self`
 * and delegates to the _6__len__ implementation (returns -1 on error).
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipInfo.__len__ (_nvml.pyx:3243-3244).
 * Returns `self._data.size` coerced to Py_ssize_t; -1 with an exception set
 * on failure (the CPython convention for length slots). */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":3244
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3244, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* __index__-based coercion; -1 is ambiguous, so PyErr_Occurred disambiguates. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 3244, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3243
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3246
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* CPython wrapper for BridgeChipInfo.__eq__: casts `self` and forwards
 * `other` unchanged to the _8__eq__ implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipInfo.__eq__ (_nvml.pyx:3246-3250).
 * Short-circuit chain: returns False if `other` is not a BridgeChipInfo, or
 * the two `_data` members differ in `.size` or `.dtype`; otherwise returns
 * bool((self._data == other._data).all()) — elementwise comparison followed
 * by `.all()` (ndarray-style API on `_data`).  Note the authored code returns
 * False rather than NotImplemented for foreign types.
 * NOTE(review): generated code — the L4_bool_binop_done short-circuit labels
 * and refcount ordering are machine-emitted; keep byte-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":3247
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Cache self._data in a local (own reference held for the whole function). */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":3248
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Condition 1: not isinstance(other, BridgeChipInfo). */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 2: self_data.size != other._data.size. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 3: self_data.dtype != other._data.dtype. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 3248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3249
 *         cdef object self_data = self._data
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3248
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":3250
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Elementwise compare, then call .all() on the result via vectorcall. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3250, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3250, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* bool(x): truth-test then re-box; !! normalizes to 0/1. */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3250, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3246
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, BridgeChipInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3252
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def type(self):
 *         """Union[~_numpy.int32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Property-getter wrapper for BridgeChipInfo.type: casts `self` and
 * delegates to the 4type___get__ implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for BridgeChipInfo.type (_nvml.pyx:3252-3257).
 * Scalar convenience: if `self._data.size == 1`, returns
 * `int(self._data.type[0])` (a plain Python int); otherwise returns the
 * `self._data.type` field object as-is (array case).
 * NOTE(review): generated code — keep refcounting order byte-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3255
 *     def type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.type[0])
 *         return self._data.type
 */
  /* Optimized `size == 1` comparison against the cached int constant. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3255, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3256
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.type[0])             # <<<<<<<<<<<<<<
 *         return self._data.type
 * 
 */
    /* self._data.type[0], then coerce to a builtin int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3256, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3256, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3256, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3255
 *     def type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.type[0])
 *         return self._data.type
 */
  }

  /* "cuda/bindings/_nvml.pyx":3257
 *         if self._data.size == 1:
 *             return int(self._data.type[0])
 *         return self._data.type             # <<<<<<<<<<<<<<
 * 
 *     @type.setter
 */
  /* Non-scalar case: hand back the field object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3252
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def type(self):
 *         """Union[~_numpy.int32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3259
 *         return self._data.type
 * 
 *     @type.setter             # <<<<<<<<<<<<<<
 *     def type(self, val):
 *         self._data.type = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Property-setter wrapper for BridgeChipInfo.type: casts `self` and forwards
 * `val` to the 4type_2__set__ implementation (0 on success, -1 on error).
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for BridgeChipInfo.type (_nvml.pyx:3259-3261): performs
 * `self._data.type = val` via a single SetAttr call.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":3261
 *     @type.setter
 *     def type(self, val):
 *         self._data.type = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_type, __pyx_v_val) < (0)) __PYX_ERR(0, 3261, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":3259
 *         return self._data.type
 * 
 *     @type.setter             # <<<<<<<<<<<<<<
 *     def type(self, val):
 *         self._data.type = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3263
 *         self._data.type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def fw_version(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Property-getter wrapper for BridgeChipInfo.fw_version: casts `self` and
 * delegates to the 10fw_version___get__ implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for BridgeChipInfo.fw_version (_nvml.pyx:3263-3268).
 * Mirrors the `type` getter: if `self._data.size == 1`, returns
 * `int(self._data.fw_version[0])`; otherwise returns the
 * `self._data.fw_version` field object as-is.
 * NOTE(review): generated code — keep refcounting order byte-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3266
 *     def fw_version(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.fw_version[0])
 *         return self._data.fw_version
 */
  /* Optimized `size == 1` comparison against the cached int constant. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3267
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.fw_version[0])             # <<<<<<<<<<<<<<
 *         return self._data.fw_version
 * 
 */
    /* self._data.fw_version[0], then coerce to a builtin int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_fw_version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3267, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3267, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3267, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3266
 *     def fw_version(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.fw_version[0])
 *         return self._data.fw_version
 */
  }

  /* "cuda/bindings/_nvml.pyx":3268
 *         if self._data.size == 1:
 *             return int(self._data.fw_version[0])
 *         return self._data.fw_version             # <<<<<<<<<<<<<<
 * 
 *     @fw_version.setter
 */
  /* Non-scalar case: hand back the field object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_fw_version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3263
 *         self._data.type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def fw_version(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.fw_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3270
 *         return self._data.fw_version
 * 
 *     @fw_version.setter             # <<<<<<<<<<<<<<
 *     def fw_version(self, val):
 *         self._data.fw_version = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Property-setter wrapper for BridgeChipInfo.fw_version: casts `self` and
 * forwards `val` to the 10fw_version_2__set__ implementation (0/-1 result).
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for BridgeChipInfo.fw_version (_nvml.pyx:3270-3272): performs
 * `self._data.fw_version = val` via a single SetAttr call.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":3272
 *     @fw_version.setter
 *     def fw_version(self, val):
 *         self._data.fw_version = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_fw_version, __pyx_v_val) < (0)) __PYX_ERR(0, 3272, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":3270
 *         return self._data.fw_version
 * 
 *     @fw_version.setter             # <<<<<<<<<<<<<<
 *     def fw_version(self, val):
 *         self._data.fw_version = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.fw_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3274
 *         self._data.fw_version = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
/* CPython mp_subscript wrapper for BridgeChipInfo.__getitem__: casts `self`
 * and forwards `key` to the _10__getitem__ implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is a preamble macro expected to
 * discard its (here undeclared) arguments — confirm against the preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `BridgeChipInfo.__getitem__` (_nvml.pyx:3274-3288).
 * Two paths, mirroring the Cython source:
 *   1. `key` is a Python int: bounds-check against `self._data.size`
 *      (raising IndexError), normalize negative indices, then return
 *      `BridgeChipInfo.from_data(self._data[key_:key_+1])`.
 *   2. otherwise: `out = self._data[key]`; if `out` is a numpy.recarray of
 *      dtype `bridge_chip_info_dtype`, wrap it via `from_data`, else return
 *      `out` as-is.
 * Returns a new reference, or NULL with an exception set.
 * Generated by Cython -- do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":3277
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* `isinstance(key, int)` compiled down to an exact/subtype PyLong check */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3278
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 3278, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":3279
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    /* fetch `self._data.size` as a Python object and coerce to ssize_t */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3279, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 3279, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":3280
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* short-circuit `or`: valid indices are -size..size-1 inclusive */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3281
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      /* build and raise IndexError("index is out of bounds") via vectorcall */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3281, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 3281, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3280
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":3282
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])
 */
    /* normalize negative index to 0..size-1 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":3283
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":3282
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":3284
 *             if key_ < 0:
 *                 key_ += size
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == bridge_chip_info_dtype:
 */
    /* take a width-1 slice and wrap it; NOTE(review): numpy slicing returns
     * a view, so the result presumably aliases self._data -- confirm this is
     * the intended (shared-storage) semantics */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3284, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3284, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3277
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":3285
 *                 key_ += size
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == bridge_chip_info_dtype:
 *             return BridgeChipInfo.from_data(out)
 */
  /* non-int key (slice, field name, fancy index, ...): generic subscript */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3285, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":3286
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == bridge_chip_info_dtype:             # <<<<<<<<<<<<<<
 *             return BridgeChipInfo.from_data(out)
 *         return out
 */
  /* short-circuit `and`: only wrap a recarray whose dtype matches */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_bridge_chip_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 3286, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3287
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == bridge_chip_info_dtype:
 *             return BridgeChipInfo.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3287, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3286
 *             return BridgeChipInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == bridge_chip_info_dtype:             # <<<<<<<<<<<<<<
 *             return BridgeChipInfo.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":3288
 *         if isinstance(out, _numpy.recarray) and out.dtype == bridge_chip_info_dtype:
 *             return BridgeChipInfo.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3274
 *         self._data.fw_version = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3290
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* CPython slot wrapper for `BridgeChipInfo.__setitem__` (mp_ass_subscript
 * signature: self, key, val; returns 0 / -1).  Forwards to the
 * implementation function after casting `self`.
 * NOTE(review): __Pyx_KwValues_VARARGS references __pyx_args/__pyx_nargs,
 * which are not parameters here -- presumably the macro discards its
 * arguments in this configuration; confirm against the Cython utility code. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `BridgeChipInfo.__setitem__` (_nvml.pyx:3290-3291).
 * Delegates the store to `self._data[key] = val` via PyObject_SetItem.
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Generated by Cython -- do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":3291
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 3291, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":3290
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3293
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BridgeChipInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * `BridgeChipInfo.from_data(data)` (_nvml.pyx:3293).
 * Unpacks exactly one positional-or-keyword argument `data` and forwards it
 * to the implementation function; returns a new reference or NULL.
 *
 * BUGFIX: the keyword-count error check used to read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * On GCC/Clang `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which
 * evaluates to 0 or 1, so the `< 0` comparison was ALWAYS false and a
 * negative (error) return from __Pyx_NumKwargs_FASTCALL would be silently
 * treated as "no keywords".  The comparison now lives inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_14from_data, "BridgeChipInfo.from_data(data)\n\nCreate an BridgeChipInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `bridge_chip_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* comparison moved inside unlikely(); see BUGFIX note above */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3293, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3293, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 3293, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 3293, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3293, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 3293, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `BridgeChipInfo.from_data(data)`
 * (_nvml.pyx:3293-3309).  Steps, mirroring the Cython source:
 *   1. allocate a bare BridgeChipInfo via tp_new (no __init__);
 *   2. TypeError unless `data` is a numpy.ndarray;
 *   3. ValueError unless `data.ndim == 1`;
 *   4. ValueError unless `data.dtype == bridge_chip_info_dtype`;
 *   5. store `data.view(_numpy.recarray)` into `obj._data` and return obj.
 * Returns a new reference, or NULL with an exception set.
 * Generated by Cython -- do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":3300
 *             data (_numpy.ndarray): a 1D array of dtype `bridge_chip_info_dtype` holding the data.
 *         """
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* direct tp_new call -- bypasses __init__, matching `__new__` semantics */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3300, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":3301
 *         """
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3301, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3301, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 3301, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":3302
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3302, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3302, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3301
 *         """
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":3303
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != bridge_chip_info_dtype:
 */
  /* optimized `!= 1` comparison against the cached int constant */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 3303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":3304
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != bridge_chip_info_dtype:
 *             raise ValueError("data array must be of dtype bridge_chip_info_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3304, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3304, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3303
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != bridge_chip_info_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":3305
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != bridge_chip_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype bridge_chip_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* dtype comparison via rich-compare (numpy dtypes define __ne__) */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_bridge_chip_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3305, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 3305, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":3306
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != bridge_chip_info_dtype:
 *             raise ValueError("data array must be of dtype bridge_chip_info_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_brid};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3306, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 3306, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3305
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != bridge_chip_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype bridge_chip_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":3307
 *         if data.dtype != bridge_chip_info_dtype:
 *             raise ValueError("data array must be of dtype bridge_chip_info_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* NOTE(review): ndarray.view does not copy, so obj._data presumably shares
   * storage with the caller's array -- confirm intended lifetime semantics */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3307, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* replace any previous `_data` reference on the new object */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3309
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3293
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BridgeChipInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3311
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an BridgeChipInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * `BridgeChipInfo.from_ptr(intptr_t ptr, size_t size=1, bint readonly=False)`
 * (_nvml.pyx:3311-3312).  Unpacks 1-3 arguments, converts them to C types
 * (ptr via PyLong_AsSsize_t -- assumes intptr_t and Py_ssize_t have the same
 * width on supported platforms), applies defaults size=1 / readonly=False,
 * and forwards to the implementation; returns a new reference or NULL.
 *
 * BUGFIX: the keyword-count error check used to read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * On GCC/Clang `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which
 * evaluates to 0 or 1, so the `< 0` comparison was ALWAYS false and a
 * negative (error) return from __Pyx_NumKwargs_FASTCALL would be silently
 * treated as "no keywords".  The comparison now lives inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_16from_ptr, "BridgeChipInfo.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an BridgeChipInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* comparison moved inside unlikely(); see BUGFIX note above */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3311, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3311, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3311, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3311, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 3311, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 3311, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3311, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3311, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3311, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3312, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 3312, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3312, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":3312
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an BridgeChipInfo instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 3311, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":3311
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an BridgeChipInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipInfo.from_ptr(ptr, size=1, readonly=False).
 * Wraps existing memory at `ptr` (expected to hold `size` contiguous
 * nvmlBridgeChipInfo_t structs) in a zero-copy NumPy recarray stored on a
 * fresh BridgeChipInfo instance.  Raises ValueError if `ptr` is 0.
 * NOTE(review): the returned object does NOT own the memory at `ptr`;
 * the caller must keep it alive for the object's lifetime -- confirm
 * against the .pyx docstring. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3320
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 */
  /* Reject a null pointer up front: a NULL-backed memoryview would be invalid. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3321
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3321, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3321, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3320
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":3322
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate the instance directly through tp_new, bypassing __init__
   * (so no owned backing buffer is allocated for it). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3322, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3323
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlBridgeChipInfo_t) * size, flag)
 */
  /* Select the memoryview access flag as a Python int: PyBUF_READ for a
   * read-only view, PyBUF_WRITE otherwise. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3323, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3323, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3325
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlBridgeChipInfo_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=bridge_chip_info_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Convert the boxed flag back to a C int for PyMemoryView_FromMemory. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3325, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":3324
 *         cdef BridgeChipInfo obj = BridgeChipInfo.__new__(BridgeChipInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlBridgeChipInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=bridge_chip_info_dtype)
 */
  /* Zero-copy view over the caller's memory; the memoryview does not own it. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlBridgeChipInfo_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3324, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3326
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlBridgeChipInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=bridge_chip_info_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 *
 */
  /* data = _numpy.ndarray(size, buffer=buf, dtype=bridge_chip_info_dtype)
   * assembled via Cython's vectorcall keyword builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_bridge_chip_info_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3326, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 3326, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 3326, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3326, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3327
 *             <char*>ptr, sizeof(nvmlBridgeChipInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=bridge_chip_info_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *
 *         return obj
 */
  /* obj._data = data.view(_numpy.recarray): store a recarray view so
   * fields are accessible as attributes. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3327, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Replace any previous _data reference on the new object. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3329
 *         obj._data = data.view(_numpy.recarray)
 *
 *         return obj             # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3311
 *         return obj
 *
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an BridgeChipInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3213
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset entry point for the readonly `_data` attribute; casts self to
 * the concrete struct type and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this getter;
   * this only compiles because __Pyx_KwValues_VARARGS is presumably a
   * macro that discards its arguments -- confirm against the Cython
   * utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed getter for BridgeChipInfo._data: returns a new strong reference
 * to the cached recarray stored on the instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* Take a new reference before handing the attribute back to Python. */
  __pyx_result = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_result);
  __Pyx_XGIVEREF(__pyx_result);
  __Pyx_RefNannyFinishContext();
  return __pyx_result;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_18__reduce_cython__, "BridgeChipInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_18__reduce_cython__};
/* Argument-parsing wrapper for __reduce_cython__: rejects any positional
 * or keyword arguments, then delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the non-fastcall ABI, recover the positional-arg count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments besides self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Pickle support: builds the standard Cython reduce tuple
 * (__pyx_unpickle_BridgeChipInfo, (type(self), checksum, ...), [state])
 * so instances round-trip through pickle.  0xa75e18a is the generated
 * layout checksum for this class. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  /* state = (self._data,) -- the only cdef attribute to preserve. */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuit `_dict is not None and _dict`. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    /* Append the instance __dict__ to the state tuple. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, None), state
 */
  /* Cython folded the condition into a constant tuple; assuming
   * __pyx_tuple[2] is the non-empty tuple shown in the fragment, its
   * truth value is always 1, so use_setstate ends up true here too. */
  /*else*/ {
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, state)
 */
    /* 3-tuple form: state is applied later via __setstate__. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_BridgeChipInfo); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_BridgeChipInfo__set_state(self, __pyx_state)
 */
  /* 2-tuple form: state is passed directly to the unpickle factory. */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_BridgeChipInfo); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_BridgeChipInfo__set_state(self, __pyx_state)
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_20__setstate_cython__, "BridgeChipInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_20__setstate_cython__};
/* Argument-parsing wrapper for __setstate_cython__: accepts exactly one
 * argument (`__pyx_state`, positional or keyword), then delegates to the
 * typed implementation.  Returns NULL with an exception set on bad
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the non-fastcall ABI, recover the positional-arg count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the check was `unlikely(__pyx_kwds_len) < 0`, which compares
     * the 0/1 result of unlikely()/__builtin_expect against 0 and can never
     * be true, silently skipping the error path when the kwargs count query
     * fails.  The parenthesis belongs around the whole comparison, matching
     * the form used by the sibling wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Release any argument references collected before the failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: validates that the pickled
 * state is a (non-None) tuple, then restores it onto `self` via the
 * generated __pyx_unpickle_BridgeChipInfo__set_state helper.  Returns
 * None on success, NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14BridgeChipInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_BridgeChipInfo__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* The helper takes a `tuple not None` parameter: raise TypeError for
   * any other type, and separately for None. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_BridgeChipInfo__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_BridgeChipInfo__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3358
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlValue_t *>calloc(1, sizeof(nvmlValue_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* tp_init wrapper for Value.__init__: rejects all positional and keyword
 * arguments, then delegates to the typed implementation.  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives an args tuple; count the positionals. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Value() takes no arguments besides self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Value.__init__ (from _nvml.pyx:3358).
 * Zero-allocates one nvmlValue_t via calloc, raises MemoryError on failure,
 * and initializes the ownership bookkeeping fields:
 *   _owner    = None  (no other Python object keeps the buffer alive)
 *   _owned    = True  (this instance must free the buffer in __dealloc__)
 *   _readonly = False (setters are permitted)
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":3359
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlValue_t *>calloc(1, sizeof(nvmlValue_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Value")
 */
  __pyx_v_self->_ptr = ((nvmlValue_t *)calloc(1, (sizeof(nvmlValue_t))));

  /* "cuda/bindings/_nvml.pyx":3360
 *     def __init__(self):
 *         self._ptr = <nvmlValue_t *>calloc(1, sizeof(nvmlValue_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Value")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3361
 *         self._ptr = <nvmlValue_t *>calloc(1, sizeof(nvmlValue_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Value")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError by name (module-global lookup allows shadowing),
     * then call it via the vectorcall fast path to build the exception. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3361, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Value};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3361, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3361, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3360
 *     def __init__(self):
 *         self._ptr = <nvmlValue_t *>calloc(1, sizeof(nvmlValue_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Value")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":3362
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Value")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace previous _owner with None: incref the new value before
   * decref'ing the old one, so the slot never holds a dead reference. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":3363
 *             raise MemoryError("Error allocating Value")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":3364
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":3358
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlValue_t *>calloc(1, sizeof(nvmlValue_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3366
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlValue_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for Value.__dealloc__; forwards to the impl.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters here; this
 * compiles because __Pyx_KwValues_VARARGS is presumably a macro that does
 * not expand its arguments in this configuration — verify against the
 * Cython 3.2 runtime utility code if touching this. */
static void __pyx_pw_4cuda_8bindings_5_nvml_5Value_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_5Value_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_5Value_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of Value.__dealloc__ (from _nvml.pyx:3366).
 * Frees the nvmlValue_t buffer only when this instance owns it (_owned)
 * and the pointer is non-NULL. The pointer is copied to a local and the
 * field is nulled *before* free() — this ordering guards against a
 * dangling _ptr being observed during teardown. Cannot raise. */
static void __pyx_pf_4cuda_8bindings_5_nvml_5Value_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  nvmlValue_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlValue_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":3368
 *     def __dealloc__(self):
 *         cdef nvmlValue_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3369
 *         cdef nvmlValue_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":3370
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":3371
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3368
 *     def __dealloc__(self):
 *         cdef nvmlValue_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":3366
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlValue_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":3373
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Value object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for Value.__repr__; forwards to the impl.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope;
 * presumably __Pyx_KwValues_VARARGS discards its arguments during macro
 * expansion in this build configuration — confirm against the Cython
 * runtime before editing. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Value.__repr__ (from _nvml.pyx:3373).
 * Builds the f-string "<{__name__}.Value object at {hex(id(self))}>" by
 * formatting the module's __name__, computing hex(id(self)), and joining
 * the five pieces with a single pre-sized unicode join.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":3374
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.Value object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__} piece: module-global lookup, then str-format with no spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))} piece: id() -> hex() -> ensure unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and computed pieces; length/max-char are precomputed
   * hints for a single allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_Value_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 17 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3373
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Value object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3376
 *         return f"<{__name__}.Value object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for Value.ptr; forwards to the impl.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably
 * __Pyx_KwValues_VARARGS does not expand its arguments in this
 * configuration — verify against the Cython runtime. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Value.ptr property getter (from _nvml.pyx:3376).
 * Returns the address of the wrapped nvmlValue_t buffer as a Python int.
 * The intptr_t is boxed via PyLong_FromSsize_t (same width on supported
 * platforms). Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3379
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3376
 *         return f"<{__name__}.Value object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3381
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level helper for Value._get_ptr (from _nvml.pyx:3381).
 * Returns the raw address of the wrapped nvmlValue_t buffer as an
 * intptr_t, with no Python-object or error-handling overhead. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_5Value__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  /* Direct cast of the stored pointer; equivalent to the generated
   * return-through-label form but without the redundant goto. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":3384
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for Value.__int__; forwards to the impl.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably
 * __Pyx_KwValues_VARARGS discards its arguments at macro-expansion time
 * in this configuration — verify against the Cython runtime. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Value.__int__ (from _nvml.pyx:3384).
 * int(value) yields the buffer address — identical semantics to the
 * `ptr` property getter. Returns a new Python int reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":3385
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3385, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3384
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3387
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Value other_
 *         if not isinstance(other, Value):
*/

/* Python wrapper */
/* Rich-comparison (==) wrapper for Value.__eq__; forwards both operands
 * to the impl. NOTE(review): __pyx_args/__pyx_nargs are undeclared here;
 * presumably __Pyx_KwValues_VARARGS drops its arguments during macro
 * expansion in this build — verify against the Cython runtime. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Value.__eq__ (from _nvml.pyx:3387).
 * Returns False when `other` is not a Value instance; otherwise compares
 * the two underlying nvmlValue_t buffers bytewise with memcmp and returns
 * the resulting Python bool. Returns NULL with an exception set on error.
 * NOTE(review): the memcmp dereferences both _ptr fields without a NULL
 * check — callers apparently rely on both instances being allocated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":3389
 *     def __eq__(self, other):
 *         cdef Value other_
 *         if not isinstance(other, Value):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3390
 *         cdef Value other_
 *         if not isinstance(other, Value):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlValue_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3389
 *     def __eq__(self, other):
 *         cdef Value other_
 *         if not isinstance(other, Value):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":3391
 *         if not isinstance(other, Value):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlValue_t)) == 0)
 * 
 */
  /* Typed re-check (None or Value); keeps a reference in other_ until exit. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value))))) __PYX_ERR(0, 3391, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":3392
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlValue_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlValue_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3392, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3387
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Value other_
 *         if not isinstance(other, Value):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3394
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlValue_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for Value.__setitem__; forwards key and value
 * to the impl. NOTE(review): __pyx_args/__pyx_nargs are undeclared here;
 * presumably __Pyx_KwValues_VARARGS discards its arguments at macro
 * expansion in this build — verify against the Cython runtime. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Value.__setitem__ (from _nvml.pyx:3394).
 * Two paths:
 *  - value[0] = <numpy.ndarray>: allocate a fresh nvmlValue_t with malloc,
 *    raise MemoryError on failure, memcpy sizeof(nvmlValue_t) bytes from
 *    the array's data address (val.ctypes.data), then set _owner=None,
 *    _owned=True, and _readonly from `not val.flags.writeable`.
 *  - anything else: fall through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): on the ndarray path the previous _ptr is overwritten
 * without being freed — if this instance already owned a buffer that
 * allocation leaks; confirm against the .pyx author's intent. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":3395
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: test key == 0 first, then the ndarray check. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3395, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 3395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3396
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Value")
 */
    __pyx_v_self->_ptr = ((nvmlValue_t *)malloc((sizeof(nvmlValue_t))));

    /* "cuda/bindings/_nvml.pyx":3397
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlValue_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3398
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Value")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlValue_t))
 *             self._owner = None
 */
      /* Same MemoryError construction pattern as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3398, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Value};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3398, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 3398, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3397
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlValue_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3399
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlValue_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (array base address as int) and copy one
     * nvmlValue_t worth of bytes into the fresh buffer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3399, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3399, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3399, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlValue_t))));

    /* "cuda/bindings/_nvml.pyx":3400
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlValue_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3401
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlValue_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3402
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability into the read-only flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3402, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3402, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 3402, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":3395
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":3404
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 3404, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":3394
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlValue_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3406
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def d_val(self):
 *         """float: """
*/

/* Python wrapper */
/* Property-getter wrapper for Value.d_val; forwards to the impl.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably
 * __Pyx_KwValues_VARARGS discards its arguments at macro-expansion time
 * in this configuration — verify against the Cython runtime. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_5d_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_5d_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_5d_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Value.d_val property getter (from _nvml.pyx:3406).
 * Returns the union's dVal member (a double) as a Python float.
 * Dereferences _ptr without a NULL check — assumes the buffer was
 * allocated by __init__/__setitem__. Returns a new reference or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_5d_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3409
 *     def d_val(self):
 *         """float: """
 *         return self._ptr[0].dVal             # <<<<<<<<<<<<<<
 * 
 *     @d_val.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble((__pyx_v_self->_ptr[0]).dVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3409, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3406
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def d_val(self):
 *         """float: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.d_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3411
 *         return self._ptr[0].dVal
 * 
 *     @d_val.setter             # <<<<<<<<<<<<<<
 *     def d_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.d_val` property setter: casts the
   receiver and delegates to the implementation below.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_5d_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_5d_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_5d_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `Value.d_val` (from _nvml.pyx lines 3413-3415):
   raises ValueError if the instance is flagged read-only, otherwise converts
   `val` to a C double and stores it into `self._ptr[0].dVal`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_5d_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  double __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3413
 *     @d_val.setter
 *     def d_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].dVal = val
*/
  /* Guard: writes are rejected when the instance was created read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3414
 *     def d_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].dVal = val
 * 
*/
    /* Build and raise ValueError("This Value instance is read-only")
       via the vectorcall fast path (interned message string). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3414, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3414, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3413
 *     @d_val.setter
 *     def d_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].dVal = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3415
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].dVal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox the Python number to a C double; -1 is also a valid result, so
     PyErr_Occurred() disambiguates genuine conversion errors. */
  __pyx_t_4 = __Pyx_PyFloat_AsDouble(__pyx_v_val); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 3415, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).dVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3411
 *         return self._ptr[0].dVal
 * 
 *     @d_val.setter             # <<<<<<<<<<<<<<
 *     def d_val(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.d_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3417
 *         self._ptr[0].dVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def si_val(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.si_val` property getter: casts the
   receiver and delegates to the implementation below.  Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6si_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6si_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6si_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `Value.si_val` (from _nvml.pyx line 3420):
   boxes the signed C `int` field `self._ptr[0].siVal` into a Python int.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6si_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3420
 *     def si_val(self):
 *         """int: """
 *         return self._ptr[0].siVal             # <<<<<<<<<<<<<<
 * 
 *     @si_val.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).siVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3417
 *         self._ptr[0].dVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def si_val(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.si_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3422
 *         return self._ptr[0].siVal
 * 
 *     @si_val.setter             # <<<<<<<<<<<<<<
 *     def si_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.si_val` property setter: casts the
   receiver and delegates to the implementation below.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6si_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6si_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6si_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `Value.si_val` (from _nvml.pyx lines 3424-3426):
   raises ValueError if the instance is flagged read-only, otherwise converts
   `val` to a signed C int (overflow-checked) and stores it into
   `self._ptr[0].siVal`.  Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6si_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3424
 *     @si_val.setter
 *     def si_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].siVal = val
*/
  /* Guard: writes are rejected when the instance was created read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3425
 *     def si_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].siVal = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3425, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3425, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3424
 *     @si_val.setter
 *     def si_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].siVal = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3426
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].siVal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox to C int; -1 is a valid result, so PyErr_Occurred() is checked
     to distinguish it from a conversion/overflow error. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3426, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).siVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3422
 *         return self._ptr[0].siVal
 * 
 *     @si_val.setter             # <<<<<<<<<<<<<<
 *     def si_val(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.si_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3428
 *         self._ptr[0].siVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ui_val(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.ui_val` property getter: casts the
   receiver and delegates to the implementation below.  Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6ui_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6ui_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ui_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `Value.ui_val` (from _nvml.pyx line 3431):
   boxes the C `unsigned int` field `self._ptr[0].uiVal` into a Python int.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6ui_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3431
 *     def ui_val(self):
 *         """int: """
 *         return self._ptr[0].uiVal             # <<<<<<<<<<<<<<
 * 
 *     @ui_val.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).uiVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3431, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3428
 *         self._ptr[0].siVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ui_val(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ui_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3433
 *         return self._ptr[0].uiVal
 * 
 *     @ui_val.setter             # <<<<<<<<<<<<<<
 *     def ui_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.ui_val` property setter: casts the
   receiver and delegates to the implementation below.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ui_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ui_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ui_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `Value.ui_val` (from _nvml.pyx lines 3435-3437):
   raises ValueError if the instance is flagged read-only, otherwise converts
   `val` to a C unsigned int (range-checked) and stores it into
   `self._ptr[0].uiVal`.  Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ui_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3435
 *     @ui_val.setter
 *     def ui_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].uiVal = val
*/
  /* Guard: writes are rejected when the instance was created read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3436
 *     def ui_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].uiVal = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3436, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3436, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3435
 *     @ui_val.setter
 *     def ui_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].uiVal = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3437
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].uiVal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox to C unsigned int; (unsigned int)-1 is a valid result, so
     PyErr_Occurred() disambiguates genuine conversion errors. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3437, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).uiVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3433
 *         return self._ptr[0].uiVal
 * 
 *     @ui_val.setter             # <<<<<<<<<<<<<<
 *     def ui_val(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ui_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3439
 *         self._ptr[0].uiVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ul_val(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.ul_val` property getter: casts the
   receiver and delegates to the implementation below.  Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6ul_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6ul_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ul_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `Value.ul_val` (from _nvml.pyx line 3442):
   boxes the C `unsigned long` field `self._ptr[0].ulVal` into a Python int.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6ul_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3442
 *     def ul_val(self):
 *         """int: """
 *         return self._ptr[0].ulVal             # <<<<<<<<<<<<<<
 * 
 *     @ul_val.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_long((__pyx_v_self->_ptr[0]).ulVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3442, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3439
 *         self._ptr[0].uiVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ul_val(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ul_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3444
 *         return self._ptr[0].ulVal
 * 
 *     @ul_val.setter             # <<<<<<<<<<<<<<
 *     def ul_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.ul_val` property setter: casts the
   receiver and delegates to the implementation below.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ul_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ul_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ul_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `Value.ul_val` (from _nvml.pyx lines 3446-3448):
   raises ValueError if the instance is flagged read-only, otherwise converts
   `val` to a C unsigned long (range-checked) and stores it into
   `self._ptr[0].ulVal`.  Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6ul_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned long __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3446
 *     @ul_val.setter
 *     def ul_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].ulVal = val
*/
  /* Guard: writes are rejected when the instance was created read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3447
 *     def ul_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ulVal = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3447, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3447, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3446
 *     @ul_val.setter
 *     def ul_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].ulVal = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3448
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].ulVal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox to C unsigned long; (unsigned long)-1 is a valid result, so
     PyErr_Occurred() disambiguates genuine conversion errors. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_long(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned long)-1) && PyErr_Occurred())) __PYX_ERR(0, 3448, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ulVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3444
 *         return self._ptr[0].ulVal
 * 
 *     @ul_val.setter             # <<<<<<<<<<<<<<
 *     def ul_val(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ul_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3450
 *         self._ptr[0].ulVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ull_val(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.ull_val` property getter: casts the
   receiver and delegates to the implementation below.  Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_7ull_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_7ull_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_7ull_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `Value.ull_val` (from _nvml.pyx line 3453):
   boxes the C `unsigned long long` field `self._ptr[0].ullVal` into a
   Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_7ull_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3453
 *     def ull_val(self):
 *         """int: """
 *         return self._ptr[0].ullVal             # <<<<<<<<<<<<<<
 * 
 *     @ull_val.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).ullVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3450
 *         self._ptr[0].ulVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ull_val(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ull_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3455
 *         return self._ptr[0].ullVal
 * 
 *     @ull_val.setter             # <<<<<<<<<<<<<<
 *     def ull_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.ull_val` property setter: casts the
   receiver and delegates to the implementation below.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_7ull_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_7ull_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_7ull_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `Value.ull_val` (from _nvml.pyx lines 3457-3459):
   raises ValueError if the instance is flagged read-only, otherwise converts
   `val` to a C unsigned long long (range-checked) and stores it into
   `self._ptr[0].ullVal`.  Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_7ull_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3457
 *     @ull_val.setter
 *     def ull_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].ullVal = val
*/
  /* Guard: writes are rejected when the instance was created read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3458
 *     def ull_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ullVal = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3458, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3458, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3457
 *     @ull_val.setter
 *     def ull_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].ullVal = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3459
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].ullVal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox to C unsigned long long; (unsigned long long)-1 is a valid
     result, so PyErr_Occurred() disambiguates genuine conversion errors. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 3459, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ullVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3455
 *         return self._ptr[0].ullVal
 * 
 *     @ull_val.setter             # <<<<<<<<<<<<<<
 *     def ull_val(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.ull_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3461
 *         self._ptr[0].ullVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sll_val(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_7sll_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_7sll_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_7sll_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_7sll_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3464
 *     def sll_val(self):
 *         """int: """
 *         return self._ptr[0].sllVal             # <<<<<<<<<<<<<<
 * 
 *     @sll_val.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_PY_LONG_LONG((__pyx_v_self->_ptr[0]).sllVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3461
 *         self._ptr[0].ullVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sll_val(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.sll_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3466
 *         return self._ptr[0].sllVal
 * 
 *     @sll_val.setter             # <<<<<<<<<<<<<<
 *     def sll_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.sll_val` property setter: casts the
   receiver and delegates to the implementation below.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_7sll_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_7sll_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_7sll_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `Value.sll_val` (from _nvml.pyx lines 3468-3470):
   raises ValueError if the instance is flagged read-only, otherwise converts
   `val` to a signed C long long (overflow-checked) and stores it into
   `self._ptr[0].sllVal`.  Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_7sll_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3468
 *     @sll_val.setter
 *     def sll_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].sllVal = val
*/
  /* Guard: writes are rejected when the instance was created read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3469
 *     def sll_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sllVal = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3469, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3469, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3468
 *     @sll_val.setter
 *     def sll_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].sllVal = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3470
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].sllVal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox to C long long; -1 is a valid result, so PyErr_Occurred()
     disambiguates genuine conversion errors. */
  __pyx_t_4 = __Pyx_PyLong_As_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 3470, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sllVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3466
 *         return self._ptr[0].sllVal
 * 
 *     @sll_val.setter             # <<<<<<<<<<<<<<
 *     def sll_val(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.sll_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3472
 *         self._ptr[0].sllVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def us_val(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.us_val` property getter.
 * Cython 3.2.2 generated code -- manual edits are lost on regeneration.
 * The wrapper only casts `self` to the extension-type struct and delegates
 * to the __get__ implementation; errors propagate as a NULL return. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6us_val_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_6us_val_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * __Pyx_KwValues_VARARGS is presumably a macro that discards its arguments
   * in this calling convention -- confirm against its definition earlier in
   * this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6us_val___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `Value.us_val` property getter
 * (cuda/bindings/_nvml.pyx:3472-3475): boxes the C `unsigned short`
 * field `self._ptr[0].usVal` as a Python int.  Returns a new reference,
 * or NULL with an exception set on (unlikely) conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_6us_val___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3475
 *     def us_val(self):
 *         """int: """
 *         return self._ptr[0].usVal             # <<<<<<<<<<<<<<
 * 
 *     @us_val.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned short; a NULL result means the int allocation failed. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).usVal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3472
 *         self._ptr[0].sllVal = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def us_val(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.us_val.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3477
 *         return self._ptr[0].usVal
 * 
 *     @us_val.setter             # <<<<<<<<<<<<<<
 *     def us_val(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `Value.us_val` property setter.
 * Cython 3.2.2 generated code -- manual edits are lost on regeneration.
 * Casts `self` to the extension-type struct and delegates; the delegate's
 * 0/-1 return is passed straight through (the descriptor protocol's
 * setter convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6us_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_5Value_6us_val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably discards them in the VARARGS convention -- confirm against
   * the macro definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_6us_val_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `Value.us_val` property setter
 * (cuda/bindings/_nvml.pyx:3477-3481).  Raises ValueError when the
 * instance was created read-only, otherwise converts `val` to a C
 * `unsigned short` and stores it into self._ptr[0].usVal.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_5Value_6us_val_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3479
 *     @us_val.setter
 *     def us_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].usVal = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3480
 *     def us_val(self, val):
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].usVal = val
 * 
 */
    /* Vectorcall ValueError with the interned message; __pyx_t_3 == 1 marks
     * "no self slot occupied" so the args pointer is offset by one. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Value_instance_is_read_only};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3480, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3480, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3479
 *     @us_val.setter
 *     def us_val(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].usVal = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3481
 *         if self._readonly:
 *             raise ValueError("This Value instance is read-only")
 *         self._ptr[0].usVal = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned short)-1 (65535) is also a legal value; PyErr_Occurred()
   * disambiguates a real error from that sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 3481, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).usVal = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3477
 *         return self._ptr[0].usVal
 * 
 *     @us_val.setter             # <<<<<<<<<<<<<<
 *     def us_val(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.us_val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3483
 *         self._ptr[0].usVal = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Value instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method `Value.from_data(data)`
 * (cuda/bindings/_nvml.pyx:3483).  Accepts exactly one argument, either
 * positionally or as the keyword `data`, then delegates to the
 * implementation function.  Returns a new reference or NULL on error.
 *
 * FIX(review): the negative-keyword-count check below was written as
 * `unlikely(__pyx_kwds_len) < 0`.  With the usual Cython definition
 * `#define unlikely(x) __builtin_expect(!!(x), 0)` that normalizes the
 * length to 0/1 before the comparison, so the `< 0` error branch could
 * never fire.  The comparison now lives inside the macro argument. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_5Value_12from_data, "Value.from_data(data)\n\nCreate an Value instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `value_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_5Value_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so the error check is live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3483, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3483, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 3483, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 3483, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3483, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 3483, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Value.from_data(data)`
 * (cuda/bindings/_nvml.pyx:3483-3490).  Looks up the module-global
 * `value_dtype` and delegates to the module-private __from_data helper,
 * passing the dtype name, the dtype object, and the Value type object.
 * Returns a new reference to the constructed instance, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":3490
 *             data (_numpy.ndarray): a single-element array of dtype `value_dtype` holding the data.
 *         """
 *         return __from_data(data, "value_dtype", value_dtype, Value)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* `value_dtype` is resolved at call time from module globals, so a
   * monkey-patched dtype would be honored. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3490, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_value_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3490, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3483
 *         self._ptr[0].usVal = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Value instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3492
 *         return __from_data(data, "value_dtype", value_dtype, Value)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Value instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method
 * `Value.from_ptr(intptr_t ptr, bint readonly=False, object owner=None)`
 * (cuda/bindings/_nvml.pyx:3492-3493).  Unpacks 1..3 positional/keyword
 * arguments, applies the defaults readonly=False and owner=None, converts
 * `ptr` to intptr_t and `readonly` to a C truth value, then delegates.
 * Returns a new reference or NULL on error.
 *
 * FIX(review): the negative-keyword-count check below was written as
 * `unlikely(__pyx_kwds_len) < 0`.  With the usual Cython definition
 * `#define unlikely(x) __builtin_expect(!!(x), 0)` that normalizes the
 * length to 0/1 before the comparison, so the `< 0` error branch could
 * never fire.  The comparison now lives inside the macro argument. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_5Value_14from_ptr, "Value.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an Value instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_5Value_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so the error check is live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3492, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3492, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3492, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3492, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 3492, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":3493
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an Value instance wrapping the given pointer.
 * 
 */
      /* Default owner=None; readonly's default is applied after unpacking. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 3492, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3492, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3492, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3492, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3493, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3493, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 3492, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":3492
 *         return __from_data(data, "value_dtype", value_dtype, Value)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Value instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Value.from_ptr(ptr, readonly=False, owner=None)`
 * (cuda/bindings/_nvml.pyx:3492-3516).  Rejects a null pointer, then:
 *   - owner is None: malloc a private nvmlValue_t, copy from `ptr`
 *     (the instance owns and will free the copy; _owned = True);
 *   - otherwise: alias `ptr` directly and hold a reference to `owner`
 *     to keep the backing memory alive (_owned = False).
 * Returns a new Value reference, or NULL with ValueError/MemoryError set.
 * NOTE(review): when owner is not None, `ptr` must stay valid for the
 * owner's lifetime -- nothing here can validate that. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3501
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Value obj = Value.__new__(Value)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3502
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef Value obj = Value.__new__(Value)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3502, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3502, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3501
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Value obj = Value.__new__(Value)
 */
  }

  /* "cuda/bindings/_nvml.pyx":3503
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Value obj = Value.__new__(Value)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 */
  /* tp_new is called directly (no __init__), so the fields set below are
   * the instance's entire initialization. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Value(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3503, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3504
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Value obj = Value.__new__(Value)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3505
 *         cdef Value obj = Value.__new__(Value)
 *         if owner is None:
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Value")
 */
    __pyx_v_obj->_ptr = ((nvmlValue_t *)malloc((sizeof(nvmlValue_t))));

    /* "cuda/bindings/_nvml.pyx":3506
 *         if owner is None:
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlValue_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3507
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Value")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlValue_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a (potentially shadowed) module global
       * rather than via PyExc_MemoryError, hence the method-unpack dance. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3507, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Value};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3507, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 3507, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3506
 *         if owner is None:
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlValue_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3508
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlValue_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlValue_t))));

    /* "cuda/bindings/_nvml.pyx":3509
 *                 raise MemoryError("Error allocating Value")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlValue_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3510
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlValue_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlValue_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3504
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Value obj = Value.__new__(Value)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlValue_t *>malloc(sizeof(nvmlValue_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":3512
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlValue_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Alias the caller's memory; `owner` is referenced to keep it alive. */
    __pyx_v_obj->_ptr = ((nvmlValue_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3513
 *         else:
 *             obj._ptr = <nvmlValue_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":3514
 *             obj._ptr = <nvmlValue_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":3515
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":3516
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3492
 *         return __from_data(data, "value_dtype", value_dtype, Value)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Value instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* On the error path this drops the partially-built instance; its
   * dealloc presumably frees a malloc'd _ptr when _owned is set. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Argument-checking wrapper for `Value.__reduce_cython__(self)`:
 * rejects any positional or keyword arguments, then delegates to the
 * implementation (which unconditionally raises TypeError -- the type
 * is deliberately unpicklable because it wraps a raw pointer). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Value.__reduce_cython__`: always raises TypeError,
 * because the wrapped C pointer (`self._ptr`) cannot be pickled.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Raise with (type, value) separately -- no instantiation needed here. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the
   Value.__setstate_cython__ wrapper defined below. The entry is registered
   with __Pyx_METH_FASTCALL|METH_KEYWORDS; the parameter list switches between
   fastcall (args array + nargs) and classic tuple/dict calling conventions
   depending on CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_5Value_18__setstate_cython__, "Value.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_5Value_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_18__setstate_cython__};
/* Python-level wrapper for Value.__setstate_cython__: unpacks exactly one
   positional-or-keyword argument (__pyx_state), raising the standard
   "takes exactly 1 argument" TypeError on mismatch, then forwards to the
   implementation function. Generated argument-parsing boilerplate; the goto
   labels implement the error/skip control flow. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5Value_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the non-fastcall convention, derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args first, then merge keywords and
         verify every required slot is filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references collected into values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_5Value_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Value.__setstate_cython__ implementation: like __reduce_cython__, always
   raises TypeError -- the wrapped C pointer cannot be restored from a pickle
   state. The __pyx_state argument is accepted but never used. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_5Value_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Value *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Unconditional raise; the function always exits through __pyx_L1_error. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Value.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3519
 * 
 * 
 * cdef _get__py_anon_pod0_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod0 pod = _anon_pod0()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype mirroring the C layout of
   _anon_pod0. The dtype is constructed via numpy.dtype({...}) with:
     'names'   : the five Python-visible field names,
     'formats' : numpy.int32 for every field,
     'offsets' : byte offsets of each C struct member, computed as pointer
                 differences against a stack-local instance,
     'itemsize': sizeof(_anon_pod0), so the dtype matches the struct stride.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod0_dtype_offsets(void) {
  _anon_pod0 __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  _anon_pod0 __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get__py_anon_pod0_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":3520
 * 
 * cdef _get__py_anon_pod0_dtype_offsets():
 *     cdef _anon_pod0 pod = _anon_pod0()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['controller', 'default_min_temp', 'default_max_temp', 'current_temp', 'target'],
 */
  /* NOTE(review): __pyx_t_1 appears to be a default-constructed _anon_pod0
     temporary (this translation unit is compiled as C++). Its field VALUES are
     never read below -- only the ADDRESSES of pod's members are used to
     compute offsets, so the indeterminate contents are harmless. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":3521
 * cdef _get__py_anon_pod0_dtype_offsets():
 *     cdef _anon_pod0 pod = _anon_pod0()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['controller', 'default_min_temp', 'default_max_temp', 'current_temp', 'target'],
 *         'formats': [_numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 */
  /* Look up numpy.dtype; __pyx_t_5 holds the callable for the final call. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3522
 *     cdef _anon_pod0 pod = _anon_pod0()
 *     return _numpy.dtype({
 *         'names': ['controller', 'default_min_temp', 'default_max_temp', 'current_temp', 'target'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 *         'offsets': [
 */
  /* __pyx_t_4 becomes the spec dict; __pyx_t_6 the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_controller);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_controller);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_controller) != (0)) __PYX_ERR(0, 3522, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_default_min_temp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_default_min_temp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_default_min_temp) != (0)) __PYX_ERR(0, 3522, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_default_max_temp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_default_max_temp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_default_max_temp) != (0)) __PYX_ERR(0, 3522, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_current_temp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_current_temp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_current_temp) != (0)) __PYX_ERR(0, 3522, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_target);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_target);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_target) != (0)) __PYX_ERR(0, 3522, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 3522, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3523
 *     return _numpy.dtype({
 *         'names': ['controller', 'default_min_temp', 'default_max_temp', 'current_temp', 'target'],
 *         'formats': [_numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.controller)) - (<intptr_t>&pod),
 */
  /* Fetch numpy.int32 five times (once per field) into __pyx_t_7..__pyx_t_11,
     then pack them into the 'formats' list. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 3523, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3523, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 3523, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 3523, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 3523, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 3522, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3525
 *         'formats': [_numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.controller)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.defaultMinTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.defaultMaxTemp)) - (<intptr_t>&pod),
 */
  /* Each offset is (address of member) - (address of struct), converted to a
     Python int -- equivalent to offsetof() for each field. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.controller)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3525, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":3526
 *         'offsets': [
 *             (<intptr_t>&(pod.controller)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.defaultMinTemp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.defaultMaxTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.currentTemp)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.defaultMinTemp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 3526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":3527
 *             (<intptr_t>&(pod.controller)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.defaultMinTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.defaultMaxTemp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.currentTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.defaultMaxTemp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3527, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":3528
 *             (<intptr_t>&(pod.defaultMinTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.defaultMaxTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.currentTemp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.currentTemp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3528, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":3529
 *             (<intptr_t>&(pod.defaultMaxTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.currentTemp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(_anon_pod0),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.target)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3529, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":3524
 *         'names': ['controller', 'default_min_temp', 'default_max_temp', 'current_temp', 'target'],
 *         'formats': [_numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.controller)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.defaultMinTemp)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list and store it in the spec dict. */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3524, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 3524, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 3524, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 3524, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 3524, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 3524, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 3522, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":3531
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(_anon_pod0),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(_anon_pod0))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3531, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 3522, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall protocol; if dtype turned out
     to be a bound method, unpack it so self is passed as the first arg. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3521, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3519
 * 
 * 
 * cdef _get__py_anon_pod0_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod0 pod = _anon_pod0()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get__py_anon_pod0_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3548
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod0 *>calloc(1, sizeof(_anon_pod0))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for _py_anon_pod0.__init__: rejects any positional or
   keyword arguments (the Cython __init__ takes only self), then delegates to
   the implementation. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Any argument at all is an error: __init__ accepts none. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* _py_anon_pod0.__init__ implementation: zero-allocates the backing
   _anon_pod0 struct on the C heap, raising MemoryError on allocation failure,
   and initializes the wrapper's bookkeeping fields (no owner object, owned by
   this wrapper, writable). Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":3549
 * 
 *     def __init__(self):
 *         self._ptr = <_anon_pod0 *>calloc(1, sizeof(_anon_pod0))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod0")
 */
  /* calloc zero-fills the struct, so all POD fields start at 0. */
  __pyx_v_self->_ptr = ((_anon_pod0 *)calloc(1, (sizeof(_anon_pod0))));

  /* "cuda/bindings/_nvml.pyx":3550
 *     def __init__(self):
 *         self._ptr = <_anon_pod0 *>calloc(1, sizeof(_anon_pod0))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod0")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3551
 *         self._ptr = <_anon_pod0 *>calloc(1, sizeof(_anon_pod0))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod0")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build MemoryError("Error allocating _py_anon_pod0") via vectorcall and
       raise it; MemoryError is looked up by name, so a module-level override
       would be honored. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3551, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3551, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3551, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3550
 *     def __init__(self):
 *         self._ptr = <_anon_pod0 *>calloc(1, sizeof(_anon_pod0))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod0")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":3552
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod0")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (incref new before
     decref old, per the usual safe-assignment pattern). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":3553
 *             raise MemoryError("Error allocating _py_anon_pod0")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":3554
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":3548
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod0 *>calloc(1, sizeof(_anon_pod0))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3556
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod0 *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod0.__dealloc__.
   NOTE(review): __pyx_args/__pyx_nargs are not parameters here; this line
   presumably only compiles because __Pyx_KwValues_VARARGS is a macro that
   discards its arguments -- confirm against the Cython utility-code header. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  _anon_pod0 *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  _anon_pod0 *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":3558
 *     def __dealloc__(self):
 *         cdef _anon_pod0 *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3559
 *         cdef _anon_pod0 *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":3560
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":3561
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3558
 *     def __dealloc__(self):
 *         cdef _anon_pod0 *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":3556
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod0 *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":3563
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod0 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod0.__repr__: casts self to the
   extension-type struct and forwards to the implementation. The KwValues
   line is generated boilerplate (the macro discards its arguments). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* _py_anon_pod0.__repr__ implementation: builds the f-string
   "<{__name__}._py_anon_pod0 object at {hex(id(self))}>" by formatting the
   module __name__, computing hex(id(self)), and joining the five fragments
   with __Pyx_PyUnicode_Join. Returns a new unicode reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":3564
 * 
 *     def __repr__(self):
 *         return f"<{__name__}._py_anon_pod0 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__) formatted with no format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))) for the address fragment. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the interned literal pieces with the two computed fragments; the
     length/max-char hints let the join pre-size the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_py_anon_pod0_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3563
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod0 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3566
 *         return f"<{__name__}._py_anon_pod0 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the _py_anon_pod0.ptr property getter: casts self
   to the extension-type struct and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (_nvml.pyx:3566-3569).
   Returns the raw struct pointer self._ptr, cast to intptr_t, as a Python int.
   On conversion failure, jumps to the error path and returns NULL with a
   traceback entry added. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3569
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value; PyLong_FromSsize_t works because intptr_t and
     Py_ssize_t have the same width on supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3566
 *         return f"<{__name__}._py_anon_pod0 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3571
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level `cdef intptr_t _get_ptr(self)` (_nvml.pyx:3571-3572).
   Returns self._ptr cast to intptr_t. Pure C fast path: no Python objects
   are created, so there is no error label and no refcounting here. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod0__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":3572
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3571
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3574
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Generated Python-level wrapper for `_py_anon_pod0.__int__`.
   Casts self to the extension-type struct and delegates to the
   __pyx_pf_..._6__int__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* Uniform wrapper boilerplate; __int__ takes no extra arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__int__` (_nvml.pyx:3574-3575).
   Identical contract to the `ptr` property getter: returns self._ptr as a
   Python int, so `int(obj)` yields the underlying pointer address. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":3575
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int (intptr_t fits in Py_ssize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3575, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3574
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3577
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod0 other_
 *         if not isinstance(other, _py_anon_pod0):
*/

/* Python wrapper */
/* Generated Python-level wrapper for `_py_anon_pod0.__eq__(self, other)`.
   Casts self to the extension-type struct, passes `other` through unchanged,
   and delegates to the __pyx_pf_..._8__eq__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* Uniform wrapper boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__eq__` (_nvml.pyx:3577-3582).
   Returns False for any operand that is not a _py_anon_pod0; otherwise two
   instances compare equal iff the struct memory behind their _ptr fields is
   byte-identical (memcmp over sizeof(_anon_pod0)).
   NOTE(review): the memcmp assumes both _ptr values are non-NULL; comparing
   an uninitialized instance would be undefined behavior. This originates in
   the hand-written .pyx source, not in the code generator. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":3579
 *     def __eq__(self, other):
 *         cdef _py_anon_pod0 other_
 *         if not isinstance(other, _py_anon_pod0):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  /* isinstance check against the _py_anon_pod0 extension type. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3580
 *         cdef _py_anon_pod0 other_
 *         if not isinstance(other, _py_anon_pod0):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod0)) == 0)
*/
    /* Wrong type: short-circuit to False (never NotImplemented). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3579
 *     def __eq__(self, other):
 *         cdef _py_anon_pod0 other_
 *         if not isinstance(other, _py_anon_pod0):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":3581
 *         if not isinstance(other, _py_anon_pod0):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod0)) == 0)
 * 
*/
  /* Re-checked downcast of `other` to the typed local `other_` (also
     accepts None per Cython's cast rule; the isinstance above already
     excluded None in practice). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0))))) __PYX_ERR(0, 3581, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":3582
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod0)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  /* Byte-wise struct comparison; result boxed as Python True/False. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(_anon_pod0))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3577
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod0 other_
 *         if not isinstance(other, _py_anon_pod0):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3584
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod0)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
*/

/* Python wrapper */
/* Generated Python-level wrapper for `_py_anon_pod0.__setitem__(key, val)`.
   Casts self to the extension-type struct and delegates to the
   __pyx_pf_..._10__setitem__ implementation; returns 0 on success, -1 on
   error, per the sq_ass_item/mp_ass_subscript slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* Uniform wrapper boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setitem__` (_nvml.pyx:3584-3594).
   Two behaviors depending on the arguments:
     * key == 0 and val is a numpy.ndarray: allocate a fresh _anon_pod0 with
       malloc, copy sizeof(_anon_pod0) bytes from the array's data pointer
       (val.ctypes.data), mark the struct as owned (freed later by the owner
       logic elsewhere in this file), and mirror the array's writeable flag
       into self._readonly.
     * anything else: fall back to setattr(self, key, val), i.e. key is
       treated as an attribute name.
   NOTE(review): the malloc overwrites self._ptr without freeing any buffer a
   previous call may have allocated — repeated assignment leaks. Originates
   in the .pyx source; fix belongs there, not in this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":3585
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if self._ptr == NULL:
 * */
  /* Short-circuit `and`: first compare key == 0... */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3585, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* ...then look up _numpy.ndarray at runtime and isinstance-check val. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 3585, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3586
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod0")
*/
    __pyx_v_self->_ptr = ((_anon_pod0 *)malloc((sizeof(_anon_pod0))));

    /* "cuda/bindings/_nvml.pyx":3587
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod0))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3588
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod0")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod0))
 *             self._owner = None
*/
      /* Build and raise MemoryError("Error allocating _py_anon_pod0")
         via the generic FastCall machinery (MemoryError is looked up as a
         module global, so it honors any shadowing). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3588, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod0};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3588, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 3588, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3587
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod0))
*/
    }

    /* "cuda/bindings/_nvml.pyx":3589
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod0))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* Fetch the ndarray's raw data address (val.ctypes.data is a Python
       int) and copy exactly one _anon_pod0 worth of bytes from it.
       Assumes the array holds at least sizeof(_anon_pod0) contiguous
       bytes — enforced by the caller, not here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3589, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3589, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3589, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(_anon_pod0))));

    /* "cuda/bindings/_nvml.pyx":3590
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod0))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    /* The data was copied, so no external object keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3591
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod0))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    /* Mark the malloc'd buffer as ours so it gets freed on teardown. */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3592
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Propagate the source array's writeable flag (inverted) into
       self._readonly, which the property setters check before writing. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3592, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3592, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 3592, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":3585
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":3594
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    /* Fallback: treat `key` as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 3594, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":3584
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod0)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3596
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def controller(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `controller` property getter (_nvml.pyx:3596-3599).
   Reads self._ptr[0].controller (an nvmlThermalController_t enum value),
   casts it to int, and returns it as a Python int. Assumes _ptr is non-NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3599
 *     def controller(self):
 *         """int: """
 *         return <int>(self._ptr[0].controller)             # <<<<<<<<<<<<<<
 * 
 *     @controller.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Enum field -> int -> Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).controller)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3599, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3596
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def controller(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.controller.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3601
 *         return <int>(self._ptr[0].controller)
 * 
 *     @controller.setter             # <<<<<<<<<<<<<<
 *     def controller(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated Python-level wrapper for the `controller` property setter.
   Casts self to the extension-type struct, passes the new value through,
   and delegates to the __pyx_pf_..._10controller_2__set__ implementation.
   Returns 0 on success, -1 on error (descriptor __set__ convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Uniform wrapper boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `controller` property setter (_nvml.pyx:3601-3605).
   Raises ValueError if the instance was created read-only (see __setitem__,
   which derives _readonly from the source ndarray's writeable flag);
   otherwise converts `val` to int and stores it into
   self._ptr[0].controller as an nvmlThermalController_t. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3603
 *     @controller.setter
 *     def controller(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].controller = <nvmlThermalController_t><int>val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3604
 *     def controller(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].controller = <nvmlThermalController_t><int>val
 * 
*/
    /* Build and raise ValueError with the interned message string. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod0_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3604, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3604, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3603
 *     @controller.setter
 *     def controller(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].controller = <nvmlThermalController_t><int>val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3605
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].controller = <nvmlThermalController_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Python int -> C int (raises OverflowError/TypeError on bad input),
     then cast into the NVML enum field. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3605, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).controller = ((nvmlThermalController_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":3601
 *         return <int>(self._ptr[0].controller)
 * 
 *     @controller.setter             # <<<<<<<<<<<<<<
 *     def controller(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.controller.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3607
 *         self._ptr[0].controller = <nvmlThermalController_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def default_min_temp(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated Python-level wrapper for the `default_min_temp` property getter.
   Casts self to the extension-type struct and delegates to the
   __pyx_pf_..._16default_min_temp___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Uniform wrapper boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `default_min_temp` property getter
   (_nvml.pyx:3607-3610). Returns self._ptr[0].defaultMinTemp (a C int) as a
   Python int. Assumes _ptr is non-NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3610
 *     def default_min_temp(self):
 *         """int: """
 *         return self._ptr[0].defaultMinTemp             # <<<<<<<<<<<<<<
 * 
 *     @default_min_temp.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Struct field -> Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).defaultMinTemp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3610, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3607
 *         self._ptr[0].controller = <nvmlThermalController_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def default_min_temp(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.default_min_temp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3612
 *         return self._ptr[0].defaultMinTemp
 * 
 *     @default_min_temp.setter             # <<<<<<<<<<<<<<
 *     def default_min_temp(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated Python-level wrapper for the `default_min_temp` property setter.
   Casts self to the extension-type struct and delegates to the
   __pyx_pf_..._16default_min_temp_2__set__ implementation.
   Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Uniform wrapper boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `default_min_temp` property setter
   (_nvml.pyx:3612-3616). Raises ValueError if the instance is read-only;
   otherwise converts `val` to a C int and stores it into
   self._ptr[0].defaultMinTemp. Mirrors the `controller` setter except the
   target field is a plain int (no enum cast). */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3614
 *     @default_min_temp.setter
 *     def default_min_temp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].defaultMinTemp = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3615
 *     def default_min_temp(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].defaultMinTemp = val
 * 
*/
    /* Build and raise ValueError with the interned message string. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod0_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3615, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3615, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3614
 *     @default_min_temp.setter
 *     def default_min_temp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].defaultMinTemp = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":3616
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].defaultMinTemp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Python int -> C int (raises on overflow / wrong type), then store. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3616, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).defaultMinTemp = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3612
 *         return self._ptr[0].defaultMinTemp
 * 
 *     @default_min_temp.setter             # <<<<<<<<<<<<<<
 *     def default_min_temp(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.default_min_temp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3618
 *         self._ptr[0].defaultMinTemp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def default_max_temp(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated Python-level wrapper for the `default_max_temp` property getter.
   Casts self to the extension-type struct and delegates to the
   __pyx_pf_..._16default_max_temp___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Uniform wrapper boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.default_max_temp.__get__` (pyx line 3621):
   boxes the C `int` field `defaultMaxTemp` of the wrapped struct into a
   Python int. Returns a new reference, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3621
 *     def default_max_temp(self):
 *         """int: """
 *         return self._ptr[0].defaultMaxTemp             # <<<<<<<<<<<<<<
 * 
 *     @default_max_temp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* int -> PyLong conversion; __PYX_ERR jumps to the error label on NULL. */
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).defaultMaxTemp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3618
 *         self._ptr[0].defaultMinTemp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def default_max_temp(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.default_max_temp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3623
 *         return self._ptr[0].defaultMaxTemp
 * 
 *     @default_max_temp.setter             # <<<<<<<<<<<<<<
 *     def default_max_temp(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `_py_anon_pod0.default_max_temp` property
   setter: downcasts `self` and forwards `val` to the __pyx_pf_
   implementation. Returns 0 on success, -1 on error (descriptor protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.default_max_temp.__set__` (pyx lines
   3624-3627): raises ValueError when the instance is read-only, otherwise
   converts `val` to a C int and stores it in `_ptr[0].defaultMaxTemp`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3625
 *     @default_max_temp.setter
 *     def default_max_temp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].defaultMaxTemp = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3626
 *     def default_max_temp(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].defaultMaxTemp = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path; the offset
       trick (__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET) reserves slot 0 for a
       potential bound "self" that is NULL here. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod0_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3626, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3626, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3625
 *     @default_max_temp.setter
 *     def default_max_temp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].defaultMaxTemp = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3627
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].defaultMaxTemp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* PyLong -> C int; -1 is ambiguous, so PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3627, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).defaultMaxTemp = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3623
 *         return self._ptr[0].defaultMaxTemp
 * 
 *     @default_max_temp.setter             # <<<<<<<<<<<<<<
 *     def default_max_temp(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.default_max_temp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3629
 *         self._ptr[0].defaultMaxTemp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current_temp(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `_py_anon_pod0.current_temp` property
   getter: downcasts `self` and delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.current_temp.__get__` (pyx line 3632):
   boxes the C `int` field `currentTemp` of the wrapped struct into a
   Python int. Returns a new reference, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3632
 *     def current_temp(self):
 *         """int: """
 *         return self._ptr[0].currentTemp             # <<<<<<<<<<<<<<
 * 
 *     @current_temp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* int -> PyLong conversion; __PYX_ERR jumps to the error label on NULL. */
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).currentTemp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3629
 *         self._ptr[0].defaultMaxTemp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current_temp(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.current_temp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3634
 *         return self._ptr[0].currentTemp
 * 
 *     @current_temp.setter             # <<<<<<<<<<<<<<
 *     def current_temp(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `_py_anon_pod0.current_temp` property
   setter: downcasts `self` and forwards `val` to the __pyx_pf_
   implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.current_temp.__set__` (pyx lines
   3635-3638): raises ValueError when the instance is read-only, otherwise
   converts `val` to a C int and stores it in `_ptr[0].currentTemp`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3636
 *     @current_temp.setter
 *     def current_temp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].currentTemp = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3637
 *     def current_temp(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].currentTemp = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod0_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3637, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3637, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3636
 *     @current_temp.setter
 *     def current_temp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].currentTemp = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3638
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].currentTemp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* PyLong -> C int; -1 is ambiguous, so PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3638, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).currentTemp = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3634
 *         return self._ptr[0].currentTemp
 * 
 *     @current_temp.setter             # <<<<<<<<<<<<<<
 *     def current_temp(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.current_temp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3640
 *         self._ptr[0].currentTemp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def target(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `_py_anon_pod0.target` property getter:
   downcasts `self` and delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.target.__get__` (pyx line 3643):
   casts the enum field `target` (nvmlThermalTarget_t) to int and boxes it
   into a Python int. Returns a new reference, or NULL on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3643
 *     def target(self):
 *         """int: """
 *         return <int>(self._ptr[0].target)             # <<<<<<<<<<<<<<
 * 
 *     @target.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* enum -> int cast mirrors the explicit <int> cast in the pyx source. */
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).target)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3640
 *         self._ptr[0].currentTemp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def target(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.target.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3645
 *         return <int>(self._ptr[0].target)
 * 
 *     @target.setter             # <<<<<<<<<<<<<<
 *     def target(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `_py_anon_pod0.target` property setter:
   downcasts `self` and forwards `val` to the __pyx_pf_ implementation.
   Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.target.__set__` (pyx lines 3646-3649):
   raises ValueError when the instance is read-only, otherwise converts
   `val` to a C int and stores it, cast to nvmlThermalTarget_t, in
   `_ptr[0].target`. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3647
 *     @target.setter
 *     def target(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3648
 *     def target(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod0_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3648, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3648, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3647
 *     @target.setter
 *     def target(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3649
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod0 instance is read-only")
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* PyLong -> C int, then cast through to the NVML enum type. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3649, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).target = ((nvmlThermalTarget_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":3645
 *         return <int>(self._ptr[0].target)
 * 
 *     @target.setter             # <<<<<<<<<<<<<<
 *     def target(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.target.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3651
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod0 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method `_py_anon_pod0.from_data`:
   accepts exactly one positional-or-keyword argument `data` and forwards it
   to the __pyx_pf_ implementation. Returns NULL with an exception set on
   bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_12from_data, "_py_anon_pod0.from_data(data)\n\nCreate an _py_anon_pod0 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod0_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(); `unlikely(x)` expands
       to `__builtin_expect(!!(x), 0)` which yields only 0 or 1, so the
       original `unlikely(__pyx_kwds_len) < 0` was always false and a
       negative (error) count from __Pyx_NumKwargs_FASTCALL was ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3651, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3651, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 3651, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 3651, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3651, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 3651, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod0.from_data` (pyx line 3658): looks up the
   module-global `_py_anon_pod0_dtype` and delegates to the shared
   __from_data helper with the dtype name, dtype object and the extension
   type. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":3658
 *             data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod0_dtype` holding the data.
 *         """
 *         return __from_data(data, "_py_anon_pod0_dtype", _py_anon_pod0_dtype, _py_anon_pod0)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from the module dict at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_py_anon_pod0_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_py_anon_pod0_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3651
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod0 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3660
 *         return __from_data(data, "_py_anon_pod0_dtype", _py_anon_pod0_dtype, _py_anon_pod0)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod0 instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_14from_ptr, "_py_anon_pod0.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an _py_anon_pod0 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 3660, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3660, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3660, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3660, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 3660, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":3661
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an _py_anon_pod0 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 3660, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3660, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3660, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3660, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3661, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3661, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 3660, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":3660
 *         return __from_data(data, "_py_anon_pod0_dtype", _py_anon_pod0_dtype, _py_anon_pod0)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod0 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the static method `_py_anon_pod0.from_ptr(ptr, readonly=False, owner=None)`.
 *
 * Wraps a raw `_anon_pod0 *` (passed as intptr_t) in a new Python `_py_anon_pod0` object.
 * Two ownership modes, selected by `owner`:
 *   - owner is None: allocate a fresh struct with malloc(), memcpy() the pointed-to data
 *     into it, and mark the object as owning (`_owned = 1`) so dealloc will free it.
 *   - owner given:   alias the caller's pointer directly, keep a reference to `owner`
 *     to hold the backing memory alive, and mark `_owned = 0`.
 * Raises ValueError for a NULL (0) pointer and MemoryError on allocation failure.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3669
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)
 */
  /* Reject a null pointer up front; wrapping address 0 would be meaningless. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3670
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)
 *         if owner is None:
 */
    /* Vectorcall ValueError(type object called directly) with the message string. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3670, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3670, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3669
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)
 */
  }

  /* "cuda/bindings/_nvml.pyx":3671
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 */
  /* tp_new with an empty args tuple: bypasses __init__, so _ptr/_owner are set below. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod0(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3671, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3672
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if obj._ptr == NULL:
 */
  /* Owning path: no owner supplied, so copy the data into freshly malloc'd storage. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3673
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)
 *         if owner is None:
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 */
    __pyx_v_obj->_ptr = ((_anon_pod0 *)malloc((sizeof(_anon_pod0))));

    /* "cuda/bindings/_nvml.pyx":3674
 *         if owner is None:
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod0))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3675
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod0")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod0))
 *             obj._owner = None
 */
      /* MemoryError is resolved by name (module global / builtin lookup), then called. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3675, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod0};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3675, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 3675, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3674
 *         if owner is None:
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod0))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3676
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod0))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(_anon_pod0))));

    /* "cuda/bindings/_nvml.pyx":3677
 *                 raise MemoryError("Error allocating _py_anon_pod0")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod0))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3678
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod0))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <_anon_pod0 *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3672
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod0 obj = _py_anon_pod0.__new__(_py_anon_pod0)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod0 *>malloc(sizeof(_anon_pod0))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":3680
 *             obj._owned = True
 *         else:
 *             obj._ptr = <_anon_pod0 *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Non-owning path: alias the caller's pointer and keep `owner` alive to back it. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((_anon_pod0 *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3681
 *         else:
 *             obj._ptr = <_anon_pod0 *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":3682
 *             obj._ptr = <_anon_pod0 *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":3683
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":3684
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3660
 *         return __from_data(data, "_py_anon_pod0_dtype", _py_anon_pod0_dtype, _py_anon_pod0)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod0 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * Python-level wrapper for `_py_anon_pod0.__reduce_cython__(self)` (pickle support stub).
 * Validates that no positional or keyword arguments were passed, then delegates to
 * the implementation function, which unconditionally raises TypeError.
 * Returns NULL with an exception set on bad arguments or from the delegate.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_16__reduce_cython__, "_py_anon_pod0.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the tuple-based calling convention, derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject anything else. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `_py_anon_pod0.__reduce_cython__`: pickling is unsupported because
 * the object wraps a raw C pointer (`self._ptr`). Always raises TypeError and
 * returns NULL.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Raise TypeError with the interned message string; always takes the error path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * Python-level wrapper for `_py_anon_pod0.__setstate_cython__(self, __pyx_state)`
 * (pickle support stub). Unpacks exactly one positional-or-keyword argument
 * (`__pyx_state`) and delegates to the implementation, which raises TypeError.
 * Returns NULL with an exception set on bad arguments or from the delegate.
 *
 * FIX: the keyword-count sanity check used to read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * `unlikely(x)` evaluates to __builtin_expect(!!(x), 0), i.e. 0 or 1, so the
 * `< 0` comparison was always false and a negative count from
 * __Pyx_NumKwargs_FASTCALL (error indicator) was silently ignored. The
 * parenthesization is corrected to match the equivalent check in the other
 * generated wrappers: `if (unlikely(__pyx_kwds_len < 0))`.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_18__setstate_cython__, "_py_anon_pod0.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the tuple-based calling convention, derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from the keyword-counting helper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Release any argument references collected before the error. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `_py_anon_pod0.__setstate_cython__`: unpickling is unsupported
 * because the object wraps a raw C pointer (`self._ptr`). The state argument is
 * ignored; always raises TypeError and returns NULL.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod0_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message string; always takes the error path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod0.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3687
 * 
 * 
 * cdef _get_cooler_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlCoolerInfo_v1_t pod = nvmlCoolerInfo_v1_t()
 *     return _numpy.dtype({
*/

/*
 * Build a NumPy structured dtype describing the memory layout of
 * nvmlCoolerInfo_v1_t: field names ['version', 'ind_ex', 'signal_type',
 * 'target'], formats [uint32, uint32, int32, int32], byte offsets computed
 * from a local stack instance, and itemsize = sizeof(nvmlCoolerInfo_v1_t).
 * Returns a new reference to the numpy.dtype, or NULL with an exception set.
 *
 * FIX: the original code declared `nvmlCoolerInfo_v1_t __pyx_t_1;` and then
 * executed `__pyx_v_pod = __pyx_t_1;` without ever assigning __pyx_t_1 —
 * copying an indeterminate value (undefined behavior). The pyx source
 * `cdef nvmlCoolerInfo_v1_t pod = nvmlCoolerInfo_v1_t()` requires
 * value-initialization, so `pod` is now value-initialized directly and the
 * dead temporary is removed. (Only field addresses of `pod` are used below,
 * but the uninitialized read was UB regardless.)
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_cooler_info_v1_dtype_offsets(void) {
  nvmlCoolerInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_cooler_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":3688
 * 
 * cdef _get_cooler_info_v1_dtype_offsets():
 *     cdef nvmlCoolerInfo_v1_t pod = nvmlCoolerInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'ind_ex', 'signal_type', 'target'],
 */
  /* Value-initialize (zero) the POD, per the pyx default-construction. */
  __pyx_v_pod = nvmlCoolerInfo_v1_t();

  /* "cuda/bindings/_nvml.pyx":3689
 * cdef _get_cooler_info_v1_dtype_offsets():
 *     cdef nvmlCoolerInfo_v1_t pod = nvmlCoolerInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'ind_ex', 'signal_type', 'target'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int32],
 */
  /* Look up numpy.dtype once; the dict argument is assembled below. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3690
 *     cdef nvmlCoolerInfo_v1_t pod = nvmlCoolerInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'ind_ex', 'signal_type', 'target'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int32],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 3690, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ind_ex);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ind_ex);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_ind_ex) != (0)) __PYX_ERR(0, 3690, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_signal_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_signal_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_signal_type) != (0)) __PYX_ERR(0, 3690, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_target);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_target);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_target) != (0)) __PYX_ERR(0, 3690, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 3690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3691
 *     return _numpy.dtype({
 *         'names': ['version', 'ind_ex', 'signal_type', 'target'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 3691, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3691, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 3691, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 3691, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 3690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Byte offsets of each field, computed as (&pod.field - &pod). */
  /* "cuda/bindings/_nvml.pyx":3693
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.index)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.signalType)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3693, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":3694
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.index)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.signalType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.index)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3694, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":3695
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.index)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.signalType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.signalType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":3696
 *             (<intptr_t>&(pod.index)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.signalType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlCoolerInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.target)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3696, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":3692
 *         'names': ['version', 'ind_ex', 'signal_type', 'target'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.index)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 3692, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 3692, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 3692, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 3692, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 3690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":3698
 *             (<intptr_t>&(pod.target)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlCoolerInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlCoolerInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 3690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall numpy.dtype(dict), unpacking a bound method if necessary. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3689, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3687
 * 
 * 
 * cdef _get_cooler_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlCoolerInfo_v1_t pod = nvmlCoolerInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_cooler_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3715
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlCoolerInfo_v1_t *>calloc(1, sizeof(nvmlCoolerInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * Python-level wrapper for `CoolerInfo_v1.__init__(self)` (tp_init slot, tuple-based
 * calling convention). Validates that no positional or keyword arguments were passed,
 * then delegates to the implementation. Returns 0 on success, -1 with an exception
 * set on failure.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject anything else. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__init__ (pyx line 3715).
 * Zero-allocates one nvmlCoolerInfo_v1_t via calloc and records ownership
 * state on the instance: _owner = None, _owned = True, _readonly = False.
 * Raises MemoryError if calloc returns NULL. Returns 0 on success, -1 on
 * error with a Python exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":3716
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlCoolerInfo_v1_t *>calloc(1, sizeof(nvmlCoolerInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating CoolerInfo_v1")
 */
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlCoolerInfo_v1_t *)calloc(1, (sizeof(nvmlCoolerInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":3717
 *     def __init__(self):
 *         self._ptr = <nvmlCoolerInfo_v1_t *>calloc(1, sizeof(nvmlCoolerInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating CoolerInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3718
 *         self._ptr = <nvmlCoolerInfo_v1_t *>calloc(1, sizeof(nvmlCoolerInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating CoolerInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in module globals and call it via the fastcall
     * protocol; the CYTHON_UNPACK_METHODS branch unpacks a bound method
     * into (self, function) so the self slot can be reused as the first
     * vectorcall argument. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3718, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_CoolerInfo_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3718, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3718, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3717
 *     def __init__(self):
 *         self._ptr = <nvmlCoolerInfo_v1_t *>calloc(1, sizeof(nvmlCoolerInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating CoolerInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":3719
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating CoolerInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref the new value, drop the old
   * reference held in _owner, then assign. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":3720
 *             raise MemoryError("Error allocating CoolerInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True: __dealloc__ will free _ptr. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":3721
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":3715
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlCoolerInfo_v1_t *>calloc(1, sizeof(nvmlCoolerInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3723
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlCoolerInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time hook: casts self to the concrete struct type and
 * delegates to the __dealloc__ implementation. No return value; may not
 * raise.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
 * this only compiles because __Pyx_KwValues_VARARGS is presumably a macro
 * that discards its arguments in this build configuration — confirm
 * against the utility-code section of this file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of CoolerInfo_v1.__dealloc__ (pyx lines 3723-3728).
 * Releases the nvmlCoolerInfo_v1_t buffer only when this instance owns it
 * (_owned is true) and the pointer is non-NULL. The pointer is detached
 * (set to NULL) before free() so no stale address remains reachable on the
 * instance. Borrowed pointers are left for their owner to release. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {

  /* "cuda/bindings/_nvml.pyx":3725-3728
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)
 */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlCoolerInfo_v1_t *owned_block = __pyx_v_self->_ptr;
    /* Clear the instance field first, then release the allocation. */
    __pyx_v_self->_ptr = NULL;
    free(owned_block);
  }
}

/* "cuda/bindings/_nvml.pyx":3730
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.CoolerInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot entry for CoolerInfo_v1.__repr__: casts self and delegates
 * to the implementation. Returns a new reference to a str, or NULL on
 * error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__repr__ (pyx line 3730):
 *     f"<{__name__}.CoolerInfo_v1 object at {hex(id(self))}>"
 * Formats the module's __name__ and hex(id(self)) and joins them with the
 * literal fragments via __Pyx_PyUnicode_Join, precomputing the result
 * length and the maximum code point for an exact single allocation.
 * Returns a new str reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":3731
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.CoolerInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* str(__name__): format the module global with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) -> str */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Interleave the literal fragments ("<", ".CoolerInfo_v1 object at ",
   * ">") with the two formatted values, then join all five pieces. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_CoolerInfo_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3730
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.CoolerInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3733
 *         return f"<{__name__}.CoolerInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter slot for the `ptr` property: casts self and delegates. Returns a
 * new reference to a Python int, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (pyx line 3736): returns the
 * address held in self._ptr as a Python int (may be 0 if _ptr is NULL).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3736
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t is converted via Py_ssize_t (same width on this platform per
   * the generator's choice of PyLong_FromSsize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3733
 *         return f"<{__name__}.CoolerInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3738
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for CoolerInfo_v1._get_ptr (pyx line 3738): hands back
 * the struct pointer reinterpreted as an integer address, with no Python
 * object traffic and no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13CoolerInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":3739
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)
 */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":3741
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot entry for CoolerInfo_v1.__int__: casts self and delegates.
 * Returns a new reference to a Python int, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__int__ (pyx line 3741): same contract
 * as the `ptr` property — the held pointer address as a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":3742
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3742, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3741
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3744
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef CoolerInfo_v1 other_
 *         if not isinstance(other, CoolerInfo_v1):
*/

/* Python wrapper */
/* Entry point for CoolerInfo_v1.__eq__: casts self and forwards both
 * operands. Returns a new reference (a bool here), or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__eq__ (pyx line 3744).
 * Returns False for non-CoolerInfo_v1 operands; otherwise compares the two
 * underlying nvmlCoolerInfo_v1_t structs bytewise with memcmp.
 * NOTE(review): both _ptr values are dereferenced by memcmp without a NULL
 * check — presumably both instances always hold a live allocation after
 * __init__; confirm no code path compares a deallocated instance.
 * Returns a new reference to a bool, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":3746
 *     def __eq__(self, other):
 *         cdef CoolerInfo_v1 other_
 *         if not isinstance(other, CoolerInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3747
 *         cdef CoolerInfo_v1 other_
 *         if not isinstance(other, CoolerInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlCoolerInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3746
 *     def __eq__(self, other):
 *         cdef CoolerInfo_v1 other_
 *         if not isinstance(other, CoolerInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":3748
 *         if not isinstance(other, CoolerInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlCoolerInfo_v1_t)) == 0)
 * 
 */
  /* Typed assignment: the cdef declaration forces a runtime TypeTest even
   * though the isinstance branch above already filtered foreign types. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1))))) __PYX_ERR(0, 3748, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":3749
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlCoolerInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlCoolerInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3744
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef CoolerInfo_v1 other_
 *         if not isinstance(other, CoolerInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3751
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlCoolerInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript entry for CoolerInfo_v1.__setitem__: casts self and
 * forwards key/value. Returns 0 on success, -1 on error with an exception
 * set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__setitem__ (pyx line 3751).
 * obj[0] = ndarray copies sizeof(nvmlCoolerInfo_v1_t) bytes from the
 * array's data pointer into a freshly malloc'd struct and marks the
 * instance as owning it; any other key falls through to setattr(self,
 * key, val).
 * FIX: the original generated code overwrote self->_ptr with the new
 * malloc result without releasing the buffer already owned by this
 * instance (allocated in __init__), leaking one nvmlCoolerInfo_v1_t per
 * ndarray assignment. The owned buffer is now freed before reallocating.
 * Returns 0 on success, -1 on error with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":3752
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only look up _numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3752, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3752, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3752, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 3752, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* FIX: release the buffer this instance already owns before allocating
     * a replacement; otherwise the __init__-allocated struct leaks on
     * every ndarray assignment. Clearing _ptr first keeps __dealloc__ safe
     * should malloc fail below. */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }

    /* "cuda/bindings/_nvml.pyx":3753
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 */
    __pyx_v_self->_ptr = ((nvmlCoolerInfo_v1_t *)malloc((sizeof(nvmlCoolerInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3754
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlCoolerInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3755
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating CoolerInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlCoolerInfo_v1_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3755, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_CoolerInfo_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3755, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 3755, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3754
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlCoolerInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3756
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlCoolerInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy the struct bytes from the ndarray's buffer. NOTE(review): the
     * array's size/dtype is not validated here — presumably callers pass
     * an array produced from the matching dtype; an undersized array would
     * read out of bounds. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3756, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3756, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3756, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlCoolerInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3757
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlCoolerInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3758
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlCoolerInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3759
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability onto the wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 3759, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":3752
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":3761
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 3761, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":3751
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlCoolerInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3763
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.version.__get__: casts the PyObject
 * receiver to the extension-type struct and delegates to the typed pf
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * the __Pyx_KwValues_VARARGS macro must discard its arguments for this to
   * compile. Macro-dependent generated code — do not hand-edit. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.version.__get__: boxes the wrapped struct's
 * unsigned-int 'version' field into a Python int. Returns a new reference, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3766
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): _ptr is dereferenced without a NULL check; presumably
   * guaranteed non-NULL by the object's constructor — confirm. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3763
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3768
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.version.__set__: casts the receiver
 * and delegates to the typed pf implementation. Returns 0 on success, -1 on
 * error (exception set). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs are consumed (and discarded) by
   * the __Pyx_KwValues_VARARGS macro, not real identifiers here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.version.__set__: raises ValueError when the
 * instance is read-only, otherwise converts val to unsigned int and stores it
 * in the wrapped struct's 'version' field. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3770
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3771
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise the ValueError via the vectorcall fast path; __pyx_t_3
     * offsets the args array so the (NULL) "self" slot is skipped. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_CoolerInfo_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3771, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3771, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3770
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3772
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion failure is signalled by the (unsigned int)-1 sentinel plus a
   * pending exception; the PyErr_Occurred() check disambiguates a real -1. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3772, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3768
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3774
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ind_ex(self):
 *         """int: the cooler index"""
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.ind_ex.__get__: casts the receiver
 * and delegates to the typed pf implementation. Returns a new reference, or
 * NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs only exist inside the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.ind_ex.__get__: boxes the wrapped struct's
 * unsigned-int 'index' field (exposed in Python as 'ind_ex') into a Python
 * int. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3777
 *     def ind_ex(self):
 *         """int: the cooler index"""
 *         return self._ptr[0].index             # <<<<<<<<<<<<<<
 * 
 *     @ind_ex.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): _ptr dereferenced without NULL check — presumably set by
   * the constructor; confirm. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).index); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3777, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3774
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ind_ex(self):
 *         """int: the cooler index"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.ind_ex.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3779
 *         return self._ptr[0].index
 * 
 *     @ind_ex.setter             # <<<<<<<<<<<<<<
 *     def ind_ex(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.ind_ex.__set__: casts the receiver
 * and delegates to the typed pf implementation. Returns 0 on success, -1 on
 * error (exception set). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs only exist inside the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.ind_ex.__set__: raises ValueError when the
 * instance is read-only, otherwise converts val to unsigned int and stores it
 * in the wrapped struct's 'index' field. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3781
 *     @ind_ex.setter
 *     def ind_ex(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].index = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3782
 *     def ind_ex(self, val):
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].index = val
 * 
 */
    /* Build and raise the ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_CoolerInfo_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3782, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3782, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3781
 *     @ind_ex.setter
 *     def ind_ex(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].index = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3783
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].index = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3783, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).index = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3779
 *         return self._ptr[0].index
 * 
 *     @ind_ex.setter             # <<<<<<<<<<<<<<
 *     def ind_ex(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.ind_ex.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3785
 *         self._ptr[0].index = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def signal_type(self):
 *         """int: OUT: the cooler's control signal characteristics."""
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.signal_type.__get__: casts the
 * receiver and delegates to the typed pf implementation. Returns a new
 * reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs only exist inside the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.signal_type.__get__: converts the enum
 * field 'signalType' (nvmlCoolerControl_t) to int and boxes it as a Python
 * int. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3788
 *     def signal_type(self):
 *         """int: OUT: the cooler's control signal characteristics."""
 *         return <int>(self._ptr[0].signalType)             # <<<<<<<<<<<<<<
 * 
 *     @signal_type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Enum value is cast to int before boxing. */
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).signalType)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3788, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3785
 *         self._ptr[0].index = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def signal_type(self):
 *         """int: OUT: the cooler's control signal characteristics."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.signal_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3790
 *         return <int>(self._ptr[0].signalType)
 * 
 *     @signal_type.setter             # <<<<<<<<<<<<<<
 *     def signal_type(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.signal_type.__set__: casts the
 * receiver and delegates to the typed pf implementation. Returns 0 on
 * success, -1 on error (exception set). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs only exist inside the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.signal_type.__set__: raises ValueError when
 * the instance is read-only, otherwise converts val to int and stores it in
 * the enum field 'signalType' (cast to nvmlCoolerControl_t).
 * NOTE(review): the int value is not range-checked against the enum's valid
 * members before the cast — any int is accepted. Returns 0 or -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3792
 *     @signal_type.setter
 *     def signal_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].signalType = <nvmlCoolerControl_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3793
 *     def signal_type(self, val):
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].signalType = <nvmlCoolerControl_t><int>val
 * 
 */
    /* Build and raise the ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_CoolerInfo_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3793, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3793, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3792
 *     @signal_type.setter
 *     def signal_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].signalType = <nvmlCoolerControl_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3794
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].signalType = <nvmlCoolerControl_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (int)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3794, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).signalType = ((nvmlCoolerControl_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":3790
 *         return <int>(self._ptr[0].signalType)
 * 
 *     @signal_type.setter             # <<<<<<<<<<<<<<
 *     def signal_type(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.signal_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3796
 *         self._ptr[0].signalType = <nvmlCoolerControl_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def target(self):
 *         """int: OUT: the target that cooler cools."""
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.target.__get__: casts the receiver
 * and delegates to the typed pf implementation. Returns a new reference, or
 * NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs only exist inside the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.target.__get__: converts the enum field
 * 'target' (nvmlCoolerTarget_t) to int and boxes it as a Python int. Returns
 * a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3799
 *     def target(self):
 *         """int: OUT: the target that cooler cools."""
 *         return <int>(self._ptr[0].target)             # <<<<<<<<<<<<<<
 * 
 *     @target.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Enum value is cast to int before boxing. */
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).target)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3796
 *         self._ptr[0].signalType = <nvmlCoolerControl_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def target(self):
 *         """int: OUT: the target that cooler cools."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.target.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3801
 *         return <int>(self._ptr[0].target)
 * 
 *     @target.setter             # <<<<<<<<<<<<<<
 *     def target(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for CoolerInfo_v1.target.__set__: casts the receiver
 * and delegates to the typed pf implementation. Returns 0 on success, -1 on
 * error (exception set). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Macro-dependent: __pyx_args/__pyx_nargs only exist inside the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.target.__set__: raises ValueError when the
 * instance is read-only, otherwise converts val to int and stores it in the
 * enum field 'target' (cast to nvmlCoolerTarget_t).
 * NOTE(review): no range check against the enum's valid members. Returns 0
 * on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3803
 *     @target.setter
 *     def target(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3804
 *     def target(self, val):
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val
 * 
 */
    /* Build and raise the ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_CoolerInfo_v1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3804, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3804, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3803
 *     @target.setter
 *     def target(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3805
 *         if self._readonly:
 *             raise ValueError("This CoolerInfo_v1 instance is read-only")
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (int)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3805, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).target = ((nvmlCoolerTarget_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":3801
 *         return <int>(self._ptr[0].target)
 * 
 *     @target.setter             # <<<<<<<<<<<<<<
 *     def target(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.target.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3807
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an CoolerInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod CoolerInfo_v1.from_data(data):
 * accepts exactly one argument, positionally or by keyword 'data', then
 * forwards it to the pf implementation. Returns a new reference, or NULL with
 * an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_12from_data, "CoolerInfo_v1.from_data(data)\n\nCreate an CoolerInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `cooler_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`. Since unlikely(x) expands
     * to __builtin_expect(!!(x), 0) and therefore yields 0 or 1, comparing
     * that result with `< 0` is always false, so a negative (error) count
     * from __Pyx_NumKwargs_FASTCALL was silently ignored. The comparison now
     * sits inside the hint macro, restoring the intended error check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3807, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args first, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3807, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 3807, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 3807, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3807, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 3807, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Drop any argument references collected before the failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.from_data: looks up the module-level
 * 'cooler_info_v1_dtype' and delegates to the shared __from_data helper with
 * the CoolerInfo_v1 type object. Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":3814
 *             data (_numpy.ndarray): a single-element array of dtype `cooler_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "cooler_info_v1_dtype", cooler_info_v1_dtype, CoolerInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from the module globals at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cooler_info_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3814, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_cooler_info_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3814, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3807
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an CoolerInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3816
 *         return __from_data(data, "cooler_info_v1_dtype", cooler_info_v1_dtype, CoolerInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an CoolerInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_14from_ptr, "CoolerInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an CoolerInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 3816, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 3816, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":3817
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an CoolerInfo_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 3816, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3816, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3817, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3817, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 3816, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":3816
 *         return __from_data(data, "cooler_info_v1_dtype", cooler_info_v1_dtype, CoolerInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an CoolerInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.from_ptr(ptr, readonly, owner).
 *
 * Rejects a NULL (0) ptr with ValueError, then allocates a new
 * CoolerInfo_v1 instance.  Ownership semantics (as the code below shows):
 *   - owner is None: malloc a fresh nvmlCoolerInfo_v1_t, memcpy the data
 *     from `ptr` (deep copy), set _owner=None and _owned=True so the
 *     instance frees the buffer itself.
 *   - owner given: alias `ptr` directly, keep a reference to `owner` to
 *     keep the backing memory alive, set _owned=False.
 * Finally stores the `readonly` flag and returns the new instance, or
 * NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3825
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3826
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3826, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3826, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3825
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":3827
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 */
  /* tp_new is called directly, bypassing __init__ (which would calloc). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_CoolerInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3827, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3828
 *             raise ValueError("ptr must not be null (0)")
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3829
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 */
    __pyx_v_obj->_ptr = ((nvmlCoolerInfo_v1_t *)malloc((sizeof(nvmlCoolerInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3830
 *         if owner is None:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlCoolerInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3831
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating CoolerInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlCoolerInfo_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (the pyx references the
       * name), hence __Pyx_GetModuleGlobalName rather than PyExc_MemoryError. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3831, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_CoolerInfo_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3831, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 3831, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3830
 *         if owner is None:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlCoolerInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":3832
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlCoolerInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* deep copy: caller's buffer is not referenced after this point */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlCoolerInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3833
 *                 raise MemoryError("Error allocating CoolerInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlCoolerInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3834
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlCoolerInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3828
 *             raise ValueError("ptr must not be null (0)")
 *         cdef CoolerInfo_v1 obj = CoolerInfo_v1.__new__(CoolerInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>malloc(sizeof(nvmlCoolerInfo_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":3836
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* aliasing path: `owner` must keep the pointed-to memory alive */
    __pyx_v_obj->_ptr = ((nvmlCoolerInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3837
 *         else:
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":3838
 *             obj._ptr = <nvmlCoolerInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":3839
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":3840
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3816
 *         return __from_data(data, "cooler_info_v1_dtype", cooler_info_v1_dtype, CoolerInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an CoolerInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_16__reduce_cython__, "CoolerInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__reduce_cython__: always raises
 * TypeError because the raw C pointer held in self._ptr cannot be
 * serialized; this blocks pickling of the wrapper type. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_18__setstate_cython__, "CoolerInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of CoolerInfo_v1.__setstate_cython__: always raises
 * TypeError (the __pyx_state argument is ignored) because the raw C
 * pointer in self._ptr cannot be restored from a pickle. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13CoolerInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.CoolerInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3843
 * 
 * 
 * cdef _get_margin_temperature_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlMarginTemperature_v1_t pod = nvmlMarginTemperature_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_margin_temperature_v1_dtype_offsets(void) {
  nvmlMarginTemperature_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlMarginTemperature_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_margin_temperature_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":3844
 * 
 * cdef _get_margin_temperature_v1_dtype_offsets():
 *     cdef nvmlMarginTemperature_v1_t pod = nvmlMarginTemperature_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'margin_temperature'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":3845
 * cdef _get_margin_temperature_v1_dtype_offsets():
 *     cdef nvmlMarginTemperature_v1_t pod = nvmlMarginTemperature_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'margin_temperature'],
 *         'formats': [_numpy.uint32, _numpy.int32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3846
 *     cdef nvmlMarginTemperature_v1_t pod = nvmlMarginTemperature_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'margin_temperature'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 3846, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_margin_temperature);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_margin_temperature);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_margin_temperature) != (0)) __PYX_ERR(0, 3846, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 3846, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3847
 *     return _numpy.dtype({
 *         'names': ['version', 'margin_temperature'],
 *         'formats': [_numpy.uint32, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 3847, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3847, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 3846, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3849
 *         'formats': [_numpy.uint32, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.marginTemperature)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":3850
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.marginTemperature)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlMarginTemperature_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.marginTemperature)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3850, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":3848
 *         'names': ['version', 'margin_temperature'],
 *         'formats': [_numpy.uint32, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.marginTemperature)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3848, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 3848, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3848, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 3846, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":3852
 *             (<intptr_t>&(pod.marginTemperature)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlMarginTemperature_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlMarginTemperature_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3852, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 3846, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3845, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3843
 * 
 * 
 * cdef _get_margin_temperature_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlMarginTemperature_v1_t pod = nvmlMarginTemperature_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_margin_temperature_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3869
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlMarginTemperature_v1_t *>calloc(1, sizeof(nvmlMarginTemperature_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level entry point for MarginTemperature_v1.__init__.
 * Cython-generated argument shim: __init__ takes no arguments beyond self, so
 * this wrapper only rejects any positional or keyword arguments before
 * delegating to the implementation function.
 * NOTE(review): generated by Cython 3.2.2 from cuda/bindings/_nvml.pyx —
 * prefer regenerating from the .pyx over hand-editing this logic. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; PyTuple_GET_SIZE is only used when the build can
   * assume the unstable tuple layout (CYTHON_ASSUME_SAFE_SIZE). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) accepts no positional arguments ... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ... and no keyword arguments either. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of MarginTemperature_v1.__init__ (_nvml.pyx:3869-3875).
 * Allocates a zero-initialized nvmlMarginTemperature_v1_t on the C heap and
 * marks this instance as its owner: _owner = None, _owned = True,
 * _readonly = False.  Raises MemoryError when calloc fails.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":3870
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlMarginTemperature_v1_t *>calloc(1, sizeof(nvmlMarginTemperature_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating MarginTemperature_v1")
 */
  /* calloc gives a zeroed struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlMarginTemperature_v1_t *)calloc(1, (sizeof(nvmlMarginTemperature_v1_t))));

  /* "cuda/bindings/_nvml.pyx":3871
 *     def __init__(self):
 *         self._ptr = <nvmlMarginTemperature_v1_t *>calloc(1, sizeof(nvmlMarginTemperature_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating MarginTemperature_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3872
 *         self._ptr = <nvmlMarginTemperature_v1_t *>calloc(1, sizeof(nvmlMarginTemperature_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating MarginTemperature_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating MarginTemperature_v1").
     * The CYTHON_UNPACK_METHODS branch is the standard Cython bound-method
     * unpacking fast path for the vectorcall below. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3872, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_MarginTemperatu};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3872, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3872, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3871
 *     def __init__(self):
 *         self._ptr = <nvmlMarginTemperature_v1_t *>calloc(1, sizeof(nvmlMarginTemperature_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating MarginTemperature_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":3873
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating MarginTemperature_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace any previous _owner reference with None (refcount-safe swap). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":3874
 *             raise MemoryError("Error allocating MarginTemperature_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":3875
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":3869
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlMarginTemperature_v1_t *>calloc(1, sizeof(nvmlMarginTemperature_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3877
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlMarginTemperature_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for MarginTemperature_v1.__dealloc__; forwards to the
 * implementation function below.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope — the
 * __Pyx_KwValues_VARARGS macro presumably expands without evaluating its
 * arguments in this build configuration; confirm against the Cython 3.2.2
 * utility code before changing this line. */
static void __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of MarginTemperature_v1.__dealloc__ (_nvml.pyx:3877-3882).
 * Frees the wrapped C struct only when this instance owns it (_owned).
 * _ptr is copied to a local and nulled out *before* free() so the object is
 * never observed pointing at released memory. */
static void __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  nvmlMarginTemperature_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlMarginTemperature_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":3879
 *     def __dealloc__(self):
 *         cdef nvmlMarginTemperature_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3880
 *         cdef nvmlMarginTemperature_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":3881
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the field first, then release the saved pointer. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":3882
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3879
 *     def __dealloc__(self):
 *         cdef nvmlMarginTemperature_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":3877
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlMarginTemperature_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":3884
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.MarginTemperature_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for MarginTemperature_v1.__repr__; forwards to the
 * implementation function below.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; the
 * __Pyx_KwValues_VARARGS macro presumably does not evaluate its arguments in
 * this build configuration (Cython codegen artifact). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of MarginTemperature_v1.__repr__ (_nvml.pyx:3884-3885).
 * Builds the f-string "<{__name__}.MarginTemperature_v1 object at {hex(id(self))}>"
 * by formatting the module __name__ and hex(id(self)) and joining five unicode
 * fragments.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":3885
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.MarginTemperature_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module-level __name__ as the first interpolated value. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) as the second interpolated value. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five fragments: "<", __name__, ".MarginTemperature_v1 object at ",
   * hex value, ">".  The length/max-char hints pre-size the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_MarginTemperature_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 32 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3884
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.MarginTemperature_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3887
 *         return f"<{__name__}.MarginTemperature_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the MarginTemperature_v1.ptr property getter;
 * forwards to the implementation function below.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; the
 * __Pyx_KwValues_VARARGS macro presumably does not evaluate its arguments in
 * this build configuration (Cython codegen artifact). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the MarginTemperature_v1.ptr property getter
 * (_nvml.pyx:3887-3890): returns the address of the wrapped struct as a
 * Python int (intptr_t converted via PyLong_FromSsize_t).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3890
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3887
 *         return f"<{__name__}.MarginTemperature_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3892
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) fast-path accessor mirroring the `ptr` property
 * (_nvml.pyx:3892-3893): exposes the address of the wrapped
 * nvmlMarginTemperature_v1_t as an integer.  No Python API involved,
 * cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20MarginTemperature_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":3895
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for MarginTemperature_v1.__int__; forwards to the
 * implementation function below.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; the
 * __Pyx_KwValues_VARARGS macro presumably does not evaluate its arguments in
 * this build configuration (Cython codegen artifact). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of MarginTemperature_v1.__int__ (_nvml.pyx:3895-3896):
 * int(obj) yields the address of the wrapped struct, identical to the `ptr`
 * property.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":3896
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3895
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3898
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef MarginTemperature_v1 other_
 *         if not isinstance(other, MarginTemperature_v1):
*/

/* Python wrapper */
/* Python-level wrapper for MarginTemperature_v1.__eq__; forwards both operands
 * to the implementation function below.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; the
 * __Pyx_KwValues_VARARGS macro presumably does not evaluate its arguments in
 * this build configuration (Cython codegen artifact). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of MarginTemperature_v1.__eq__ (_nvml.pyx:3898-3903).
 * Returns False (note: not NotImplemented) when `other` is not a
 * MarginTemperature_v1; otherwise compares the two wrapped structs
 * byte-for-byte with memcmp.
 * NOTE(review): memcmp also compares any padding bytes inside the struct —
 * fine here because instances are calloc-zeroed or copied wholesale. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":3900
 *     def __eq__(self, other):
 *         cdef MarginTemperature_v1 other_
 *         if not isinstance(other, MarginTemperature_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":3901
 *         cdef MarginTemperature_v1 other_
 *         if not isinstance(other, MarginTemperature_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMarginTemperature_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":3900
 *     def __eq__(self, other):
 *         cdef MarginTemperature_v1 other_
 *         if not isinstance(other, MarginTemperature_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":3902
 *         if not isinstance(other, MarginTemperature_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMarginTemperature_v1_t)) == 0)
 * 
 */
  /* The cast check below would also accept None, but the isinstance guard
   * above already excluded it. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1))))) __PYX_ERR(0, 3902, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":3903
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMarginTemperature_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlMarginTemperature_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3903, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3898
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef MarginTemperature_v1 other_
 *         if not isinstance(other, MarginTemperature_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3905
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMarginTemperature_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
*/

/* Python wrapper */
/* Python-level wrapper for MarginTemperature_v1.__setitem__; forwards key and
 * value to the implementation function below.
 * NOTE(review): __pyx_args/__pyx_nargs are undeclared here; the
 * __Pyx_KwValues_VARARGS macro presumably does not evaluate its arguments in
 * this build configuration (Cython codegen artifact). */
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":3906
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if self._ptr == NULL:
*/
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 3906, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3906, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3906, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 3906, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3907
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
*/
    __pyx_v_self->_ptr = ((nvmlMarginTemperature_v1_t *)malloc((sizeof(nvmlMarginTemperature_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3908
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMarginTemperature_v1_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3909
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating MarginTemperature_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMarginTemperature_v1_t))
 *             self._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3909, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_MarginTemperatu};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3909, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 3909, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3908
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMarginTemperature_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":3910
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMarginTemperature_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3910, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3910, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3910, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlMarginTemperature_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3911
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMarginTemperature_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3912
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlMarginTemperature_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3913
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3913, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3913, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 3913, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":3906
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":3915
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 3915, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":3905
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlMarginTemperature_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3917
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Generated Python wrapper for MarginTemperature_v1.version.__get__.
 * Casts the generic PyObject self to the extension-type struct and
 * delegates to the implementation function; returns its result directly
 * (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter:
 * returns self._ptr[0].version converted to a Python int
 * (via __Pyx_PyLong_From_unsigned_int). Returns NULL on conversion
 * failure with the traceback annotated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3920
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field as a Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3917
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3922
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated Python wrapper for MarginTemperature_v1.version.__set__.
 * Casts self to the extension-type struct and forwards (self, val) to the
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter:
 * raises ValueError if self._readonly is set, otherwise converts val to
 * C unsigned int and stores it in self._ptr[0].version.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3924
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3925
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_MarginTemperature_v1_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3925, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3925, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3924
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3926
 *         if self._readonly:
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python value to unsigned int; (unsigned int)-1 may be a
   * legitimate value, so PyErr_Occurred() disambiguates the error case. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3926, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3922
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3928
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def margin_temperature(self):
 *         """int: The margin temperature value."""
*/

/* Python wrapper */
/* Generated Python wrapper for MarginTemperature_v1.margin_temperature.__get__.
 * Casts self to the extension-type struct and delegates to the
 * implementation; returns its result directly (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `margin_temperature` property getter:
 * returns self._ptr[0].marginTemperature converted to a Python int
 * (via __Pyx_PyLong_From_int — the field is a signed int, unlike
 * `version` above). Returns NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":3931
 *     def margin_temperature(self):
 *         """int: The margin temperature value."""
 *         return self._ptr[0].marginTemperature             # <<<<<<<<<<<<<<
 * 
 *     @margin_temperature.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).marginTemperature); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3931, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3928
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def margin_temperature(self):
 *         """int: The margin temperature value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.margin_temperature.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3933
 *         return self._ptr[0].marginTemperature
 * 
 *     @margin_temperature.setter             # <<<<<<<<<<<<<<
 *     def margin_temperature(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated Python wrapper for MarginTemperature_v1.margin_temperature.__set__.
 * Casts self to the extension-type struct and forwards (self, val) to the
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `margin_temperature` property setter:
 * raises ValueError if self._readonly is set, otherwise converts val to
 * C (signed) int and stores it in self._ptr[0].marginTemperature.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":3935
 *     @margin_temperature.setter
 *     def margin_temperature(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")
 *         self._ptr[0].marginTemperature = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":3936
 *     def margin_temperature(self, val):
 *         if self._readonly:
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].marginTemperature = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_MarginTemperature_v1_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3936, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 3936, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3935
 *     @margin_temperature.setter
 *     def margin_temperature(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")
 *         self._ptr[0].marginTemperature = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":3937
 *         if self._readonly:
 *             raise ValueError("This MarginTemperature_v1 instance is read-only")
 *         self._ptr[0].marginTemperature = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python value to int; (int)-1 may be a legitimate value,
   * so PyErr_Occurred() disambiguates the error case. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3937, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).marginTemperature = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":3933
 *         return self._ptr[0].marginTemperature
 * 
 *     @margin_temperature.setter             # <<<<<<<<<<<<<<
 *     def margin_temperature(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.margin_temperature.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3939
 *         self._ptr[0].marginTemperature = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an MarginTemperature_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper */
/* Generated argument-parsing wrapper for the static method
 * MarginTemperature_v1.from_data(data): accepts exactly one argument,
 * positional or by keyword "data", then delegates to the implementation.
 *
 * FIX: the keyword-count error check below was written as
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0),
 * which yields only 0 or 1, so the `< 0` comparison could never be true
 * and a negative (error) return from __Pyx_NumKwargs_FASTCALL was
 * silently ignored. Moving the comparison inside the hint —
 * unlikely(__pyx_kwds_len < 0) — restores the intended error check
 * without changing behavior on compilers where unlikely(x) is (x). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_12from_data, "MarginTemperature_v1.from_data(data)\n\nCreate an MarginTemperature_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `margin_temperature_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() — see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3939, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3939, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 3939, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 3939, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3939, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 3939, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method MarginTemperature_v1.from_data:
 * looks up the module-global `margin_temperature_v1_dtype` and delegates
 * to the module-level helper __from_data() together with the dtype name
 * and the MarginTemperature_v1 extension type. Returns the new wrapper
 * object, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":3946
 *             data (_numpy.ndarray): a single-element array of dtype `margin_temperature_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "margin_temperature_v1_dtype", margin_temperature_v1_dtype, MarginTemperature_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_margin_temperature_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_margin_temperature_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3939
 *         self._ptr[0].marginTemperature = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an MarginTemperature_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3948
 *         return __from_data(data, "margin_temperature_v1_dtype", margin_temperature_v1_dtype, MarginTemperature_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an MarginTemperature_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper */
/* Generated argument-parsing wrapper for the static method
 * MarginTemperature_v1.from_ptr(ptr, readonly=False, owner=None):
 * unpacks 1-3 arguments (positional or keyword), converts `ptr` to
 * intptr_t and `readonly` to a C truth value, defaults `owner` to None,
 * then delegates to the implementation.
 *
 * FIX: the keyword-count error check below was written as
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0),
 * which yields only 0 or 1, so the `< 0` comparison could never be true
 * and a negative (error) return from __Pyx_NumKwargs_FASTCALL was
 * silently ignored. Moving the comparison inside the hint —
 * unlikely(__pyx_kwds_len < 0) — restores the intended error check
 * without changing behavior on compilers where unlikely(x) is (x). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_14from_ptr, "MarginTemperature_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an MarginTemperature_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() — see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 3948, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present; collect positionals first
       * (fallthrough cases), then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 3948, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":3949
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an MarginTemperature_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 3948, __pyx_L3_error) }
      }
    } else {
      /* Fast path: positional arguments only. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 3948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 3948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 3948, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert `ptr` to intptr_t (parsed via PyLong_AsSsize_t — Cython
     * assumes intptr_t and Py_ssize_t have compatible width here). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 3949, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 3949, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 3948, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":3948
 *         return __from_data(data, "margin_temperature_v1_dtype", margin_temperature_v1_dtype, MarginTemperature_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an MarginTemperature_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":3957
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":3958
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3958, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 3958, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":3957
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":3959
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_MarginTemperature_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3959, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":3960
 *             raise ValueError("ptr must not be null (0)")
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":3961
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlMarginTemperature_v1_t *)malloc((sizeof(nvmlMarginTemperature_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3962
 *         if owner is None:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMarginTemperature_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":3963
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating MarginTemperature_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMarginTemperature_v1_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3963, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_MarginTemperatu};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3963, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 3963, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":3962
 *         if owner is None:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMarginTemperature_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":3964
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMarginTemperature_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlMarginTemperature_v1_t))));

    /* "cuda/bindings/_nvml.pyx":3965
 *                 raise MemoryError("Error allocating MarginTemperature_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMarginTemperature_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":3966
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlMarginTemperature_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":3960
 *             raise ValueError("ptr must not be null (0)")
 *         cdef MarginTemperature_v1 obj = MarginTemperature_v1.__new__(MarginTemperature_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>malloc(sizeof(nvmlMarginTemperature_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":3968
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlMarginTemperature_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":3969
 *         else:
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":3970
 *             obj._ptr = <nvmlMarginTemperature_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":3971
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":3972
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3948
 *         return __from_data(data, "margin_temperature_v1_dtype", margin_temperature_v1_dtype, MarginTemperature_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an MarginTemperature_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython entry point for MarginTemperature_v1.__reduce_cython__ (Cython-
 * generated). Rejects every positional and keyword argument, then forwards
 * `self` to the implementation function (..._16__reduce_cython__), which
 * unconditionally raises TypeError. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_16__reduce_cython__, "MarginTemperature_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive the arguments as a tuple; recover the count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments beyond self: reject positionals
   * and keywords alike before dispatching. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of MarginTemperature_v1.__reduce_cython__. The type wraps a
 * raw C pointer (`self._ptr`), so pickling is unsupported: this always raises
 * TypeError and never returns a value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for MarginTemperature_v1.__setstate_cython__ (Cython-
 * generated). Unpacks exactly one argument (`__pyx_state`, positional or
 * keyword) and forwards it to the implementation (..._18__setstate_cython__),
 * which unconditionally raises TypeError.
 *
 * FIX(review): the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`. `unlikely(x)` normalizes its operand
 * through `!!(x)` to 0/1, so that comparison was always false and a negative
 * (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored. The
 * comparison now sits inside `unlikely`, matching the pattern used elsewhere
 * in this file (e.g. the __reduce_cython__ wrapper above). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18__setstate_cython__, "MarginTemperature_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive the arguments as a tuple; recover the count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — always false; see FIX above */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals first, then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument unpacking failed: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of MarginTemperature_v1.__setstate_cython__. Pickling is
 * unsupported for this pointer-wrapping type: the `__pyx_state` argument is
 * ignored and TypeError is always raised. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.MarginTemperature_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":3975
 * 
 * 
 * cdef _get_clk_mon_fault_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlClkMonFaultInfo_t pod = nvmlClkMonFaultInfo_t()
 *     return _numpy.dtype({
*/

/* Builds the structured numpy dtype mirroring nvmlClkMonFaultInfo_t: field
 * names, formats (two uint32), byte offsets computed from a stack instance,
 * and the struct's total size as the itemsize. Returns a new reference to the
 * dtype object, or NULL with a Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_clk_mon_fault_info_dtype_offsets(void) {
  nvmlClkMonFaultInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlClkMonFaultInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_clk_mon_fault_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":3976
 * 
 * cdef _get_clk_mon_fault_info_dtype_offsets():
 *     cdef nvmlClkMonFaultInfo_t pod = nvmlClkMonFaultInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['clk_api_domain', 'clk_domain_fault_mask'],
*/
  /* NOTE(review): __pyx_t_1 is copied into pod without any prior assignment
   * (no value-initialization is visible here). Harmless in this function
   * because only the ADDRESSES of pod's fields are used below, never their
   * values — but confirm upstream codegen if pod's contents ever get read. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":3977
 * cdef _get_clk_mon_fault_info_dtype_offsets():
 *     cdef nvmlClkMonFaultInfo_t pod = nvmlClkMonFaultInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['clk_api_domain', 'clk_domain_fault_mask'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  /* Look up numpy.dtype once; __pyx_t_5 holds the callable. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3978
 *     cdef nvmlClkMonFaultInfo_t pod = nvmlClkMonFaultInfo_t()
 *     return _numpy.dtype({
 *         'names': ['clk_api_domain', 'clk_domain_fault_mask'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  /* Assemble the 4-key dtype spec dict: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_clk_api_domain);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_clk_api_domain);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_clk_api_domain) != (0)) __PYX_ERR(0, 3978, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_clk_domain_fault_mask);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_clk_domain_fault_mask);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_clk_domain_fault_mask) != (0)) __PYX_ERR(0, 3978, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 3978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3979
 *     return _numpy.dtype({
 *         'names': ['clk_api_domain', 'clk_domain_fault_mask'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.clkApiDomain)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 3979, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3979, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 3978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":3981
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.clkApiDomain)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.clkDomainFaultMask)) - (<intptr_t>&pod),
 *         ],
*/
  /* Field offsets are computed as address deltas from the stack instance,
   * so they track the compiler's actual struct layout (padding included). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.clkApiDomain)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":3982
 *         'offsets': [
 *             (<intptr_t>&(pod.clkApiDomain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkDomainFaultMask)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlClkMonFaultInfo_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.clkDomainFaultMask)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":3980
 *         'names': ['clk_api_domain', 'clk_domain_fault_mask'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.clkApiDomain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkDomainFaultMask)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3980, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 3980, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 3980, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 3978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":3984
 *             (<intptr_t>&(pod.clkDomainFaultMask)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlClkMonFaultInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlClkMonFaultInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 3978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); bound-method unpacking keeps the fastcall path. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3977, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":3975
 * 
 * 
 * cdef _get_clk_mon_fault_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlClkMonFaultInfo_t pod = nvmlClkMonFaultInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_clk_mon_fault_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4006
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=clk_mon_fault_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* CPython tp_init entry point for ClkMonFaultInfo.__init__(self, size=1)
 * (Cython-generated). Unpacks the optional `size` argument (positional or
 * keyword, defaulting to the interned int 1) and forwards it to the
 * implementation function. Returns 0 on success, -1 with an exception set on
 * failure.
 *
 * FIX(review): the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`. `unlikely(x)` normalizes its operand
 * through `!!(x)` to 0/1, so that comparison was always false and a negative
 * (error) return from __Pyx_NumKwargs_VARARGS was silently ignored. The
 * comparison now sits inside `unlikely`. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — always false; see FIX above */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4006, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call path: collect positionals, parse keywords, then apply
       * the default (size=1) if the slot is still empty. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4006, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 4006, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only call path with the same size=1 default. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4006, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 4006, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument unpacking failed: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__init__(self, size=1): allocates a numpy
 * array of `size` elements with the clk_mon_fault_info dtype, stores it on
 * self._data as a recarray view, and (when assertions are enabled) verifies
 * the dtype itemsize matches sizeof(nvmlClkMonFaultInfo_t), raising
 * AssertionError with an f-string message on mismatch. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4007
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=clk_mon_fault_info_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlClkMonFaultInfo_t), \
*/
  /* numpy.empty(size, dtype=clk_mon_fault_info_dtype) via the vectorcall
   * keyword-builder path; the dtype is a module-level global. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_clk_mon_fault_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 4007, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4008
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=clk_mon_fault_info_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlClkMonFaultInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlClkMonFaultInfo_t) }"
*/
  /* self._data = arr.view(numpy.recarray); the old _data reference (if any)
   * is released after the new one is stored. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4008, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4009
 *         arr = _numpy.empty(size, dtype=clk_mon_fault_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlClkMonFaultInfo_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlClkMonFaultInfo_t) }"
 * 
*/
  /* Sanity check (skipped under CYTHON_WITHOUT_ASSERTIONS / python -O):
   * the numpy dtype layout must match the C struct exactly. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlClkMonFaultInfo_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4009, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 4009, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":4010
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlClkMonFaultInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlClkMonFaultInfo_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
      /* Build the f-string assertion message from four pieces (two literal
       * fragments, two formatted values) and raise AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlClkMonFaultInfo_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 4009, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 4009, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":4006
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=clk_mon_fault_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4012
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlClkMonFaultInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ClkMonFaultInfo_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python slot wrapper for ClkMonFaultInfo.__repr__ (generated by Cython):
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* kwvalues is unused here (CYTHON_UNUSED); the macro form is shared with
   * keyword-accepting wrappers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__repr__ (generated by Cython).
 * Mirrors the .pyx source quoted in the banner comments below:
 *   - if self._data.size > 1: return the "_Array_<size>" f-string form
 *   - else: return the plain "<module.ClkMonFaultInfo object at 0x...>" form
 * Both branches build the result with __Pyx_PyUnicode_Join over a stack
 * array of fragments. The __pyx_t_* temporaries carry exact reference
 * ownership (GOTREF/DECREF pairs); statement order must not change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];  /* fragments for the 7-part array-form f-string */
  PyObject *__pyx_t_7[5];  /* fragments for the 5-part scalar-form f-string */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4013
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ClkMonFaultInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate self._data.size > 1 via generic rich comparison (size is a
   * Python-level attribute of the underlying recarray). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4013, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4013, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 4013, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":4014
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.ClkMonFaultInfo_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.ClkMonFaultInfo object at {hex(id(self))}>"
 */
    __Pyx_XDECREF(__pyx_r);
    /* t_1 = format(__name__) */
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* t_4 = format(self._data.size) */
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* t_2 = str(hex(id(self))) */
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    /* Join: "<" + __name__ + ".ClkMonFaultInfo_Array_" + size + " object at " + hex + ">";
     * the length/max-char hints are precomputed for the interned literals. */
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_ClkMonFaultInfo_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 23 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4013
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ClkMonFaultInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":4016
 *             return f"<{__name__}.ClkMonFaultInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.ClkMonFaultInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar form: same construction as above, without the size fragment. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_ClkMonFaultInfo_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":4012
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlClkMonFaultInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ClkMonFaultInfo_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  /* Error path: release any live temporaries, record traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4018
 *             return f"<{__name__}.ClkMonFaultInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python slot wrapper for the ClkMonFaultInfo.ptr property getter:
 * casts self to the extension-type struct and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ClkMonFaultInfo.ptr property getter:
 * returns self._data.ctypes.data — the data pointer of the backing
 * numpy array as a Python int (two chained attribute lookups). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4021
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = (self._data.ctypes).data; returned without conversion. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4018
 *             return f"<{__name__}.ClkMonFaultInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4023
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (cdef) helper ClkMonFaultInfo._get_ptr:
 * fetches self._data.ctypes.data and converts the Python int to intptr_t.
 * On error it returns 0 with a Python exception set (no error-return
 * sentinel distinct from a valid value — callers must check the exception
 * state, per the cdef intptr_t return convention). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15ClkMonFaultInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4024
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Convert via PyLong_AsSsize_t; -1 with an exception pending signals failure. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4024, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4023
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4026
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* Python slot wrapper for ClkMonFaultInfo.__int__:
 * casts self to the extension-type struct and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__int__:
 *   - raises TypeError when self._data.size > 1 (arrays have no single
 *     pointer value; the message directs callers to .ptr)
 *   - otherwise returns self._data.ctypes.data as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4027
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4027, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 4027, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":4028
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Instantiate TypeError(msg) via the vectorcall fast path, then raise. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4028, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4028, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4027
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":4030
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4026
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4032
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* Python slot wrapper for ClkMonFaultInfo.__len__ (sq_length slot,
 * returns Py_ssize_t): casts self and delegates. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__len__:
 * returns self._data.size coerced to Py_ssize_t; -1 with an exception set
 * on failure (the standard sq_length error convention). */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":4033
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Index-protocol conversion; -1 only signals error when an exception is pending. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4033, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4032
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4035
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Python slot wrapper for ClkMonFaultInfo.__eq__:
 * casts self and forwards other unchanged to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__eq__:
 * returns False when other is not a ClkMonFaultInfo, or the sizes or
 * dtypes of the backing arrays differ; otherwise returns
 * bool((self._data == other._data).all()). The three guard conditions
 * short-circuit via the __pyx_L4_bool_binop_done label. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4036
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Cache self._data locally (one attribute load, one strong reference). */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4037
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Guard 1: not isinstance(other, ClkMonFaultInfo). */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Guard 2: self_data.size != other._data.size. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Guard 3: self_data.dtype != other._data.dtype. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4038
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4037
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":4039
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Element-wise comparison, then call .all() on the result and coerce
   * its truth value back into a Python bool. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4039, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4039, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4039, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Double negation normalizes the int truth value to 0/1 for bool(). */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4035
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ClkMonFaultInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4041
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_api_domain(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for the ClkMonFaultInfo.clk_api_domain property
 * getter: casts self and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ClkMonFaultInfo.clk_api_domain property getter:
 *   - when self._data.size == 1, returns int(self._data.clk_api_domain[0])
 *     (a plain Python int for the scalar case)
 *   - otherwise returns the clk_api_domain field array unchanged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4044
 *     def clk_api_domain(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.clk_api_domain[0])
 *         return self._data.clk_api_domain
 */
  /* Optimized "== 1" comparison against the interned int constant. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4044, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4045
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.clk_api_domain[0])             # <<<<<<<<<<<<<<
 *         return self._data.clk_api_domain
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    /* field[0] -> int(...) for the single-element case. */
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_clk_api_domain); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4044
 *     def clk_api_domain(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.clk_api_domain[0])
 *         return self._data.clk_api_domain
 */
  }

  /* "cuda/bindings/_nvml.pyx":4046
 *         if self._data.size == 1:
 *             return int(self._data.clk_api_domain[0])
 *         return self._data.clk_api_domain             # <<<<<<<<<<<<<<
 * 
 *     @clk_api_domain.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_clk_api_domain); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4046, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4041
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_api_domain(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.clk_api_domain.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4048
 *         return self._data.clk_api_domain
 * 
 *     @clk_api_domain.setter             # <<<<<<<<<<<<<<
 *     def clk_api_domain(self, val):
 *         self._data.clk_api_domain = val
*/

/* Python wrapper */
/* Python-level wrapper for the ClkMonFaultInfo.clk_api_domain property setter.
 * Casts the raw PyObject* self to the extension-type struct and delegates to
 * the generated __set__ implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are supplied by the macro expansion
   * context here; the kwvalues result is unused (CYTHON_UNUSED). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.clk_api_domain.__set__ (from
 * _nvml.pyx:4050, quoted in the banner below).  Performs exactly:
 *     self._data.clk_api_domain = val
 * i.e. a single attribute store on the wrapped self->_data object.
 * Returns 0 on success, -1 with a traceback entry on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4050
 *     @clk_api_domain.setter
 *     def clk_api_domain(self, val):
 *         self._data.clk_api_domain = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_clk_api_domain, __pyx_v_val) < (0)) __PYX_ERR(0, 4050, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4048
 *         return self._data.clk_api_domain
 * 
 *     @clk_api_domain.setter             # <<<<<<<<<<<<<<
 *     def clk_api_domain(self, val):
 *         self._data.clk_api_domain = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.clk_api_domain.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4052
 *         self._data.clk_api_domain = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_domain_fault_mask(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python-level wrapper for the ClkMonFaultInfo.clk_domain_fault_mask property
 * getter.  Casts self to the extension-type struct and delegates to the
 * generated __get__ implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.clk_domain_fault_mask.__get__
 * (from _nvml.pyx:4052-4057, quoted in the banners below).  Equivalent to:
 *     if self._data.size == 1:
 *         return int(self._data.clk_domain_fault_mask[0])
 *     return self._data.clk_domain_fault_mask
 * i.e. a single-element record array is unwrapped to a plain Python int,
 * otherwise the attribute value is returned as-is.  Returns a new reference
 * or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4055
 *     def clk_domain_fault_mask(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.clk_domain_fault_mask[0])
 *         return self._data.clk_domain_fault_mask
 */
  /* Evaluate `self._data.size == 1` (BoolEqObjC compares against the cached int 1). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4055, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4056
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.clk_domain_fault_mask[0])             # <<<<<<<<<<<<<<
 *         return self._data.clk_domain_fault_mask
 * 
 */
    /* size == 1 path: fetch attribute, index element 0, coerce to int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_clk_domain_fault_mask); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4055
 *     def clk_domain_fault_mask(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.clk_domain_fault_mask[0])
 *         return self._data.clk_domain_fault_mask
 */
  }

  /* "cuda/bindings/_nvml.pyx":4057
 *         if self._data.size == 1:
 *             return int(self._data.clk_domain_fault_mask[0])
 *         return self._data.clk_domain_fault_mask             # <<<<<<<<<<<<<<
 * 
 *     @clk_domain_fault_mask.setter
 */
  /* size != 1 path: return the attribute unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_clk_domain_fault_mask); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4057, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4052
 *         self._data.clk_api_domain = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_domain_fault_mask(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.clk_domain_fault_mask.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4059
 *         return self._data.clk_domain_fault_mask
 * 
 *     @clk_domain_fault_mask.setter             # <<<<<<<<<<<<<<
 *     def clk_domain_fault_mask(self, val):
 *         self._data.clk_domain_fault_mask = val
*/

/* Python wrapper */
/* Python-level wrapper for the ClkMonFaultInfo.clk_domain_fault_mask property
 * setter.  Casts self to the extension-type struct and delegates to the
 * generated __set__ implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.clk_domain_fault_mask.__set__ (from
 * _nvml.pyx:4061, quoted in the banner below).  Performs exactly:
 *     self._data.clk_domain_fault_mask = val
 * i.e. a single attribute store on the wrapped self->_data object.
 * Returns 0 on success, -1 with a traceback entry on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4061
 *     @clk_domain_fault_mask.setter
 *     def clk_domain_fault_mask(self, val):
 *         self._data.clk_domain_fault_mask = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_clk_domain_fault_mask, __pyx_v_val) < (0)) __PYX_ERR(0, 4061, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4059
 *         return self._data.clk_domain_fault_mask
 * 
 *     @clk_domain_fault_mask.setter             # <<<<<<<<<<<<<<
 *     def clk_domain_fault_mask(self, val):
 *         self._data.clk_domain_fault_mask = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.clk_domain_fault_mask.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4063
 *         self._data.clk_domain_fault_mask = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonFaultInfo.__getitem__ (mp_subscript slot).
 * Casts self to the extension-type struct and delegates to the generated
 * implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__getitem__ (from _nvml.pyx:4063-4077,
 * quoted in the banners below).  Equivalent Python:
 *     if isinstance(key, int):
 *         key_ = key
 *         size = self._data.size
 *         if key_ >= size or key_ <= -(size+1):
 *             raise IndexError("index is out of bounds")
 *         if key_ < 0:
 *             key_ += size
 *         return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 *     out = self._data[key]
 *     if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:
 *         return ClkMonFaultInfo.from_data(out)
 *     return out
 * Integer keys are bounds-checked, normalized if negative, and answered with
 * a length-1 slice wrapped via from_data; non-integer keys fall through to
 * the underlying _data's own indexing, re-wrapping matching recarray results.
 * Returns a new reference or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":4066
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4067
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4067, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":4068
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4068, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4068, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":4069
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Short-circuit `or`: second comparison only evaluated if the first is false. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4070
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4070, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 4070, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4069
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":4071
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":4072
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* Normalize negative index to its non-negative equivalent. */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":4071
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":4073
 *             if key_ < 0:
 *                 key_ += size
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:
 */
    /* Wrap the length-1 slice _data[key_:key_+1] via ClkMonFaultInfo.from_data. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4073, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4073, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4066
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":4074
 *                 key_ += size
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:
 *             return ClkMonFaultInfo.from_data(out)
 */
  /* Non-integer key (e.g. slice or field name): defer to _data's indexing. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":4075
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:             # <<<<<<<<<<<<<<
 *             return ClkMonFaultInfo.from_data(out)
 *         return out
 */
  /* Short-circuit `and`: dtype comparison only runs if out is a recarray. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_clk_mon_fault_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4075, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4076
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:
 *             return ClkMonFaultInfo.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4076, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4075
 *             return ClkMonFaultInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:             # <<<<<<<<<<<<<<
 *             return ClkMonFaultInfo.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":4077
 *         if isinstance(out, _numpy.recarray) and out.dtype == clk_mon_fault_info_dtype:
 *             return ClkMonFaultInfo.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4063
 *         self._data.clk_domain_fault_mask = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4079
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonFaultInfo.__setitem__ (mp_ass_subscript slot).
 * Casts self to the extension-type struct and delegates to the generated
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__setitem__ (from _nvml.pyx:4080, quoted
 * in the banner below).  Performs exactly:
 *     self._data[key] = val
 * delegating item assignment directly to the wrapped self->_data object.
 * Returns 0 on success, -1 with a traceback entry on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4080
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 4080, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4079
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4082
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClkMonFaultInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod ClkMonFaultInfo.from_data(data)
 * (see _nvml.pyx:4082).  Accepts exactly one argument, positionally or as the
 * keyword `data`, under either the fastcall (vectorcall) or classic
 * tuple/dict calling convention, then delegates to the generated
 * implementation.  The PyDoc_STRVAR and PyMethodDef entries below expose the
 * docstring and register the method.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14from_data, "ClkMonFaultInfo.from_data(data)\n\nCreate an ClkMonFaultInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `clk_mon_fault_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 4082, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then parse keywords
       * against the single accepted name `data`. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 4082, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 4082, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4082, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 4082, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python static method ClkMonFaultInfo.from_data(data).
 * Validates that `data` is a 1-D NumPy ndarray of dtype clk_mon_fault_info_dtype,
 * allocates a fresh ClkMonFaultInfo via its tp_new, stores `data.view(numpy.recarray)`
 * into the instance's `_data` slot, and returns the new instance.
 * Returns a new reference, or NULL with TypeError/ValueError set on bad input. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":4089
 *             data (_numpy.ndarray): a 1D array of dtype `clk_mon_fault_info_dtype` holding the data.
 *         """
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
*/
  /* Allocate the instance directly through the type's tp_new (bypasses __init__). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonFaultInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4089, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4090
 *         """
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 4090, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":4091
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4091, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4091, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4090
 *         """
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  }

  /* "cuda/bindings/_nvml.pyx":4092
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != clk_mon_fault_info_dtype:
*/
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4092, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4092, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":4093
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != clk_mon_fault_info_dtype:
 *             raise ValueError("data array must be of dtype clk_mon_fault_info_dtype")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4093, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4093, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4092
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != clk_mon_fault_info_dtype:
*/
  }

  /* "cuda/bindings/_nvml.pyx":4094
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != clk_mon_fault_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype clk_mon_fault_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_clk_mon_fault_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* dtype comparison goes through rich-compare because dtype != dtype returns a Python bool. */
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4094, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4094, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":4095
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != clk_mon_fault_info_dtype:
 *             raise ValueError("data array must be of dtype clk_mon_fault_info_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_clk};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4095, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 4095, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4094
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != clk_mon_fault_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype clk_mon_fault_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  }

  /* "cuda/bindings/_nvml.pyx":4096
 *         if data.dtype != clk_mon_fault_info_dtype:
 *             raise ValueError("data array must be of dtype clk_mon_fault_info_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4096, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Steal the new recarray view into obj->_data, dropping the slot's previous value. */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4098
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4082
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClkMonFaultInfo instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ClkMonFaultInfo instance wrapping the given pointer.
*/

/* Python wrapper for ClkMonFaultInfo.from_ptr(ptr, size=1, readonly=False).
 * Unpacks positional/keyword arguments from the FASTCALL (or tuple) calling
 * convention into C values (intptr_t, size_t, int) and forwards them to the
 * implementation function __pyx_pf_..._16from_ptr. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_16from_ptr, "ClkMonFaultInfo.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an ClkMonFaultInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 4100, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first (fallthrough switch), then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 4100, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; the loop reports it if still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 4100, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1 to 3 positional arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4100, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert the collected Python objects to C arguments, applying the
     * declared defaults (size=1, readonly=False) when a value is absent. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4101, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4101, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4101, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":4101
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an ClkMonFaultInfo instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 4100, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":4100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ClkMonFaultInfo instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.from_ptr(ptr, size=1, readonly=False).
 * Rejects a null pointer, allocates a new ClkMonFaultInfo, wraps the raw
 * memory at `ptr` (size * sizeof(nvmlClkMonFaultInfo_t) bytes) in a
 * memoryview with PyBUF_READ or PyBUF_WRITE depending on `readonly`, builds
 * a NumPy ndarray of dtype clk_mon_fault_info_dtype over that buffer, and
 * stores its recarray view in the instance's `_data` slot.
 * NOTE(review): the returned object aliases the caller-supplied memory
 * (PyMemoryView_FromMemory does not copy) — the caller must keep `ptr`
 * valid for the lifetime of the returned instance; confirm this contract
 * is documented at the .pyx level. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4109
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4110
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4110, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4110, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4109
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
*/
  }

  /* "cuda/bindings/_nvml.pyx":4111
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonFaultInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4111, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4112
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlClkMonFaultInfo_t) * size, flag)
*/
  /* `flag` is kept as a Python int (cdef flag is untyped in the .pyx) and
   * converted back to C int just before the PyMemoryView_FromMemory call. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4112, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4112, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4114
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlClkMonFaultInfo_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=clk_mon_fault_info_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4114, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4113
 *         cdef ClkMonFaultInfo obj = ClkMonFaultInfo.__new__(ClkMonFaultInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlClkMonFaultInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=clk_mon_fault_info_dtype)
*/
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlClkMonFaultInfo_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4115
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlClkMonFaultInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=clk_mon_fault_info_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* Vectorcall of numpy.ndarray(size, buffer=buf, dtype=clk_mon_fault_info_dtype). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_clk_mon_fault_info_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4115, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 4115, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 4115, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4115, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4116
 *             <char*>ptr, sizeof(nvmlClkMonFaultInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=clk_mon_fault_info_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4116, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Steal the recarray view into obj->_data, dropping the slot's previous value. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4118
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ClkMonFaultInfo instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4002
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper for the readonly `_data` property getter: casts `self`
 * to the ClkMonFaultInfo struct and delegates to the implementation. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
   * this only compiles if __Pyx_KwValues_VARARGS is a macro that discards
   * its arguments — confirm against the Cython utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the readonly `_data` property getter: returns a new
 * reference to the object held in the instance's `_data` slot. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  result = __pyx_v_self->_data;  /* borrowed reference from the slot */
  __Pyx_INCREF(result);          /* promote to a new reference for the caller */
  __Pyx_XGIVEREF(result);
  __Pyx_RefNannyFinishContext();
  return result;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper for ClkMonFaultInfo.__reduce_cython__ (pickle support).
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation function with `self` cast to the ClkMonFaultInfo struct. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_18__reduce_cython__, "ClkMonFaultInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: fail fast on extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__reduce_cython__ (pickle support).
   Builds state = (self._data,), appends the instance __dict__ when it is
   present and non-empty, and returns either
     (__pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, None), state)
   when __setstate__ must be used, or
     (__pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state))
   when the state can be passed directly to the reconstructor.
   Fix: the else branch previously evaluated the truthiness of the static
   tuple ('self._data is not None',) — always true — instead of computing
   `self._data is not None`, which broke the use_setstate decision for
   instances whose _data is None. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self._data is not None
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self._data is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* `self._data is not None` is a plain pointer comparison; it cannot fail. */
    __pyx_t_2 = (__pyx_v_self->_data != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self._data is not None
 *     if use_setstate:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ClkMonFaultInfo); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ClkMonFaultInfo__set_state(self, __pyx_state)
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ClkMonFaultInfo); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ClkMonFaultInfo__set_state(self, __pyx_state)
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_20__setstate_cython__, "ClkMonFaultInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_20__setstate_cython__};
/* Python wrapper for ClkMonFaultInfo.__setstate_cython__(self, __pyx_state).
   Unpacks exactly one argument (positional or keyword '__pyx_state') and
   forwards to the pure implementation function.
   Fix: the keyword-count error check was written as
   `if (unlikely(__pyx_kwds_len) < 0)`, which compares the 0/1 result of
   unlikely() against 0 and can never fire; the parenthesis now wraps the
   whole comparison so a failing __Pyx_NumKwargs_FASTCALL is reported
   (matching the sibling wrappers in this file). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-FASTCALL convention: recover the argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: take up to one positional, then let
         __Pyx_ParseKeywords fill the remaining slots from keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonFaultInfo.__setstate_cython__(self, __pyx_state).
   Type-checks __pyx_state as a non-None tuple, then delegates restoration of
   the instance to __pyx_unpickle_ClkMonFaultInfo__set_state. Returns None. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ClkMonFaultInfo__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* The C-level __set_state helper is declared `tuple ... not None`, so
     enforce both constraints here before the call. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ClkMonFaultInfo__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ClkMonFaultInfo__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonFaultInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4121
 * 
 * 
 * cdef _get_clock_offset_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlClockOffset_v1_t pod = nvmlClockOffset_v1_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype mirroring nvmlClockOffset_v1_t:
   field names, numpy formats, byte offsets of each member (computed from a
   local instance of the struct), and the total itemsize.
   Fix: the temporary __pyx_t_1 was copied into `pod` without ever being
   assigned, i.e. an uninitialized read (undefined behavior). The
   value-initialization `nvmlClockOffset_v1_t()` from the Cython source
   (`cdef nvmlClockOffset_v1_t pod = nvmlClockOffset_v1_t()`) is restored.
   Only member *addresses* of `pod` are used below, but the copy itself must
   still read a well-defined object. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_clock_offset_v1_dtype_offsets(void) {
  nvmlClockOffset_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlClockOffset_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  size_t __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_clock_offset_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":4122
 * 
 * cdef _get_clock_offset_v1_dtype_offsets():
 *     cdef nvmlClockOffset_v1_t pod = nvmlClockOffset_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'type', 'pstate', 'clock_offset_m_hz', 'min_clock_offset_m_hz', 'max_clock_offset_m_hz'],
 */
  __pyx_t_1 = nvmlClockOffset_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":4123
 * cdef _get_clock_offset_v1_dtype_offsets():
 *     cdef nvmlClockOffset_v1_t pod = nvmlClockOffset_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'type', 'pstate', 'clock_offset_m_hz', 'min_clock_offset_m_hz', 'max_clock_offset_m_hz'],
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":4124
 *     cdef nvmlClockOffset_v1_t pod = nvmlClockOffset_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'type', 'pstate', 'clock_offset_m_hz', 'min_clock_offset_m_hz', 'max_clock_offset_m_hz'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 4124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_type) != (0)) __PYX_ERR(0, 4124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pstate);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pstate);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_pstate) != (0)) __PYX_ERR(0, 4124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_clock_offset_m_hz);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_clock_offset_m_hz);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_clock_offset_m_hz) != (0)) __PYX_ERR(0, 4124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min_clock_offset_m_hz);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min_clock_offset_m_hz);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_min_clock_offset_m_hz) != (0)) __PYX_ERR(0, 4124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_clock_offset_m_hz);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_clock_offset_m_hz);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_max_clock_offset_m_hz) != (0)) __PYX_ERR(0, 4124, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 4124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4125
 *     return _numpy.dtype({
 *         'names': ['version', 'type', 'pstate', 'clock_offset_m_hz', 'min_clock_offset_m_hz', 'max_clock_offset_m_hz'],
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 4125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 4125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 4125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 4125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 4125, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 4124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Per-field byte offsets, computed as the difference between the member
     address and the base address of the local struct. */

  /* "cuda/bindings/_nvml.pyx":4127
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pstate)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4127, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":4128
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pstate)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clockOffsetMHz)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.type)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4128, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":4129
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pstate)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.clockOffsetMHz)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.minClockOffsetMHz)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pstate)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4129, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":4130
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pstate)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clockOffsetMHz)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.minClockOffsetMHz)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxClockOffsetMHz)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.clockOffsetMHz)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4130, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":4131
 *             (<intptr_t>&(pod.pstate)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clockOffsetMHz)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.minClockOffsetMHz)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.maxClockOffsetMHz)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.minClockOffsetMHz)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":4132
 *             (<intptr_t>&(pod.clockOffsetMHz)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.minClockOffsetMHz)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxClockOffsetMHz)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlClockOffset_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxClockOffsetMHz)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":4126
 *         'names': ['version', 'type', 'pstate', 'clock_offset_m_hz', 'min_clock_offset_m_hz', 'max_clock_offset_m_hz'],
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.type)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 4126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_12) != (0)) __PYX_ERR(0, 4126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_11) != (0)) __PYX_ERR(0, 4126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 4126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_9) != (0)) __PYX_ERR(0, 4126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_8) != (0)) __PYX_ERR(0, 4126, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 4124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":4134
 *             (<intptr_t>&(pod.maxClockOffsetMHz)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlClockOffset_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlClockOffset_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 4124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); unpack a bound method into (self, func) when
     possible so the vectorcall fast path can be used. */
  __pyx_t_13 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_13 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_13, (2-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4123, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4121
 * 
 * 
 * cdef _get_clock_offset_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlClockOffset_v1_t pod = nvmlClockOffset_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_clock_offset_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4151
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlClockOffset_v1_t *>calloc(1, sizeof(nvmlClockOffset_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ClockOffset_v1.__init__ (tp_init slot, classic VARARGS
   convention). Rejects all positional and keyword arguments, then forwards
   to the pure implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives an args tuple; PyTuple_Size can fail, hence the
     negative-size check in the non-fast-macro build. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __init__ implementation for ClockOffset_v1.
 * Zero-allocates one nvmlClockOffset_v1_t on the C heap (raising
 * MemoryError if calloc fails) and marks this instance as the owner of
 * the buffer: _owner = None, _owned = True, _readonly = False. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4152
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlClockOffset_v1_t *>calloc(1, sizeof(nvmlClockOffset_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ClockOffset_v1")
*/
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlClockOffset_v1_t *)calloc(1, (sizeof(nvmlClockOffset_v1_t))));

  /* "cuda/bindings/_nvml.pyx":4153
 *     def __init__(self):
 *         self._ptr = <nvmlClockOffset_v1_t *>calloc(1, sizeof(nvmlClockOffset_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ClockOffset_v1")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4154
 *         self._ptr = <nvmlClockOffset_v1_t *>calloc(1, sizeof(nvmlClockOffset_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ClockOffset_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Look up MemoryError through the module globals (it may be shadowed),
     * build the exception instance via a vectorcall, and raise it. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4154, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ClockOffset_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4154, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4154, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4153
 *     def __init__(self):
 *         self._ptr = <nvmlClockOffset_v1_t *>calloc(1, sizeof(nvmlClockOffset_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ClockOffset_v1")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":4155
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ClockOffset_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* No external owner: this object itself owns the allocation. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":4156
 *             raise MemoryError("Error allocating ClockOffset_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":4157
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":4151
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlClockOffset_v1_t *>calloc(1, sizeof(nvmlClockOffset_v1_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4159
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlClockOffset_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Destructor wrapper: called from tp_dealloc; forwards to the
 * __dealloc__ implementation.  Must not raise. */
static void __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
   * this compiles only because __Pyx_KwValues_VARARGS discards its arguments
   * in this build configuration — verify against the macro definition if
   * CYTHON_LIMITED_API builds are ever enabled. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  nvmlClockOffset_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlClockOffset_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":4161
 *     def __dealloc__(self):
 *         cdef nvmlClockOffset_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4162
 *         cdef nvmlClockOffset_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":4163
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":4164
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4161
 *     def __dealloc__(self):
 *         cdef nvmlClockOffset_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":4159
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlClockOffset_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":4166
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ClockOffset_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper: forwards to the __repr__ implementation.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope; compiles because
   * __Pyx_KwValues_VARARGS ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __repr__ implementation: builds
 *   f"<{__name__}.ClockOffset_v1 object at {hex(id(self))}>"
 * by formatting __name__ and hex(id(self)) to str, then joining five
 * fragments (two dynamic, three constant) with a precomputed length hint. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4167
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ClockOffset_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch module-level __name__ and format it with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)), coerced to str for interpolation. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", __name__, ".ClockOffset_v1 object at ", hex-address, ">".
   * The numeric args are a presized-length hint and max-char estimate. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ClockOffset_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 26 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4166
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ClockOffset_v1 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4169
 *         return f"<{__name__}.ClockOffset_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the `ptr` property: forwards to the implementation.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope; compiles because
   * __Pyx_KwValues_VARARGS ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property getter: returns the raw struct address as a Python int.
 * The intptr_t value is boxed via PyLong_FromSsize_t — assumes intptr_t
 * and Py_ssize_t have the same width (true on supported platforms). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4172
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4169
 *         return f"<{__name__}.ClockOffset_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4174
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (`cdef _get_ptr`): returns the raw nvmlClockOffset_v1_t
 * pointer held by this instance as an integer address.  Pure, no Python
 * API, cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14ClockOffset_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":4177
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper: forwards to the __int__ implementation.
 * Returns a new int reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope; compiles because
   * __Pyx_KwValues_VARARGS ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __int__ implementation: int(obj) yields the raw struct address,
 * identical to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4178
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4177
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4180
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ClockOffset_v1 other_
 *         if not isinstance(other, ClockOffset_v1):
*/

/* Python wrapper */
/* __eq__ wrapper: forwards self/other to the implementation.
 * Returns a new reference (True/False), or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope; compiles because
   * __Pyx_KwValues_VARARGS ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __eq__ implementation: two ClockOffset_v1 objects compare equal iff the
 * raw bytes of their underlying structs are identical (memcmp == 0).
 * A non-ClockOffset_v1 `other` yields False (not NotImplemented).
 * NOTE(review): both _ptr values are assumed non-NULL here (memcmp on NULL
 * is undefined behavior) — construction paths in this file always allocate,
 * but confirm no path leaves _ptr NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4182
 *     def __eq__(self, other):
 *         cdef ClockOffset_v1 other_
 *         if not isinstance(other, ClockOffset_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4183
 *         cdef ClockOffset_v1 other_
 *         if not isinstance(other, ClockOffset_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClockOffset_v1_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4182
 *     def __eq__(self, other):
 *         cdef ClockOffset_v1 other_
 *         if not isinstance(other, ClockOffset_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":4184
 *         if not isinstance(other, ClockOffset_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClockOffset_v1_t)) == 0)
 * 
*/
  /* Downcast to the extension type; the isinstance check above already
   * guarantees this succeeds (None cannot reach here either). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1))))) __PYX_ERR(0, 4184, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":4185
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClockOffset_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlClockOffset_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4180
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ClockOffset_v1 other_
 *         if not isinstance(other, ClockOffset_v1):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4187
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClockOffset_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper: forwards key/val to the __setitem__
 * implementation.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope; compiles because
   * __Pyx_KwValues_VARARGS ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setitem__ implementation.
 * obj[0] = <numpy.ndarray>: mallocs a fresh nvmlClockOffset_v1_t, copies
 * sizeof(struct) bytes from the array's data pointer into it, and marks
 * the instance as owning the copy (_readonly mirrors the array's
 * writeable flag).  Any other key/value combination falls through to
 * setattr(self, key, val).
 * NOTE(review): the ndarray branch overwrites self._ptr with the new
 * malloc result without freeing a previously owned buffer — if _owned was
 * already True this leaks the old allocation.  The fix (if intended)
 * belongs in cuda/bindings/_nvml.pyx, not in this generated file.
 * NOTE(review): the copy assumes val's buffer holds at least
 * sizeof(nvmlClockOffset_v1_t) contiguous bytes — confirm callers. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":4188
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit: key == 0 is checked first; only then is numpy.ndarray
   * looked up and the isinstance test run. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4188, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 4188, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4189
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ClockOffset_v1")
*/
    /* See header note: previous _ptr (if owned) is not freed here. */
    __pyx_v_self->_ptr = ((nvmlClockOffset_v1_t *)malloc((sizeof(nvmlClockOffset_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4190
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClockOffset_v1_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4191
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ClockOffset_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClockOffset_v1_t))
 *             self._owner = None
*/
      /* Resolve MemoryError from module globals, call it, and raise. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4191, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ClockOffset_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4191, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 4191, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4190
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClockOffset_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":4192
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClockOffset_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* val.ctypes.data is the array's base address as a Python int;
     * copy the struct-sized prefix of the array's buffer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4192, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4192, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4192, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlClockOffset_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4193
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClockOffset_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4194
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClockOffset_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4195
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Propagate read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4195, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4195, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 4195, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":4188
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":4197
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 4197, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":4187
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClockOffset_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4199
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Getter wrapper for the `version` property: forwards to the
 * implementation.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope; compiles because
   * __Pyx_KwValues_VARARGS ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4202
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4202, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4199
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4204
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4204-4208).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.version property setter:
 * casts self to the extension-type struct and delegates to the
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is marked
 * read-only (self._readonly), otherwise converts `val` to a C unsigned int
 * and stores it in self._ptr[0].version.  Returns 0 on success, -1 with a
 * Python exception set on failure (read-only instance or bad int). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4206
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4207
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClockOffset_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4207, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4207, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4206
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4208
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4208, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4204
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4210
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def type(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4210-4213).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.type property getter:
 * casts self to the extension-type struct and delegates to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads self._ptr[0].type (an nvmlClockType_t enum),
 * casts it to int per the .pyx source, and boxes it as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4213
 *     def type(self):
 *         """int: """
 *         return <int>(self._ptr[0].type)             # <<<<<<<<<<<<<<
 * 
 *     @type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).type)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4210
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def type(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4215
 *         return <int>(self._ptr[0].type)
 * 
 *     @type.setter             # <<<<<<<<<<<<<<
 *     def type(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4215-4219).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.type property setter:
 * casts self to the extension-type struct and delegates to the
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is read-only,
 * otherwise converts `val` to a C int and stores it in self._ptr[0].type
 * cast to nvmlClockType_t (the int value is not range-checked against the
 * enum here).  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4217
 *     @type.setter
 *     def type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].type = <nvmlClockType_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4218
 *     def type(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].type = <nvmlClockType_t><int>val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClockOffset_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4218, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4218, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4217
 *     @type.setter
 *     def type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].type = <nvmlClockType_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4219
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].type = <nvmlClockType_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4219, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).type = ((nvmlClockType_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":4215
 *         return <int>(self._ptr[0].type)
 * 
 *     @type.setter             # <<<<<<<<<<<<<<
 *     def type(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4221
 *         self._ptr[0].type = <nvmlClockType_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pstate(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4221-4224).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.pstate property getter:
 * casts self to the extension-type struct and delegates to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads self._ptr[0].pstate (an nvmlPstates_t enum),
 * casts it to int per the .pyx source, and boxes it as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4224
 *     def pstate(self):
 *         """int: """
 *         return <int>(self._ptr[0].pstate)             # <<<<<<<<<<<<<<
 * 
 *     @pstate.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).pstate)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4221
 *         self._ptr[0].type = <nvmlClockType_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pstate(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.pstate.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4226
 *         return <int>(self._ptr[0].pstate)
 * 
 *     @pstate.setter             # <<<<<<<<<<<<<<
 *     def pstate(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4226-4230).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.pstate property setter:
 * casts self to the extension-type struct and delegates to the
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is read-only,
 * otherwise converts `val` to a C int and stores it in self._ptr[0].pstate
 * cast to nvmlPstates_t (no range check against the enum here).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4228
 *     @pstate.setter
 *     def pstate(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].pstate = <nvmlPstates_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4229
 *     def pstate(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].pstate = <nvmlPstates_t><int>val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClockOffset_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4229, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4229, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4228
 *     @pstate.setter
 *     def pstate(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].pstate = <nvmlPstates_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4230
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].pstate = <nvmlPstates_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4230, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).pstate = ((nvmlPstates_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":4226
 *         return <int>(self._ptr[0].pstate)
 * 
 *     @pstate.setter             # <<<<<<<<<<<<<<
 *     def pstate(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.pstate.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4232
 *         self._ptr[0].pstate = <nvmlPstates_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clock_offset_m_hz(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4232-4235).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.clock_offset_m_hz property
 * getter: casts self to the extension-type struct and delegates to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads self._ptr[0].clockOffsetMHz (int, may be
 * negative for underclock offsets) and boxes it as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4235
 *     def clock_offset_m_hz(self):
 *         """int: """
 *         return self._ptr[0].clockOffsetMHz             # <<<<<<<<<<<<<<
 * 
 *     @clock_offset_m_hz.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).clockOffsetMHz); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4232
 *         self._ptr[0].pstate = <nvmlPstates_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clock_offset_m_hz(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.clock_offset_m_hz.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4237
 *         return self._ptr[0].clockOffsetMHz
 * 
 *     @clock_offset_m_hz.setter             # <<<<<<<<<<<<<<
 *     def clock_offset_m_hz(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4237-4241).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.clock_offset_m_hz property
 * setter: casts self to the extension-type struct and delegates to the
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is read-only,
 * otherwise converts `val` to a C int (overflow raises via
 * __Pyx_PyLong_As_int) and stores it in self._ptr[0].clockOffsetMHz.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4239
 *     @clock_offset_m_hz.setter
 *     def clock_offset_m_hz(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].clockOffsetMHz = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4240
 *     def clock_offset_m_hz(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].clockOffsetMHz = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClockOffset_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4240, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4240, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4239
 *     @clock_offset_m_hz.setter
 *     def clock_offset_m_hz(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].clockOffsetMHz = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4241
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].clockOffsetMHz = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4241, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).clockOffsetMHz = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4237
 *         return self._ptr[0].clockOffsetMHz
 * 
 *     @clock_offset_m_hz.setter             # <<<<<<<<<<<<<<
 *     def clock_offset_m_hz(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.clock_offset_m_hz.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4243
 *         self._ptr[0].clockOffsetMHz = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_clock_offset_m_hz(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4243-4246).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.min_clock_offset_m_hz property
 * getter: casts self to the extension-type struct and delegates to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads self._ptr[0].minClockOffsetMHz (int) and
 * boxes it as a Python int.  Returns a new reference, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4246
 *     def min_clock_offset_m_hz(self):
 *         """int: """
 *         return self._ptr[0].minClockOffsetMHz             # <<<<<<<<<<<<<<
 * 
 *     @min_clock_offset_m_hz.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).minClockOffsetMHz); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4243
 *         self._ptr[0].clockOffsetMHz = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_clock_offset_m_hz(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.min_clock_offset_m_hz.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4248
 *         return self._ptr[0].minClockOffsetMHz
 * 
 *     @min_clock_offset_m_hz.setter             # <<<<<<<<<<<<<<
 *     def min_clock_offset_m_hz(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code (from cuda/bindings/_nvml.pyx:4248-4252).
 * Do not hand-edit; change the .pyx source and regenerate. */

/* Python-level wrapper for the ClockOffset_v1.min_clock_offset_m_hz property
 * setter: casts self to the extension-type struct and delegates to the
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is read-only,
 * otherwise converts `val` to a C int and stores it in
 * self._ptr[0].minClockOffsetMHz.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4250
 *     @min_clock_offset_m_hz.setter
 *     def min_clock_offset_m_hz(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].minClockOffsetMHz = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4251
 *     def min_clock_offset_m_hz(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].minClockOffsetMHz = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClockOffset_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4251, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4251, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4250
 *     @min_clock_offset_m_hz.setter
 *     def min_clock_offset_m_hz(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].minClockOffsetMHz = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4252
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].minClockOffsetMHz = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4252, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).minClockOffsetMHz = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4248
 *         return self._ptr[0].minClockOffsetMHz
 * 
 *     @min_clock_offset_m_hz.setter             # <<<<<<<<<<<<<<
 *     def min_clock_offset_m_hz(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.min_clock_offset_m_hz.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4254
 *         self._ptr[0].minClockOffsetMHz = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_clock_offset_m_hz(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads the C int field maxClockOffsetMHz out of the
 * wrapped nvmlClockOffset_v1_t struct (self._ptr[0]) and boxes it as a Python
 * int.  Returns a new reference, or NULL with an exception set if the int
 * conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4257
 *     def max_clock_offset_m_hz(self):
 *         """int: """
 *         return self._ptr[0].maxClockOffsetMHz             # <<<<<<<<<<<<<<
 * 
 *     @max_clock_offset_m_hz.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C int as a Python int; jumps to the error label on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).maxClockOffsetMHz); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4254
 *         self._ptr[0].minClockOffsetMHz = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_clock_offset_m_hz(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.max_clock_offset_m_hz.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4259
 *         return self._ptr[0].maxClockOffsetMHz
 * 
 *     @max_clock_offset_m_hz.setter             # <<<<<<<<<<<<<<
 *     def max_clock_offset_m_hz(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
/* Cython-generated setter entry point for ClockOffset_v1.max_clock_offset_m_hz.
 * Glue only: casts self to the extension-type struct and forwards (self, val)
 * to the implementation function.  Returns 0 on success, -1 on error, per the
 * descriptor __set__ protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the wrapper is marked
 * read-only, otherwise converts the Python value to a C int and stores it in
 * the wrapped struct's maxClockOffsetMHz field.  Returns 0 on success, -1
 * with an exception set on failure (read-only instance or failed int
 * conversion, e.g. overflow / non-integer). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4261
 *     @max_clock_offset_m_hz.setter
 *     def max_clock_offset_m_hz(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].maxClockOffsetMHz = val
 */
  /* Guard: instances created with readonly=True reject all writes. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4262
 *     def max_clock_offset_m_hz(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxClockOffsetMHz = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClockOffset_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4262, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4262, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4261
 *     @max_clock_offset_m_hz.setter
 *     def max_clock_offset_m_hz(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].maxClockOffsetMHz = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4263
 *         if self._readonly:
 *             raise ValueError("This ClockOffset_v1 instance is read-only")
 *         self._ptr[0].maxClockOffsetMHz = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python value to C int (error-checked), then store it. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4263, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxClockOffsetMHz = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4259
 *         return self._ptr[0].maxClockOffsetMHz
 * 
 *     @max_clock_offset_m_hz.setter             # <<<<<<<<<<<<<<
 *     def max_clock_offset_m_hz(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.max_clock_offset_m_hz.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4265
 *         self._ptr[0].maxClockOffsetMHz = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClockOffset_v1 instance wrapping the given NumPy array.
 */

/* Python wrapper */
/* Cython-generated argument-parsing wrapper for the staticmethod
 * ClockOffset_v1.from_data(data).  Unpacks exactly one positional-or-keyword
 * argument ("data") from either the FASTCALL arg vector or a legacy tuple,
 * then forwards it to the implementation function.  All `values[]` slots are
 * owned references and are released on both the success and error paths.
 *
 * FIX(review): the kwds-length error check was emitted as
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which can never fire, because unlikely(x) expands to
 * __builtin_expect(!!(x), 0) whose value is 0 or 1 — never negative.  The
 * intended (and elsewhere-in-this-file, e.g. __reduce_cython__'s wrapper,
 * correctly emitted) form is `unlikely(__pyx_kwds_len < 0)`, restoring the
 * error propagation when __Pyx_NumKwargs_FASTCALL reports failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_12from_data, "ClockOffset_v1.from_data(data)\n\nCreate an ClockOffset_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `clock_offset_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` was always false (dead error check). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4265, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4265, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 4265, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 4265, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4265, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 4265, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* from_data implementation: looks up the module-level global
 * `clock_offset_v1_dtype` and delegates to the shared __from_data helper,
 * which (per the pyx docstring) wraps a single-element NumPy array of that
 * dtype in a new ClockOffset_v1 instance.  Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":4272
 *             data (_numpy.ndarray): a single-element array of dtype `clock_offset_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "clock_offset_v1_dtype", clock_offset_v1_dtype, ClockOffset_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from the module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_clock_offset_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4272, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_clock_offset_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4272, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4265
 *         self._ptr[0].maxClockOffsetMHz = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClockOffset_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4274
 *         return __from_data(data, "clock_offset_v1_dtype", clock_offset_v1_dtype, ClockOffset_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ClockOffset_v1 instance wrapping the given pointer.
 */

/* Python wrapper */
/* Cython-generated argument-parsing wrapper for the staticmethod
 * ClockOffset_v1.from_ptr(ptr, readonly=False, owner=None).  Unpacks one
 * required argument ("ptr") plus two optional ones ("readonly", "owner"),
 * converts ptr to intptr_t and readonly to a C truth value, defaults owner
 * to None, and forwards everything to the implementation function.  All
 * `values[]` slots are owned references and are released on both the success
 * and error paths.
 *
 * FIX(review): the kwds-length error check was emitted as
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which can never fire, because unlikely(x) expands to
 * __builtin_expect(!!(x), 0) whose value is 0 or 1 — never negative.  The
 * intended form is `unlikely(__pyx_kwds_len < 0)` (as this file emits
 * elsewhere, e.g. in the __reduce_cython__ wrapper), restoring error
 * propagation when __Pyx_NumKwargs_FASTCALL reports failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_14from_ptr, "ClockOffset_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ClockOffset_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` was always false (dead error check). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4274, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4274, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4274, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4274, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 4274, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":4275
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ClockOffset_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 4274, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1 to 3 positional arguments, no keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4274, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4274, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4274, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr (Python int) to C intptr_t, readonly to a C truth value. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4275, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4275, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 4274, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":4274
 *         return __from_data(data, "clock_offset_v1_dtype", clock_offset_v1_dtype, ClockOffset_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ClockOffset_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* from_ptr implementation.  Validates that ptr is non-null, allocates a new
 * ClockOffset_v1 instance, then either:
 *   - owner is None: malloc a private nvmlClockOffset_v1_t, COPY the data
 *     pointed to by ptr into it, and mark the instance as owning (_owned=1,
 *     _owner=None) so dealloc can free it; or
 *   - owner given: wrap ptr directly (no copy), keep a reference to owner to
 *     pin the underlying memory alive, and mark _owned=0.
 * Finally stores the readonly flag and returns the new instance (new
 * reference), or NULL with an exception set (ValueError on null ptr,
 * MemoryError on allocation failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4283
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)
 */
  /* Reject null pointers up front — wrapping address 0 is never valid. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4284
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4284, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4284, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4283
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":4285
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClockOffset_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4285, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4286
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4287
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 */
    /* No owner: take a private copy so the instance's lifetime is
     * independent of the caller's buffer. */
    __pyx_v_obj->_ptr = ((nvmlClockOffset_v1_t *)malloc((sizeof(nvmlClockOffset_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4288
 *         if owner is None:
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClockOffset_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4289
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ClockOffset_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClockOffset_v1_t))
 *             obj._owner = None
 */
      /* NOTE(review): MemoryError is looked up as a module global here (not
       * the PyExc_MemoryError C constant) — presumably standard Cython
       * codegen for a name lookup; confirm it resolves to the builtin. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4289, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ClockOffset_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4289, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 4289, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4288
 *         if owner is None:
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClockOffset_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4290
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClockOffset_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy one struct's worth of data from the caller-supplied address. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlClockOffset_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4291
 *                 raise MemoryError("Error allocating ClockOffset_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClockOffset_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4292
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClockOffset_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlClockOffset_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4286
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClockOffset_v1 obj = ClockOffset_v1.__new__(ClockOffset_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlClockOffset_v1_t *>malloc(sizeof(nvmlClockOffset_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":4294
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlClockOffset_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: alias the caller's memory directly (zero-copy) and
     * hold a reference to owner so the memory outlives this wrapper. */
    __pyx_v_obj->_ptr = ((nvmlClockOffset_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4295
 *         else:
 *             obj._ptr = <nvmlClockOffset_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":4296
 *             obj._ptr = <nvmlClockOffset_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":4297
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":4298
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4274
 *         return __from_data(data, "clock_offset_v1_dtype", clock_offset_v1_dtype, ClockOffset_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ClockOffset_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper */
/* Cython-generated wrapper for ClockOffset_v1.__reduce_cython__.  Enforces
 * zero positional and zero keyword arguments, then delegates to the impl
 * function (which always raises TypeError — this type is unpicklable because
 * it holds a raw C pointer). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_16__reduce_cython__, "ClockOffset_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Reject any positional or keyword arguments before delegating. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for ClockOffset_v1.__reduce_cython__: unconditionally raises
 * TypeError, because the object wraps a raw C pointer (self._ptr) that
 * cannot be converted to a Python object for pickling. Always returns
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Error-location slots written by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message; control always jumps to
   * __pyx_L1_error via __PYX_ERR. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Fastcall wrapper for ClockOffset_v1.__setstate_cython__(self, __pyx_state):
 * unpacks exactly one positional-or-keyword argument ("__pyx_state") and
 * delegates to the impl function (which unconditionally raises TypeError). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_18__setstate_cython__, "ClockOffset_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  /* Error-location slots written by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the legacy tuple calling convention, recover nargs from the args
   * tuple; PyTuple_Size can fail and is checked when unsafe sizes apply. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    /* BUG FIX: this check previously read "if (unlikely(__pyx_kwds_len) < 0)",
     * which compares the 0/1 result of unlikely() against 0 and can never be
     * true, so an error return from __Pyx_NumKwargs_FASTCALL was silently
     * ignored. The parenthesization now matches the sibling wrappers. */
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: take up to one positional, then fill the rest from
       * keywords and verify the single required argument is present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for ClockOffset_v1.__setstate_cython__: unconditionally raises
 * TypeError (the wrapped raw C pointer self._ptr is not picklable, so state
 * restoration is disallowed). The __pyx_state argument is ignored. Always
 * returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14ClockOffset_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Error-location slots written by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the interned message; control always jumps to
   * __pyx_L1_error via __PYX_ERR. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClockOffset_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4301
 * 
 * 
 * cdef _get_fan_speed_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFanSpeedInfo_v1_t pod = nvmlFanSpeedInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy.dtype describing the in-memory layout of
 * nvmlFanSpeedInfo_v1_t: field names ['version','fan','speed'], all uint32,
 * with byte offsets computed from pointer arithmetic on a stack instance and
 * itemsize = sizeof(nvmlFanSpeedInfo_v1_t). Returns NULL with an exception
 * set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_fan_speed_info_v1_dtype_offsets(void) {
  nvmlFanSpeedInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlFanSpeedInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_fan_speed_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":4302
 * 
 * cdef _get_fan_speed_info_v1_dtype_offsets():
 *     cdef nvmlFanSpeedInfo_v1_t pod = nvmlFanSpeedInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'fan', 'speed'],
 */
  /* BUG FIX: the value-initialization of the temporary was missing, so pod
   * was copied from an uninitialized __pyx_t_1 (undefined behavior). Only
   * field addresses are taken below, but the copy itself must be defined. */
  __pyx_t_1 = nvmlFanSpeedInfo_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":4303
 * cdef _get_fan_speed_info_v1_dtype_offsets():
 *     cdef nvmlFanSpeedInfo_v1_t pod = nvmlFanSpeedInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'fan', 'speed'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up _numpy.dtype once; __pyx_t_5 holds the callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":4304
 *     cdef nvmlFanSpeedInfo_v1_t pod = nvmlFanSpeedInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'fan', 'speed'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* dict spec with 4 keys: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 4304, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_fan);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_fan);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_fan) != (0)) __PYX_ERR(0, 4304, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_speed);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_speed);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_speed) != (0)) __PYX_ERR(0, 4304, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 4304, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4305
 *     return _numpy.dtype({
 *         'names': ['version', 'fan', 'speed'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* All three struct fields are 32-bit unsigned; each lookup re-reads
   * _numpy.uint32 (generated pattern, one lookup per list element). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 4305, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4305, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 4305, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 4304, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Field byte offsets computed as (&pod.field - &pod), i.e. the generated
   * equivalent of offsetof(nvmlFanSpeedInfo_v1_t, field). */

  /* "cuda/bindings/_nvml.pyx":4307
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.fan)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":4308
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.fan)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.fan)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4308, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":4309
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.fan)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlFanSpeedInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.speed)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":4306
 *         'names': ['version', 'fan', 'speed'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.fan)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4306, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 4306, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 4306, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 4306, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 4304, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":4311
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlFanSpeedInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlFanSpeedInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4311, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 4304, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec); if it is a bound method, unpack self into the
   * argument vector so the vectorcall fast path can be used. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4303, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4301
 * 
 * 
 * cdef _get_fan_speed_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFanSpeedInfo_v1_t pod = nvmlFanSpeedInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_fan_speed_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4328
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlFanSpeedInfo_v1_t *>calloc(1, sizeof(nvmlFanSpeedInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for FanSpeedInfo_v1.__init__(self): verifies no positional
 * or keyword arguments were supplied, then delegates to the impl function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives a tuple; read its length (checked when unsafe). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for FanSpeedInfo_v1.__init__: calloc's one zero-initialized
 * nvmlFanSpeedInfo_v1_t that this wrapper owns (freed in __dealloc__),
 * raising MemoryError on allocation failure. Sets _owner=None, _owned=True,
 * _readonly=False. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  /* Error-location slots written by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4329
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlFanSpeedInfo_v1_t *>calloc(1, sizeof(nvmlFanSpeedInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating FanSpeedInfo_v1")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlFanSpeedInfo_v1_t *)calloc(1, (sizeof(nvmlFanSpeedInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":4330
 *     def __init__(self):
 *         self._ptr = <nvmlFanSpeedInfo_v1_t *>calloc(1, sizeof(nvmlFanSpeedInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating FanSpeedInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4331
 *         self._ptr = <nvmlFanSpeedInfo_v1_t *>calloc(1, sizeof(nvmlFanSpeedInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating FanSpeedInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Construct MemoryError("Error allocating FanSpeedInfo_v1") via the
     * vectorcall helper (unpacking self if MemoryError resolved to a bound
     * method) and raise it. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4331, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_FanSpeedInfo_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4331, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4331, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4330
 *     def __init__(self):
 *         self._ptr = <nvmlFanSpeedInfo_v1_t *>calloc(1, sizeof(nvmlFanSpeedInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating FanSpeedInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":4332
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating FanSpeedInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No external owner keeps the backing memory alive; this object does. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":4333
 *             raise MemoryError("Error allocating FanSpeedInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned=True makes __dealloc__ responsible for freeing _ptr. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":4334
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":4328
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlFanSpeedInfo_v1_t *>calloc(1, sizeof(nvmlFanSpeedInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4336
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlFanSpeedInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-stage wrapper for FanSpeedInfo_v1.__dealloc__: no-argument
 * special method, delegates straight to the impl function. */
static void __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this line can only compile if __Pyx_KwValues_VARARGS is a macro that does
   * not evaluate its arguments — confirm against the utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Impl for FanSpeedInfo_v1.__dealloc__: frees the backing struct, but only
 * when this object owns it (_owned) and the pointer is non-NULL (i.e. the
 * memory was not borrowed from another owner and was not already released). */
static void __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  nvmlFanSpeedInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlFanSpeedInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":4338
 *     def __dealloc__(self):
 *         cdef nvmlFanSpeedInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit 'and': only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4339
 *         cdef nvmlFanSpeedInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":4340
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear _ptr before freeing so a repeated dealloc cannot double-free. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":4341
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4338
 *     def __dealloc__(self):
 *         cdef nvmlFanSpeedInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":4336
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlFanSpeedInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":4343
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.FanSpeedInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for FanSpeedInfo_v1.__repr__: no-argument special method,
 * delegates straight to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this line can only compile if __Pyx_KwValues_VARARGS is a macro that does
   * not evaluate its arguments — confirm against the utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for FanSpeedInfo_v1.__repr__: builds the f-string
 * "<{__name__}.FanSpeedInfo_v1 object at {hex(id(self))}>" by joining five
 * unicode pieces (two literal brackets, the module name, the 27-char middle
 * literal, and hex(id(self))). Returns a new unicode object or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  /* Error-location slots written by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4344
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.FanSpeedInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format {__name__}: fetch the module global and str()-format it. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Format {hex(id(self))}: id(self) -> hex() -> str(). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five pieces; the precomputed length is the 2 bracket chars plus
   * the 27-char ".FanSpeedInfo_v1 object at " literal plus both dynamic parts. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_FanSpeedInfo_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4343
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.FanSpeedInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4346
 *         return f"<{__name__}.FanSpeedInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for FanSpeedInfo_v1.ptr: no-argument descriptor
 * __get__, delegates straight to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this line can only compile if __Pyx_KwValues_VARARGS is a macro that does
   * not evaluate its arguments — confirm against the utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the wrapped
 * nvmlFanSpeedInfo_v1_t* address as a Python int (new reference).
 * Returns NULL with an exception set if the int object cannot be created. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4349
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): converts via PyLong_FromSsize_t, i.e. assumes intptr_t and
   * Py_ssize_t have the same width — true on mainstream platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4349, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4346
 *         return f"<{__name__}.FanSpeedInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4351
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":4352
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4351
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4354
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot wrapper for `int(FanSpeedInfo_v1)`.
 * Casts the receiver to the extension-type struct and delegates to the
 * __int__ implementation; propagates its result (new reference / NULL). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__int__`: identical semantics to the `ptr` getter —
 * returns the wrapped struct pointer as a Python int (new reference),
 * or NULL with an exception set on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4355
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4354
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4357
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef FanSpeedInfo_v1 other_
 *         if not isinstance(other, FanSpeedInfo_v1):
*/

/* Python wrapper */
/* Slot wrapper for `FanSpeedInfo_v1.__eq__(other)`.
 * Casts `self` to the extension-type struct and forwards both operands to
 * the __eq__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__eq__`: False if `other` is not a FanSpeedInfo_v1
 * (exact-or-subclass check), otherwise a byte-wise memcmp of the two wrapped
 * nvmlFanSpeedInfo_v1_t structs. Returns a new bool reference, or NULL with
 * an exception set on error.
 * NOTE(review): the memcmp path assumes both `_ptr` fields are non-NULL;
 * comparing an uninitialized instance would dereference NULL — presumably
 * instances are always constructed with a valid buffer; confirm in the
 * class's __cinit__ (outside this view). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4359
 *     def __eq__(self, other):
 *         cdef FanSpeedInfo_v1 other_
 *         if not isinstance(other, FanSpeedInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4360
 *         cdef FanSpeedInfo_v1 other_
 *         if not isinstance(other, FanSpeedInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFanSpeedInfo_v1_t)) == 0)
 */
    /* Wrong type: return the singleton False (incref'd, never raised). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4359
 *     def __eq__(self, other):
 *         cdef FanSpeedInfo_v1 other_
 *         if not isinstance(other, FanSpeedInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":4361
 *         if not isinstance(other, FanSpeedInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFanSpeedInfo_v1_t)) == 0)
 * 
 */
  /* Typed assignment to the cdef local; the TypeTest is a second runtime
   * check (it also admits None) required by the cast below. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1))))) __PYX_ERR(0, 4361, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":4362
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFanSpeedInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Structural equality: the two wrapped structs compare equal iff every
   * byte matches. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlFanSpeedInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4357
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef FanSpeedInfo_v1 other_
 *         if not isinstance(other, FanSpeedInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4364
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFanSpeedInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript slot wrapper for `FanSpeedInfo_v1[key] = val`.
 * Casts `self` and forwards key/val to the __setitem__ implementation;
 * returns 0 on success, -1 on error (per the CPython slot contract). */
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setitem__(key, val)`:
 *   - if key == 0 and val is a numpy.ndarray: allocate a fresh
 *     nvmlFanSpeedInfo_v1_t, memcpy the array's bytes into it, take
 *     ownership (_owner=None, _owned=True) and mirror the array's
 *     writeability into _readonly;
 *   - otherwise: fall back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE(review): the ndarray path overwrites self->_ptr with the new malloc
 * result without freeing any previously owned buffer — looks like a leak if
 * the instance already owned memory; confirm against the generator template
 * / dealloc logic (outside this view). */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":4365
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: test key == 0 first (may raise), then the
   * isinstance check via a runtime lookup of _numpy.ndarray. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4365, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 4365, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4366
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 */
    __pyx_v_self->_ptr = ((nvmlFanSpeedInfo_v1_t *)malloc((sizeof(nvmlFanSpeedInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4367
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFanSpeedInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4368
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFanSpeedInfo_v1_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * called with the message string, and raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4368, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_FanSpeedInfo_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4368, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 4368, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4367
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFanSpeedInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4369
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFanSpeedInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read the array's base address from val.ctypes.data (a Python int)
     * and copy sizeof(struct) bytes from it — assumes the caller's array
     * holds at least that many bytes; TODO confirm callers guarantee this. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4369, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4369, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4369, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlFanSpeedInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4370
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFanSpeedInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-contained: no Python object keeps the data alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4371
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFanSpeedInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4372
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 4372, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":4365
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":4374
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Any other key is treated as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 4374, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":4364
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFanSpeedInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4376
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
/* Getter-slot wrapper for the `version` property ("the API version number").
 * Casts the receiver and delegates to the __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` getter: reads the struct's `version`
 * field (unsigned int) and boxes it as a Python int (new reference).
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4379
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4376
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4381
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter-slot wrapper for the `version` property.
 * Casts the receiver, forwards the value, and returns 0 / -1 per the
 * CPython descriptor-setter contract. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` setter: raises ValueError if the instance
 * is read-only, otherwise converts `val` to unsigned int (may raise
 * OverflowError/TypeError) and stores it in the struct's `version` field.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4383
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4384
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_FanSpeedInfo_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4384, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4384, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4383
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4385
 *         if self._readonly:
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4385, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4381
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4387
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def fan(self):
 *         """int: the fan index"""
*/

/* Python wrapper */
/* Getter-slot wrapper for the `fan` property ("the fan index").
 * Casts the receiver and delegates to the __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `fan` getter: reads the struct's `fan` field
 * (unsigned int) and boxes it as a Python int (new reference).
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4390
 *     def fan(self):
 *         """int: the fan index"""
 *         return self._ptr[0].fan             # <<<<<<<<<<<<<<
 * 
 *     @fan.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).fan); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4390, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4387
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def fan(self):
 *         """int: the fan index"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.fan.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4392
 *         return self._ptr[0].fan
 * 
 *     @fan.setter             # <<<<<<<<<<<<<<
 *     def fan(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter-slot wrapper for the `fan` property.
 * Casts the receiver, forwards the value, and returns 0 / -1 per the
 * CPython descriptor-setter contract. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `fan` setter: raises ValueError if the instance is
 * read-only, otherwise converts `val` to unsigned int (may raise
 * OverflowError/TypeError) and stores it in the struct's `fan` field.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4394
 *     @fan.setter
 *     def fan(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].fan = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4395
 *     def fan(self, val):
 *         if self._readonly:
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].fan = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_FanSpeedInfo_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4395, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4395, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4394
 *     @fan.setter
 *     def fan(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].fan = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4396
 *         if self._readonly:
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].fan = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4396, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).fan = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4392
 *         return self._ptr[0].fan
 * 
 *     @fan.setter             # <<<<<<<<<<<<<<
 *     def fan(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.fan.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4398
 *         self._ptr[0].fan = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def speed(self):
 *         """int: OUT: the fan speed in RPM."""
*/

/* Python wrapper */
/* Getter-slot wrapper for the `speed` property ("the fan speed in RPM").
 * Casts the receiver and delegates to the __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of FanSpeedInfo_v1.speed getter: boxes the C `unsigned int` field
 * self._ptr[0].speed into a new Python int object and returns it (new
 * reference). On boxing failure, jumps to the error path, records a
 * traceback, and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4401
 *     def speed(self):
 *         """int: OUT: the fan speed in RPM."""
 *         return self._ptr[0].speed             # <<<<<<<<<<<<<<
 * 
 *     @speed.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the raw struct field to a Python int; __pyx_t_1 owns the new ref. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).speed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4398
 *         self._ptr[0].fan = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def speed(self):
 *         """int: OUT: the fan speed in RPM."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.speed.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4403
 *         return self._ptr[0].speed
 * 
 *     @speed.setter             # <<<<<<<<<<<<<<
 *     def speed(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Trampoline for the FanSpeedInfo_v1.speed property setter: casts self to the
 * extension-type struct and delegates. Returns 0 on success, -1 on error
 * (standard setter protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): as in the getter wrapper, __pyx_args/__pyx_nargs are not
   * in scope here; the macro presumably ignores them — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of FanSpeedInfo_v1.speed setter: raises ValueError if the instance is
 * read-only; otherwise converts `val` to C `unsigned int` (OverflowError/
 * TypeError propagate from the conversion helper) and stores it into the
 * wrapped nvml struct. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4405
 *     @speed.setter
 *     def speed(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].speed = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4406
 *     def speed(self, val):
 *         if self._readonly:
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].speed = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. The
     * callargs+offset arithmetic implements PY_VECTORCALL_ARGUMENTS_OFFSET:
     * with __pyx_t_3 == 1 the NULL slot 0 is skipped and one positional
     * argument (the message) is passed. Do not reorder these lines. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_FanSpeedInfo_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4406, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4406, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4405
 *     @speed.setter
 *     def speed(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].speed = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4407
 *         if self._readonly:
 *             raise ValueError("This FanSpeedInfo_v1 instance is read-only")
 *         self._ptr[0].speed = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 is a possible legitimate value, so the error check also
   * requires PyErr_Occurred() to distinguish failure from UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4407, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).speed = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4403
 *         return self._ptr[0].speed
 * 
 *     @speed.setter             # <<<<<<<<<<<<<<
 *     def speed(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.speed.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4409
 *         self._ptr[0].speed = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FanSpeedInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the static method
 * FanSpeedInfo_v1.from_data(data). The signature is conditional on
 * CYTHON_METH_FASTCALL (vectorcall-style args vs. classic tuple+dict). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_12from_data, "FanSpeedInfo_v1.from_data(data)\n\nCreate an FanSpeedInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `fan_speed_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_12from_data};
/* Argument-unpacking wrapper for FanSpeedInfo_v1.from_data(data): accepts the
 * single required argument positionally or by keyword ("data"), then delegates
 * to the impl function. Returns a new reference or NULL with an exception set.
 * values[] holds owned references and is cleared on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis was misplaced — `unlikely(__pyx_kwds_len) < 0` applies
     * __builtin_expect(!!(x), 0) first, yielding 0 or 1, so the comparison was
     * always false and the error check dead. Now matches the correct form used
     * by __reduce_cython__'s wrapper elsewhere in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4409, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4409, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 4409, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 4409, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4409, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 4409, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of FanSpeedInfo_v1.from_data(data): looks up the module-level global
 * `fan_speed_info_v1_dtype` and delegates to the shared __from_data helper,
 * passing the dtype name, the dtype object, and the FanSpeedInfo_v1 type.
 * Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":4416
 *             data (_numpy.ndarray): a single-element array of dtype `fan_speed_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "fan_speed_info_v1_dtype", fan_speed_info_v1_dtype, FanSpeedInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_fan_speed_info_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_fan_speed_info_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4409
 *         self._ptr[0].speed = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FanSpeedInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4418
 *         return __from_data(data, "fan_speed_info_v1_dtype", fan_speed_info_v1_dtype, FanSpeedInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FanSpeedInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the static method
 * FanSpeedInfo_v1.from_ptr(ptr, readonly=False, owner=None). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_14from_ptr, "FanSpeedInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an FanSpeedInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_14from_ptr};
/* Argument-unpacking wrapper for FanSpeedInfo_v1.from_ptr(ptr, readonly=False,
 * owner=None): unpacks up to three positional/keyword arguments, applies the
 * defaults (readonly=False, owner=None), converts `ptr` to intptr_t and
 * `readonly` to a C truth value, then delegates to the impl function.
 * values[] holds owned references and is cleared on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis was misplaced — `unlikely(__pyx_kwds_len) < 0` applies
     * __builtin_expect(!!(x), 0) first, yielding 0 or 1, so the comparison was
     * always false and the error check dead. Now matches the correct form used
     * by __reduce_cython__'s wrapper elsewhere in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4418, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 4418, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":4419
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an FanSpeedInfo_v1 instance wrapping the given pointer.
 * 
 */
      /* Apply default owner=None; readonly's default is applied below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 4418, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4418, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4419, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4419, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 4418, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":4418
 *         return __from_data(data, "fan_speed_info_v1_dtype", fan_speed_info_v1_dtype, FanSpeedInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FanSpeedInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of FanSpeedInfo_v1.from_ptr: validates ptr != 0, allocates a new
 * FanSpeedInfo_v1 instance, then either (owner is None) mallocs a private
 * nvmlFanSpeedInfo_v1_t, copies the pointee into it, and marks the instance
 * as owning that allocation, or (owner given) aliases the caller's pointer
 * and keeps a reference to `owner` so the memory outlives the wrapper.
 * Returns a new reference or NULL with an exception set.
 * NOTE(review): in the aliasing branch nothing here verifies that `owner`
 * actually keeps `ptr` alive — that is the caller's contract per the
 * docstring. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4427
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4428
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)
 *         if owner is None:
 */
    /* Raise ValueError via the vectorcall fast path (offset trick skips the
     * NULL slot 0 and passes one positional argument). */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4428, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4428, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4427
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":4429
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 */
  /* Direct tp_new call (bypasses __init__); obj owns the new reference. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4429, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4430
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4431
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 */
    /* Copy branch: allocate a private struct the instance will own and free. */
    __pyx_v_obj->_ptr = ((nvmlFanSpeedInfo_v1_t *)malloc((sizeof(nvmlFanSpeedInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4432
 *         if owner is None:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFanSpeedInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4433
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFanSpeedInfo_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name (module globals may shadow the
       * builtin), hence the method-unpacking dance before the call. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4433, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_FanSpeedInfo_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4433, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 4433, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4432
 *         if owner is None:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFanSpeedInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4434
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFanSpeedInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Snapshot the caller's struct; afterwards the instance is independent
     * of the memory behind `ptr`. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlFanSpeedInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4435
 *                 raise MemoryError("Error allocating FanSpeedInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFanSpeedInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4436
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFanSpeedInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>ptr
 */
    /* _owned=True: dealloc (elsewhere in this file) should free _ptr. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4430
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FanSpeedInfo_v1 obj = FanSpeedInfo_v1.__new__(FanSpeedInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>malloc(sizeof(nvmlFanSpeedInfo_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":4438
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Aliasing branch: wrap the caller's memory directly; `owner` is kept
     * alive via the reference stored below. */
    __pyx_v_obj->_ptr = ((nvmlFanSpeedInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4439
 *         else:
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":4440
 *             obj._ptr = <nvmlFanSpeedInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":4441
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":4442
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4418
 *         return __from_data(data, "fan_speed_info_v1_dtype", fan_speed_info_v1_dtype, FanSpeedInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FanSpeedInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the generated
 * __reduce_cython__ pickling guard (always raises; see impl below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_16__reduce_cython__, "FanSpeedInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_16__reduce_cython__};
/* Wrapper for __reduce_cython__: rejects any positional or keyword arguments,
 * then delegates to the impl (which unconditionally raises TypeError).
 * Note the correctly-parenthesized `unlikely(__pyx_kwds_len < 0)` check here,
 * unlike the from_data/from_ptr wrappers above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of __reduce_cython__: the type holds a raw C pointer (_ptr) that
 * cannot be pickled, so this always raises TypeError and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the generated
 * __setstate_cython__ pickling guard (always raises TypeError, mirroring
 * __reduce_cython__ above). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_18__setstate_cython__, "FanSpeedInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of FanSpeedInfo_v1.__setstate_cython__: unconditionally raises
 * TypeError (the wrapped C pointer cannot be unpickled). The __pyx_state
 * argument is received but never used. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise and fall through to the error label; there is no success path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FanSpeedInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4445
 * 
 * 
 * cdef _get_device_perf_modes_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDevicePerfModes_v1_t pod = nvmlDevicePerfModes_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing the C layout of
 * nvmlDevicePerfModes_v1_t: field names ['version', 'str'], formats
 * [uint32, int8], byte offsets computed with offsetof-style pointer
 * arithmetic on a local instance, and itemsize = sizeof(the struct).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_perf_modes_v1_dtype_offsets(void) {
  nvmlDevicePerfModes_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlDevicePerfModes_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_device_perf_modes_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":4446
 * 
 * cdef _get_device_perf_modes_v1_dtype_offsets():
 *     cdef nvmlDevicePerfModes_v1_t pod = nvmlDevicePerfModes_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'str'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy, so pod's
     byte values are indeterminate. Only &pod and field addresses are used
     below (for offset arithmetic), so the values never matter, but copying
     an uninitialized struct is formally dubious — confirm this matches
     intended Cython C++ codegen for a default-constructed POD. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":4447
 * cdef _get_device_perf_modes_v1_dtype_offsets():
 *     cdef nvmlDevicePerfModes_v1_t pod = nvmlDevicePerfModes_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'str'],
 *         'formats': [_numpy.uint32, _numpy.int8],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Resolve _numpy.dtype: t_5 holds the dtype callable. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":4448
 *     cdef nvmlDevicePerfModes_v1_t pod = nvmlDevicePerfModes_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'str'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [
 */
  /* t_4 is the 4-key spec dict passed to numpy.dtype. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 4448, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_str);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_str);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_str) != (0)) __PYX_ERR(0, 4448, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 4448, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4449
 *     return _numpy.dtype({
 *         'names': ['version', 'str'],
 *         'formats': [_numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 4449, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4449, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 4448, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4451
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),
 *         ],
 */
  /* Field offsets via pointer difference on the local pod instance. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":4452
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlDevicePerfModes_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.str)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":4450
 *         'names': ['version', 'str'],
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 4450, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4450, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 4448, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":4454
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlDevicePerfModes_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlDevicePerfModes_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 4448, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall invocation of numpy.dtype(spec); the bound-method unpack
     below shifts the argument window when dtype is a PyMethod. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4447, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4445
 * 
 * 
 * cdef _get_device_perf_modes_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDevicePerfModes_v1_t pod = nvmlDevicePerfModes_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Release every temporary that may still hold a reference. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_device_perf_modes_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4471
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDevicePerfModes_v1_t *>calloc(1, sizeof(nvmlDevicePerfModes_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Wrapper for DevicePerfModes_v1.__init__: accepts no arguments; rejects any
 * positional or keyword arguments before delegating to the impl function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Signature is __init__(self) only: any extra args/kwargs raise TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of DevicePerfModes_v1.__init__: allocates a zeroed
 * nvmlDevicePerfModes_v1_t via calloc and marks the instance as the owner
 * (_owned = True, _owner = None, _readonly = False). Raises MemoryError if
 * the allocation fails. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4472
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlDevicePerfModes_v1_t *>calloc(1, sizeof(nvmlDevicePerfModes_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DevicePerfModes_v1")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlDevicePerfModes_v1_t *)calloc(1, (sizeof(nvmlDevicePerfModes_v1_t))));

  /* "cuda/bindings/_nvml.pyx":4473
 *     def __init__(self):
 *         self._ptr = <nvmlDevicePerfModes_v1_t *>calloc(1, sizeof(nvmlDevicePerfModes_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DevicePerfModes_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4474
 *         self._ptr = <nvmlDevicePerfModes_v1_t *>calloc(1, sizeof(nvmlDevicePerfModes_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DevicePerfModes_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating DevicePerfModes_v1")
       through the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4474, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DevicePerfModes};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4474, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4474, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4473
 *     def __init__(self):
 *         self._ptr = <nvmlDevicePerfModes_v1_t *>calloc(1, sizeof(nvmlDevicePerfModes_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DevicePerfModes_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":4475
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DevicePerfModes_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":4476
 *             raise MemoryError("Error allocating DevicePerfModes_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This instance owns the allocation; __dealloc__ will free it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":4477
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":4471
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDevicePerfModes_v1_t *>calloc(1, sizeof(nvmlDevicePerfModes_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4479
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDevicePerfModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for DevicePerfModes_v1.__dealloc__: forwards
 * directly to the impl; cannot raise. */
static void __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args and __pyx_nargs are not declared in this
     function; this only compiles if __Pyx_KwValues_VARARGS is a macro that
     discards its arguments — confirm against the __Pyx_KwValues_VARARGS
     definition earlier in this generated file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  nvmlDevicePerfModes_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlDevicePerfModes_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":4481
 *     def __dealloc__(self):
 *         cdef nvmlDevicePerfModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4482
 *         cdef nvmlDevicePerfModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":4483
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":4484
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4481
 *     def __dealloc__(self):
 *         cdef nvmlDevicePerfModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":4479
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDevicePerfModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":4486
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for DevicePerfModes_v1.__repr__: no arguments beyond self;
 * forwards to the impl and returns its result (new reference or NULL). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
     __Pyx_KwValues_VARARGS being an argument-discarding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of DevicePerfModes_v1.__repr__: builds the f-string
 * f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>" by formatting
 * the module __name__, computing hex(id(self)), and joining the five pieces
 * with __Pyx_PyUnicode_Join. Returns a new unicode reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4487
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) via FormatSimple (empty format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".DevicePerfModes_v1 object at " + hex + ">". The
     length/max-char hints precompute the result size for a single alloc. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_DevicePerfModes_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4486
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4489
 *         return f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for DevicePerfModes_v1.ptr: forwards to the impl,
 * which returns the pointer address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
     __Pyx_KwValues_VARARGS being an argument-discarding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the DevicePerfModes_v1.ptr property getter: converts the raw
 * struct pointer to a Python int (its address). Returns a new reference,
 * or NULL if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4492
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Address converted through PyLong_FromSsize_t (intptr_t-sized). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4489
 *         return f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4494
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef-level accessor (pyx line 4494): returns the raw struct pointer of
 * DevicePerfModes_v1 as an integer address. Pure and cannot fail, so the
 * generated goto-label skeleton is flattened to a single return. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18DevicePerfModes_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  /* pyx 4495: return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":4497
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot wrapper for DevicePerfModes_v1.__int__: forwards to the impl,
 * which returns the pointer address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
     __Pyx_KwValues_VARARGS being an argument-discarding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePerfModes_v1.__int__ (pyx line 4497):
 * returns the wrapped struct pointer, cast to intptr_t, boxed as a
 * Python int. Returns NULL on error (only the int boxing can fail). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4498
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* pointer value is boxed via PyLong_FromSsize_t; intptr_t and
   * Py_ssize_t have matching width on the supported platforms */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4498, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4497
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4500
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DevicePerfModes_v1 other_
 *         if not isinstance(other, DevicePerfModes_v1):
*/

/* Python wrapper */
/* tp_richcompare (Py_EQ) wrapper for DevicePerfModes_v1.__eq__:
 * casts the receiver and forwards both operands to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePerfModes_v1.__eq__ (pyx line 4500):
 * returns False for non-DevicePerfModes_v1 operands, otherwise compares
 * the two wrapped structs bytewise with memcmp. Note the comparison is
 * by struct contents, not by pointer identity. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4502
 *     def __eq__(self, other):
 *         cdef DevicePerfModes_v1 other_
 *         if not isinstance(other, DevicePerfModes_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance check also rejects None, so the memcmp below never sees
   * a None operand */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4503
 *         cdef DevicePerfModes_v1 other_
 *         if not isinstance(other, DevicePerfModes_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePerfModes_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4502
 *     def __eq__(self, other):
 *         cdef DevicePerfModes_v1 other_
 *         if not isinstance(other, DevicePerfModes_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":4504
 *         if not isinstance(other, DevicePerfModes_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePerfModes_v1_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1))))) __PYX_ERR(0, 4504, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":4505
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePerfModes_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlDevicePerfModes_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4505, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4500
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DevicePerfModes_v1 other_
 *         if not isinstance(other, DevicePerfModes_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4507
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePerfModes_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for DevicePerfModes_v1.__setitem__:
 * casts the receiver and forwards key/value to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePerfModes_v1.__setitem__ (pyx line 4507).
 * Two modes:
 *   obj[0] = ndarray  -> mallocs a fresh struct, copies sizeof(struct)
 *                        bytes from the array's data pointer, and takes
 *                        ownership (_owned = True); _readonly mirrors the
 *                        array's writeable flag.
 *   obj[key] = val    -> falls back to setattr(self, key, val).
 * NOTE(review): in the ndarray branch the previous self._ptr is
 * overwritten without being freed here — presumably ownership/cleanup is
 * handled elsewhere in the class; verify against the .pyx dealloc path. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":4508
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if self._ptr == NULL:
 */
  /* short-circuit 'and': the isinstance half only runs when key == 0 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4508, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 4508, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4509
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 */
    __pyx_v_self->_ptr = ((nvmlDevicePerfModes_v1_t *)malloc((sizeof(nvmlDevicePerfModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4510
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4511
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (could be shadowed),
       * then called via the generic fastcall path */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4511, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DevicePerfModes};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4511, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 4511, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4510
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4512
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* copies sizeof(struct) bytes from val.ctypes.data — assumes the
     * caller passed a single-element array of the matching dtype (the
     * Python-level API enforces this); not validated here */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4512, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4512, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4512, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlDevicePerfModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4513
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4514
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4515
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4515, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 4515, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":4508
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":4517
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 4517, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":4507
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePerfModes_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4519
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
/* Property getter wrapper for DevicePerfModes_v1.version:
 * casts the receiver and forwards to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the DevicePerfModes_v1.version getter (pyx 4519):
 * reads the struct's unsigned-int `version` field ("the API version
 * number") and boxes it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4522
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4519
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4524
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for DevicePerfModes_v1.version:
 * casts the receiver and forwards the new value to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the DevicePerfModes_v1.version setter (pyx 4524):
 * raises ValueError when the instance is read-only, otherwise converts
 * the value to unsigned int and stores it in the struct's version field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4526
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4527
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DevicePerfModes_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4527, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4527, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4526
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4528
 *         if self._readonly:
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* conversion raises OverflowError/TypeError for out-of-range or
   * non-integer values before the store happens */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4528, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4524
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4530
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def str(self):
 *         """~_numpy.int8: (array of length 2048).OUT: the performance modes string."""
*/

/* Python wrapper */
/* Property getter wrapper for DevicePerfModes_v1.str:
 * casts the receiver and forwards to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the DevicePerfModes_v1.str getter (pyx 4530):
 * decodes the struct's fixed 2048-byte `str` field as a NUL-terminated
 * UTF-8 C string into a Python unicode object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4533
 *     def str(self):
 *         """~_numpy.int8: (array of length 2048).OUT: the performance modes string."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].str)             # <<<<<<<<<<<<<<
 * 
 *     @str.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* PyUnicode_FromString stops at the first NUL; trailing bytes of the
   * fixed-size buffer are ignored */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).str); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4533, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4530
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def str(self):
 *         """~_numpy.int8: (array of length 2048).OUT: the performance modes string."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.str.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4535
 *         return cpython.PyUnicode_FromString(self._ptr[0].str)
 * 
 *     @str.setter             # <<<<<<<<<<<<<<
 *     def str(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for DevicePerfModes_v1.str:
 * casts the receiver and forwards the new value to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the DevicePerfModes_v1.str setter (pyx 4535):
 * raises ValueError when the instance is read-only, encodes the value
 * to bytes, rejects encodings of 2048 bytes or more, and stores the
 * result as a NUL-terminated C string in the fixed 2048-byte field.
 *
 * FIX(review): the generated code copied a constant 2048 (0x800) bytes
 * out of the encoded bytes object, whose length is guaranteed to be
 * < 2048 by the check above — an out-of-bounds heap read (undefined
 * behavior). The store now zero-fills the destination and copies only
 * len(buf) bytes, which preserves the C-string contract (the getter
 * stops at the first NUL) and makes the struct tail deterministic for
 * the memcmp-based __eq__. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4537
 *     @str.setter
 *     def str(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4538
 *     def str(self, val):
 *         if self._readonly:
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 2048:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DevicePerfModes_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4538, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4538, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4537
 *     @str.setter
 *     def str(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":4539
 *         if self._readonly:
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 2048:
 *             raise ValueError("String too long for field str, max length is 2047")
 */
  /* val.encode() with no arguments: UTF-8 encoding, strict errors */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4539, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 4539, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4540
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 2048:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field str, max length is 2047")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 4540, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4540, __pyx_L1_error)
  /* length limit leaves room for the NUL terminator in the 2048-byte field */
  __pyx_t_5 = (__pyx_t_4 >= 0x800);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":4541
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 2048:
 *             raise ValueError("String too long for field str, max length is 2047")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_str_ma};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4541, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4541, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4540
 *             raise ValueError("This DevicePerfModes_v1 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 2048:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field str, max length is 2047")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":4542
 *         if len(buf) >= 2048:
 *             raise ValueError("String too long for field str, max length is 2047")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 4542, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 4542, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":4543
 *             raise ValueError("String too long for field str, max length is 2047")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* FIX: zero-fill the whole field, then copy exactly len(buf) bytes.
   * The original memcpy of a constant 0x800 bytes read past the end of
   * the bytes object (len < 0x800 guaranteed above) — UB. The memset
   * also supplies the NUL terminator and zeroes the tail. */
  (void)(memset(((void *)(__pyx_v_self->_ptr[0]).str), 0, 0x800));
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).str), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4)));

  /* "cuda/bindings/_nvml.pyx":4535
 *         return cpython.PyUnicode_FromString(self._ptr[0].str)
 * 
 *     @str.setter             # <<<<<<<<<<<<<<
 *     def str(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.str.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4545
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DevicePerfModes_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for DevicePerfModes_v1.from_data(data): accepts the
 * METH_FASTCALL (or legacy tuple) calling convention, extracts the single
 * positional-or-keyword argument "data", and forwards it to the implementation.
 * FIX(review): the keyword-count error check was emitted as
 * `unlikely(__pyx_kwds_len) < 0`; under GCC/Clang `unlikely(x)` is
 * `__builtin_expect(!!(x), 0)`, whose value is 0 or 1, so the comparison could
 * never fire and a -1 error from __Pyx_NumKwargs_FASTCALL went undetected.
 * The parentheses are corrected below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_12from_data, "DevicePerfModes_v1.from_data(data)\n\nCreate an DevicePerfModes_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `device_perf_modes_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parentheses moved so a negative (error) keyword count is
     * actually detected; previously `unlikely(len) < 0` compared the 0/1
     * result of __builtin_expect and was always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4545, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4545, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 4545, __pyx_L3_error)
      /* Verify the mandatory "data" argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 4545, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4545, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 4545, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePerfModes_v1.from_data(data): looks up the module
 * global `device_perf_modes_v1_dtype` and delegates to the shared __from_data
 * helper, which builds a DevicePerfModes_v1 instance from the given array.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":4552
 *             data (_numpy.ndarray): a single-element array of dtype `device_perf_modes_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "device_perf_modes_v1_dtype", device_perf_modes_v1_dtype, DevicePerfModes_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module state (it is defined elsewhere in the
   * module), then call the generic __from_data constructor with the dtype
   * name, the dtype object, and the wrapping extension type. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_device_perf_modes_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4552, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_device_perf_modes_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4552, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4545
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DevicePerfModes_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4554
 *         return __from_data(data, "device_perf_modes_v1_dtype", device_perf_modes_v1_dtype, DevicePerfModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePerfModes_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for DevicePerfModes_v1.from_ptr(ptr, readonly=False,
 * owner=None): handles both METH_FASTCALL and legacy tuple conventions, converts
 * `ptr` to intptr_t and `readonly` to a C bool, defaults `owner` to None, then
 * forwards to the implementation.
 * FIX(review): the keyword-count error check was emitted as
 * `unlikely(__pyx_kwds_len) < 0`; under GCC/Clang `unlikely(x)` is
 * `__builtin_expect(!!(x), 0)`, whose value is 0 or 1, so the comparison could
 * never fire and a -1 error from __Pyx_NumKwargs_FASTCALL went undetected.
 * The parentheses are corrected below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_14from_ptr, "DevicePerfModes_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an DevicePerfModes_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parentheses moved so a negative (error) keyword count is
     * actually detected; previously `unlikely(len) < 0` compared the 0/1
     * result of __builtin_expect and was always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4554, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4554, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4554, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4554, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 4554, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":4555
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an DevicePerfModes_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 4554, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4554, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4554, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4554, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr (Python int) to intptr_t; -1 is the conversion-error
     * sentinel, disambiguated via PyErr_Occurred(). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4555, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4555, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 4554, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":4554
 *         return __from_data(data, "device_perf_modes_v1_dtype", device_perf_modes_v1_dtype, DevicePerfModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePerfModes_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePerfModes_v1.from_ptr(ptr, readonly, owner).
 * Two ownership modes, chosen by `owner`:
 *   - owner is None: allocate a fresh nvmlDevicePerfModes_v1_t, deep-copy
 *     sizeof(nvmlDevicePerfModes_v1_t) bytes from `ptr`, and mark the struct
 *     as owned (freed when the wrapper is deallocated).
 *   - owner given: wrap `ptr` directly without copying and hold a reference
 *     to `owner` to keep the underlying memory alive.
 * NOTE(review): `ptr` must point to at least sizeof(nvmlDevicePerfModes_v1_t)
 * valid bytes — the caller's responsibility; only ptr == 0 is rejected here.
 * Returns a new DevicePerfModes_v1 reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4563
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)
 */
  /* Reject a null address before any allocation happens. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4564
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4564, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4564, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4563
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":4565
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 */
  /* Allocate the wrapper instance via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePerfModes_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4565, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4566
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4567
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 */
    /* No owner: take a private copy so the wrapper's data outlives `ptr`. */
    __pyx_v_obj->_ptr = ((nvmlDevicePerfModes_v1_t *)malloc((sizeof(nvmlDevicePerfModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4568
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePerfModes_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4569
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePerfModes_v1_t))
 *             obj._owner = None
 */
      /* malloc failed: raise MemoryError; `obj` itself is released on the
       * error path below (L1_error -> L0 -> XDECREF(obj)). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4569, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DevicePerfModes};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4569, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 4569, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4568
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePerfModes_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4570
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePerfModes_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the struct from the caller-supplied address. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlDevicePerfModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4571
 *                 raise MemoryError("Error allocating DevicePerfModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePerfModes_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4572
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePerfModes_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>ptr
 */
    /* _owned=True: the dealloc path is responsible for freeing _ptr. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4566
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>malloc(sizeof(nvmlDevicePerfModes_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":4574
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: wrap the memory in place; the held reference to
     * `owner` keeps the pointed-to data alive for the wrapper's lifetime. */
    __pyx_v_obj->_ptr = ((nvmlDevicePerfModes_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4575
 *         else:
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":4576
 *             obj._ptr = <nvmlDevicePerfModes_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":4577
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":4578
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4554
 *         return __from_data(data, "device_perf_modes_v1_dtype", device_perf_modes_v1_dtype, DevicePerfModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePerfModes_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* On success this drops the local reference (the result holds its own);
   * on error it releases the partially-built instance. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for __reduce_cython__(self): accepts no arguments; rejects any
 * positional or keyword arguments before delegating to the implementation,
 * which unconditionally raises TypeError (the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_16__reduce_cython__, "DevicePerfModes_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error.
   * (Note the correctly-parenthesized `unlikely(__pyx_kwds_len < 0)` here.) */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because the
 * wrapped C pointer (self._ptr) cannot be serialized; this makes instances
 * explicitly unpicklable. Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-unpacking wrapper for __setstate_cython__(self, __pyx_state): extracts
 * the single state argument and forwards it to the implementation (which
 * unconditionally raises TypeError — the type is not picklable).
 * FIX(review): the keyword-count error check was emitted as
 * `unlikely(__pyx_kwds_len) < 0`; under GCC/Clang `unlikely(x)` is
 * `__builtin_expect(!!(x), 0)`, whose value is 0 or 1, so the comparison could
 * never fire and a -1 error from __Pyx_NumKwargs_FASTCALL went undetected.
 * The parentheses are corrected below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_18__setstate_cython__, "DevicePerfModes_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parentheses moved so a negative (error) keyword count is
     * actually detected; previously `unlikely(len) < 0` compared the 0/1
     * result of __builtin_expect and was always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePerfModes_v1.__setstate_cython__ (generated by
 * Cython from its pickling "(tree fragment)" stub).  Pickling is
 * intentionally unsupported for this extension type: the function
 * unconditionally raises TypeError and returns NULL.  Both parameters are
 * CYTHON_UNUSED because neither is read before the raise. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message string, then jump to the
   * common error exit (this path is always taken). */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Record this frame in the pending exception's traceback, then return
   * NULL to propagate the TypeError to the caller. */
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePerfModes_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4581
 * 
 * 
 * cdef _get_device_current_clock_freqs_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceCurrentClockFreqs_v1_t pod = nvmlDeviceCurrentClockFreqs_v1_t()
 *     return _numpy.dtype({
*/

/* C body of the module-level cdef function
 * _get_device_current_clock_freqs_v1_dtype_offsets() (_nvml.pyx:4581).
 * Builds and returns a numpy.dtype describing the native memory layout of
 * nvmlDeviceCurrentClockFreqs_v1_t: it constructs a dict with 'names',
 * 'formats', 'offsets' (byte offsets computed via pointer arithmetic on a
 * stack instance) and 'itemsize' (sizeof the struct), then calls
 * _numpy.dtype(...) on it.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_current_clock_freqs_v1_dtype_offsets(void) {
  nvmlDeviceCurrentClockFreqs_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlDeviceCurrentClockFreqs_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_device_current_clock_freqs_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":4582
 * 
 * cdef _get_device_current_clock_freqs_v1_dtype_offsets():
 *     cdef nvmlDeviceCurrentClockFreqs_v1_t pod = nvmlDeviceCurrentClockFreqs_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'str'],
 */
  /* NOTE(review): __pyx_t_1 is never explicitly assigned before this copy.
   * This file is compiled as C++ (see metadata), so __pyx_t_1 is
   * default-constructed above; in any case only the *addresses* of pod's
   * fields are taken below (its values are never read), so the
   * indeterminate contents are harmless — confirm against Cython codegen
   * if this translation unit is ever built as plain C. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":4583
 * cdef _get_device_current_clock_freqs_v1_dtype_offsets():
 *     cdef nvmlDeviceCurrentClockFreqs_v1_t pod = nvmlDeviceCurrentClockFreqs_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'str'],
 *         'formats': [_numpy.uint32, _numpy.int8],
 */
  /* Look up _numpy.dtype: t_4 = module, t_5 = bound dtype attribute. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":4584
 *     cdef nvmlDeviceCurrentClockFreqs_v1_t pod = nvmlDeviceCurrentClockFreqs_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'str'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [
 */
  /* t_4 = the spec dict (4 keys); t_6 = ['version', 'str'] for 'names'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4584, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4584, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 4584, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_str);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_str);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_str) != (0)) __PYX_ERR(0, 4584, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 4584, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4585
 *     return _numpy.dtype({
 *         'names': ['version', 'str'],
 *         'formats': [_numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' entry: [numpy.uint32, numpy.int8], matching 'names' order. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 4585, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4585, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 4584, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4587
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),
 *         ],
 */
  /* Byte offset of 'version' = &pod.version - &pod (pointer difference on
   * the stack instance; equivalent to offsetof). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":4588
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlDeviceCurrentClockFreqs_v1_t),
 */
  /* Byte offset of 'str', computed the same way. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.str)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4588, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":4586
 *         'names': ['version', 'str'],
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4586, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 4586, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4586, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 4584, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":4590
 *             (<intptr_t>&(pod.str)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlDeviceCurrentClockFreqs_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' = sizeof the full struct, so the dtype covers padding too. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlDeviceCurrentClockFreqs_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 4584, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict).  If t_5 is a bound method, Cython unpacks
   * it into function + self so the vectorcall below can prepend self
   * (t_9 becomes 0 to include the self slot in the arg array). */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4583, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Success: transfer the dtype reference to the return slot. */
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4581
 * 
 * 
 * cdef _get_device_current_clock_freqs_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceCurrentClockFreqs_v1_t pod = nvmlDeviceCurrentClockFreqs_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop any live temporaries, record the traceback frame,
   * and return NULL. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_device_current_clock_freqs_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4607
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>calloc(1, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for DeviceCurrentClockFreqs_v1.__init__.  Verifies
 * the call received zero positional and zero keyword arguments (raising
 * TypeError via __Pyx_RaiseArgtupleInvalid / __Pyx_RejectKeywords
 * otherwise), then delegates to the no-argument impl function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the safe (non-macro) path also checks for a
   * negative result, which signals an already-set exception. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__init__ (_nvml.pyx:4607).
 * Allocates one zeroed nvmlDeviceCurrentClockFreqs_v1_t via calloc and
 * stores it in self->_ptr, raising MemoryError if allocation fails.
 * Initializes ownership bookkeeping: _owner = None, _owned = True
 * (so __dealloc__ frees the buffer), _readonly = False.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4608
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>calloc(1, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 */
  /* calloc (not malloc) so the struct starts zero-filled. */
  __pyx_v_self->_ptr = ((nvmlDeviceCurrentClockFreqs_v1_t *)calloc(1, (sizeof(nvmlDeviceCurrentClockFreqs_v1_t))));

  /* "cuda/bindings/_nvml.pyx":4609
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>calloc(1, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4610
 *         self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>calloc(1, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating ...").  MemoryError is
     * looked up as a module global and called with one string argument via
     * the vectorcall fast path (with bound-method unpacking, as usual in
     * Cython-generated call sites). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4610, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceCurrentCl};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4610, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4610, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4609
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>calloc(1, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":4611
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: INCREF the new value before
   * releasing the old one, so the slot is never left dangling. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":4612
 *             raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":4613
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":4607
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>calloc(1, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4615
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceCurrentClockFreqs_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for DeviceCurrentClockFreqs_v1.__dealloc__: casts
 * self to the extension-struct type and delegates to the impl. */
static void __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
   * this only compiles because __Pyx_KwValues_VARARGS must be a macro that
   * does not expand its arguments — confirm against the Cython utility
   * code if this ever fails to build. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of DeviceCurrentClockFreqs_v1.__dealloc__
 * (_nvml.pyx:4615).  Frees the calloc'd struct only when this object owns
 * it (_owned) and the pointer is non-NULL; a borrowed/foreign pointer is
 * left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  nvmlDeviceCurrentClockFreqs_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlDeviceCurrentClockFreqs_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":4617
 *     def __dealloc__(self):
 *         cdef nvmlDeviceCurrentClockFreqs_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is truthy. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4618
 *         cdef nvmlDeviceCurrentClockFreqs_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":4619
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the member before freeing, so no path can observe (or
     * double-free) a dangling self._ptr. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":4620
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4617
 *     def __dealloc__(self):
 *         cdef nvmlDeviceCurrentClockFreqs_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":4615
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceCurrentClockFreqs_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":4622
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceCurrentClockFreqs_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for DeviceCurrentClockFreqs_v1.__repr__: casts
 * self and delegates to the impl; returns the repr string or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
   * __Pyx_KwValues_VARARGS not expanding its arguments (see dealloc
   * wrapper note). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__repr__ (_nvml.pyx:4622).
 * Builds the f-string
 *   f"<{__name__}.DeviceCurrentClockFreqs_v1 object at {hex(id(self))}>"
 * by formatting __name__, computing hex(id(self)), and joining five
 * unicode pieces with __Pyx_PyUnicode_Join.  Returns a new str or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4623
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.DeviceCurrentClockFreqs_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str-formatted module __name__. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", name, ".DeviceCurrentClockFreqs_v1 object at ", hex, ">";
   * the length/max-char hints let the join allocate exactly once. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_DeviceCurrentClockFreqs_v1_obje;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 38 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4622
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceCurrentClockFreqs_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4625
 *         return f"<{__name__}.DeviceCurrentClockFreqs_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level getter wrapper for the `ptr` property: casts self and
 * delegates to the property impl; returns an int object or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS not expanding its
   * (undeclared) arguments — same pattern as the other wrappers here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (_nvml.pyx:4628): returns
 * the address held in self->_ptr as a Python int (via intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4628
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int (PyLong_FromSsize_t matches the
   * signed intptr_t representation Cython uses here). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4625
 *         return f"<{__name__}.DeviceCurrentClockFreqs_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4630
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for the pointer address (cdef method _get_ptr,
 * _nvml.pyx:4630): returns self->_ptr as an intptr_t with no Python
 * object creation.  Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":4631
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4630
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4633
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for DeviceCurrentClockFreqs_v1.__int__: casts self
 * and delegates to the impl; returns an int object or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS not expanding its
   * (undeclared) arguments — same pattern as the other wrappers here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__int__ (_nvml.pyx:4633):
 * identical behavior to the `ptr` getter — returns the struct pointer's
 * address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4634
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4634, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4633
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4636
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceCurrentClockFreqs_v1 other_
 *         if not isinstance(other, DeviceCurrentClockFreqs_v1):
*/

/* Python wrapper */
/* Python-level wrapper for DeviceCurrentClockFreqs_v1.__eq__: casts self
 * and forwards `other` unchanged to the impl; returns a bool or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS not expanding its
   * (undeclared) arguments — same pattern as the other wrappers here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__eq__ (_nvml.pyx:4636-4641).
 * Returns False when `other` is not a DeviceCurrentClockFreqs_v1; otherwise
 * compares the two wrapped nvmlDeviceCurrentClockFreqs_v1_t structs byte-wise
 * with memcmp and returns the resulting bool.
 * NOTE(review): a byte-wise memcmp also compares any struct padding bytes,
 * which may hold indeterminate values -- confirm the struct has no padding or
 * that instances are always zero-initialized before relying on equality. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4638
 *     def __eq__(self, other):
 *         cdef DeviceCurrentClockFreqs_v1 other_
 *         if not isinstance(other, DeviceCurrentClockFreqs_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Exact-type-or-subtype check; None fails this check and returns False. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4639
 *         cdef DeviceCurrentClockFreqs_v1 other_
 *         if not isinstance(other, DeviceCurrentClockFreqs_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCurrentClockFreqs_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4638
 *     def __eq__(self, other):
 *         cdef DeviceCurrentClockFreqs_v1 other_
 *         if not isinstance(other, DeviceCurrentClockFreqs_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":4640
 *         if not isinstance(other, DeviceCurrentClockFreqs_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCurrentClockFreqs_v1_t)) == 0)
 * 
 */
  /* Typed assignment: TypeTest here is redundant after the isinstance check
   * above but is emitted by the generator for the cdef cast. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1))))) __PYX_ERR(0, 4640, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":4641
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCurrentClockFreqs_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise comparison of the two wrapped C structs (see NOTE above). */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlDeviceCurrentClockFreqs_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4636
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceCurrentClockFreqs_v1 other_
 *         if not isinstance(other, DeviceCurrentClockFreqs_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4643
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCurrentClockFreqs_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
*/

/* Python wrapper thunk for DeviceCurrentClockFreqs_v1.__setitem__
 * (Cython-generated). Casts self to the concrete extension-type struct and
 * forwards to the typed implementation, which returns 0 on success or -1
 * with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__setitem__ (_nvml.pyx:4643-4653).
 * Two behaviors:
 *   - obj[0] = ndarray: mallocs fresh struct storage, copies
 *     sizeof(nvmlDeviceCurrentClockFreqs_v1_t) bytes from the array's data
 *     pointer, and marks the instance as owning the allocation; read-only
 *     status mirrors the array's writeable flag.
 *   - any other key: falls through to setattr(self, key, val).
 * NOTE(review): the malloc overwrites self->_ptr without freeing a previous
 * owned allocation -- looks like a leak if assigned twice; confirm against
 * the class's dealloc/ownership logic elsewhere in the file.
 * NOTE(review): the memcpy assumes val's buffer holds at least
 * sizeof(nvmlDeviceCurrentClockFreqs_v1_t) bytes -- presumably guaranteed by
 * the expected dtype; verify against callers. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":4644
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the isinstance(val, numpy.ndarray) lookup only runs
   * when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4644, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 4644, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4645
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 */
    /* See NOTE(review) above: previous _ptr value is not freed here. */
    __pyx_v_self->_ptr = ((nvmlDeviceCurrentClockFreqs_v1_t *)malloc((sizeof(nvmlDeviceCurrentClockFreqs_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4646
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4647
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             self._owner = None
 */
      /* Raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1") via
       * the generated fast-call sequence. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4647, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceCurrentCl};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4647, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 4647, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4646
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4648
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is an int holding the array's base address; round-trip
     * through intptr_t to a void* for the struct copy. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4648, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4648, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4648, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlDeviceCurrentClockFreqs_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4649
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-contained, so no owner object needs to be kept alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4650
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":4651
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag as this wrapper's read-only bit. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4651, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4651, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 4651, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":4644
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":4653
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-array / non-zero key: delegate to normal attribute assignment. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 4653, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":4643
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCurrentClockFreqs_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4655
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper thunk for the `version` property getter (Cython-generated).
 * Casts self to the concrete extension-type struct and forwards to the typed
 * implementation, which returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter (_nvml.pyx:4656-4658):
 * returns self._ptr[0].version as a Python int.
 * NOTE(review): self->_ptr is dereferenced without a NULL check -- presumably
 * guaranteed non-NULL by construction; verify against __cinit__/from_ptr. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4658
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field into a Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4655
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4660
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper thunk for the `version` property setter (Cython-generated).
 * Casts self to the concrete extension-type struct and forwards to the typed
 * implementation, which returns 0 on success or -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter (_nvml.pyx:4661-4664):
 * raises ValueError when the instance is read-only, otherwise converts `val`
 * to unsigned int and stores it into self._ptr[0].version.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":4662
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceCurrentClockFreqs_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":4663
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceCurrentClockFreqs_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError(...) via the generated fast-call sequence. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceCurrentClockFreqs_v1};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4663, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4663, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4662
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceCurrentClockFreqs_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":4664
 *         if self._readonly:
 *             raise ValueError("This DeviceCurrentClockFreqs_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python object to unsigned int (raises OverflowError/TypeError
   * on out-of-range or non-integer values) before storing. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4664, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":4660
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4666
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def str(self):
 *         """~_numpy.int8: (array of length 2048).OUT: the current clock frequency string."""
*/

/* Python wrapper thunk for the `str` property getter (Cython-generated).
 * Casts self to the concrete extension-type struct and forwards to the typed
 * implementation, which returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `str` property getter (_nvml.pyx:4667-4669):
 * decodes the NUL-terminated contents of the fixed-size char field
 * self._ptr[0].str (declared as 2048 bytes in the .pyx docstring) into a
 * Python unicode object via PyUnicode_FromString.
 * NOTE(review): relies on the field being NUL-terminated; the setter below
 * enforces len < 2048, so a terminator is always present for values written
 * through Python -- externally-filled structs must uphold the same invariant. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4669
 *     def str(self):
 *         """~_numpy.int8: (array of length 2048).OUT: the current clock frequency string."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].str)             # <<<<<<<<<<<<<<
 * 
 *     @str.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).str); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4666
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def str(self):
 *         """~_numpy.int8: (array of length 2048).OUT: the current clock frequency string."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.str.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4671
 *         return cpython.PyUnicode_FromString(self._ptr[0].str)
 * 
 *     @str.setter             # <<<<<<<<<<<<<<
 *     def str(self, val):
 *         if self._readonly:
*/

/* Python wrapper thunk for the `str` property setter (Cython-generated).
 * Casts self to the concrete extension-type struct and forwards to the typed
 * implementation, which returns 0 on success or -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `str` property setter (_nvml.pyx:4672-4679).
 *
 * Semantics:
 *   - raise ValueError if the instance is read-only;
 *   - buf = val.encode() (must produce bytes);
 *   - raise ValueError if len(buf) >= 2048 (field is 2048 bytes, see the
 *     .pyx docstring on the getter);
 *   - copy the encoded string into self._ptr[0].str.
 *
 * FIX vs. generated code: the generator emitted memcpy(dst, ptr, 2048),
 * which reads a fixed 2048 bytes from `ptr` even though the Python bytes
 * object only guarantees len(buf)+1 valid bytes (string + NUL terminator)
 * -- a heap buffer over-read. We now copy exactly len(buf)+1 bytes and
 * zero-fill the remainder of the fixed-size field, which also makes the
 * struct contents deterministic for the byte-wise memcmp in __eq__.
 * NOTE(review): the proper home for this fix is the .pyx source/generator
 * template; this hand edit will be lost on regeneration.
 *
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;   /* owned: bytes result of val.encode() */
  char *__pyx_v_ptr;           /* borrowed: internal buffer of __pyx_v_buf */
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;        /* len(buf); reused to bound the copy below */
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* if self._readonly: raise ValueError("This DeviceCurrentClockFreqs_v1
   * instance is read-only")  (_nvml.pyx:4673-4674) */
  if (unlikely(__pyx_v_self->_readonly)) {
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceCurrentClockFreqs_v1};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4674, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4674, __pyx_L1_error)
  }

  /* cdef bytes buf = val.encode()  (_nvml.pyx:4675) */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4675, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* The cdef declaration requires an exact bytes object (or None). */
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 4675, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* if len(buf) >= 2048: raise ValueError("String too long for field str,
   * max length is 2047")  (_nvml.pyx:4676-4677) */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 4676, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4676, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 0x800);
  if (unlikely(__pyx_t_5)) {
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_str_ma};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4677, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 4677, __pyx_L1_error)
  }

  /* cdef char *ptr = buf  (_nvml.pyx:4678) */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 4678, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 4678, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* Bounded copy: len(buf)+1 bytes covers the string plus its NUL terminator
   * (CPython bytes objects are always NUL-terminated). Zero-fill the unused
   * tail of the 2048-byte field so its contents are deterministic.
   * The length check above guarantees __pyx_t_4 + 1 <= 0x800, so the memset
   * size is never negative. Original generated code copied a fixed 0x800
   * bytes here, over-reading the source buffer. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).str), ((const void *)__pyx_v_ptr), ((size_t)(__pyx_t_4 + 1))));
  (void)(memset(((void *)((__pyx_v_self->_ptr[0]).str + __pyx_t_4 + 1)), 0, ((size_t)(0x800 - (__pyx_t_4 + 1)))));

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.str.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4681
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
 * DeviceCurrentClockFreqs_v1.from_data(data).
 * Unpacks exactly one positional-or-keyword argument ("data") and delegates
 * to the implementation function ..._12from_data below.  Returns a new
 * reference on success, NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_12from_data, "DeviceCurrentClockFreqs_v1.from_data(data)\n\nCreate an DeviceCurrentClockFreqs_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `device_current_clock_freqs_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};   /* borrowed-or-owned slot for "data", per __Pyx_ArgRef_FASTCALL */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely() collapses
     * its argument to 0/1, so the `< 0` comparison was always false and a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL went undetected.
     * The __reduce_cython__ wrapper in this file uses the correct form.
     * NOTE(review): this file is Cython-generated; mirror the fix in the
     * generator or it will be lost on regeneration. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4681, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4681, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 4681, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 4681, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4681, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 4681, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: release any argument references collected so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.from_data (pyx line 4688):
 * resolves the module-level global `device_current_clock_freqs_v1_dtype`
 * and forwards it, together with `data`, the dtype name string and the
 * DeviceCurrentClockFreqs_v1 extension type, to the shared helper
 * __from_data().  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;   /* temp: resolved dtype global (owned) */
  PyObject *__pyx_t_2 = NULL;   /* temp: result of __from_data (owned) */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":4688
 *             data (_numpy.ndarray): a single-element array of dtype `device_current_clock_freqs_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "device_current_clock_freqs_v1_dtype", device_current_clock_freqs_v1_dtype, DeviceCurrentClockFreqs_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* look up the dtype object by name in the module globals; raises NameError if missing */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_device_current_clock_freqs_v1_dt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_device_current_clock_freqs_v1_dt, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;   /* transfer ownership of the result to the caller */
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4681
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4690
 *         return __from_data(data, "device_current_clock_freqs_v1_dtype", device_current_clock_freqs_v1_dtype, DeviceCurrentClockFreqs_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
 * DeviceCurrentClockFreqs_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks up to three positional-or-keyword arguments, converts `ptr` to
 * intptr_t and `readonly` to a C int, defaults `owner` to None, then
 * delegates to the implementation function ..._14from_ptr.  Returns a new
 * reference on success, NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_14from_ptr, "DeviceCurrentClockFreqs_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an DeviceCurrentClockFreqs_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};   /* slots for ptr, readonly, owner */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely() collapses
     * its argument to 0/1, so the `< 0` comparison was always false and a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL went undetected.
     * The __reduce_cython__ wrapper in this file uses the correct form.
     * NOTE(review): this file is Cython-generated; mirror the fix in the
     * generator or it will be lost on regeneration. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4690, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4690, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4690, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4690, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 4690, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":4691
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));   /* default: owner=None */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 4690, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4690, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4690, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4690, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));   /* default: owner=None */
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4691, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4691, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);   /* default: readonly=False */
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 4690, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: release any argument references collected so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":4690
 *         return __from_data(data, "device_current_clock_freqs_v1_dtype", device_current_clock_freqs_v1_dtype, DeviceCurrentClockFreqs_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.from_ptr (pyx lines 4690-4714):
 * wraps a raw nvmlDeviceCurrentClockFreqs_v1_t* address in a new
 * DeviceCurrentClockFreqs_v1 object.  Rejects a null pointer; when `owner`
 * is None the struct is deep-copied into malloc'd storage that the new
 * object owns (_owned = True), otherwise the pointer is aliased and `owner`
 * is retained to keep the backing memory alive (_owned = False).
 * Returns a new reference, or NULL with an exception set.
 * NOTE(review): the exact INCREF/GIVEREF/GOTREF ordering below is the
 * Cython-generated refcount protocol; do not reorder by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4699
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4700
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      /* vectorcall ValueError("ptr must not be null (0)") and raise it */
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4700, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4700, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4699
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":4701
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 */
  /* allocate the instance directly via tp_new (bypasses __init__) */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4701, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4702
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4703
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 */
    /* no owner: copy the caller's struct into freshly malloc'd storage */
    __pyx_v_obj->_ptr = ((nvmlDeviceCurrentClockFreqs_v1_t *)malloc((sizeof(nvmlDeviceCurrentClockFreqs_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4704
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4705
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             obj._owner = None
 */
      /* malloc failed: raise MemoryError (looked up by name, so a module-level
       * shadow of MemoryError would be honored) */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4705, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceCurrentCl};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4705, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 4705, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4704
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":4706
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlDeviceCurrentClockFreqs_v1_t))));

    /* "cuda/bindings/_nvml.pyx":4707
 *                 raise MemoryError("Error allocating DeviceCurrentClockFreqs_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":4708
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;   /* dealloc will free() the copied struct */

    /* "cuda/bindings/_nvml.pyx":4702
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCurrentClockFreqs_v1 obj = DeviceCurrentClockFreqs_v1.__new__(DeviceCurrentClockFreqs_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>malloc(sizeof(nvmlDeviceCurrentClockFreqs_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":4710
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* owner provided: alias the caller's memory and hold a reference to owner */
    __pyx_v_obj->_ptr = ((nvmlDeviceCurrentClockFreqs_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":4711
 *         else:
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":4712
 *             obj._ptr = <nvmlDeviceCurrentClockFreqs_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;   /* dealloc must NOT free aliased memory */
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":4713
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":4714
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4690
 *         return __from_data(data, "device_current_clock_freqs_v1_dtype", device_current_clock_freqs_v1_dtype, DeviceCurrentClockFreqs_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* METH_FASTCALL entry point for DeviceCurrentClockFreqs_v1.__reduce_cython__():
 * rejects any positional or keyword arguments, then delegates to the
 * implementation function, which unconditionally raises TypeError
 * (instances of this type are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_16__reduce_cython__, "DeviceCurrentClockFreqs_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  /* NOTE: correctly parenthesized error check -- `unlikely(__pyx_kwds_len < 0)`;
   * the from_data/from_ptr/__setstate_cython__ wrappers in this file have the
   * paren misplaced (`unlikely(__pyx_kwds_len) < 0`), making their check dead. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__reduce_cython__: always
 * raises TypeError because the wrapped C pointer (self._ptr) cannot be
 * serialized; this makes instances explicitly unpicklable. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for
 * DeviceCurrentClockFreqs_v1.__setstate_cython__(__pyx_state).
 * Unpacks the single argument and delegates to the implementation, which
 * unconditionally raises TypeError (the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_18__setstate_cython__, "DeviceCurrentClockFreqs_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};   /* slot for __pyx_state */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely() collapses
     * its argument to 0/1, so the `< 0` comparison was always false and a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL went undetected.
     * The __reduce_cython__ wrapper in this file uses the correct form.
     * NOTE(review): this file is Cython-generated; mirror the fix in the
     * generator or it will be lost on regeneration. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: release any argument references collected so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCurrentClockFreqs_v1.__setstate_cython__.
 * The extension type is deliberately not picklable: this method
 * unconditionally raises TypeError and never returns a value.  Both
 * parameters are marked CYTHON_UNUSED because neither is read. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned "self._ptr cannot be converted ..."
   * message, then jump straight to the error label: the success path below
   * is unreachable. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  /* Error epilogue: record the traceback frame and return NULL so the
   * TypeError set above propagates to the caller. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCurrentClockFreqs_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4717
 * 
 * 
 * cdef _get_process_utilization_sample_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessUtilizationSample_t pod = nvmlProcessUtilizationSample_t()
 *     return _numpy.dtype({
*/

/* Build, at module-runtime, a numpy structured dtype that mirrors the exact
 * in-memory layout of the C struct nvmlProcessUtilizationSample_t.
 *
 * The dtype dict passed to numpy.dtype() has four keys:
 *   'names'    - Python-side field names (pid, time_stamp, sm_util, ...)
 *   'formats'  - matching numpy scalar types (uint32/uint64)
 *   'offsets'  - byte offsets computed offsetof-style from a local instance
 *   'itemsize' - sizeof(nvmlProcessUtilizationSample_t), so array strides
 *                match the C ABI exactly.
 *
 * Returns a new reference to the numpy.dtype object, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_utilization_sample_dtype_offsets(void) {
  nvmlProcessUtilizationSample_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlProcessUtilizationSample_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  size_t __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_process_utilization_sample_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":4718
 * 
 * cdef _get_process_utilization_sample_dtype_offsets():
 *     cdef nvmlProcessUtilizationSample_t pod = nvmlProcessUtilizationSample_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['pid', 'time_stamp', 'sm_util', 'mem_util', 'enc_util', 'dec_util'],
 */
  /* NOTE(review): __pyx_t_1 is never explicitly assigned before this copy;
   * under C++ it is default-constructed, which leaves POD members with
   * indeterminate values.  That is harmless here because the code below only
   * takes the *addresses* of pod's members (for offset arithmetic) and never
   * reads their values. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":4719
 * cdef _get_process_utilization_sample_dtype_offsets():
 *     cdef nvmlProcessUtilizationSample_t pod = nvmlProcessUtilizationSample_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['pid', 'time_stamp', 'sm_util', 'mem_util', 'enc_util', 'dec_util'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up _numpy.dtype; __pyx_t_5 holds the callable for the final call. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4719, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4719, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":4720
 *     cdef nvmlProcessUtilizationSample_t pod = nvmlProcessUtilizationSample_t()
 *     return _numpy.dtype({
 *         'names': ['pid', 'time_stamp', 'sm_util', 'mem_util', 'enc_util', 'dec_util'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* __pyx_t_4 becomes the 4-entry dtype spec dict; first populate
   * 'names' with the six interned field-name strings. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 4720, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_time_stamp) != (0)) __PYX_ERR(0, 4720, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_sm_util) != (0)) __PYX_ERR(0, 4720, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_mem_util) != (0)) __PYX_ERR(0, 4720, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_enc_util) != (0)) __PYX_ERR(0, 4720, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_dec_util) != (0)) __PYX_ERR(0, 4720, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 4720, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4721
 *     return _numpy.dtype({
 *         'names': ['pid', 'time_stamp', 'sm_util', 'mem_util', 'enc_util', 'dec_util'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 */
  /* 'formats': fetch the six numpy scalar types in field order
   * (uint32, uint64, then four uint32) into __pyx_t_7..__pyx_t_12. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Pack the six type objects into the 'formats' list, transferring
   * ownership into the list (GIVEREF + SET_ITEM, then clear the temps). */
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 4721, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4721, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 4721, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 4721, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 4721, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 4721, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 4720, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4723
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 */
  /* 'offsets': each entry is (member address - struct address), i.e. the
   * byte offset of the member, converted to a Python int. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":4724
 *         'offsets': [
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":4725
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.smUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":4726
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":4727
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.encUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":4728
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlProcessUtilizationSample_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4728, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":4722
 *         'names': ['pid', 'time_stamp', 'sm_util', 'mem_util', 'enc_util', 'dec_util'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 */
  /* Pack the six offset ints into the 'offsets' list and store it. */
  __pyx_t_7 = PyList_New(6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4722, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 4722, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_12) != (0)) __PYX_ERR(0, 4722, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_11) != (0)) __PYX_ERR(0, 4722, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 4722, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_9) != (0)) __PYX_ERR(0, 4722, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_8) != (0)) __PYX_ERR(0, 4722, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 4720, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":4730
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlProcessUtilizationSample_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype's element size to the C struct size,
   * including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessUtilizationSample_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 4720, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(spec) via the fastcall protocol.  If dtype turned
   * out to be a bound method, unpack it so self goes in the arg vector
   * (__pyx_t_13 shifts the argument window accordingly). */
  __pyx_t_13 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_13 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_13, (2-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4717
 * 
 * 
 * cdef _get_process_utilization_sample_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessUtilizationSample_t pod = nvmlProcessUtilizationSample_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release every possibly-held temporary, record the
   * traceback, and return NULL (as a cdef function, the caller checks
   * for NULL, not an exception flag). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_process_utilization_sample_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4752
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_utilization_sample_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Python-level wrapper for ProcessUtilizationSample.__init__(self, size=1).
 * Parses the positional/keyword arguments into values[], applies the
 * default size=1 (interned __pyx_int_1) when the argument is absent, then
 * delegates to the typed implementation.  Returns 0 on success, -1 on
 * error (tp_init convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 4752, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first (at most one, falling
       * through the switch), then merge keyword args by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4752, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 4752, __pyx_L3_error)
      /* Apply the default size=1 when not supplied. */
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: same collection without keyword merging. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4752, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  /* Too many positional arguments: raise TypeError with 0..1 expected. */
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 4752, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  /* Release the argument references regardless of success/failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationSample.__init__(self, size=1):
 *   arr = numpy.empty(size, dtype=process_utilization_sample_dtype)
 *   self._data = arr.view(numpy.recarray)
 *   assert self._data.itemsize == sizeof(nvmlProcessUtilizationSample_t)
 * The assert guards against the module-level dtype drifting out of sync
 * with the C struct layout.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4753
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_utilization_sample_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationSample_t),	\
 */
  /* Look up numpy.empty and the module-global dtype, then call
   * empty(size, dtype=...) through the vectorcall keyword builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_utilization_sample_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* Unpack a bound method so self is passed positionally; __pyx_t_5
   * shifts the argument window when that happens. */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4753, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 4753, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4753, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4754
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_utilization_sample_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationSample_t),	\
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationSample_t) }"
 */
  /* Call arr.view(numpy.recarray) via the fastcall-method helper. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4754, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4754, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4754, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Swap the recarray view into self->_data, releasing the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4755
 *         arr = _numpy.empty(size, dtype=process_utilization_sample_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationSample_t),	\             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationSample_t) }"
 * 
 */
  /* Compiled 'assert': only evaluated when Python assertions are enabled
   * (i.e. not under -O); compares the dtype itemsize against the C
   * struct size and raises AssertionError with an f-string message on
   * mismatch. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4755, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessUtilizationSample_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4755, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4755, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 4755, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":4756
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationSample_t),	\
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationSample_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the AssertionError message by joining four unicode pieces:
       * literal prefix, formatted itemsize, literal middle, formatted
       * sizeof(); then raise. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4756, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4756, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlProcessUtilizationSample_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4756, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4756, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 4755, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 4755, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":4752
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_utilization_sample_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: drop all live temporaries, record traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4758
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationSample_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python-level wrapper for ProcessUtilizationSample.__repr__: __repr__
 * takes no arguments, so this simply casts self and delegates to the
 * typed implementation below.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
 * this compiles only because __Pyx_KwValues_VARARGS is presumably a macro
 * that discards its arguments unexpanded — confirm against the utility
 * code if touching this. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4759
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4759, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4759, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 4759, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":4760
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.ProcessUtilizationSample object at {hex(id(self))}>"
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_ProcessUtilizationSample_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 32 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4760, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4759
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
*/
  }

  /* "cuda/bindings/_nvml.pyx":4762
 *             return f"<{__name__}.ProcessUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.ProcessUtilizationSample object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_ProcessUtilizationSample_object;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 36 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":4758
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationSample_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4764
 *             return f"<{__name__}.ProcessUtilizationSample object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4767
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4767, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4767, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4764
 *             return f"<{__name__}.ProcessUtilizationSample object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4769
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24ProcessUtilizationSample__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4770
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4770, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4770, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4770, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4769
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4772
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4773
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 4773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":4774
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
*/
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4774, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4774, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4773
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
*/
  }

  /* "cuda/bindings/_nvml.pyx":4776
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4776, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4776, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4772
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4778
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":4779
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4779, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4779, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4778
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4781
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4782
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
*/
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4783
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4783, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4784
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4783
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  }

  /* "cuda/bindings/_nvml.pyx":4785
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4785, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4785, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4785, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4785, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4785, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4781
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4787
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4790
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4790, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4790, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4791
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4791, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4791, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4791, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4790
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  }

  /* "cuda/bindings/_nvml.pyx":4792
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4792, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4787
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4794
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __set__ entry for ProcessUtilizationSample.pid: casts self to the
 * extension-struct type and delegates to the typed impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Setter impl for ProcessUtilizationSample.pid: performs the Python attribute store
 * `self._data.pid = val`. Returns 0 on success; on failure returns -1 with the
 * Python error state set and a traceback frame added. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4796
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 4796, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4794
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4798
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __get__ entry for ProcessUtilizationSample.time_stamp: casts self
 * and delegates to the typed impl. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Getter impl for ProcessUtilizationSample.time_stamp: if self._data.size == 1,
 * returns int(self._data.time_stamp[0]); otherwise returns self._data.time_stamp
 * as-is. Returns a new reference, or NULL with the Python error state set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4801
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4801, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4802
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_stamp
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4802, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4802, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4802, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4801
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
 */
  }

  /* "cuda/bindings/_nvml.pyx":4803
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp             # <<<<<<<<<<<<<<
 * 
 *     @time_stamp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4798
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4805
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __set__ entry for ProcessUtilizationSample.time_stamp: casts self
 * and delegates to the typed impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Setter impl for ProcessUtilizationSample.time_stamp: performs the Python attribute
 * store `self._data.time_stamp = val`. Returns 0 on success; -1 on failure with the
 * Python error state set and a traceback frame added. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4807
 *     @time_stamp.setter
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp, __pyx_v_val) < (0)) __PYX_ERR(0, 4807, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4805
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4809
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __get__ entry for ProcessUtilizationSample.sm_util: casts self
 * and delegates to the typed impl. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Getter impl for ProcessUtilizationSample.sm_util: if self._data.size == 1,
 * returns int(self._data.sm_util[0]); otherwise returns self._data.sm_util
 * as-is. Returns a new reference, or NULL with the Python error state set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4812
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4812, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4812, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4813
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.sm_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.sm_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4813, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4813, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4813, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4812
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":4814
 *         if self._data.size == 1:
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util             # <<<<<<<<<<<<<<
 * 
 *     @sm_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4814, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4809
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.sm_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4816
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __set__ entry for ProcessUtilizationSample.sm_util: casts self
 * and delegates to the typed impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Setter impl for ProcessUtilizationSample.sm_util: performs the Python attribute
 * store `self._data.sm_util = val`. Returns 0 on success; -1 on failure with the
 * Python error state set and a traceback frame added. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4818
 *     @sm_util.setter
 *     def sm_util(self, val):
 *         self._data.sm_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util, __pyx_v_val) < (0)) __PYX_ERR(0, 4818, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4816
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.sm_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4820
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __get__ entry for ProcessUtilizationSample.mem_util: casts self
 * and delegates to the typed impl. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Getter impl for ProcessUtilizationSample.mem_util: if self._data.size == 1,
 * returns int(self._data.mem_util[0]); otherwise returns self._data.mem_util
 * as-is. Returns a new reference, or NULL with the Python error state set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4823
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4823, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4823, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4824
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.mem_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.mem_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4824, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4824, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4824, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4823
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":4825
 *         if self._data.size == 1:
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util             # <<<<<<<<<<<<<<
 * 
 *     @mem_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4825, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4820
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.mem_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4827
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __set__ entry for ProcessUtilizationSample.mem_util: casts self
 * and delegates to the typed impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Setter impl for ProcessUtilizationSample.mem_util: performs the Python attribute
 * store `self._data.mem_util = val`. Returns 0 on success; -1 on failure with the
 * Python error state set and a traceback frame added. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4829
 *     @mem_util.setter
 *     def mem_util(self, val):
 *         self._data.mem_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util, __pyx_v_val) < (0)) __PYX_ERR(0, 4829, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4827
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.mem_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4831
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __get__ entry for ProcessUtilizationSample.enc_util: casts self
 * and delegates to the typed impl. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Getter impl for ProcessUtilizationSample.enc_util: if self._data.size == 1,
 * returns int(self._data.enc_util[0]); otherwise returns self._data.enc_util
 * as-is. Returns a new reference, or NULL with the Python error state set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4834
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4834, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4834, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4835
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.enc_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.enc_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4835, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4835, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4835, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4834
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":4836
 *         if self._data.size == 1:
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util             # <<<<<<<<<<<<<<
 * 
 *     @enc_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4836, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4831
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.enc_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4838
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __set__ entry for ProcessUtilizationSample.enc_util: casts self
 * and delegates to the typed impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Setter impl for ProcessUtilizationSample.enc_util: performs the Python attribute
 * store `self._data.enc_util = val`. Returns 0 on success; -1 on failure with the
 * Python error state set and a traceback frame added. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4840
 *     @enc_util.setter
 *     def enc_util(self, val):
 *         self._data.enc_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util, __pyx_v_val) < (0)) __PYX_ERR(0, 4840, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4838
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.enc_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4842
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated — edit _nvml.pyx and regenerate rather than hand-editing.
 * CPython-level __get__ entry for ProcessUtilizationSample.dec_util: casts self
 * and delegates to the typed impl. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion; unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated — regenerate from _nvml.pyx instead of hand-editing.
 * Getter impl for ProcessUtilizationSample.dec_util: if self._data.size == 1,
 * returns int(self._data.dec_util[0]); otherwise returns self._data.dec_util
 * as-is. Returns a new reference, or NULL with the Python error state set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4845
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4845, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4846
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.dec_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.dec_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4846, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4846, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4846, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4845
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":4847
 *         if self._data.size == 1:
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util             # <<<<<<<<<<<<<<
 * 
 *     @dec_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4842
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.dec_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4849
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* CPython-facing wrapper for the `dec_util` property setter: casts `self` to
 * the concrete extension-object struct and forwards to the implementation
 * function. Returns 0 on success, -1 on error (standard setter protocol,
 * see the implementation's exit paths). */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Generated boilerplate; kwvalues is unused by this setter (presumably the
   * macro extracts keyword slots from the call frame — not visible here). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessUtilizationSample.dec_util.__set__`:
 * performs `self._data.dec_util = val` (pyx line 4851) via an attribute
 * store on the wrapped recarray; a failed store raises through the
 * traceback machinery and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4851
 *     @dec_util.setter
 *     def dec_util(self, val):
 *         self._data.dec_util = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util, __pyx_v_val) < (0)) __PYX_ERR(0, 4851, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4849
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.dec_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4853
 *         self._data.dec_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
/* CPython-facing wrapper for `ProcessUtilizationSample.__getitem__`: casts
 * `self` to the extension-object struct and forwards to the implementation.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  /* Generated boilerplate; kwvalues is unused by this slot function. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessUtilizationSample.__getitem__` (pyx 4853-4867).
 * Integer keys: bounds-check against self._data.size, normalize a negative
 * index by adding size, then wrap the length-1 slice
 * self._data[key_:key_+1] via ProcessUtilizationSample.from_data.
 * Any other key (slices, fancy indexing, field names): forward to the
 * underlying recarray's __getitem__ and re-wrap the result only when it is
 * a _numpy.recarray of dtype process_utilization_sample_dtype; otherwise
 * return it unchanged. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":4856
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* `isinstance(key, int)` compiles down to a PyLong type check. */
  __pyx_t_1 = PyLong_Check(__pyx_v_key);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4857
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4857, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":4858
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4858, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4858, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":4859
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Short-circuit `or`: valid indices are -size..size-1 inclusive. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":4860
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4860, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 4860, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":4859
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":4861
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":4862
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* Normalize negative index into the 0..size-1 range. */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":4861
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":4863
 *             if key_ < 0:
 *                 key_ += size
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_sample_dtype:
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
    __Pyx_INCREF(__pyx_t_5);
    /* Slice [key_:key_+1] keeps the result 1-D, then wrap via from_data. */
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4863, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4863, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4856
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":4864
 *                 key_ += size
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_sample_dtype:
 *             return ProcessUtilizationSample.from_data(out)
 */
  /* Non-integer key: delegate to the recarray's own indexing. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4864, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":4865
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_sample_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessUtilizationSample.from_data(out)
 *         return out
 */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_utilization_sample_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4865, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":4866
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_sample_dtype:
 *             return ProcessUtilizationSample.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4866, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4865
 *             return ProcessUtilizationSample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_sample_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessUtilizationSample.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":4867
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_sample_dtype:
 *             return ProcessUtilizationSample.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4853
 *         self._data.dec_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4869
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
/* CPython-facing wrapper for `ProcessUtilizationSample.__setitem__`: casts
 * `self` to the extension-object struct and forwards to the implementation.
 * Returns 0 on success, -1 on error (standard mp_ass_subscript protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* Generated boilerplate; kwvalues is unused by this slot function. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ProcessUtilizationSample.__setitem__` (pyx 4869-4870):
 * performs `self._data[key] = val` by delegating directly to the wrapped
 * recarray via PyObject_SetItem, so all key kinds (int, slice, field name)
 * follow NumPy's assignment semantics. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4870
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 4870, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4869
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4872
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessUtilizationSample instance wrapping the given NumPy array.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_14from_data, "ProcessUtilizationSample.from_data(data)\n\nCreate an ProcessUtilizationSample instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `process_utilization_sample_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_14from_data};
/* Argument-parsing wrapper for the static method `from_data(data)`: unpacks
 * exactly one positional-or-keyword argument named `data`, raising TypeError
 * on arity or keyword mismatch, then delegates to the implementation.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the generated code tested `unlikely(__pyx_kwds_len) < 0`, which is
     * always false (unlikely(x) conventionally yields the truth value !!(x),
     * never negative), so a negative count from __Pyx_NumKwargs_FASTCALL was
     * silently ignored. The comparison belongs inside the macro argument. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 4872, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4872, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 4872, __pyx_L3_error)
      /* Verify every required slot was filled by either positionals or kwargs. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 4872, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4872, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 4872, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Release any argument references collected before the failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `ProcessUtilizationSample.from_data`
 * (pyx 4872-4888). Allocates a fresh instance via tp_new (bypassing
 * __init__), validates that `data` is a 1-D NumPy ndarray of dtype
 * process_utilization_sample_dtype, stores `data.view(_numpy.recarray)`
 * into obj._data, and returns the new instance (new reference, NULL on
 * error). Raises TypeError for non-ndarray input and ValueError for wrong
 * rank or dtype. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":4879
 *             data (_numpy.ndarray): a 1D array of dtype `process_utilization_sample_dtype` holding the data.
 *         """
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationSample(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4879, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4880
 *         """
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 4880, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":4881
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4881, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4881, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4880
 *         """
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":4882
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_sample_dtype:
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4882, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4882, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":4883
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != process_utilization_sample_dtype:
 *             raise ValueError("data array must be of dtype process_utilization_sample_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4883, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4883, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4882
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_sample_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":4884
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_sample_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_utilization_sample_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_process_utilization_sample_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4884, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4884, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":4885
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_sample_dtype:
 *             raise ValueError("data array must be of dtype process_utilization_sample_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_proc_3};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4885, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 4885, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4884
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_sample_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_utilization_sample_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":4886
 *         if data.dtype != process_utilization_sample_dtype:
 *             raise ValueError("data array must be of dtype process_utilization_sample_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4886, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4886, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4886, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Swap the new recarray view into obj->_data, dropping the previous
   * reference held by the slot. */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4888
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4872
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessUtilizationSample instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4890
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationSample instance wrapping the given pointer.
*/

/* Python wrapper */
/* Cython-generated fastcall wrapper for the Python-level static method
 * ProcessUtilizationSample.from_ptr(ptr, size=1, readonly=False).
 * Unpacks positional/keyword arguments into C values (intptr_t, size_t,
 * int) and forwards them to the implementation function
 * __pyx_pf_..._16from_ptr defined below.  Generated code: do not hand-edit
 * logic; regenerate from cuda/bindings/_nvml.pyx instead. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_16from_ptr, "ProcessUtilizationSample.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an ProcessUtilizationSample instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
/* Method-table entry binding "from_ptr" (METH_FASTCALL|METH_KEYWORDS) to the wrapper above. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;   /* required arg: raw pointer address */
  size_t __pyx_v_size;    /* optional arg: number of structs (default 1) */
  int __pyx_v_readonly;   /* optional arg: truthiness of `readonly` (default 0/False) */
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};  /* slots for ptr, size, readonly; NULL = not supplied */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` normalizes the operand to 0/1
     * before the comparison, so this check can never fire; this is how the
     * generator emitted it -- confirm against Cython upstream before changing. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 4890, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: gather positionals first, then merge keyword values. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4890, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4890, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4890, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 4890, __pyx_L3_error)
      /* Verify the single required argument (ptr, index 0) was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 4890, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path: exactly 1..3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 4890, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 4890, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4890, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): ptr is converted with PyLong_AsSsize_t although the target
     * is intptr_t -- assumes intptr_t and Py_ssize_t share width/representation
     * on supported platforms; confirm for any new target. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4891, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4891, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);  /* default: size=1 */
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4891, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":4891
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an ProcessUtilizationSample instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);  /* default: readonly=False */
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 4890, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* All arguments converted; call the C-level implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":4890
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationSample instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationSample.from_ptr: rejects a null
 * pointer, allocates a fresh ProcessUtilizationSample via its tp_new,
 * wraps the caller-supplied memory in a memoryview (PyBUF_READ when
 * `readonly`, else PyBUF_WRITE), constructs a NumPy ndarray of `size`
 * elements over that buffer using process_utilization_sample_dtype, and
 * stores its recarray view in obj->_data.  The memory is NOT copied
 * (PyMemoryView_FromMemory aliases it), so the buffer behind `ptr` must
 * stay valid while the returned object is alive. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;   /* boxed PyBUF_READ / PyBUF_WRITE */
  PyObject *__pyx_v_buf = 0;    /* memoryview aliasing the caller's memory */
  PyObject *__pyx_v_data = NULL;/* ndarray built over buf */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4899
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
*/
  /* Guard: a null pointer cannot be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":4900
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4900, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4900, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4899
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
*/
  }

  /* "cuda/bindings/_nvml.pyx":4901
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the wrapper object directly through tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationSample(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4901, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4902
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessUtilizationSample_t) * size, flag)
*/
  /* Select the memoryview access flag from `readonly` and box it as a Python int. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4902, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4902, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4904
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessUtilizationSample_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_sample_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  /* Unbox the flag back to a C int for the CPython API call. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 4904, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4903
 *         cdef ProcessUtilizationSample obj = ProcessUtilizationSample.__new__(ProcessUtilizationSample)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlProcessUtilizationSample_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_sample_dtype)
*/
  /* Alias (not copy) `size` structs of caller memory as a memoryview.
   * NOTE(review): sizeof(struct) * size is unchecked for overflow -- assumed
   * callers pass sane sizes; confirm if exposed to untrusted input. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlProcessUtilizationSample_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4903, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4905
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessUtilizationSample_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_sample_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* Call numpy.ndarray(size, buffer=buf, dtype=process_utilization_sample_dtype)
   * via a vectorcall with keyword builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_process_utilization_sample_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4905, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 4905, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 4905, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4905, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4906
 *             <char*>ptr, sizeof(nvmlProcessUtilizationSample_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_sample_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray): store record-array view on the wrapper. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4906, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4906, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4906, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Replace the old _data reference (set by tp_new) with the new view. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":4908
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return a new reference to the populated wrapper object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4890
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationSample instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Common cleanup for both success and error paths. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4748
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Getter wrapper for the `readonly object _data` attribute declared at
 * _nvml.pyx:4748; delegates to the __get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
   * this line only compiles because __Pyx_KwValues_VARARGS is a macro that
   * discards its arguments -- confirm against the macro definition before
   * touching this. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ implementation for ProcessUtilizationSample._data: returns a new
 * reference to the stored backing object (the recarray view set by
 * from_data/from_ptr).  No error paths: it cannot fail. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* INCREF before returning: the caller receives an owned reference. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Fastcall wrapper for __reduce_cython__(self): rejects any positional or
 * keyword arguments, then delegates to the pickling implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_18__reduce_cython__, "ProcessUtilizationSample.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject everything. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Pickle support: builds state = (self._data,) (plus self.__dict__ when
 * non-empty) and returns either
 *   (__pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, None), state)
 * when __setstate__ must be used, or
 *   (__pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state))
 * otherwise.  0xa75e18a is the layout checksum verified on unpickling. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  /* state = (self._data,) -- the only C-level attribute to preserve. */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  /* Short-circuit evaluation of `_dict is not None and _dict`. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    /* Instance has a non-empty __dict__: append it to the state tuple. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* NOTE(review): the truth value of the precomputed constant tuple
     * ('self._data is not None',) is taken here; a non-empty tuple is always
     * truthy, so use_setstate is 1 on this branch as well -- matches the
     * generator's folding of the tree fragment; verify against Cython
     * upstream if this ever changes. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state)
*/
    /* 3-element form: state passed separately so __setstate__ is invoked. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessUtilizationSample__set_state(self, __pyx_state)
*/
  /*else*/ {
    /* 2-element form: state embedded directly in the reconstructor args. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessUtilizationSample__set_state(self, __pyx_state)
*/

/* Python wrapper */
/*
 * Cython-generated Python-level wrapper for
 * ProcessUtilizationSample.__setstate_cython__(self, __pyx_state).
 *
 * Responsibilities:
 *  - Supports both the METH_FASTCALL (PyObject *const *args, nargs, kwds)
 *    and the classic tuple-based (PyObject *args, PyObject *kwds) calling
 *    conventions, selected at compile time via CYTHON_METH_FASTCALL.
 *  - Parses exactly one argument, `__pyx_state`, accepting it either
 *    positionally or as a keyword.
 *  - On success forwards to the implementation function
 *    __pyx_pf_..._20__setstate_cython__ and returns its result.
 *  - On argument-parsing failure returns NULL with a Python exception set
 *    and a traceback entry added for this module/function.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_20__setstate_cython__, "ProcessUtilizationSample.__setstate_cython__(self, __pyx_state)");
/* Method-table entry registering this wrapper as `__setstate_cython__`. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the parsed arguments; released on
   * every exit path below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the tuple calling convention the positional-argument count must
   * be derived from the args tuple itself. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: gather positionals first, then merge
       * keywords and verify every required slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references acquired so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the generated implementation function. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of ProcessUtilizationSample.__setstate_cython__.
 *
 * Validates that `__pyx_state` is a tuple (rejecting None with a TypeError,
 * since the C-level setter is declared 'not None'), then delegates to
 * __pyx_unpickle_ProcessUtilizationSample__set_state to restore the
 * object's fields during unpickling. Returns None on success, NULL with an
 * exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessUtilizationSample__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Type-check: state must be exactly a tuple (or None, rejected next). */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  /* Restore the instance's fields from the state tuple. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationSample__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessUtilizationSample__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationSample.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4911
 * 
 * 
 * cdef _get_process_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessUtilizationInfo_v1_t pod = nvmlProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

/*
 * Builds and returns a numpy structured dtype describing the C layout of
 * nvmlProcessUtilizationInfo_v1_t, computed at runtime from a stack
 * instance `pod`:
 *   - 'names':   field names exposed on the Python side,
 *   - 'formats': matching numpy scalar types (uint64 for timeStamp,
 *                uint32 for the remaining fields),
 *   - 'offsets': byte offsets of each member, derived via pointer
 *                arithmetic (&pod.member - &pod), so they always match
 *                the compiler's actual struct layout including padding,
 *   - 'itemsize': sizeof the whole struct.
 * Equivalent to numpy.dtype({...}) in the original .pyx source. Returns
 * the dtype object, or NULL with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_utilization_info_v1_dtype_offsets(void) {
  nvmlProcessUtilizationInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlProcessUtilizationInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  size_t __pyx_t_15;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_process_utilization_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":4912
 * 
 * cdef _get_process_utilization_info_v1_dtype_offsets():
 *     cdef nvmlProcessUtilizationInfo_v1_t pod = nvmlProcessUtilizationInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 */
  /* NOTE(review): __pyx_t_1 has no visible initialization before this copy
   * (presumably Cython's lowering of the C++ value-construction above).
   * Harmless here: `pod` is used only for address/offset arithmetic below,
   * never for its contents — but confirm against the generated output of
   * the matching Cython version if this is ever hand-edited. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":4913
 * cdef _get_process_utilization_info_v1_dtype_offsets():
 *     cdef nvmlProcessUtilizationInfo_v1_t pod = nvmlProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['time_stamp', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up numpy.dtype (t_5 = the dtype callable). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4913, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4913, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":4914
 *     cdef nvmlProcessUtilizationInfo_v1_t pod = nvmlProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the 4-entry dtype-spec dict (t_4) and its 'names' list (t_6). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4914, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4914, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_time_stamp) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_sm_util) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_mem_util) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_enc_util) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_dec_util) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_jpg_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_jpg_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_jpg_util) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ofa_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ofa_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_ofa_util) != (0)) __PYX_ERR(0, 4914, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 4914, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4915
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 */
  /* Fetch the eight numpy scalar types (t_7..t_14) for 'formats'. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Pack t_7..t_14 into the 'formats' list; ownership moves into the list. */
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 4915, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 4914, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":4917
 *         'formats': [_numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 */
  /* Per-member byte offsets, computed from the live struct layout. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4917, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":4918
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 4918, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":4919
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.smUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4919, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":4920
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":4921
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.encUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4921, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":4922
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4922, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":4923
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.jpgUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4923, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":4924
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlProcessUtilizationInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ofaUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4924, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":4916
 *         'names': ['time_stamp', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 */
  /* Pack the eight offset ints into the 'offsets' list. */
  __pyx_t_7 = PyList_New(8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4916, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_14) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_13) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_12) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_10) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_9) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_8) != (0)) __PYX_ERR(0, 4916, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 4914, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":4926
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlProcessUtilizationInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' = full struct size so numpy records include tail padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4926, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 4914, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via the vectorcall protocol; unwraps bound
   * methods when possible to avoid a temporary argument tuple. */
  __pyx_t_15 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_15 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_15, (2-__pyx_t_15) | (__pyx_t_15*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4913, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4911
 * 
 * 
 * cdef _get_process_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessUtilizationInfo_v1_t pod = nvmlProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release every temporary that may still hold a reference. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_process_utilization_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4948
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/*
 * Cython-generated tp_init wrapper for
 * ProcessUtilizationInfo_v1.__init__(self, size=1).
 *
 * Uses the classic tuple-based (VARARGS) convention. Accepts at most one
 * argument, `size`, positionally or by keyword; when omitted it defaults
 * to the interned int constant 1. Delegates to the implementation function
 * __pyx_pf_...___init__ and returns its int status (0 success, -1 error).
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] holds an owned reference to `size`; released on all exits. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 4948, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call: collect positionals, merge keywords, then apply the
       * default (size=1) if the slot is still empty. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 4948, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only call: same logic without keyword merging. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 4948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 4948, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references acquired so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the generated implementation function. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":4949
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_utilization_info_v1_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationInfo_v1_t), \
*/
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_utilization_info_v1_dtyp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4949, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 4949, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4949, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4950
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationInfo_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationInfo_v1_t) }"
*/
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4950, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4950, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4950, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4951
 *         arr = _numpy.empty(size, dtype=process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationInfo_v1_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationInfo_v1_t) }"
 * 
*/
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4951, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4951, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4951, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 4951, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":4952
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlProcessUtilizationInfo_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationInfo_v1_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4952, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4952, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlProcessUtilizationInfo_v1_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4952, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4952, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 4951, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 4951, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":4948
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4954
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationInfo_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper for ProcessUtilizationInfo_v1.__repr__: unwraps the
 * generic PyObject* receiver into the extension-type struct and delegates
 * to the __pyx_pf_* implementation function. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationInfo_v1.__repr__ (from _nvml.pyx:4954).
 * If the backing recarray holds more than one record it formats
 * "<{__name__}.ProcessUtilizationInfo_v1_Array_{size} object at {hex(id(self))}>",
 * otherwise "<{__name__}.ProcessUtilizationInfo_v1 object at {hex(id(self))}>".
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];  /* f-string fragments for the "_Array_" branch */
  PyObject *__pyx_t_7[5];  /* f-string fragments for the scalar branch */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":4955
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate `self._data.size > 1` via rich comparison on the NumPy scalar. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4955, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 4955, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":4956
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.ProcessUtilizationInfo_v1 object at {hex(id(self))}>"
 */
    __Pyx_XDECREF(__pyx_r);
    /* __pyx_t_1 := format(__name__) */
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* __pyx_t_4 := format(self._data.size) */
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* __pyx_t_2 := str(hex(id(self))) */
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    /* Join literal and formatted fragments into the final repr string. */
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_ProcessUtilizationInfo_v1_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4956, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4955
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":4958
 *             return f"<{__name__}.ProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.ProcessUtilizationInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar case: same construction, without the "_Array_{size}" fragment. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_ProcessUtilizationInfo_v1_objec;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":4954
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlProcessUtilizationInfo_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop any temporaries still held and record the traceback. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4960
 *             return f"<{__name__}.ProcessUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper for the `ptr` property getter: casts the receiver and
 * forwards to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property getter (from _nvml.pyx:4960): returns
 * `self._data.ctypes.data` — the buffer address of the backing NumPy
 * recarray as a Python int.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4963
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Two chained attribute lookups: _data.ctypes, then .data (a Python int). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4963, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4963, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4960
 *             return f"<{__name__}.ProcessUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4965
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* cdef-level `_get_ptr` (from _nvml.pyx:4965): C-callable variant of the
 * `ptr` property.  Fetches `self._data.ctypes.data` and converts it to
 * intptr_t.  On error returns 0 with a Python exception set (callers must
 * check PyErr_Occurred(), since 0 is also a possible — if degenerate —
 * pointer value). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":4966
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* NOTE(review): converts via PyLong_AsSsize_t, i.e. assumes the address
   * fits in Py_ssize_t (signed, same width as intptr_t on supported
   * platforms) — generated by Cython; confirm in the .pyx if this matters. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4966, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4965
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4968
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper for ProcessUtilizationInfo_v1.__int__: casts the receiver
 * and forwards to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationInfo_v1.__int__ (from _nvml.pyx:4968).
 * Raises TypeError when the backing recarray holds more than one record
 * (callers should use `.ptr` for arrays); otherwise returns
 * `self._data.ctypes.data`, the buffer address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":4969
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate `self._data.size > 1` via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4969, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4969, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 4969, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":4970
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Instantiate TypeError with the interned message and raise it. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4970, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 4970, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":4969
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":4972
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4972, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4972, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4968
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4974
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper for ProcessUtilizationInfo_v1.__len__ (sq_length slot):
 * casts the receiver and forwards to the implementation. Generated by Cython. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationInfo_v1.__len__ (from _nvml.pyx:4974):
 * returns `self._data.size` (the recarray element count) coerced to
 * Py_ssize_t.  Returns -1 with an exception set on failure, per the
 * sq_length slot convention. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":4975
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4975, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Coerce the NumPy integer scalar to Py_ssize_t via __index__. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4975, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4974
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4977
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper for ProcessUtilizationInfo_v1.__eq__: casts the receiver
 * and forwards both operands to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationInfo_v1.__eq__ (from _nvml.pyx:4977).
 * Returns Py_False when `other` is not a ProcessUtilizationInfo_v1, or the
 * two backing recarrays differ in size or dtype; otherwise returns
 * bool((self._data == other._data).all()), i.e. element-wise equality of
 * all records.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":4978
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Take an owned reference to self._data for the duration of the call. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":4979
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuit `or` chain: type check, then size compare, then dtype. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 4979, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4980
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4979
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":4981
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Element-wise `==` on the two recarrays, then call .all() on the result. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4981, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4981, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Normalize the .all() result to an exact Python bool (`bool(...)`). */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4981, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4977
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4983
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
*/

/* Python wrapper for the `time_stamp` property getter: casts the receiver
 * and forwards to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4986
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4986, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4987
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_stamp
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4987, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4987, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4987, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4986
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
*/
  }

  /* "cuda/bindings/_nvml.pyx":4988
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp             # <<<<<<<<<<<<<<
 * 
 *     @time_stamp.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4988, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4983
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4990
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated setter (cuda/bindings/_nvml.pyx:4990-4992);
 * regenerate from the .pyx source instead of editing by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level entry point for the `time_stamp` property setter: casts self
 * and forwards to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: performs `self._data.time_stamp = val`.
 * Returns 0 on success; on failure sets the exception, appends a traceback
 * frame, and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":4992
 *     @time_stamp.setter
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp, __pyx_v_val) < (0)) __PYX_ERR(0, 4992, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":4990
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4994
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated accessor (cuda/bindings/_nvml.pyx:4994-4999).
 * Do not hand-edit the logic; change the .pyx source and regenerate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level entry point for the `pid` property getter: casts self to the
 * extension-type struct and delegates to the typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, equivalent to the Python source:
 *   if self._data.size == 1: return int(self._data.pid[0])  # scalar case
 *   return self._data.pid                                   # array case
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":4997
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  /* Compare self._data.size against the cached int constant 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4997, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 4997, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":4998
 *         """Union[~_numpy.uint32, int]: PID of process."""
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
*/
    /* Single-element case: fetch element 0 and coerce to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4998, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4998, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4998, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":4997
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  }

  /* "cuda/bindings/_nvml.pyx":4999
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
*/
  /* Multi-element case: return the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":4994
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process."""
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5001
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated setter (cuda/bindings/_nvml.pyx:5001-5003);
 * regenerate from the .pyx source instead of editing by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level entry point for the `pid` property setter: casts self and
 * forwards to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: performs `self._data.pid = val`.
 * Returns 0 on success; on failure sets the exception, appends a traceback
 * frame, and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5003
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 5003, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5001
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5005
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated accessor (cuda/bindings/_nvml.pyx:5005-5010).
 * Do not hand-edit the logic; change the .pyx source and regenerate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level entry point for the `sm_util` property getter: casts self to
 * the extension-type struct and delegates to the typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, equivalent to the Python source:
 *   if self._data.size == 1: return int(self._data.sm_util[0])  # scalar case
 *   return self._data.sm_util                                   # array case
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5008
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util
*/
  /* Compare self._data.size against the cached int constant 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5008, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5009
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.sm_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.sm_util
 * 
*/
    /* Single-element case: fetch element 0 and coerce to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5008
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util
*/
  }

  /* "cuda/bindings/_nvml.pyx":5010
 *         if self._data.size == 1:
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util             # <<<<<<<<<<<<<<
 * 
 *     @sm_util.setter
*/
  /* Multi-element case: return the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5010, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5005
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.sm_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5012
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated setter (cuda/bindings/_nvml.pyx:5012-5014);
 * regenerate from the .pyx source instead of editing by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level entry point for the `sm_util` property setter: casts self and
 * forwards to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: performs `self._data.sm_util = val`.
 * Returns 0 on success; on failure sets the exception, appends a traceback
 * frame, and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5014
 *     @sm_util.setter
 *     def sm_util(self, val):
 *         self._data.sm_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util, __pyx_v_val) < (0)) __PYX_ERR(0, 5014, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5012
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.sm_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5016
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated accessor (cuda/bindings/_nvml.pyx:5016-5021).
 * Do not hand-edit the logic; change the .pyx source and regenerate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level entry point for the `mem_util` property getter: casts self to
 * the extension-type struct and delegates to the typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, equivalent to the Python source:
 *   if self._data.size == 1: return int(self._data.mem_util[0])  # scalar case
 *   return self._data.mem_util                                   # array case
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5019
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util
*/
  /* Compare self._data.size against the cached int constant 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5019, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5020
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.mem_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.mem_util
 * 
*/
    /* Single-element case: fetch element 0 and coerce to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5020, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5020, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5020, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5019
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util
*/
  }

  /* "cuda/bindings/_nvml.pyx":5021
 *         if self._data.size == 1:
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util             # <<<<<<<<<<<<<<
 * 
 *     @mem_util.setter
*/
  /* Multi-element case: return the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5016
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.mem_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5023
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated setter (cuda/bindings/_nvml.pyx:5023-5025);
 * regenerate from the .pyx source instead of editing by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level entry point for the `mem_util` property setter: casts self and
 * forwards to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: performs `self._data.mem_util = val`.
 * Returns 0 on success; on failure sets the exception, appends a traceback
 * frame, and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5025
 *     @mem_util.setter
 *     def mem_util(self, val):
 *         self._data.mem_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util, __pyx_v_val) < (0)) __PYX_ERR(0, 5025, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5023
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.mem_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5027
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated accessor (cuda/bindings/_nvml.pyx:5027-5032).
 * Do not hand-edit the logic; change the .pyx source and regenerate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level entry point for the `enc_util` property getter: casts self to
 * the extension-type struct and delegates to the typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, equivalent to the Python source:
 *   if self._data.size == 1: return int(self._data.enc_util[0])  # scalar case
 *   return self._data.enc_util                                   # array case
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5030
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util
*/
  /* Compare self._data.size against the cached int constant 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5030, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5031
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.enc_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.enc_util
 * 
*/
    /* Single-element case: fetch element 0 and coerce to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5031, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5031, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5031, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5030
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util
*/
  }

  /* "cuda/bindings/_nvml.pyx":5032
 *         if self._data.size == 1:
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util             # <<<<<<<<<<<<<<
 * 
 *     @enc_util.setter
*/
  /* Multi-element case: return the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5032, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5027
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.enc_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5034
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated setter (cuda/bindings/_nvml.pyx:5034-5036);
 * regenerate from the .pyx source instead of editing by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Python-level entry point for the `enc_util` property setter: casts self and
 * forwards to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: performs `self._data.enc_util = val`.
 * Returns 0 on success; on failure sets the exception, appends a traceback
 * frame, and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5036
 *     @enc_util.setter
 *     def enc_util(self, val):
 *         self._data.enc_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util, __pyx_v_val) < (0)) __PYX_ERR(0, 5036, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5034
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.enc_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5038
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   __get__ slot wrapper for ProcessUtilizationInfo_v1.dec_util: forwards to the
   impl below after casting self to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the dec_util getter (_nvml.pyx:5038-5043):
   if self._data.size == 1, return int(self._data.dec_util[0]) (a scalar);
   otherwise return self._data.dec_util unchanged. Returns a new reference,
   or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5041
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5041, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5041, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5042
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.dec_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.dec_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5042, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5042, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5042, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5041
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":5043
 *         if self._data.size == 1:
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util             # <<<<<<<<<<<<<<
 * 
 *     @dec_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5043, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5038
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.dec_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5045
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   __set__ slot wrapper for ProcessUtilizationInfo_v1.dec_util; forwards to the
   impl below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.dec_util = val` (_nvml.pyx:5047): a single
   attribute store on the wrapped _data object. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5047
 *     @dec_util.setter
 *     def dec_util(self, val):
 *         self._data.dec_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util, __pyx_v_val) < (0)) __PYX_ERR(0, 5047, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5045
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.dec_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5049
 *         self._data.dec_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   __get__ slot wrapper for ProcessUtilizationInfo_v1.jpg_util; forwards to the
   impl below after casting self to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the jpg_util getter (_nvml.pyx:5049-5054):
   if self._data.size == 1, return int(self._data.jpg_util[0]) (a scalar);
   otherwise return self._data.jpg_util unchanged. Returns a new reference,
   or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5052
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.jpg_util[0])
 *         return self._data.jpg_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5052, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5052, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5053
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.jpg_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.jpg_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5053, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5053, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5053, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5052
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.jpg_util[0])
 *         return self._data.jpg_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":5054
 *         if self._data.size == 1:
 *             return int(self._data.jpg_util[0])
 *         return self._data.jpg_util             # <<<<<<<<<<<<<<
 * 
 *     @jpg_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5054, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5049
 *         self._data.dec_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.jpg_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5056
 *         return self._data.jpg_util
 * 
 *     @jpg_util.setter             # <<<<<<<<<<<<<<
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   __set__ slot wrapper for ProcessUtilizationInfo_v1.jpg_util; forwards to the
   impl below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.jpg_util = val` (_nvml.pyx:5058): a single
   attribute store on the wrapped _data object. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5058
 *     @jpg_util.setter
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util, __pyx_v_val) < (0)) __PYX_ERR(0, 5058, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5056
 *         return self._data.jpg_util
 * 
 *     @jpg_util.setter             # <<<<<<<<<<<<<<
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.jpg_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5060
 *         self._data.jpg_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   __get__ slot wrapper for ProcessUtilizationInfo_v1.ofa_util; forwards to the
   impl below after casting self to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ofa_util getter (_nvml.pyx:5060-5065):
   if self._data.size == 1, return int(self._data.ofa_util[0]) (a scalar);
   otherwise return self._data.ofa_util unchanged. Returns a new reference,
   or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5063
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.ofa_util[0])
 *         return self._data.ofa_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5063, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5063, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5064
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.ofa_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.ofa_util
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5064, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5064, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5064, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5063
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.ofa_util[0])
 *         return self._data.ofa_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":5065
 *         if self._data.size == 1:
 *             return int(self._data.ofa_util[0])
 *         return self._data.ofa_util             # <<<<<<<<<<<<<<
 * 
 *     @ofa_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5060
 *         self._data.jpg_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.ofa_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5067
 *         return self._data.ofa_util
 * 
 *     @ofa_util.setter             # <<<<<<<<<<<<<<
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   __set__ slot wrapper for ProcessUtilizationInfo_v1.ofa_util; forwards to the
   impl below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.ofa_util = val` (_nvml.pyx:5069): a single
   attribute store on the wrapped _data object. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5069
 *     @ofa_util.setter
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util, __pyx_v_val) < (0)) __PYX_ERR(0, 5069, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5067
 *         return self._data.ofa_util
 * 
 *     @ofa_util.setter             # <<<<<<<<<<<<<<
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.ofa_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5071
 *         self._data.ofa_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code — do not hand-edit logic; regenerate from
   cuda/bindings/_nvml.pyx instead.
   mp_subscript slot wrapper for ProcessUtilizationInfo_v1.__getitem__; forwards
   to the impl below after casting self to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __getitem__ (_nvml.pyx:5071-5085). Three cases:
   1) key is an int: bounds-check against self._data.size (Python semantics:
      valid range is [-size, size)), normalize a negative index, then return
      ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1]) — a
      length-1 slice keeps the result a record-array view, not a scalar.
   2) otherwise index _data with the key directly; if the result is a
      numpy.recarray with the process_utilization_info_v1 dtype, re-wrap it
      via from_data so callers always see the typed wrapper.
   3) anything else (scalars, other dtypes) is returned as-is.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":5074
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5075
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 5075, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":5076
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5076, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 5076, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":5077
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5078
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5078, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 5078, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5077
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":5079
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":5080
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":5079
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":5081
 *             if key_ < 0:
 *                 key_ += size
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_info_v1_dtype:
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5081, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5081, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5074
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":5082
 *                 key_ += size
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_info_v1_dtype:
 *             return ProcessUtilizationInfo_v1.from_data(out)
 */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":5083
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessUtilizationInfo_v1.from_data(out)
 *         return out
 */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_process_utilization_info_v1_dtyp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 5083, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5084
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_info_v1_dtype:
 *             return ProcessUtilizationInfo_v1.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5084, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5083
 *             return ProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             return ProcessUtilizationInfo_v1.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":5085
 *         if isinstance(out, _numpy.recarray) and out.dtype == process_utilization_info_v1_dtype:
 *             return ProcessUtilizationInfo_v1.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5071
 *         self._data.ofa_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5087
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* CPython mp_ass_subscript-style slot wrapper for
 * ProcessUtilizationInfo_v1.__setitem__.  Downcasts `self` to the
 * extension-type struct and forwards (key, val) unchanged to the
 * implementation function.  Returns 0 on success, -1 with a Python
 * exception set on failure (propagated as-is from the implementation). */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function,
   * so __Pyx_KwValues_VARARGS must be a macro that discards its arguments
   * for this calling convention — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessUtilizationInfo_v1.__setitem__(self, key, val):
 * delegates item assignment to the wrapped NumPy recarray held in
 * self->_data via PyObject_SetItem.  Returns 0 on success; on failure
 * records a traceback frame and returns -1 with the exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":5088
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Forward the assignment to the underlying recarray; any TypeError /
   * IndexError / ValueError raised by NumPy propagates to the caller. */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 5088, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5087
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5090
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * ProcessUtilizationInfo_v1.from_data(data).  Accepts exactly one
 * argument ("data"), positionally or by keyword, under both the
 * METH_FASTCALL (vectorcall) and tuple-based calling conventions, then
 * forwards it to the implementation function.  Returns a new reference
 * or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_14from_data, "ProcessUtilizationInfo_v1.from_data(data)\n\nCreate an ProcessUtilizationInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `process_utilization_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which normalizes its operand to 0 or 1,
     * so the comparison `< 0` was always false and a negative (error) result
     * from __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison
     * must be inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5090, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: accept at most one positional, then let
       * __Pyx_ParseKeywords fill the remaining slots by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5090, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 5090, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 5090, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5090, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 5090, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references taken above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method ProcessUtilizationInfo_v1.from_data(data).
 * Allocates a fresh ProcessUtilizationInfo_v1 via tp_new, validates that
 * `data` is a 1-D numpy.ndarray whose dtype equals the module-level
 * process_utilization_info_v1_dtype, stores data.view(numpy.recarray) on the
 * new object's _data field, and returns the object as a new reference.
 * Raises TypeError (wrong type) or ValueError (wrong ndim / dtype); returns
 * NULL with the exception set on any failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":5097
 *             data (_numpy.ndarray): a 1D array of dtype `process_utilization_info_v1_dtype` holding the data.
 *         """
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Direct tp_new call: allocates the instance without running __init__. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5097, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5098
 *         """
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* isinstance(data, numpy.ndarray) — numpy is looked up from module
   * globals each call, so a reloaded/monkeypatched module is honored. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5098, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5098, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 5098, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":5099
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    /* Construct and raise TypeError via vectorcall. */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5099, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5099, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5098
 *         """
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":5100
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_info_v1_dtype:
 */
  /* data.ndim != 1 — optimized int comparison against the cached int 1. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 5100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":5101
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != process_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype process_utilization_info_v1_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5101, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5101, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5100
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_info_v1_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":5102
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* data.dtype != process_utilization_info_v1_dtype — generic rich compare,
   * since dtype equality is object-defined by NumPy. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_process_utilization_info_v1_dtyp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5102, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 5102, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":5103
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype process_utilization_info_v1_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_proc_4};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5103, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 5103, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5102
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype process_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":5104
 *         if data.dtype != process_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype process_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* obj._data = data.view(numpy.recarray) — a zero-copy view sharing the
   * caller's buffer; the old _data reference (if any) is dropped. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5104, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5106
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5090
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* Error path: release all live temporaries, record traceback, return NULL.
   * The __pyx_L0 epilogue also drops the local `obj` reference (the returned
   * reference was INCREF'd separately above). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5108
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * ProcessUtilizationInfo_v1.from_ptr(ptr, size=1, readonly=False).
 * Accepts 1-3 arguments, positionally or by keyword, converts them to
 * intptr_t / size_t / int, applies the documented defaults for `size`
 * and `readonly`, and forwards to the implementation function.
 * Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_16from_ptr, "ProcessUtilizationInfo_v1.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an ProcessUtilizationInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which normalizes its operand to 0 or 1,
     * so the comparison `< 0` was always false and a negative (error) result
     * from __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison
     * must be inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5108, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect up to three positionals, then resolve the
       * rest by name via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5108, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5108, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5108, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 5108, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; `size` and `readonly` may stay
       * unset and receive defaults below. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 5108, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5108, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5108, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5108, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* C-type conversions; each can raise (OverflowError/TypeError). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5109, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 5109, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5109, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":5109
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 5108, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references taken above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":5108
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method
 * ProcessUtilizationInfo_v1.from_ptr(ptr, size=1, readonly=False).
 * Rejects a null pointer, allocates a fresh instance via tp_new, wraps the
 * raw memory at `ptr` (size * sizeof(nvmlProcessUtilizationInfo_v1_t) bytes)
 * in a memoryview with PyBUF_READ or PyBUF_WRITE depending on `readonly`,
 * builds a numpy.ndarray of process_utilization_info_v1_dtype over that
 * buffer, stores its recarray view on the object's _data field, and returns
 * the object as a new reference.  The caller is responsible for keeping the
 * memory behind `ptr` alive for the lifetime of the returned object (the
 * memoryview does not own it).  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":5117
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 */
  /* Guard: wrapping address 0 would create a memoryview over NULL. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5118
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5118, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5118, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5117
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":5119
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Direct tp_new call: allocates the instance without running __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5119, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5120
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessUtilizationInfo_v1_t) * size, flag)
 */
  /* flag = PyBUF_READ if readonly else PyBUF_WRITE, boxed as a Python int
   * because `flag` is an untyped cdef variable in the .pyx source. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5120, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5120, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5122
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessUtilizationInfo_v1_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_info_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Unbox the flag back to C int for the PyMemoryView_FromMemory call. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5122, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":5121
 *         cdef ProcessUtilizationInfo_v1 obj = ProcessUtilizationInfo_v1.__new__(ProcessUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlProcessUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_info_v1_dtype)
 */
  /* Non-owning memoryview over the caller's memory. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlProcessUtilizationInfo_v1_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5121, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5123
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlProcessUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_info_v1_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* numpy.ndarray(size, buffer=buf, dtype=...) via keyword vectorcall. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_process_utilization_info_v1_dtyp); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5123, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 5123, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 5123, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5123, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5124
 *             <char*>ptr, sizeof(nvmlProcessUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=process_utilization_info_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Store the zero-copy recarray view on the new object; the old _data
   * reference (if any) is dropped. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5124, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5126
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5108
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release all live temporaries, record traceback, return NULL.
   * The __pyx_L0 epilogue also drops local references (obj/flag/buf/data). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":4944
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Getter wrapper for the read-only attribute ProcessUtilizationInfo_v1._data.
 * Casts the Python-level `self` to the extension-type struct and forwards to
 * the typed implementation __pyx_pf_..._5_data___get__ below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
   * presumably __Pyx_KwValues_VARARGS expands to a constant that ignores its
   * arguments — confirm against the macro definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `_data` property getter.
 * Returns a new reference to self->_data; no error path exists (the
 * __Pyx_XDECREF of the still-NULL result is a no-op). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Hand out a new reference to the stored object. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Prototype, docstring, and method-table entry for
 * ProcessUtilizationInfo_v1.__reduce_cython__ (pickle support). The signature
 * differs depending on whether the METH_FASTCALL calling convention is
 * available in this CPython build. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_18__reduce_cython__, "ProcessUtilizationInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_18__reduce_cython__};
/* Python wrapper for ProcessUtilizationInfo_v1.__reduce_cython__(self).
 * Rejects any positional or keyword arguments, then forwards `self` to the
 * typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the tuple-based convention the arg count must be recovered from
   * the args tuple itself. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: raise TypeError on any
   * positional or keyword argument. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__ (pickle protocol).
 * Builds state = (self._data,) [+ (self.__dict__,) if non-empty] and returns
 * a 2- or 3-tuple of (unpickle_function, (type, checksum 0xa75e18a, ...),
 * [state]) suitable for pickle. Refcount management follows the usual
 * Cython-generated pattern: every temporary is GOTREF'd on creation and
 * cleared via the __pyx_L1_error path on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  /* state starts as a 1-tuple holding the _data recarray. */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuit `and`: only truth-test _dict if it is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    /* Non-empty instance __dict__: append it to the pickled state. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* NOTE(review): truth-tests a precomputed constant tuple from the module
     * state (per the fragment above, ('self._data is not None',)); a non-empty
     * tuple is always truthy, so use_setstate presumably ends up 1 here —
     * confirm against the __pyx_tuple[2] initialization. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 */
    /* 3-tuple form: state is passed separately so __setstate__ is used. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(self, __pyx_state)
 */
  /*else*/ {
    /* 2-tuple form: state is embedded in the unpickle function's arguments. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Prototype, docstring, and method-table entry for
 * ProcessUtilizationInfo_v1.__setstate_cython__ (pickle support). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_20__setstate_cython__, "ProcessUtilizationInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_20__setstate_cython__};
/* Python wrapper for ProcessUtilizationInfo_v1.__setstate_cython__(self, __pyx_state).
 * Unpacks the single argument `__pyx_state` (positional or keyword, under
 * either the METH_FASTCALL or the tuple-based calling convention) and forwards
 * to the typed implementation below. Returns NULL with an exception set on
 * bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `if (unlikely(__pyx_kwds_len) < 0)`. Because unlikely(x)
     * expands to __builtin_expect(!!(x), 0), the old form compared
     * `!!(__pyx_kwds_len) < 0`, which is always false, so a negative (error)
     * result from __Pyx_NumKwargs_FASTCALL was silently ignored. The
     * comparison belongs inside the hint, matching the sibling
     * __reduce_cython__ wrapper above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect any positional arg first, then let
       * __Pyx_ParseKeywords fill the remaining slots and reject unknown or
       * duplicate keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: validates that __pyx_state is a
 * non-None tuple, then delegates to the generated
 * __pyx_unpickle_ProcessUtilizationInfo_v1__set_state helper to restore the
 * instance's fields. Returns None on success, NULL with an exception set on
 * failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* The set_state helper requires a tuple declared `not None`: raise
   * TypeError for any other type, and for None explicitly. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationInfo_v1__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5129
 * 
 * 
 * cdef _get_ecc_sram_error_status_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramErrorStatus_v1_t pod = nvmlEccSramErrorStatus_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_error_status_v1_dtype_offsets(void) {
  nvmlEccSramErrorStatus_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlEccSramErrorStatus_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  PyObject *__pyx_t_18 = NULL;
  PyObject *__pyx_t_19 = NULL;
  size_t __pyx_t_20;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ecc_sram_error_status_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":5130
 * 
 * cdef _get_ecc_sram_error_status_v1_dtype_offsets():
 *     cdef nvmlEccSramErrorStatus_v1_t pod = nvmlEccSramErrorStatus_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'aggregate_unc_parity', 'aggregate_unc_sec_ded', 'aggregate_cor', 'volatile_unc_parity', 'volatile_unc_sec_ded', 'volatile_cor', 'aggregate_unc_bucket_l2', 'aggregate_unc_bucket_sm', 'aggregate_unc_bucket_pcie', 'aggregate_unc_bucket_mcu', 'aggregate_unc_bucket_other', 'b_threshold_exceeded'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":5131
 * cdef _get_ecc_sram_error_status_v1_dtype_offsets():
 *     cdef nvmlEccSramErrorStatus_v1_t pod = nvmlEccSramErrorStatus_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'aggregate_unc_parity', 'aggregate_unc_sec_ded', 'aggregate_cor', 'volatile_unc_parity', 'volatile_unc_sec_ded', 'volatile_cor', 'aggregate_unc_bucket_l2', 'aggregate_unc_bucket_sm', 'aggregate_unc_bucket_pcie', 'aggregate_unc_bucket_mcu', 'aggregate_unc_bucket_other', 'b_threshold_exceeded'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":5132
 *     cdef nvmlEccSramErrorStatus_v1_t pod = nvmlEccSramErrorStatus_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'aggregate_unc_parity', 'aggregate_unc_sec_ded', 'aggregate_cor', 'volatile_unc_parity', 'volatile_unc_sec_ded', 'volatile_cor', 'aggregate_unc_bucket_l2', 'aggregate_unc_bucket_sm', 'aggregate_unc_bucket_pcie', 'aggregate_unc_bucket_mcu', 'aggregate_unc_bucket_other', 'b_threshold_exceeded'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(13); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_parity);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_parity);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_aggregate_unc_parity) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_sec_ded);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_sec_ded);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_aggregate_unc_sec_ded) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_cor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_cor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_aggregate_cor) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_volatile_unc_parity);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_volatile_unc_parity);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_volatile_unc_parity) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_volatile_unc_sec_ded);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_volatile_unc_sec_ded);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_volatile_unc_sec_ded) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_volatile_cor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_volatile_cor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_volatile_cor) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_l2);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_l2);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_l2) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_sm);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_sm);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_sm) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_pcie);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_pcie);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_pcie) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_mcu);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_mcu);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_mcu) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_other);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_other);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_mstate_global->__pyx_n_u_aggregate_unc_bucket_other) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_threshold_exceeded);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_threshold_exceeded);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 12, __pyx_mstate_global->__pyx_n_u_b_threshold_exceeded) != (0)) __PYX_ERR(0, 5132, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 5132, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5133
 *     return _numpy.dtype({
 *         'names': ['version', 'aggregate_unc_parity', 'aggregate_unc_sec_ded', 'aggregate_cor', 'volatile_unc_parity', 'volatile_unc_sec_ded', 'volatile_cor', 'aggregate_unc_bucket_l2', 'aggregate_unc_bucket_sm', 'aggregate_unc_bucket_pcie', 'aggregate_unc_bucket_mcu', 'aggregate_unc_bucket_other', 'b_threshold_exceeded'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_19);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(13); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_t_17) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_t_18) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_19);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 12, __pyx_t_19) != (0)) __PYX_ERR(0, 5133, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  __pyx_t_18 = 0;
  __pyx_t_19 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 5132, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5135
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncSecDed)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":5136
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncParity)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncSecDed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateCor)) - (<intptr_t>&pod),
*/
  __pyx_t_19 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncParity)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 5136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_19);

  /* "cuda/bindings/_nvml.pyx":5137
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncSecDed)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateCor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileUncParity)) - (<intptr_t>&pod),
*/
  __pyx_t_18 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncSecDed)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 5137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);

  /* "cuda/bindings/_nvml.pyx":5138
 *             (<intptr_t>&(pod.aggregateUncParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncSecDed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateCor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.volatileUncParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileUncSecDed)) - (<intptr_t>&pod),
*/
  __pyx_t_17 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateCor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 5138, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);

  /* "cuda/bindings/_nvml.pyx":5139
 *             (<intptr_t>&(pod.aggregateUncSecDed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateCor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileUncParity)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.volatileUncSecDed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileCor)) - (<intptr_t>&pod),
*/
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.volatileUncParity)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":5140
 *             (<intptr_t>&(pod.aggregateCor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileUncParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileUncSecDed)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.volatileCor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketL2)) - (<intptr_t>&pod),
*/
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.volatileUncSecDed)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 5140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":5141
 *             (<intptr_t>&(pod.volatileUncParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileUncSecDed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileCor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncBucketL2)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketSm)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.volatileCor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 5141, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":5142
 *             (<intptr_t>&(pod.volatileUncSecDed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.volatileCor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketL2)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncBucketSm)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketPcie)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncBucketL2)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 5142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":5143
 *             (<intptr_t>&(pod.volatileCor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketL2)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketSm)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncBucketPcie)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketMcu)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncBucketSm)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 5143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":5144
 *             (<intptr_t>&(pod.aggregateUncBucketL2)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketSm)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketPcie)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncBucketMcu)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketOther)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncBucketPcie)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 5144, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":5145
 *             (<intptr_t>&(pod.aggregateUncBucketSm)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketPcie)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketMcu)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.aggregateUncBucketOther)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bThresholdExceeded)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncBucketMcu)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":5146
 *             (<intptr_t>&(pod.aggregateUncBucketPcie)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketMcu)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketOther)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bThresholdExceeded)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.aggregateUncBucketOther)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":5147
 *             (<intptr_t>&(pod.aggregateUncBucketMcu)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncBucketOther)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bThresholdExceeded)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlEccSramErrorStatus_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bThresholdExceeded)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":5134
 *         'names': ['version', 'aggregate_unc_parity', 'aggregate_unc_sec_ded', 'aggregate_cor', 'volatile_unc_parity', 'volatile_unc_sec_ded', 'volatile_cor', 'aggregate_unc_bucket_l2', 'aggregate_unc_bucket_sm', 'aggregate_unc_bucket_pcie', 'aggregate_unc_bucket_mcu', 'aggregate_unc_bucket_other', 'b_threshold_exceeded'],
 *         'formats': [_numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.aggregateUncParity)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_19);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_19) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_18) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_17) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_16) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_15) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_14) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_13) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_12) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_11) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 10, __pyx_t_10) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 11, __pyx_t_9) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 12, __pyx_t_8) != (0)) __PYX_ERR(0, 5134, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_19 = 0;
  __pyx_t_18 = 0;
  __pyx_t_17 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 5132, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":5149
 *             (<intptr_t>&(pod.bThresholdExceeded)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlEccSramErrorStatus_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEccSramErrorStatus_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 5132, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_20 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_20 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_20, (2-__pyx_t_20) | (__pyx_t_20*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5131, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5129
 * 
 * 
 * cdef _get_ecc_sram_error_status_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramErrorStatus_v1_t pod = nvmlEccSramErrorStatus_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_XDECREF(__pyx_t_18);
  __Pyx_XDECREF(__pyx_t_19);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_ecc_sram_error_status_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5166
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlEccSramErrorStatus_v1_t *>calloc(1, sizeof(nvmlEccSramErrorStatus_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for EccSramErrorStatus_v1.__init__.
 * Enforces the zero-argument signature (rejects any positional or keyword
 * arguments with a TypeError), then delegates to the typed implementation
 * function. Follows the tp_init contract: returns 0 on success, -1 with a
 * Python exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments; the non-ASSUME_SAFE_SIZE branch must check
   * for a negative result because PyTuple_Size can fail. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments beyond self: any positional... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  /* ...or keyword argument is rejected with a TypeError. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramErrorStatus_v1.__init__ (pyx line 5166):
 * allocates a zero-initialized nvmlEccSramErrorStatus_v1_t on the C heap,
 * raises MemoryError if the allocation fails, and initializes the
 * ownership bookkeeping fields (_owner=None, _owned=True, _readonly=False)
 * so __dealloc__ later frees the buffer.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":5167
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlEccSramErrorStatus_v1_t *>calloc(1, sizeof(nvmlEccSramErrorStatus_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EccSramErrorStatus_v1")
 */
  /* calloc (not malloc) so the NVML struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlEccSramErrorStatus_v1_t *)calloc(1, (sizeof(nvmlEccSramErrorStatus_v1_t))));

  /* "cuda/bindings/_nvml.pyx":5168
 *     def __init__(self):
 *         self._ptr = <nvmlEccSramErrorStatus_v1_t *>calloc(1, sizeof(nvmlEccSramErrorStatus_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5169
 *         self._ptr = <nvmlEccSramErrorStatus_v1_t *>calloc(1, sizeof(nvmlEccSramErrorStatus_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EccSramErrorStatus_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError as a (module-)global name and call it with the
     * interned message string via the vectorcall/FastCall protocol; the
     * UNPACK_METHODS branch unpacks a bound method into self+function so
     * the callee can be vectorcalled with the self argument prepended. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5169, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EccSramErrorSta};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5169, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    /* Raise the freshly constructed MemoryError and jump to cleanup. */
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5169, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5168
 *     def __init__(self):
 *         self._ptr = <nvmlEccSramErrorStatus_v1_t *>calloc(1, sizeof(nvmlEccSramErrorStatus_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":5170
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: INCREF the new value before DECREFing the old slot
   * value, so the attribute swap is safe even if a destructor runs. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":5171
 *             raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This wrapper owns the buffer: __dealloc__ will free it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":5172
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":5166
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlEccSramErrorStatus_v1_t *>calloc(1, sizeof(nvmlEccSramErrorStatus_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5174
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlEccSramErrorStatus_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Cython-generated wrapper for EccSramErrorStatus_v1.__dealloc__: casts the
 * generic PyObject* to the extension type's struct and forwards to the
 * implementation. tp_dealloc-path, so it cannot raise.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` have no visible declaration in
 * this function; this only compiles because __Pyx_KwValues_VARARGS is
 * presumably a macro that discards its arguments in this build
 * configuration — confirm against the Cython utility-code definitions. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of EccSramErrorStatus_v1.__dealloc__ (pyx lines 5174-5179):
 * free the underlying nvmlEccSramErrorStatus_v1_t buffer, but only when
 * this wrapper owns it and it has not already been released. The slot is
 * cleared before free() so the object never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  nvmlEccSramErrorStatus_v1_t *__pyx_v_ptr;

  /* Guard clauses: nothing to do for borrowed or already-released memory. */
  if (!__pyx_v_self->_owned) {
    return;
  }
  if (__pyx_v_self->_ptr == NULL) {
    return;
  }

  /* Detach the pointer from the object first, then release it. */
  __pyx_v_ptr = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(__pyx_v_ptr);
}

/* "cuda/bindings/_nvml.pyx":5181
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.EccSramErrorStatus_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Cython-generated wrapper for EccSramErrorStatus_v1.__repr__: casts the
 * generic PyObject* receiver to the extension struct type and forwards to
 * the implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramErrorStatus_v1.__repr__ (pyx line 5182):
 * builds the f-string
 *   f"<{__name__}.EccSramErrorStatus_v1 object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * five fragments (two of them interned constants) in one pass. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":5182
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.EccSramErrorStatus_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: look up the module global and format it with an empty
   * format spec (i.e. str()-conversion). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id() via the builtin, then hex(), then ensure the
   * result is an exact unicode string for the join below. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".EccSramErrorStatus_v1 object at " + hexaddr + ">";
   * the total length and max char value are precomputed so the join can
   * allocate the result buffer once. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_EccSramErrorStatus_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5181
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.EccSramErrorStatus_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5184
 *         return f"<{__name__}.EccSramErrorStatus_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Cython-generated wrapper for the `ptr` property getter: casts the
 * receiver to the extension struct type and forwards to the implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (pyx line 5187): boxes the
 * raw address stored in self._ptr into a Python int.
 * NOTE(review): the generated code passes an intptr_t to
 * PyLong_FromSsize_t, i.e. it presumes intptr_t and Py_ssize_t have the
 * same width on all supported platforms. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5187
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5187, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5184
 *         return f"<{__name__}.EccSramErrorStatus_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5189
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef helper EccSramErrorStatus_v1._get_ptr(): expose the wrapped
 * nvmlEccSramErrorStatus_v1_t pointer as an integer address. No Python
 * C-API involved, so the value can be returned directly. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  /* Equivalent to the generated result-variable/goto-label form: a single
   * cast of the struct pointer to intptr_t. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":5192
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* `__int__` slot for EccSramErrorStatus_v1: forwards to the implementation
 * below after refnanny bookkeeping. NOTE(review): __pyx_args/__pyx_nargs are
 * not parameters of this wrapper; they appear only inside the
 * __Pyx_KwValues_VARARGS macro, which presumably expands without evaluating
 * them in this build configuration — generated code, not a defect. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of int(EccSramErrorStatus_v1): same result as the `ptr`
 * property — the wrapped struct pointer boxed as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":5193
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5192
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5195
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef EccSramErrorStatus_v1 other_
 *         if not isinstance(other, EccSramErrorStatus_v1):
*/

/* Python wrapper */
/* `__eq__` slot for EccSramErrorStatus_v1: forwards both operands to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Equality for EccSramErrorStatus_v1: False for a non-EccSramErrorStatus_v1
 * operand; otherwise a byte-wise memcmp of the two underlying
 * nvmlEccSramErrorStatus_v1_t structs. Dereferences both _ptr values, so
 * both instances are assumed to hold valid pointers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":5197
 *     def __eq__(self, other):
 *         cdef EccSramErrorStatus_v1 other_
 *         if not isinstance(other, EccSramErrorStatus_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5198
 *         cdef EccSramErrorStatus_v1 other_
 *         if not isinstance(other, EccSramErrorStatus_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramErrorStatus_v1_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5197
 *     def __eq__(self, other):
 *         cdef EccSramErrorStatus_v1 other_
 *         if not isinstance(other, EccSramErrorStatus_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":5199
 *         if not isinstance(other, EccSramErrorStatus_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramErrorStatus_v1_t)) == 0)
 * 
*/
  /* Downcast `other` to the extension type (type-tested above; the
   * TypeTest here re-validates and allows None per Cython convention). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1))))) __PYX_ERR(0, 5199, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":5200
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramErrorStatus_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlEccSramErrorStatus_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5195
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef EccSramErrorStatus_v1 other_
 *         if not isinstance(other, EccSramErrorStatus_v1):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5202
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramErrorStatus_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
*/

/* Python wrapper */
/* `__setitem__` slot for EccSramErrorStatus_v1: forwards key and value to
 * the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `obj[key] = val`:
 * - key == 0 with a numpy.ndarray value: allocate a fresh
 *   nvmlEccSramErrorStatus_v1_t, copy the array's bytes into it via its
 *   ctypes.data address, take ownership (_owned = True, _owner = None), and
 *   mirror the array's writeable flag into _readonly.
 * - anything else: fall back to setattr(self, key, val).
 * NOTE(review): the malloc below overwrites self->_ptr without freeing a
 * previously owned buffer — possible leak on repeated assignment; confirm
 * and fix in the .pyx source, not in this generated file.
 * NOTE(review): the memcpy assumes the ndarray holds at least
 * sizeof(nvmlEccSramErrorStatus_v1_t) contiguous bytes — not validated here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":5203
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit `key == 0 and isinstance(val, numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5203, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 5203, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5204
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
*/
    __pyx_v_self->_ptr = ((nvmlEccSramErrorStatus_v1_t *)malloc((sizeof(nvmlEccSramErrorStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5205
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramErrorStatus_v1_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5206
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramErrorStatus_v1_t))
 *             self._owner = None
*/
      /* Build and raise MemoryError("Error allocating EccSramErrorStatus_v1"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5206, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EccSramErrorSta};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5206, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 5206, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5205
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramErrorStatus_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":5207
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramErrorStatus_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* Fetch val.ctypes.data (the array's base address as a Python int),
     * convert it to intptr_t, and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5207, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5207, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5207, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlEccSramErrorStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5208
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramErrorStatus_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5209
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramErrorStatus_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5210
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Mirror the source array's writeable flag: read-only array -> read-only
     * wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5210, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5210, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 5210, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":5203
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":5212
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 5212, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":5202
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramErrorStatus_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5214
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
/* Getter slot for the `version` property: forwards to the implementation
 * below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Read the `version` field (unsigned int) out of the wrapped
 * nvmlEccSramErrorStatus_v1_t and return it as a Python int. Dereferences
 * self->_ptr, which is assumed valid. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5217
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5217, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5214
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5219
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot for the `version` property: forwards to the implementation
 * below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Write the `version` field: raises ValueError when the wrapper is marked
 * read-only, otherwise converts `val` to unsigned int and stores it into
 * the wrapped struct. Returns 0 on success, -1 with an exception set on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5221
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5222
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
*/
    /* Build and raise ValueError("This EccSramErrorStatus_v1 instance is
     * read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5222, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5222, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5221
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":5223
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5223, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5219
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5225
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_parity(self):
 *         """int: aggregate uncorrectable parity error count"""
*/

/* Python wrapper */
/* Getter slot for the `aggregate_unc_parity` property: forwards to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Read `aggregateUncParity` (unsigned long long) from the wrapped struct
 * and return it as a Python int. Dereferences self->_ptr, which is assumed
 * valid. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5228
 *     def aggregate_unc_parity(self):
 *         """int: aggregate uncorrectable parity error count"""
 *         return self._ptr[0].aggregateUncParity             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_parity.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncParity); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5225
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_parity(self):
 *         """int: aggregate uncorrectable parity error count"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_parity.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5230
 *         return self._ptr[0].aggregateUncParity
 * 
 *     @aggregate_unc_parity.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_parity(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot for the `aggregate_unc_parity` property: forwards to the
 * implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Write `aggregateUncParity`: raises ValueError when the wrapper is marked
 * read-only, otherwise converts `val` to unsigned long long and stores it
 * into the wrapped struct. Returns 0 on success, -1 with an exception set
 * on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5232
 *     @aggregate_unc_parity.setter
 *     def aggregate_unc_parity(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncParity = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5233
 *     def aggregate_unc_parity(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncParity = val
 * 
*/
    /* Build and raise ValueError("This EccSramErrorStatus_v1 instance is
     * read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5233, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5233, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5232
 *     @aggregate_unc_parity.setter
 *     def aggregate_unc_parity(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncParity = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":5234
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncParity = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5234, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncParity = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5230
 *         return self._ptr[0].aggregateUncParity
 * 
 *     @aggregate_unc_parity.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_parity(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_parity.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5236
 *         self._ptr[0].aggregateUncParity = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_sec_ded(self):
 *         """int: aggregate uncorrectable SEC-DED error count"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads `aggregateUncSecDed` from the wrapped C struct
   (`self->_ptr[0]`) and boxes it as a Python int.  Returns a new reference,
   or NULL with an exception set if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5239
 *     def aggregate_unc_sec_ded(self):
 *         """int: aggregate uncorrectable SEC-DED error count"""
 *         return self._ptr[0].aggregateUncSecDed             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_sec_ded.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned 64-bit field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncSecDed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5236
 *         self._ptr[0].aggregateUncParity = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_sec_ded(self):
 *         """int: aggregate uncorrectable SEC-DED error count"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_sec_ded.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5241
 *         return self._ptr[0].aggregateUncSecDed
 * 
 *     @aggregate_unc_sec_ded.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_sec_ded(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol setter wrapper for `EccSramErrorStatus_v1.aggregate_unc_sec_ded`:
   downcasts `self` to the extension-type struct and delegates to the typed
   implementation.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; propagate its int status code. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is flagged
   read-only (`self->_readonly`), otherwise unboxes `val` to unsigned long long
   and stores it into `self->_ptr[0].aggregateUncSecDed`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5243
 *     @aggregate_unc_sec_ded.setter
 *     def aggregate_unc_sec_ded(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncSecDed = val
 */
  /* Guard: reject writes through a read-only wrapper. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5244
 *     def aggregate_unc_sec_ded(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncSecDed = val
 * 
 */
    /* Construct ValueError(msg) via Cython's vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5244, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5244, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5243
 *     @aggregate_unc_sec_ded.setter
 *     def aggregate_unc_sec_ded(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncSecDed = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5245
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncSecDed = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox: (unsigned long long)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5245, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncSecDed = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5241
 *         return self._ptr[0].aggregateUncSecDed
 * 
 *     @aggregate_unc_sec_ded.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_sec_ded(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_sec_ded.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5247
 *         self._ptr[0].aggregateUncSecDed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_cor(self):
 *         """int: aggregate correctable error count"""
*/

/* Python wrapper */
/* Descriptor-protocol getter wrapper for `EccSramErrorStatus_v1.aggregate_cor`:
   downcasts the generic `PyObject *self` to the extension-type struct and
   delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; its result (new ref or NULL) is returned as-is. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads `aggregateCor` from the wrapped C struct
   (`self->_ptr[0]`) and boxes it as a Python int.  Returns a new reference,
   or NULL with an exception set if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5250
 *     def aggregate_cor(self):
 *         """int: aggregate correctable error count"""
 *         return self._ptr[0].aggregateCor             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_cor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned 64-bit field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateCor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5247
 *         self._ptr[0].aggregateUncSecDed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_cor(self):
 *         """int: aggregate correctable error count"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_cor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5252
 *         return self._ptr[0].aggregateCor
 * 
 *     @aggregate_cor.setter             # <<<<<<<<<<<<<<
 *     def aggregate_cor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol setter wrapper for `EccSramErrorStatus_v1.aggregate_cor`:
   downcasts `self` to the extension-type struct and delegates to the typed
   implementation.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; propagate its int status code. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is flagged
   read-only (`self->_readonly`), otherwise unboxes `val` to unsigned long long
   and stores it into `self->_ptr[0].aggregateCor`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5254
 *     @aggregate_cor.setter
 *     def aggregate_cor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateCor = val
 */
  /* Guard: reject writes through a read-only wrapper. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5255
 *     def aggregate_cor(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateCor = val
 * 
 */
    /* Construct ValueError(msg) via Cython's vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5255, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5255, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5254
 *     @aggregate_cor.setter
 *     def aggregate_cor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateCor = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5256
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateCor = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox: (unsigned long long)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5256, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateCor = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5252
 *         return self._ptr[0].aggregateCor
 * 
 *     @aggregate_cor.setter             # <<<<<<<<<<<<<<
 *     def aggregate_cor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_cor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5258
 *         self._ptr[0].aggregateCor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def volatile_unc_parity(self):
 *         """int: volatile uncorrectable parity error count"""
*/

/* Python wrapper */
/* Descriptor-protocol getter wrapper for `EccSramErrorStatus_v1.volatile_unc_parity`:
   downcasts the generic `PyObject *self` to the extension-type struct and
   delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; its result (new ref or NULL) is returned as-is. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads `volatileUncParity` from the wrapped C struct
   (`self->_ptr[0]`) and boxes it as a Python int.  Returns a new reference,
   or NULL with an exception set if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5261
 *     def volatile_unc_parity(self):
 *         """int: volatile uncorrectable parity error count"""
 *         return self._ptr[0].volatileUncParity             # <<<<<<<<<<<<<<
 * 
 *     @volatile_unc_parity.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned 64-bit field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).volatileUncParity); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5258
 *         self._ptr[0].aggregateCor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def volatile_unc_parity(self):
 *         """int: volatile uncorrectable parity error count"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.volatile_unc_parity.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5263
 *         return self._ptr[0].volatileUncParity
 * 
 *     @volatile_unc_parity.setter             # <<<<<<<<<<<<<<
 *     def volatile_unc_parity(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol setter wrapper for `EccSramErrorStatus_v1.volatile_unc_parity`:
   downcasts `self` to the extension-type struct and delegates to the typed
   implementation.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; propagate its int status code. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is flagged
   read-only (`self->_readonly`), otherwise unboxes `val` to unsigned long long
   and stores it into `self->_ptr[0].volatileUncParity`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5265
 *     @volatile_unc_parity.setter
 *     def volatile_unc_parity(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileUncParity = val
 */
  /* Guard: reject writes through a read-only wrapper. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5266
 *     def volatile_unc_parity(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].volatileUncParity = val
 * 
 */
    /* Construct ValueError(msg) via Cython's vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5266, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5266, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5265
 *     @volatile_unc_parity.setter
 *     def volatile_unc_parity(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileUncParity = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5267
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileUncParity = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox: (unsigned long long)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5267, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).volatileUncParity = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5263
 *         return self._ptr[0].volatileUncParity
 * 
 *     @volatile_unc_parity.setter             # <<<<<<<<<<<<<<
 *     def volatile_unc_parity(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.volatile_unc_parity.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5269
 *         self._ptr[0].volatileUncParity = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def volatile_unc_sec_ded(self):
 *         """int: volatile uncorrectable SEC-DED error count"""
*/

/* Python wrapper */
/* Descriptor-protocol getter wrapper for `EccSramErrorStatus_v1.volatile_unc_sec_ded`:
   downcasts the generic `PyObject *self` to the extension-type struct and
   delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; its result (new ref or NULL) is returned as-is. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads `volatileUncSecDed` from the wrapped C struct
   (`self->_ptr[0]`) and boxes it as a Python int.  Returns a new reference,
   or NULL with an exception set if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5272
 *     def volatile_unc_sec_ded(self):
 *         """int: volatile uncorrectable SEC-DED error count"""
 *         return self._ptr[0].volatileUncSecDed             # <<<<<<<<<<<<<<
 * 
 *     @volatile_unc_sec_ded.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned 64-bit field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).volatileUncSecDed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5272, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5269
 *         self._ptr[0].volatileUncParity = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def volatile_unc_sec_ded(self):
 *         """int: volatile uncorrectable SEC-DED error count"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.volatile_unc_sec_ded.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5274
 *         return self._ptr[0].volatileUncSecDed
 * 
 *     @volatile_unc_sec_ded.setter             # <<<<<<<<<<<<<<
 *     def volatile_unc_sec_ded(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol setter wrapper for `EccSramErrorStatus_v1.volatile_unc_sec_ded`:
   downcasts `self` to the extension-type struct and delegates to the typed
   implementation.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; propagate its int status code. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is flagged
   read-only (`self->_readonly`), otherwise unboxes `val` to unsigned long long
   and stores it into `self->_ptr[0].volatileUncSecDed`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5276
 *     @volatile_unc_sec_ded.setter
 *     def volatile_unc_sec_ded(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileUncSecDed = val
 */
  /* Guard: reject writes through a read-only wrapper. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5277
 *     def volatile_unc_sec_ded(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].volatileUncSecDed = val
 * 
 */
    /* Construct ValueError(msg) via Cython's vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5277, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5277, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5276
 *     @volatile_unc_sec_ded.setter
 *     def volatile_unc_sec_ded(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileUncSecDed = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5278
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileUncSecDed = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox: (unsigned long long)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5278, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).volatileUncSecDed = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5274
 *         return self._ptr[0].volatileUncSecDed
 * 
 *     @volatile_unc_sec_ded.setter             # <<<<<<<<<<<<<<
 *     def volatile_unc_sec_ded(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.volatile_unc_sec_ded.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5280
 *         self._ptr[0].volatileUncSecDed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def volatile_cor(self):
 *         """int: volatile correctable error count"""
*/

/* Python wrapper */
/* Descriptor-protocol getter wrapper for `EccSramErrorStatus_v1.volatile_cor`:
   downcasts the generic `PyObject *self` to the extension-type struct and
   delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; its result (new ref or NULL) is returned as-is. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: reads `volatileCor` from the wrapped C struct
   (`self->_ptr[0]`) and boxes it as a Python int.  Returns a new reference,
   or NULL with an exception set if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5283
 *     def volatile_cor(self):
 *         """int: volatile correctable error count"""
 *         return self._ptr[0].volatileCor             # <<<<<<<<<<<<<<
 * 
 *     @volatile_cor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned 64-bit field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).volatileCor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5283, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5280
 *         self._ptr[0].volatileUncSecDed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def volatile_cor(self):
 *         """int: volatile correctable error count"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.volatile_cor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5285
 *         return self._ptr[0].volatileCor
 * 
 *     @volatile_cor.setter             # <<<<<<<<<<<<<<
 *     def volatile_cor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol setter wrapper for `EccSramErrorStatus_v1.volatile_cor`:
   downcasts `self` to the extension-type struct and delegates to the typed
   implementation.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed implementation; propagate its int status code. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError when the instance is flagged
   read-only (`self->_readonly`), otherwise unboxes `val` to unsigned long long
   and stores it into `self->_ptr[0].volatileCor`.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5287
 *     @volatile_cor.setter
 *     def volatile_cor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileCor = val
 */
  /* Guard: reject writes through a read-only wrapper. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5288
 *     def volatile_cor(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].volatileCor = val
 * 
 */
    /* Construct ValueError(msg) via Cython's vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5288, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5288, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5287
 *     @volatile_cor.setter
 *     def volatile_cor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileCor = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5289
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].volatileCor = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox: (unsigned long long)-1 plus a pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5289, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).volatileCor = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5285
 *         return self._ptr[0].volatileCor
 * 
 *     @volatile_cor.setter             # <<<<<<<<<<<<<<
 *     def volatile_cor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.volatile_cor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5291
 *         self._ptr[0].volatileCor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_l2(self):
 *         """int: aggregate uncorrectable error count for L2 cache bucket"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5294
 *     def aggregate_unc_bucket_l2(self):
 *         """int: aggregate uncorrectable error count for L2 cache bucket"""
 *         return self._ptr[0].aggregateUncBucketL2             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_bucket_l2.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncBucketL2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5291
 *         self._ptr[0].volatileCor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_l2(self):
 *         """int: aggregate uncorrectable error count for L2 cache bucket"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_l2.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5296
 *         return self._ptr[0].aggregateUncBucketL2
 * 
 *     @aggregate_unc_bucket_l2.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_l2(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5298
 *     @aggregate_unc_bucket_l2.setter
 *     def aggregate_unc_bucket_l2(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketL2 = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5299
 *     def aggregate_unc_bucket_l2(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncBucketL2 = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5299, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5299, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5298
 *     @aggregate_unc_bucket_l2.setter
 *     def aggregate_unc_bucket_l2(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketL2 = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":5300
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketL2 = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5300, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncBucketL2 = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5296
 *         return self._ptr[0].aggregateUncBucketL2
 * 
 *     @aggregate_unc_bucket_l2.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_l2(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_l2.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5302
 *         self._ptr[0].aggregateUncBucketL2 = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_sm(self):
 *         """int: aggregate uncorrectable error count for SM bucket"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5305
 *     def aggregate_unc_bucket_sm(self):
 *         """int: aggregate uncorrectable error count for SM bucket"""
 *         return self._ptr[0].aggregateUncBucketSm             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_bucket_sm.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncBucketSm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5302
 *         self._ptr[0].aggregateUncBucketL2 = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_sm(self):
 *         """int: aggregate uncorrectable error count for SM bucket"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_sm.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5307
 *         return self._ptr[0].aggregateUncBucketSm
 * 
 *     @aggregate_unc_bucket_sm.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_sm(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5309
 *     @aggregate_unc_bucket_sm.setter
 *     def aggregate_unc_bucket_sm(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketSm = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5310
 *     def aggregate_unc_bucket_sm(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncBucketSm = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5310, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5310, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5309
 *     @aggregate_unc_bucket_sm.setter
 *     def aggregate_unc_bucket_sm(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketSm = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":5311
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketSm = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5311, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncBucketSm = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5307
 *         return self._ptr[0].aggregateUncBucketSm
 * 
 *     @aggregate_unc_bucket_sm.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_sm(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_sm.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5313
 *         self._ptr[0].aggregateUncBucketSm = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_pcie(self):
 *         """int: aggregate uncorrectable error count for PCIE bucket"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5316
 *     def aggregate_unc_bucket_pcie(self):
 *         """int: aggregate uncorrectable error count for PCIE bucket"""
 *         return self._ptr[0].aggregateUncBucketPcie             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_bucket_pcie.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncBucketPcie); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5313
 *         self._ptr[0].aggregateUncBucketSm = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_pcie(self):
 *         """int: aggregate uncorrectable error count for PCIE bucket"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_pcie.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5318
 *         return self._ptr[0].aggregateUncBucketPcie
 * 
 *     @aggregate_unc_bucket_pcie.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_pcie(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5320
 *     @aggregate_unc_bucket_pcie.setter
 *     def aggregate_unc_bucket_pcie(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketPcie = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5321
 *     def aggregate_unc_bucket_pcie(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncBucketPcie = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5321, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5321, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5320
 *     @aggregate_unc_bucket_pcie.setter
 *     def aggregate_unc_bucket_pcie(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketPcie = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":5322
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketPcie = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5322, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncBucketPcie = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5318
 *         return self._ptr[0].aggregateUncBucketPcie
 * 
 *     @aggregate_unc_bucket_pcie.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_pcie(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_pcie.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5324
 *         self._ptr[0].aggregateUncBucketPcie = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_mcu(self):
 *         """int: aggregate uncorrectable error count for Microcontroller bucket"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5327
 *     def aggregate_unc_bucket_mcu(self):
 *         """int: aggregate uncorrectable error count for Microcontroller bucket"""
 *         return self._ptr[0].aggregateUncBucketMcu             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_bucket_mcu.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncBucketMcu); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5324
 *         self._ptr[0].aggregateUncBucketPcie = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_mcu(self):
 *         """int: aggregate uncorrectable error count for Microcontroller bucket"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_mcu.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5329
 *         return self._ptr[0].aggregateUncBucketMcu
 * 
 *     @aggregate_unc_bucket_mcu.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_mcu(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5331
 *     @aggregate_unc_bucket_mcu.setter
 *     def aggregate_unc_bucket_mcu(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketMcu = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5332
 *     def aggregate_unc_bucket_mcu(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncBucketMcu = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5332, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5332, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5331
 *     @aggregate_unc_bucket_mcu.setter
 *     def aggregate_unc_bucket_mcu(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketMcu = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":5333
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketMcu = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5333, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncBucketMcu = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5329
 *         return self._ptr[0].aggregateUncBucketMcu
 * 
 *     @aggregate_unc_bucket_mcu.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_mcu(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_mcu.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5335
 *         self._ptr[0].aggregateUncBucketMcu = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_other(self):
 *         """int: aggregate uncorrectable error count for Other bucket"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5338
 *     def aggregate_unc_bucket_other(self):
 *         """int: aggregate uncorrectable error count for Other bucket"""
 *         return self._ptr[0].aggregateUncBucketOther             # <<<<<<<<<<<<<<
 * 
 *     @aggregate_unc_bucket_other.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).aggregateUncBucketOther); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5335
 *         self._ptr[0].aggregateUncBucketMcu = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_other(self):
 *         """int: aggregate uncorrectable error count for Other bucket"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_other.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5340
 *         return self._ptr[0].aggregateUncBucketOther
 * 
 *     @aggregate_unc_bucket_other.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_other(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the aggregate_unc_bucket_other setter
   (_nvml.pyx:5340-5344).  Raises ValueError when the instance is marked
   read-only, otherwise converts `val` to unsigned long long and stores it
   into the wrapped nvml struct field.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5342
 *     @aggregate_unc_bucket_other.setter
 *     def aggregate_unc_bucket_other(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketOther = val
 */
  /* Read-only guard: reject mutation of instances created with readonly=True. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5343
 *     def aggregate_unc_bucket_other(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].aggregateUncBucketOther = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path; __pyx_t_3
       is the argument offset (1 = no bound self slot in use). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5343, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5343, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5342
 *     @aggregate_unc_bucket_other.setter
 *     def aggregate_unc_bucket_other(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketOther = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5344
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].aggregateUncBucketOther = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python int to unsigned long long; (unsigned PY_LONG_LONG)-1 is
     the conversion-error sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 5344, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).aggregateUncBucketOther = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5340
 *         return self._ptr[0].aggregateUncBucketOther
 * 
 *     @aggregate_unc_bucket_other.setter             # <<<<<<<<<<<<<<
 *     def aggregate_unc_bucket_other(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.aggregate_unc_bucket_other.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5346
 *         self._ptr[0].aggregateUncBucketOther = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_threshold_exceeded(self):
 *         """int: if the error threshold of field diag is exceeded"""
*/

/* Python wrapper */
/* Descriptor-protocol __get__ wrapper for
   EccSramErrorStatus_v1.b_threshold_exceeded: casts self to the
   extension-type struct and forwards to the implementation function.
   Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs presumably come from macro expansion
     of __Pyx_KwValues_VARARGS — confirm in the runtime-support section. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the b_threshold_exceeded getter (_nvml.pyx:5346-5349).
   Reads the bThresholdExceeded field from the wrapped nvml struct and boxes
   it as a Python int.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5349
 *     def b_threshold_exceeded(self):
 *         """int: if the error threshold of field diag is exceeded"""
 *         return self._ptr[0].bThresholdExceeded             # <<<<<<<<<<<<<<
 * 
 *     @b_threshold_exceeded.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int struct field into a Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bThresholdExceeded); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5349, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5346
 *         self._ptr[0].aggregateUncBucketOther = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_threshold_exceeded(self):
 *         """int: if the error threshold of field diag is exceeded"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.b_threshold_exceeded.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5351
 *         return self._ptr[0].bThresholdExceeded
 * 
 *     @b_threshold_exceeded.setter             # <<<<<<<<<<<<<<
 *     def b_threshold_exceeded(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol __set__ wrapper for
   EccSramErrorStatus_v1.b_threshold_exceeded: casts self to the
   extension-type struct and forwards to the implementation function.
   Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs presumably come from macro expansion
     of __Pyx_KwValues_VARARGS — confirm in the runtime-support section. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the b_threshold_exceeded setter (_nvml.pyx:5351-5355).
   Raises ValueError when the instance is marked read-only, otherwise
   converts `val` to unsigned int and stores it into the wrapped nvml
   struct field.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5353
 *     @b_threshold_exceeded.setter
 *     def b_threshold_exceeded(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].bThresholdExceeded = val
 */
  /* Read-only guard: reject mutation of instances created with readonly=True. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5354
 *     def b_threshold_exceeded(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bThresholdExceeded = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path; __pyx_t_3
       is the argument offset (1 = no bound self slot in use). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramErrorStatus_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5354, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5354, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5353
 *     @b_threshold_exceeded.setter
 *     def b_threshold_exceeded(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].bThresholdExceeded = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5355
 *         if self._readonly:
 *             raise ValueError("This EccSramErrorStatus_v1 instance is read-only")
 *         self._ptr[0].bThresholdExceeded = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python int to unsigned int; (unsigned int)-1 is the
     conversion-error sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5355, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bThresholdExceeded = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5351
 *         return self._ptr[0].bThresholdExceeded
 * 
 *     @b_threshold_exceeded.setter             # <<<<<<<<<<<<<<
 *     def b_threshold_exceeded(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.b_threshold_exceeded.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5357
 *         self._ptr[0].bThresholdExceeded = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
   EccSramErrorStatus_v1.from_data(data).  Unpacks exactly one
   positional-or-keyword argument named "data" and delegates to the
   implementation function; on any argument error it records a traceback
   and returns NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12from_data, "EccSramErrorStatus_v1.from_data(data)\n\nCreate an EccSramErrorStatus_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `ecc_sram_error_status_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  unlikely(x) expands
       to __builtin_expect(!!(x), 0), whose !! result is 0 or 1 and can never
       be < 0, so the original "unlikely(__pyx_kwds_len) < 0" silently dropped
       the error check on __Pyx_NumKwargs_FASTCALL.  The corrected form matches
       the pattern used elsewhere in this file (e.g. __reduce_cython__). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5357, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args, then parse keywords and
         verify the single required argument is present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5357, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 5357, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 5357, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5357, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 5357, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramErrorStatus_v1.from_data (_nvml.pyx:5357-5364).
   Looks up the module-level dtype object and delegates to the shared
   __from_data helper, which wraps the given array in a new
   EccSramErrorStatus_v1 instance.  Returns a new reference, or NULL on
   error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":5364
 *             data (_numpy.ndarray): a single-element array of dtype `ecc_sram_error_status_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "ecc_sram_error_status_v1_dtype", ecc_sram_error_status_v1_dtype, EccSramErrorStatus_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the module-global `ecc_sram_error_status_v1_dtype` at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ecc_sram_error_status_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ecc_sram_error_status_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5357
 *         self._ptr[0].bThresholdExceeded = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5366
 *         return __from_data(data, "ecc_sram_error_status_v1_dtype", ecc_sram_error_status_v1_dtype, EccSramErrorStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
   EccSramErrorStatus_v1.from_ptr(ptr, readonly=False, owner=None).
   Unpacks 1-3 positional-or-keyword arguments, converts `ptr` to intptr_t
   and `readonly` to a C bool, defaults `owner` to None, then delegates to
   the implementation function; on any argument error it records a
   traceback and returns NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_14from_ptr, "EccSramErrorStatus_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an EccSramErrorStatus_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  unlikely(x) expands
       to __builtin_expect(!!(x), 0), whose !! result is 0 or 1 and can never
       be < 0, so the original "unlikely(__pyx_kwds_len) < 0" silently dropped
       the error check on __Pyx_NumKwargs_FASTCALL.  The corrected form matches
       the pattern used elsewhere in this file (e.g. __reduce_cython__). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5366, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args, parse keywords, apply the
         owner=None default, and verify the one required argument (ptr). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5366, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5366, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5366, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 5366, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":5367
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 5366, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1-3 arguments, owner defaults to None. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5366, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5366, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5366, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): ptr is converted via PyLong_AsSsize_t — presumably
       intptr_t and Py_ssize_t have the same width on supported platforms;
       confirm for any new target. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5367, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5367, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 5366, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":5366
 *         return __from_data(data, "ecc_sram_error_status_v1_dtype", ecc_sram_error_status_v1_dtype, EccSramErrorStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramErrorStatus_v1.from_ptr (_nvml.pyx:5366-5390).
   Rejects a null pointer with ValueError.  When owner is None the struct
   pointed to by `ptr` is copied into freshly malloc'ed memory that the new
   instance owns (_owned = True); otherwise the instance borrows `ptr`
   directly and keeps a reference to `owner` to keep the memory alive
   (_owned = False).  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":5375
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)
 */
  /* Guard: a null address can never be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5376
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5376, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5376, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5375
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":5377
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 */
  /* Allocate the wrapper instance via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5377, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5378
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if obj._ptr == NULL:
 */
  /* No owner: take a private copy of the struct so the instance does not
     depend on the lifetime of the caller's memory. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5379
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 */
    __pyx_v_obj->_ptr = ((nvmlEccSramErrorStatus_v1_t *)malloc((sizeof(nvmlEccSramErrorStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5380
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramErrorStatus_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5381
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramErrorStatus_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global here (it may be shadowed),
         and may arrive as a bound method — unpack for the vectorcall. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5381, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EccSramErrorSta};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5381, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 5381, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5380
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramErrorStatus_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5382
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramErrorStatus_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlEccSramErrorStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5383
 *                 raise MemoryError("Error allocating EccSramErrorStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramErrorStatus_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5384
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramErrorStatus_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5378
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramErrorStatus_v1 obj = EccSramErrorStatus_v1.__new__(EccSramErrorStatus_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>malloc(sizeof(nvmlEccSramErrorStatus_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":5386
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner given: borrow the caller's memory directly; the held reference to
     `owner` is what keeps the pointed-to data alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlEccSramErrorStatus_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5387
 *         else:
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":5388
 *             obj._ptr = <nvmlEccSramErrorStatus_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":5389
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":5390
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5366
 *         return __from_data(data, "ecc_sram_error_status_v1_dtype", ecc_sram_error_status_v1_dtype, EccSramErrorStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper */
/* Cython-generated METH_FASTCALL|METH_KEYWORDS wrapper for
 * EccSramErrorStatus_v1.__reduce_cython__.  Validates that the call carries
 * no positional and no keyword arguments, then dispatches to the
 * implementation (__pyx_pf_..._16__reduce_cython__), which always raises. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_16__reduce_cython__, "EccSramErrorStatus_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any
   * positional argument, then any keyword argument. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramErrorStatus_v1.__reduce_cython__.
 * The extension type wraps a raw C pointer (self._ptr), which cannot be
 * pickled, so this unconditionally raises TypeError and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Always raises; __PYX_ERR jumps to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

/* Python wrapper */
/* Cython-generated METH_FASTCALL|METH_KEYWORDS wrapper for
 * EccSramErrorStatus_v1.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one argument (positionally or via the keyword
 * "__pyx_state"), then dispatches to the implementation, which raises. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_18__setstate_cython__, "EccSramErrorStatus_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); the previous form
     * `unlikely(__pyx_kwds_len) < 0` normalized the length to 0/1 before
     * comparing, so a negative (error) return from
     * __Pyx_NumKwargs_FASTCALL could never be detected.  Compare the
     * correct pattern used by the other wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge
       * keyword arguments and verify all required values are present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramErrorStatus_v1.__setstate_cython__.
 * The wrapped raw C pointer (self._ptr) cannot be restored from a pickle
 * state, so this unconditionally raises TypeError and returns NULL.
 * The __pyx_state argument is accepted but never used. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Always raises; __PYX_ERR jumps to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramErrorStatus_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5393
 * 
 * 
 * cdef _get_platform_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPlatformInfo_v2_t pod = nvmlPlatformInfo_v2_t()
 *     return _numpy.dtype({
 */

/* Build a numpy structured dtype mirroring nvmlPlatformInfo_v2_t:
 * field names, per-field formats, byte offsets (computed from a stack
 * instance via address arithmetic) and the struct's total itemsize.
 * Returns a new reference to the numpy.dtype, or 0 with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_platform_info_v2_dtype_offsets(void) {
  nvmlPlatformInfo_v2_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlPlatformInfo_v2_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  size_t __pyx_t_15;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_platform_info_v2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":5394
 * 
 * cdef _get_platform_info_v2_dtype_offsets():
 *     cdef nvmlPlatformInfo_v2_t pod = nvmlPlatformInfo_v2_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'ib_guid', 'chassis_serial_number', 'slot_number', 'tray_ind_ex', 'host_id', 'peer_type', 'module_id'],
 */
  /* FIX: value-initialize the temporary before copying it into `pod`.
   * Previously __pyx_t_1 was read while uninitialized (undefined behavior),
   * the standard Cython-generated construction line having gone missing.
   * Only field addresses of `pod` are used below, so results are unchanged. */
  __pyx_t_1 = nvmlPlatformInfo_v2_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":5395
 * cdef _get_platform_info_v2_dtype_offsets():
 *     cdef nvmlPlatformInfo_v2_t pod = nvmlPlatformInfo_v2_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'ib_guid', 'chassis_serial_number', 'slot_number', 'tray_ind_ex', 'host_id', 'peer_type', 'module_id'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype once; the dict argument is built below. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":5396
 *     cdef nvmlPlatformInfo_v2_t pod = nvmlPlatformInfo_v2_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'ib_guid', 'chassis_serial_number', 'slot_number', 'tray_ind_ex', 'host_id', 'peer_type', 'module_id'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 */
  /* 'names' entry: eight interned field-name strings. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ib_guid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ib_guid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_ib_guid) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_chassis_serial_number);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_chassis_serial_number);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_chassis_serial_number) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_slot_number);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_slot_number);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_slot_number) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_tray_ind_ex);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_tray_ind_ex);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_tray_ind_ex) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_host_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_host_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_host_id) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_peer_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_peer_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_peer_type) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_module_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_module_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_module_id) != (0)) __PYX_ERR(0, 5396, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 5396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5397
 *     return _numpy.dtype({
 *         'names': ['version', 'ib_guid', 'chassis_serial_number', 'slot_number', 'tray_ind_ex', 'host_id', 'peer_type', 'module_id'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' entry: uint32 for version, uint8 for the remaining fields. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 5397, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 5396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* 'offsets' entry: byte offset of each field, computed as the distance
   * between the field's address and the base address of `pod`. */

  /* "cuda/bindings/_nvml.pyx":5399
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ibGuid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.chassisSerialNumber)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":5400
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ibGuid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.chassisSerialNumber)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.slotNumber)) - (<intptr_t>&pod),
 */
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ibGuid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 5400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":5401
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ibGuid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.chassisSerialNumber)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.slotNumber)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.trayIndex)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.chassisSerialNumber)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 5401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":5402
 *             (<intptr_t>&(pod.ibGuid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.chassisSerialNumber)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.slotNumber)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.trayIndex)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostId)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.slotNumber)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 5402, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":5403
 *             (<intptr_t>&(pod.chassisSerialNumber)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.slotNumber)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.trayIndex)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hostId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.peerType)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.trayIndex)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 5403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":5404
 *             (<intptr_t>&(pod.slotNumber)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.trayIndex)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.peerType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.moduleId)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hostId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":5405
 *             (<intptr_t>&(pod.trayIndex)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.peerType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.moduleId)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.peerType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":5406
 *             (<intptr_t>&(pod.hostId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.peerType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.moduleId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlPlatformInfo_v2_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.moduleId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":5398
 *         'names': ['version', 'ib_guid', 'chassis_serial_number', 'slot_number', 'tray_ind_ex', 'host_id', 'peer_type', 'module_id'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ibGuid)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5398, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_14) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_13) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_12) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_10) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_9) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_8) != (0)) __PYX_ERR(0, 5398, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 5396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":5408
 *             (<intptr_t>&(pod.moduleId)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlPlatformInfo_v2_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlPlatformInfo_v2_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 5396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(dict); if dtype is a bound method, unpack it so
   * the self object becomes the first vectorcall argument. */
  __pyx_t_15 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_15 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_15, (2-__pyx_t_15) | (__pyx_t_15*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5395, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5393
 * 
 * 
 * cdef _get_platform_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPlatformInfo_v2_t pod = nvmlPlatformInfo_v2_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_platform_info_v2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5425
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPlatformInfo_v2_t *>calloc(1, sizeof(nvmlPlatformInfo_v2_t))
 *         if self._ptr == NULL:
 */

/* Python wrapper */
/* Cython-generated VARARGS wrapper for PlatformInfo_v2.__init__.
 * Rejects any positional or keyword arguments, then dispatches to the
 * implementation (__pyx_pf_...___init__).  Returns 0 on success, -1 on
 * error, per the tp_init protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives an args tuple; recover the positional count. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject any positional
   * argument, then any keyword argument. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__init__ (.pyx line 5425).
 * calloc()s a zero-initialized nvmlPlatformInfo_v2_t and raises
 * MemoryError on allocation failure.  On success the instance owns the
 * buffer (_owned = True), has no external owner (_owner = None) and is
 * writable (_readonly = False).  Returns 0 on success, -1 after a
 * Python exception has been set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":5426
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlPlatformInfo_v2_t *>calloc(1, sizeof(nvmlPlatformInfo_v2_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PlatformInfo_v2")
 */
  __pyx_v_self->_ptr = ((nvmlPlatformInfo_v2_t *)calloc(1, (sizeof(nvmlPlatformInfo_v2_t))));

  /* "cuda/bindings/_nvml.pyx":5427
 *     def __init__(self):
 *         self._ptr = <nvmlPlatformInfo_v2_t *>calloc(1, sizeof(nvmlPlatformInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PlatformInfo_v2")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5428
 *         self._ptr = <nvmlPlatformInfo_v2_t *>calloc(1, sizeof(nvmlPlatformInfo_v2_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PlatformInfo_v2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, call it with the message and raise the result. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5428, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PlatformInfo_v2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5428, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5428, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5427
 *     def __init__(self):
 *         self._ptr = <nvmlPlatformInfo_v2_t *>calloc(1, sizeof(nvmlPlatformInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PlatformInfo_v2")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":5429
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PlatformInfo_v2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":5430
 *             raise MemoryError("Error allocating PlatformInfo_v2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":5431
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":5425
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPlatformInfo_v2_t *>calloc(1, sizeof(nvmlPlatformInfo_v2_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5433
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPlatformInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* CPython tp_dealloc-time wrapper for PlatformInfo_v2.__dealloc__.
 * Simply forwards to the implementation below.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
 * function; they compile only because __Pyx_KwValues_VARARGS is a macro
 * whose expansion drops its arguments in this configuration. */
/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  nvmlPlatformInfo_v2_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlPlatformInfo_v2_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":5435
 *     def __dealloc__(self):
 *         cdef nvmlPlatformInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5436
 *         cdef nvmlPlatformInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":5437
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":5438
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5435
 *     def __dealloc__(self):
 *         cdef nvmlPlatformInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":5433
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPlatformInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":5440
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PlatformInfo_v2 object at {hex(id(self))}>"
 * 
*/

/* CPython tp_repr wrapper for PlatformInfo_v2.__repr__; forwards to the
 * implementation below and returns a new unicode reference (or NULL on
 * error). */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__repr__ (.pyx line 5440).
 * Builds the f-string "<{__name__}.PlatformInfo_v2 object at
 * {hex(id(self))}>" by formatting the module __name__, computing
 * hex(id(self)), and joining five unicode fragments with Cython's
 * f-string fast path.  Returns a new unicode reference or NULL on
 * error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":5441
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.PlatformInfo_v2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* str(__name__) -> __pyx_t_2 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* str(hex(id(self))) -> __pyx_t_1 */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", name, ".PlatformInfo_v2 object at ", hex-id, ">" with a
   * precomputed minimum length and max char value. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_PlatformInfo_v2_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5440
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PlatformInfo_v2 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5443
 *         return f"<{__name__}.PlatformInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Property-getter wrapper for PlatformInfo_v2.ptr; forwards to the
 * implementation below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the PlatformInfo_v2.ptr property getter (.pyx line
 * 5446): returns the address of the wrapped nvmlPlatformInfo_v2_t as a
 * Python int (via intptr_t).  Returns a new reference or NULL on
 * error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5446
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t fits in Py_ssize_t here, hence PyLong_FromSsize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5443
 *         return f"<{__name__}.PlatformInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5448
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (cdef _get_ptr, .pyx line 5448): return the wrapped
 * struct's address as an intptr_t.  Same value as the Python-level
 * `ptr` property, but callable without any Python object overhead. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15PlatformInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":5451
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* CPython nb_int wrapper for PlatformInfo_v2.__int__; forwards to the
 * implementation below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__int__ (.pyx line 5451): int(self)
 * yields the address of the wrapped struct, identical to the `ptr`
 * property.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":5452
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5451
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5454
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PlatformInfo_v2 other_
 *         if not isinstance(other, PlatformInfo_v2):
*/

/* CPython tp_richcompare (==) wrapper for PlatformInfo_v2.__eq__;
 * forwards both operands to the implementation below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__eq__ (.pyx line 5454).
 * Returns False when `other` is not a PlatformInfo_v2; otherwise
 * compares the two underlying nvmlPlatformInfo_v2_t structs bytewise
 * with memcmp.
 * NOTE(review): neither self._ptr nor other_._ptr is NULL-checked
 * before memcmp; a NULL here would be undefined behavior.  Any fix
 * belongs in the generating .pyx, not in this generated file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":5456
 *     def __eq__(self, other):
 *         cdef PlatformInfo_v2 other_
 *         if not isinstance(other, PlatformInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5457
 *         cdef PlatformInfo_v2 other_
 *         if not isinstance(other, PlatformInfo_v2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPlatformInfo_v2_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5456
 *     def __eq__(self, other):
 *         cdef PlatformInfo_v2 other_
 *         if not isinstance(other, PlatformInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":5458
 *         if not isinstance(other, PlatformInfo_v2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPlatformInfo_v2_t)) == 0)
 * 
 */
  /* Downcast `other` to the extension type (runtime-checked again here). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2))))) __PYX_ERR(0, 5458, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":5459
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPlatformInfo_v2_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlPlatformInfo_v2_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5459, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5454
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PlatformInfo_v2 other_
 *         if not isinstance(other, PlatformInfo_v2):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5461
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPlatformInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
*/

/* CPython mp_ass_subscript wrapper for PlatformInfo_v2.__setitem__;
 * forwards (self, key, val) to the implementation below.  Returns 0 on
 * success, -1 on error. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__setitem__ (.pyx line 5461).
 * Special case: obj[0] = numpy_array mallocs a fresh
 * nvmlPlatformInfo_v2_t, memcpy's sizeof(struct) bytes from
 * val.ctypes.data into it, and marks the instance owning/writable per
 * val.flags.writeable.  Every other key falls through to
 * setattr(self, key, val).  Returns 0 on success, -1 on error.
 * NOTE(review): in the numpy branch the previous self->_ptr is
 * overwritten without being freed, leaking a buffer the instance
 * previously owned; also the array's length/dtype is not validated
 * before the fixed-size memcpy.  Both would need fixing in the
 * generating .pyx, not in this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":5462
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only look up numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5462, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 5462, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5463
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 */
    /* NOTE(review): previous _ptr is overwritten here without free(). */
    __pyx_v_self->_ptr = ((nvmlPlatformInfo_v2_t *)malloc((sizeof(nvmlPlatformInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":5464
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPlatformInfo_v2_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5465
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PlatformInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPlatformInfo_v2_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5465, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PlatformInfo_v2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5465, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 5465, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5464
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPlatformInfo_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5466
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPlatformInfo_v2_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (a Python int address) and copy the struct. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5466, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5466, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5466, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlPlatformInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":5467
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPlatformInfo_v2_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5468
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPlatformInfo_v2_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5469
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5469, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5469, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 5469, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":5462
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":5471
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 5471, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":5461
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPlatformInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5473
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter
 * (.pyx line 5476: `return self._ptr[0].version`).
 * Reads the `version` field of the wrapped nvmlPlatformInfo_v2_t struct and
 * boxes it as a Python int. Returns NULL (with an exception set) only if the
 * unsigned-int-to-PyLong conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5476
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C `unsigned int` field as a new-reference Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5473
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5478
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for `PlatformInfo_v2.version.__set__`.
 * Casts self to the concrete extension-type struct and delegates to the
 * `_2__set__` implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably expands without evaluating them — confirm in Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter (.pyx lines 5479-5482).
 * Raises ValueError when the instance is flagged read-only; otherwise
 * converts `val` to a C `unsigned int` (raising on overflow/非-int input via
 * __Pyx_PyLong_As_unsigned_int) and stores it in the wrapped struct.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5480
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5481
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Vectorcall ValueError(msg) to build the exception instance, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5481, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5481, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5480
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5482
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert Python int -> C unsigned int; (unsigned)-1 doubles as the error
   * sentinel, hence the extra PyErr_Occurred() disambiguation check. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5482, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5478
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5484
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ib_guid(self):
 *         """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)"""
*/

/* Python wrapper */
/* Python-level wrapper for `PlatformInfo_v2.ib_guid.__get__`.
 * Casts self to the concrete extension-type struct and delegates to the
 * `___get__` implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): macro presumably ignores the undeclared __pyx_args/__pyx_nargs. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ib_guid` property getter (.pyx lines 5486-5489).
 * Builds a non-owning cython.view.array of 16 unsigned bytes
 * (allocate_buffer=False), points its data at the struct's `ibGuid` field,
 * and returns numpy.asarray(arr) — i.e. a zero-copy view of struct memory.
 * NOTE(review): the returned ndarray aliases self._ptr; it presumably must
 * not outlive the owning PlatformInfo_v2 instance — confirm lifetime
 * handling in the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5487
 *     def ib_guid(self):
 *         """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)"""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].ibGuid))
 *         return _numpy.asarray(arr)
 */
  /* Vectorcall view.array(shape=(16,), itemsize=1, format="B", mode="c",
   * allocate_buffer=False): five keyword args assembled via the kwds builder. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5487, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[3], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 5487, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 5487, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 5487, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 5487, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 5487, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5487, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5488
 *         """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)"""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].ibGuid))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the non-owning view directly at the struct field (no copy). */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).ibGuid));

  /* "cuda/bindings/_nvml.pyx":5489
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].ibGuid))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @ib_guid.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve numpy.asarray at call time and invoke it on the view array. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5489, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5489, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5489, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5484
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ib_guid(self):
 *         """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.ib_guid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5491
 *         return _numpy.asarray(arr)
 * 
 *     @ib_guid.setter             # <<<<<<<<<<<<<<
 *     def ib_guid(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for `PlatformInfo_v2.ib_guid.__set__`.
 * Casts self to the concrete extension-type struct and delegates to the
 * `_2__set__` implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): macro presumably ignores the undeclared __pyx_args/__pyx_nargs. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ib_guid` property setter (.pyx lines 5492-5497).
 * Raises ValueError when the instance is read-only; otherwise allocates a
 * 16-byte staging view.array, fills it via `arr[:] = numpy.asarray(val,
 * dtype=uint8)` (which validates length/shape against the 16-element view),
 * then memcpy's `len(val)` bytes into the struct's `ibGuid` field.
 * NOTE(review): the memcpy length is `len(val)`, not min(len(val), 16);
 * the slice assignment presumably rejects len(val) != 16 before the memcpy
 * can overrun — confirm in the .pyx source / view.array semantics.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5493
 *     @ib_guid.setter
 *     def ib_guid(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5494
 *     def ib_guid(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    /* Vectorcall ValueError(msg), then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5494, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5494, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5493
 *     @ib_guid.setter
 *     def ib_guid(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":5495
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].ibGuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Staging buffer: view.array(shape=(16,), itemsize=1, format="B", mode="c")
   * — note allocate_buffer defaults on here, so this one owns its memory. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5495, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[3], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 5495, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 5495, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 5495, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 5495, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5495, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5496
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].ibGuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* Call numpy.asarray(val, dtype=numpy.uint8), then slice-assign the result
   * into the staging buffer. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5496, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 5496, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5496, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* arr[:] = <converted ndarray> — full-slice assignment into the 16-byte view. */
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 5496, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5497
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].ibGuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Copy len(val) bytes from the staging buffer into the struct field. */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 5497, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).ibGuid)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":5491
 *         return _numpy.asarray(arr)
 * 
 *     @ib_guid.setter             # <<<<<<<<<<<<<<
 *     def ib_guid(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.ib_guid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5499
 *         memcpy(<void *>(&(self._ptr[0].ibGuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def chassis_serial_number(self):
 *         """~_numpy.uint8: (array of length 16).Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero)"""
*/

/* Python wrapper */
/* Python-level wrapper for `PlatformInfo_v2.chassis_serial_number.__get__`.
 * Casts self to the concrete extension-type struct and delegates to the
 * `___get__` implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): macro presumably ignores the undeclared __pyx_args/__pyx_nargs. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `chassis_serial_number` property getter
 * (.pyx lines 5501-5504). Mirrors the `ib_guid` getter: builds a non-owning
 * 16-byte cython.view.array (allocate_buffer=False), points it at the
 * struct's `chassisSerialNumber` field, and returns numpy.asarray(arr) — a
 * zero-copy view of struct memory.
 * NOTE(review): the returned ndarray aliases self._ptr; presumably it must
 * not outlive the owning instance — confirm lifetime handling in the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5502
 *     def chassis_serial_number(self):
 *         """~_numpy.uint8: (array of length 16).Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero)"""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].chassisSerialNumber))
 *         return _numpy.asarray(arr)
 */
  /* Vectorcall view.array(shape=(16,), itemsize=1, format="B", mode="c",
   * allocate_buffer=False). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5502, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5502, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[3], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 5502, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 5502, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 5502, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 5502, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 5502, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5502, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5503
 *         """~_numpy.uint8: (array of length 16).Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero)"""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].chassisSerialNumber))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the non-owning view directly at the struct field (no copy). */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).chassisSerialNumber));

  /* "cuda/bindings/_nvml.pyx":5504
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].chassisSerialNumber))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @chassis_serial_number.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve numpy.asarray at call time and invoke it on the view array. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5504, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5499
 *         memcpy(<void *>(&(self._ptr[0].ibGuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def chassis_serial_number(self):
 *         """~_numpy.uint8: (array of length 16).Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.chassis_serial_number.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5506
 *         return _numpy.asarray(arr)
 * 
 *     @chassis_serial_number.setter             # <<<<<<<<<<<<<<
 *     def chassis_serial_number(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for `PlatformInfo_v2.chassis_serial_number.__set__`.
 * Casts self to the concrete extension-type struct and delegates to the
 * `_2__set__` implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): macro presumably ignores the undeclared __pyx_args/__pyx_nargs. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.chassis_serial_number.__set__ (generated
 * from _nvml.pyx lines 5506-5512). Steps:
 *   1. raise ValueError if the instance was created read-only;
 *   2. allocate a 16-byte C-contiguous cython.view.array of format "B";
 *   3. slice-assign numpy.asarray(val, dtype=uint8) into it (this validates
 *      that len(val) matches the 16-element shape);
 *   4. memcpy the staged bytes into the underlying C struct field
 *      chassisSerialNumber.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5508
 *     @chassis_serial_number.setter
 *     def chassis_serial_number(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5509
 *     def chassis_serial_number(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    /* Build and raise ValueError via the vectorcall fast path, then jump to
     * the error label (this branch never falls through). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5509, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5509, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5508
 *     @chassis_serial_number.setter
 *     def chassis_serial_number(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":5510
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].chassisSerialNumber)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Construct view.array(shape=(16,), itemsize=sizeof(unsigned char),
   * format="B", mode="c") using a vectorcall keyword builder. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5510, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[3], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 5510, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 5510, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 5510, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 5510, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5510, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5511
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].chassisSerialNumber)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 *
 */
  /* Look up numpy.asarray and numpy.uint8 from module globals, then call
   * asarray(val, dtype=uint8) and slice-assign the result into arr[:]. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5511, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 5511, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5511, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 5511, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":5512
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].chassisSerialNumber)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  /* NOTE(review): the copy length is len(val), not the fixed 16 bytes of
   * chassisSerialNumber. Presumably the slice assignment above already forced
   * len(val) == 16 (shape mismatch raises before reaching here) — confirm,
   * since a longer val reaching this memcpy would overrun the struct field. */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 5512, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).chassisSerialNumber)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":5506
 *         return _numpy.asarray(arr)
 *
 *     @chassis_serial_number.setter             # <<<<<<<<<<<<<<
 *     def chassis_serial_number(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.chassis_serial_number.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5514
 *         memcpy(<void *>(&(self._ptr[0].chassisSerialNumber)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slot_number(self):
 *         """int: The slot number in the chassis containing this GPU (includes switches)"""
*/

/* Python wrapper */
/* Cython-generated descriptor __get__ entry point for
 * PlatformInfo_v2.slot_number; casts self and delegates to the
 * implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.slot_number.__get__ (_nvml.pyx:5517):
 * boxes the C struct field slotNumber (unsigned char) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5517
 *     def slot_number(self):
 *         """int: The slot number in the chassis containing this GPU (includes switches)"""
 *         return self._ptr[0].slotNumber             # <<<<<<<<<<<<<<
 *
 *     @slot_number.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).slotNumber); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5514
 *         memcpy(<void *>(&(self._ptr[0].chassisSerialNumber)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def slot_number(self):
 *         """int: The slot number in the chassis containing this GPU (includes switches)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.slot_number.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5519
 *         return self._ptr[0].slotNumber
 * 
 *     @slot_number.setter             # <<<<<<<<<<<<<<
 *     def slot_number(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated descriptor __set__ entry point for
 * PlatformInfo_v2.slot_number; casts self and delegates to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.slot_number.__set__ (_nvml.pyx:5521-5523):
 * raises ValueError if the instance is read-only, otherwise converts val to
 * unsigned char (__Pyx_PyLong_As_unsigned_char sets OverflowError/TypeError
 * for out-of-range or non-integer input) and stores it into the C struct.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5521
 *     @slot_number.setter
 *     def slot_number(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].slotNumber = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5522
 *     def slot_number(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].slotNumber = val
 *
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5522, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5522, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5521
 *     @slot_number.setter
 *     def slot_number(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].slotNumber = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5523
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].slotNumber = val             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 5523, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).slotNumber = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5519
 *         return self._ptr[0].slotNumber
 *
 *     @slot_number.setter             # <<<<<<<<<<<<<<
 *     def slot_number(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.slot_number.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5525
 *         self._ptr[0].slotNumber = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def tray_ind_ex(self):
 *         """int: The tray index within the compute slots in the chassis containing this GPU (does not include switches)"""
*/

/* Python wrapper */
/* Cython-generated descriptor __get__ entry point for
 * PlatformInfo_v2.tray_ind_ex; casts self and delegates to the
 * implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.tray_ind_ex.__get__ (_nvml.pyx:5528):
 * boxes the C struct field trayIndex (unsigned char) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5528
 *     def tray_ind_ex(self):
 *         """int: The tray index within the compute slots in the chassis containing this GPU (does not include switches)"""
 *         return self._ptr[0].trayIndex             # <<<<<<<<<<<<<<
 *
 *     @tray_ind_ex.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).trayIndex); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5528, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5525
 *         self._ptr[0].slotNumber = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def tray_ind_ex(self):
 *         """int: The tray index within the compute slots in the chassis containing this GPU (does not include switches)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.tray_ind_ex.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5530
 *         return self._ptr[0].trayIndex
 * 
 *     @tray_ind_ex.setter             # <<<<<<<<<<<<<<
 *     def tray_ind_ex(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated descriptor __set__ entry point for
 * PlatformInfo_v2.tray_ind_ex; casts self and delegates to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.tray_ind_ex.__set__ (_nvml.pyx:5532-5534):
 * raises ValueError if the instance is read-only, otherwise converts val to
 * unsigned char (conversion errors surface as OverflowError/TypeError) and
 * stores it into the C struct field trayIndex.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5532
 *     @tray_ind_ex.setter
 *     def tray_ind_ex(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].trayIndex = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5533
 *     def tray_ind_ex(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].trayIndex = val
 *
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5533, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5533, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5532
 *     @tray_ind_ex.setter
 *     def tray_ind_ex(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].trayIndex = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5534
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].trayIndex = val             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 5534, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).trayIndex = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5530
 *         return self._ptr[0].trayIndex
 *
 *     @tray_ind_ex.setter             # <<<<<<<<<<<<<<
 *     def tray_ind_ex(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.tray_ind_ex.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5536
 *         self._ptr[0].trayIndex = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_id(self):
 *         """int: Index of the node within the slot containing this GPU."""
*/

/* Python wrapper */
/* Cython-generated descriptor __get__ entry point for
 * PlatformInfo_v2.host_id; casts self and delegates to the implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.host_id.__get__ (_nvml.pyx:5539):
 * boxes the C struct field hostId (unsigned char) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5539
 *     def host_id(self):
 *         """int: Index of the node within the slot containing this GPU."""
 *         return self._ptr[0].hostId             # <<<<<<<<<<<<<<
 *
 *     @host_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).hostId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5539, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5536
 *         self._ptr[0].trayIndex = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def host_id(self):
 *         """int: Index of the node within the slot containing this GPU."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.host_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5541
 *         return self._ptr[0].hostId
 * 
 *     @host_id.setter             # <<<<<<<<<<<<<<
 *     def host_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated descriptor __set__ entry point for
 * PlatformInfo_v2.host_id; casts self and delegates to the implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.host_id.__set__ (_nvml.pyx:5543-5545):
 * raises ValueError if the instance is read-only, otherwise converts val to
 * unsigned char (conversion errors surface as OverflowError/TypeError) and
 * stores it into the C struct field hostId.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5543
 *     @host_id.setter
 *     def host_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].hostId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5544
 *     def host_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].hostId = val
 *
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5544, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5544, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5543
 *     @host_id.setter
 *     def host_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].hostId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5545
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].hostId = val             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 5545, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).hostId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5541
 *         return self._ptr[0].hostId
 *
 *     @host_id.setter             # <<<<<<<<<<<<<<
 *     def host_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.host_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5547
 *         self._ptr[0].hostId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def peer_type(self):
 *         """int: Platform indicated NVLink-peer type (e.g. switch present or not)"""
*/

/* Python wrapper */
/* Cython-generated descriptor __get__ entry point for
 * PlatformInfo_v2.peer_type; casts self and delegates to the implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.peer_type.__get__ (_nvml.pyx:5550):
 * boxes the C struct field peerType (unsigned char) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5550
 *     def peer_type(self):
 *         """int: Platform indicated NVLink-peer type (e.g. switch present or not)"""
 *         return self._ptr[0].peerType             # <<<<<<<<<<<<<<
 *
 *     @peer_type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).peerType); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5550, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5547
 *         self._ptr[0].hostId = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def peer_type(self):
 *         """int: Platform indicated NVLink-peer type (e.g. switch present or not)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.peer_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5552
 *         return self._ptr[0].peerType
 * 
 *     @peer_type.setter             # <<<<<<<<<<<<<<
 *     def peer_type(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated descriptor __set__ entry point for
 * PlatformInfo_v2.peer_type; casts self and delegates to the implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.peer_type.__set__ (.pyx line 5552).
 * Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned char and stores it in self._ptr[0].peerType.
 * Returns 0 on success, -1 on error (with traceback recorded). */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5554
 *     @peer_type.setter
 *     def peer_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].peerType = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5555
 *     def peer_type(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].peerType = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5555, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5555, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5554
 *     @peer_type.setter
 *     def peer_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].peerType = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5556
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].peerType = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to unsigned char; (unsigned char)-1 doubles as the error
   * sentinel, so PyErr_Occurred() disambiguates a genuine 255 value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 5556, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).peerType = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5552
 *         return self._ptr[0].peerType
 * 
 *     @peer_type.setter             # <<<<<<<<<<<<<<
 *     def peer_type(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.peer_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5558
 *         self._ptr[0].peerType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def module_id(self):
 *         """int: ID of this GPU within the node."""
*/

/* Python wrapper */
/* Python-level wrapper for PlatformInfo_v2.module_id.__get__.
 * Casts self to the concrete extension-type struct and delegates to the
 * typed implementation; returns the resulting object (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.module_id.__get__ (.pyx line 5558).
 * Reads the unsigned char field self._ptr[0].moduleId and boxes it into
 * a new Python int; returns NULL with a traceback on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5561
 *     def module_id(self):
 *         """int: ID of this GPU within the node."""
 *         return self._ptr[0].moduleId             # <<<<<<<<<<<<<<
 * 
 *     @module_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).moduleId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5558
 *         self._ptr[0].peerType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def module_id(self):
 *         """int: ID of this GPU within the node."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.module_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5563
 *         return self._ptr[0].moduleId
 * 
 *     @module_id.setter             # <<<<<<<<<<<<<<
 *     def module_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for PlatformInfo_v2.module_id.__set__.
 * Casts self to the concrete extension-type struct and delegates to the
 * typed implementation; forwards its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.module_id.__set__ (.pyx line 5563).
 * Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned char and stores it in self._ptr[0].moduleId.
 * Returns 0 on success, -1 on error (with traceback recorded). */
static int __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5565
 *     @module_id.setter
 *     def module_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].moduleId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5566
 *     def module_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].moduleId = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PlatformInfo_v2_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5566, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5566, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5565
 *     @module_id.setter
 *     def module_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].moduleId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5567
 *         if self._readonly:
 *             raise ValueError("This PlatformInfo_v2 instance is read-only")
 *         self._ptr[0].moduleId = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert to unsigned char; (unsigned char)-1 doubles as the error
   * sentinel, so PyErr_Occurred() disambiguates a genuine 255 value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 5567, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).moduleId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5563
 *         return self._ptr[0].moduleId
 * 
 *     @module_id.setter             # <<<<<<<<<<<<<<
 *     def module_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.module_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5569
 *         self._ptr[0].moduleId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PlatformInfo_v2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method PlatformInfo_v2.from_data(data).
 * Unpacks the single required argument "data" from either the fastcall
 * argument vector or a classic args tuple (plus keywords), then delegates
 * to the implementation function. Returns the new object or NULL on error.
 *
 * Fix: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`.
 * With the GCC/Clang definition `unlikely(x) == __builtin_expect(!!(x), 0)`
 * that expression compares a 0/1 value against 0 and is therefore always
 * false, silently dropping a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL. The comparison now lives inside the macro
 * argument — `unlikely(__pyx_kwds_len < 0)` — matching the correct form
 * used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_12from_data, "PlatformInfo_v2.from_data(data)\n\nCreate an PlatformInfo_v2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `platform_info_v2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely() so a negative (error)
     * keyword count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5569, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5569, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 5569, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 5569, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5569, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 5569, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.from_data(data) (.pyx line 5569).
 * Looks up the module-global `platform_info_v2_dtype` and forwards to the
 * shared __from_data helper together with the dtype name and the
 * PlatformInfo_v2 type object. Returns the new instance or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":5576
 *             data (_numpy.ndarray): a single-element array of dtype `platform_info_v2_dtype` holding the data.
 *         """
 *         return __from_data(data, "platform_info_v2_dtype", platform_info_v2_dtype, PlatformInfo_v2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_platform_info_v2_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_platform_info_v2_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5569
 *         self._ptr[0].moduleId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PlatformInfo_v2 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5578
 *         return __from_data(data, "platform_info_v2_dtype", platform_info_v2_dtype, PlatformInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PlatformInfo_v2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for the static method
 * PlatformInfo_v2.from_ptr(intptr_t ptr, bint readonly=False, owner=None).
 * Unpacks 1..3 positional/keyword arguments, applies defaults
 * (readonly=False, owner=None), converts `ptr` to intptr_t and `readonly`
 * to a C truth value, then delegates to the implementation function.
 *
 * Fix: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`.
 * With the GCC/Clang definition `unlikely(x) == __builtin_expect(!!(x), 0)`
 * that expression compares a 0/1 value against 0 and is therefore always
 * false, silently dropping a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL. The comparison now lives inside the macro
 * argument — `unlikely(__pyx_kwds_len < 0)` — matching the correct form
 * used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_14from_ptr, "PlatformInfo_v2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an PlatformInfo_v2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely() so a negative (error)
     * keyword count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5578, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5578, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5578, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5578, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 5578, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":5579
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an PlatformInfo_v2 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 5578, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5578, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5578, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5578, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert `ptr` assuming intptr_t and Py_ssize_t share a representation. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5579, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5579, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 5578, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":5578
 *         return __from_data(data, "platform_info_v2_dtype", platform_info_v2_dtype, PlatformInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PlatformInfo_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.from_ptr (.pyx line 5578).
 * Rejects a null pointer with ValueError, allocates a fresh
 * PlatformInfo_v2 object, then either:
 *   - owner is None: malloc a private nvmlPlatformInfo_v2_t, copy the
 *     pointed-to data into it, and mark the object as owning the buffer
 *     (_owned = True, freed elsewhere by the type's deallocator —
 *     TODO confirm, dealloc is outside this view); or
 *   - owner given: borrow the pointer directly and keep a reference to
 *     `owner` to keep the underlying storage alive (_owned = False).
 * Finally records the readonly flag and returns the new instance
 * (NULL with traceback on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":5587
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5588
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5588, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5588, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5587
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)
 */
  }

  /* "cuda/bindings/_nvml.pyx":5589
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_PlatformInfo_v2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5589, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5590
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5591
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)
 *         if owner is None:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 */
    /* No owner: make a private copy so the data outlives the caller's pointer. */
    __pyx_v_obj->_ptr = ((nvmlPlatformInfo_v2_t *)malloc((sizeof(nvmlPlatformInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":5592
 *         if owner is None:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPlatformInfo_v2_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5593
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PlatformInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPlatformInfo_v2_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved by name (module/builtins lookup), then raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5593, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PlatformInfo_v2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5593, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 5593, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5592
 *         if owner is None:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPlatformInfo_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5594
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPlatformInfo_v2_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlPlatformInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":5595
 *                 raise MemoryError("Error allocating PlatformInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPlatformInfo_v2_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5596
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPlatformInfo_v2_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5590
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PlatformInfo_v2 obj = PlatformInfo_v2.__new__(PlatformInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>malloc(sizeof(nvmlPlatformInfo_v2_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":5598
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: borrow the pointer; holding `owner` keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlPlatformInfo_v2_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5599
 *         else:
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":5600
 *             obj._ptr = <nvmlPlatformInfo_v2_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":5601
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":5602
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5578
 *         return __from_data(data, "platform_info_v2_dtype", platform_info_v2_dtype, PlatformInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PlatformInfo_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper */
/* CPython-facing entry point for PlatformInfo_v2.__reduce_cython__.
 * Validates that no positional or keyword arguments were passed, then
 * delegates to the implementation function.  The signature is compiled
 * either as a METH_FASTCALL vectorcall (argument array + count) or as a
 * classic (tuple, dict) wrapper, selected by CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_16__reduce_cython__, "PlatformInfo_v2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall build: __pyx_args is a tuple; recover the positional count
   * from it (PyTuple_Size reports failure as a negative value). */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments beyond self: raise TypeError for
   * any positional or keyword arguments before invoking the implementation. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__reduce_cython__.
 * Unconditionally raises TypeError because the wrapped raw pointer
 * (self._ptr) cannot be converted to a picklable Python object.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the fixed message; __PYX_ERR records the source
   * location and jumps to the error label. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

/* Python wrapper */
/* CPython-facing entry point for PlatformInfo_v2.__setstate_cython__.
 * Unpacks exactly one positional-or-keyword argument (__pyx_state) from
 * either the fastcall argument array or a classic (tuple, dict) pair,
 * then delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_18__setstate_cython__, "PlatformInfo_v2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall build: __pyx_args is a tuple; recover the positional count
   * from it (PyTuple_Size reports failure as a negative value). */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`, which compares the
     * 0/1 result of unlikely()/__builtin_expect against 0 and is therefore
     * always false, silently ignoring a failed keyword count.  The `< 0`
     * belongs inside unlikely(), matching the sibling wrapper above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect the lone positional (if any),
       * then let __Pyx_ParseKeywords fill the remaining slot by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any collected references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PlatformInfo_v2.__setstate_cython__.
 * Unconditionally raises TypeError because the wrapped raw pointer
 * (self._ptr) cannot be reconstructed from pickled state; __pyx_state
 * is accepted but never used.  Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15PlatformInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the fixed message; __PYX_ERR records the source
   * location and jumps to the error label. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PlatformInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5605
 * 
 * 
 * cdef _get__py_anon_pod1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod1 pod = _anon_pod1()
 *     return _numpy.dtype({
 */

/* Builds and returns the numpy structured dtype describing the _anon_pod1
 * C struct: field names, uint32 formats, byte offsets computed from a local
 * struct instance via pointer arithmetic, and the struct's total itemsize.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod1_dtype_offsets(void) {
  _anon_pod1 __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  _anon_pod1 __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get__py_anon_pod1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":5606
 * 
 * cdef _get__py_anon_pod1_dtype_offsets():
 *     cdef _anon_pod1 pod = _anon_pod1()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['b_is_present', 'percentage', 'inc_threshold', 'dec_threshold'],
 */
  /* BUGFIX: the value-initialization of __pyx_t_1 was missing, so an
   * indeterminate struct value was copied into pod.  Restore the
   * default-construction the Cython source line calls for (the field
   * values themselves are irrelevant here; only field addresses are used
   * for the offset computation below). */
  __pyx_t_1 = _anon_pod1();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":5607
 * cdef _get__py_anon_pod1_dtype_offsets():
 *     cdef _anon_pod1 pod = _anon_pod1()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['b_is_present', 'percentage', 'inc_threshold', 'dec_threshold'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up numpy.dtype from the module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":5608
 *     cdef _anon_pod1 pod = _anon_pod1()
 *     return _numpy.dtype({
 *         'names': ['b_is_present', 'percentage', 'inc_threshold', 'dec_threshold'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Dict argument for numpy.dtype(); 'names' list first. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5608, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5608, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_is_present);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_is_present);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_b_is_present) != (0)) __PYX_ERR(0, 5608, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_percentage);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_percentage);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_percentage) != (0)) __PYX_ERR(0, 5608, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_inc_threshold);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_inc_threshold);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_inc_threshold) != (0)) __PYX_ERR(0, 5608, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dec_threshold);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dec_threshold);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_dec_threshold) != (0)) __PYX_ERR(0, 5608, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 5608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5609
 *     return _numpy.dtype({
 *         'names': ['b_is_present', 'percentage', 'inc_threshold', 'dec_threshold'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.bIsPresent)) - (<intptr_t>&pod),
 */
  /* 'formats' list: four numpy.uint32 entries, each freshly looked up. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 5609, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5609, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 5609, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 5609, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 5608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5611
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.bIsPresent)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.percentage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.incThreshold)) - (<intptr_t>&pod),
 */
  /* Field offsets derived from the local instance: address of each member
   * minus the base address of the struct. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bIsPresent)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":5612
 *         'offsets': [
 *             (<intptr_t>&(pod.bIsPresent)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.percentage)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.incThreshold)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decThreshold)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.percentage)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":5613
 *             (<intptr_t>&(pod.bIsPresent)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.percentage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.incThreshold)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decThreshold)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.incThreshold)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 5613, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":5614
 *             (<intptr_t>&(pod.percentage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.incThreshold)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decThreshold)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(_anon_pod1),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decThreshold)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5614, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":5610
 *         'names': ['b_is_present', 'percentage', 'inc_threshold', 'dec_threshold'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bIsPresent)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.percentage)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5610, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 5610, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 5610, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 5610, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 5610, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 5608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":5616
 *             (<intptr_t>&(pod.decThreshold)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(_anon_pod1),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(_anon_pod1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5616, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 5608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict); the method-unpacking branch avoids creating
   * a bound-method object when dtype happens to be a method. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5607, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5605
 * 
 * 
 * cdef _get__py_anon_pod1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod1 pod = _anon_pod1()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get__py_anon_pod1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5633
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod1 *>calloc(1, sizeof(_anon_pod1))
 *         if self._ptr == NULL:
 */

/* Python wrapper */
/* CPython-facing entry point for _py_anon_pod1.__init__ (classic tuple/dict
 * calling convention).  Rejects any positional or keyword arguments, then
 * delegates to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count the positional arguments (PyTuple_Size reports failure as < 0). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: raise TypeError otherwise. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod1.__init__.
 * Allocates a zeroed _anon_pod1 with calloc and records ownership state:
 * _owner = None, _owned = True (so __dealloc__ frees it), _readonly = False.
 * Raises MemoryError if the allocation fails.  Returns 0 on success,
 * -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":5634
 * 
 *     def __init__(self):
 *         self._ptr = <_anon_pod1 *>calloc(1, sizeof(_anon_pod1))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod1")
 */
  __pyx_v_self->_ptr = ((_anon_pod1 *)calloc(1, (sizeof(_anon_pod1))));

  /* "cuda/bindings/_nvml.pyx":5635
 *     def __init__(self):
 *         self._ptr = <_anon_pod1 *>calloc(1, sizeof(_anon_pod1))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5636
 *         self._ptr = <_anon_pod1 *>calloc(1, sizeof(_anon_pod1))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, construct the exception instance with the fixed
     * message, raise it, and jump to the error label. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5636, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5636, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5636, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5635
 *     def __init__(self):
 *         self._ptr = <_anon_pod1 *>calloc(1, sizeof(_anon_pod1))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":5637
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: take a reference to the new value
   * before dropping the reference to the old one. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":5638
 *             raise MemoryError("Error allocating _py_anon_pod1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":5639
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":5633
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod1 *>calloc(1, sizeof(_anon_pod1))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5641
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod1 *ptr
 *         if self._owned and self._ptr != NULL:
 */

/* Python wrapper */
/* tp_dealloc-level entry point for _py_anon_pod1.__dealloc__: delegates
 * straight to the implementation.  NOTE(review): __pyx_args/__pyx_nargs are
 * not declared in this wrapper's scope; the line below compiles only if
 * __Pyx_KwValues_VARARGS is a macro that discards its arguments without
 * evaluating them — confirm against the generated macro definition. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of _py_anon_pod1.__dealloc__.
 * Frees the wrapped _anon_pod1 only when this object owns it (_owned) and
 * the pointer is non-NULL.  The pointer is copied to a local and _ptr is
 * cleared BEFORE free(), so the object never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  _anon_pod1 *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  _anon_pod1 *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":5643
 *     def __dealloc__(self):
 *         cdef _anon_pod1 *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5644
 *         cdef _anon_pod1 *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":5645
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":5646
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5643
 *     def __dealloc__(self):
 *         cdef _anon_pod1 *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":5641
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod1 *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":5648
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod1 object at {hex(id(self))}>"
 * 
 */

/* Python wrapper */
/* tp_repr-level entry point for _py_anon_pod1.__repr__: delegates straight
 * to the implementation.  NOTE(review): as in the __dealloc__ wrapper,
 * __pyx_args/__pyx_nargs are not declared here; this compiles only if
 * __Pyx_KwValues_VARARGS discards its arguments — confirm against the
 * generated macro definition. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod1.__repr__ from _nvml.pyx:5648-5649:
 * builds the f-string f"<{__name__}._py_anon_pod1 object at {hex(id(self))}>"
 * by formatting __name__ and hex(id(self)) and joining five unicode pieces.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":5649
 * 
 *     def __repr__(self):
 *         return f"<{__name__}._py_anon_pod1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") — the {__name__} interpolation */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) — the {hex(id(self))} interpolation */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and interpolated pieces; the size hint is the sum of
   * the constant literal lengths plus the two dynamic parts' lengths. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_py_anon_pod1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5648
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5651
 *         return f"<{__name__}._py_anon_pod1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter slot entry for the `ptr` property: downcasts self and delegates
 * to the __pyx_pf_ impl. Cython-generated; edit _nvml.pyx instead. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (_nvml.pyx:5652-5654):
 * returns the raw self._ptr address as a Python int (via intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5654
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): intptr_t is boxed via PyLong_FromSsize_t — assumes
   * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on the supported
   * flat-address-space platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5654, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5651
 *         return f"<{__name__}._py_anon_pod1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5656
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level `cdef intptr_t _get_ptr(self)` (_nvml.pyx:5656-5657): returns
 * self._ptr as an intptr_t with no Python-object overhead. Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":5657
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5656
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5659
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot entry for _py_anon_pod1.__int__: downcasts self and
 * delegates to the __pyx_pf_ impl. Cython-generated; edit _nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod1.__int__ (_nvml.pyx:5659-5660):
 * int(obj) yields the raw self._ptr address, same value as the `ptr`
 * property getter above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":5660
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5659
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5662
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod1 other_
 *         if not isinstance(other, _py_anon_pod1):
*/

/* Python wrapper */
/* tp_richcompare (==) entry for _py_anon_pod1.__eq__: downcasts self and
 * forwards `other` untouched to the __pyx_pf_ impl. Cython-generated. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod1.__eq__ (_nvml.pyx:5662-5667):
 * returns False when `other` is not a _py_anon_pod1; otherwise compares
 * the pointed-to _anon_pod1 structs bytewise with memcmp.
 * NOTE(review): neither self._ptr nor other._ptr is NULL-checked before
 * memcmp; comparing an uninitialized/freed instance would dereference
 * NULL. Any fix belongs in _nvml.pyx, not this generated file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":5664
 *     def __eq__(self, other):
 *         cdef _py_anon_pod1 other_
 *         if not isinstance(other, _py_anon_pod1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5665
 *         cdef _py_anon_pod1 other_
 *         if not isinstance(other, _py_anon_pod1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod1)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5664
 *     def __eq__(self, other):
 *         cdef _py_anon_pod1 other_
 *         if not isinstance(other, _py_anon_pod1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":5666
 *         if not isinstance(other, _py_anon_pod1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod1)) == 0)
 * 
 */
  /* Typed re-check of `other` (Cython's cdef assignment also allows None). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1))))) __PYX_ERR(0, 5666, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":5667
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod1)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(_anon_pod1))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5662
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod1 other_
 *         if not isinstance(other, _py_anon_pod1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5669
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod1)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
*/

/* Python wrapper */
/* mp_ass_subscript entry for _py_anon_pod1.__setitem__: downcasts self and
 * forwards key/val to the __pyx_pf_ impl. Cython-generated. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod1.__setitem__ (_nvml.pyx:5669-5679).
 * If key == 0 and val is a numpy.ndarray: allocate a fresh _anon_pod1,
 * memcpy sizeof(_anon_pod1) bytes from val.ctypes.data into it, take
 * ownership (_owner=None, _owned=True) and mirror the array's read-only
 * flag. Otherwise falls back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray branch overwrites self->_ptr without freeing
 * a previously owned allocation — possible leak if __setitem__(0, arr) is
 * called twice; a fix would belong in _nvml.pyx, not this generated file.
 * NOTE(review): the ndarray's byte length is not validated against
 * sizeof(_anon_pod1) before the memcpy — assumes callers pass a
 * correctly-sized array; confirm against the .pyx contract. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":5670
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up _numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5670, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 5670, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5671
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 */
    /* See NOTE(review) above: prior _ptr is not freed here. */
    __pyx_v_self->_ptr = ((_anon_pod1 *)malloc((sizeof(_anon_pod1))));

    /* "cuda/bindings/_nvml.pyx":5672
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod1))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5673
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod1))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5673, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5673, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 5673, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5672
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod1))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5674
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod1))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the ndarray's buffer address as a Python int. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5674, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5674, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5674, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(_anon_pod1))));

    /* "cuda/bindings/_nvml.pyx":5675
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod1))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5676
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod1))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5677
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5677, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5677, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 5677, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":5670
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":5679
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 5679, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":5669
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod1)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5681
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_is_present(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter slot entry for the `b_is_present` property: downcasts self and
 * delegates to the __pyx_pf_ impl. Cython-generated. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for the `b_is_present` property (_nvml.pyx:5682-5684): boxes the
 * unsigned-int field self._ptr[0].bIsPresent as a Python int.
 * NOTE(review): assumes self._ptr is non-NULL — no guard here; confirm
 * instances are always constructed with a valid buffer. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5684
 *     def b_is_present(self):
 *         """int: """
 *         return self._ptr[0].bIsPresent             # <<<<<<<<<<<<<<
 * 
 *     @b_is_present.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bIsPresent); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5681
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_is_present(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.b_is_present.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5686
 *         return self._ptr[0].bIsPresent
 * 
 *     @b_is_present.setter             # <<<<<<<<<<<<<<
 *     def b_is_present(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot entry for the `b_is_present` property: downcasts self and
 * forwards val to the __pyx_pf_ impl. Cython-generated. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for the `b_is_present` property (_nvml.pyx:5687-5690): raises
 * ValueError when the instance is read-only, otherwise converts val to
 * unsigned int and stores it in self._ptr[0].bIsPresent.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5688
 *     @b_is_present.setter
 *     def b_is_present(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].bIsPresent = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5689
 *     def b_is_present(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bIsPresent = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5689, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5689, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5688
 *     @b_is_present.setter
 *     def b_is_present(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].bIsPresent = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5690
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].bIsPresent = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5690, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bIsPresent = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5686
 *         return self._ptr[0].bIsPresent
 * 
 *     @b_is_present.setter             # <<<<<<<<<<<<<<
 *     def b_is_present(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.b_is_present.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5692
 *         self._ptr[0].bIsPresent = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def percentage(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter slot entry for the `percentage` property: downcasts self and
 * delegates to the __pyx_pf_ impl. Cython-generated. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for the `percentage` property (_nvml.pyx:5693-5695): boxes the
 * unsigned-int field self._ptr[0].percentage as a Python int.
 * NOTE(review): assumes self._ptr is non-NULL — no guard here; confirm
 * instances are always constructed with a valid buffer. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5695
 *     def percentage(self):
 *         """int: """
 *         return self._ptr[0].percentage             # <<<<<<<<<<<<<<
 * 
 *     @percentage.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).percentage); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5692
 *         self._ptr[0].bIsPresent = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def percentage(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.percentage.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5697
 *         return self._ptr[0].percentage
 * 
 *     @percentage.setter             # <<<<<<<<<<<<<<
 *     def percentage(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot entry for the `percentage` property: downcasts self and
 * forwards val to the __pyx_pf_ impl. Cython-generated. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.percentage.__set__` (generated from
 * _nvml.pyx:5697-5701): raises ValueError when the instance is read-only,
 * otherwise converts `val` to unsigned int and stores it in the wrapped
 * struct's `percentage` field.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5699
 *     @percentage.setter
 *     def percentage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].percentage = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5700
 *     def percentage(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].percentage = val
 * 
 */
    /* Instantiate ValueError via the vectorcall fast path; __pyx_t_3 == 1
     * skips the unused "self" slot at __pyx_callargs[0]. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5700, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5700, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5699
 *     @percentage.setter
 *     def percentage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].percentage = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5701
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].percentage = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 with an exception pending signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5701, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).percentage = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5697
 *         return self._ptr[0].percentage
 * 
 *     @percentage.setter             # <<<<<<<<<<<<<<
 *     def percentage(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.percentage.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5703
 *         self._ptr[0].percentage = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def inc_threshold(self):
 *         """int: """
*/

/* Python wrapper for the `inc_threshold` property getter of `_py_anon_pod1`.
 * Casts the receiver to the typed extension-object struct and delegates to
 * the typed implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.inc_threshold.__get__` (generated from
 * _nvml.pyx:5703-5706): boxes the wrapped struct's unsigned `incThreshold`
 * field into a new Python int.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5706
 *     def inc_threshold(self):
 *         """int: """
 *         return self._ptr[0].incThreshold             # <<<<<<<<<<<<<<
 * 
 *     @inc_threshold.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; NULL means allocation failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).incThreshold); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5706, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5703
 *         self._ptr[0].percentage = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def inc_threshold(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.inc_threshold.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5708
 *         return self._ptr[0].incThreshold
 * 
 *     @inc_threshold.setter             # <<<<<<<<<<<<<<
 *     def inc_threshold(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `inc_threshold` property setter of `_py_anon_pod1`.
 * Casts the receiver to the typed extension-object struct and delegates to
 * the typed implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.inc_threshold.__set__` (generated from
 * _nvml.pyx:5708-5712): raises ValueError when the instance is read-only,
 * otherwise converts `val` to unsigned int and stores it in the wrapped
 * struct's `incThreshold` field.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5710
 *     @inc_threshold.setter
 *     def inc_threshold(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].incThreshold = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5711
 *     def inc_threshold(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].incThreshold = val
 * 
 */
    /* Instantiate ValueError via the vectorcall fast path; __pyx_t_3 == 1
     * skips the unused "self" slot at __pyx_callargs[0]. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5711, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5711, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5710
 *     @inc_threshold.setter
 *     def inc_threshold(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].incThreshold = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5712
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].incThreshold = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 with an exception pending signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5712, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).incThreshold = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5708
 *         return self._ptr[0].incThreshold
 * 
 *     @inc_threshold.setter             # <<<<<<<<<<<<<<
 *     def inc_threshold(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.inc_threshold.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5714
 *         self._ptr[0].incThreshold = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_threshold(self):
 *         """int: """
*/

/* Python wrapper for the `dec_threshold` property getter of `_py_anon_pod1`.
 * Casts the receiver to the typed extension-object struct and delegates to
 * the typed implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.dec_threshold.__get__` (generated from
 * _nvml.pyx:5714-5717): boxes the wrapped struct's unsigned `decThreshold`
 * field into a new Python int.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5717
 *     def dec_threshold(self):
 *         """int: """
 *         return self._ptr[0].decThreshold             # <<<<<<<<<<<<<<
 * 
 *     @dec_threshold.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; NULL means allocation failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).decThreshold); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5714
 *         self._ptr[0].incThreshold = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_threshold(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.dec_threshold.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5719
 *         return self._ptr[0].decThreshold
 * 
 *     @dec_threshold.setter             # <<<<<<<<<<<<<<
 *     def dec_threshold(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `dec_threshold` property setter of `_py_anon_pod1`.
 * Casts the receiver to the typed extension-object struct and delegates to
 * the typed implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.dec_threshold.__set__` (generated from
 * _nvml.pyx:5719-5723): raises ValueError when the instance is read-only,
 * otherwise converts `val` to unsigned int and stores it in the wrapped
 * struct's `decThreshold` field.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5721
 *     @dec_threshold.setter
 *     def dec_threshold(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].decThreshold = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5722
 *     def dec_threshold(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].decThreshold = val
 * 
 */
    /* Instantiate ValueError via the vectorcall fast path; __pyx_t_3 == 1
     * skips the unused "self" slot at __pyx_callargs[0]. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod1_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5722, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5722, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5721
 *     @dec_threshold.setter
 *     def dec_threshold(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].decThreshold = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5723
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod1 instance is read-only")
 *         self._ptr[0].decThreshold = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 with an exception pending signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5723, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).decThreshold = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5719
 *         return self._ptr[0].decThreshold
 * 
 *     @dec_threshold.setter             # <<<<<<<<<<<<<<
 *     def dec_threshold(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.dec_threshold.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5725
 *         self._ptr[0].decThreshold = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod1 instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method `_py_anon_pod1.from_data(data)`.
 * Unpacks the single positional-or-keyword argument `data` and delegates to
 * the implementation function.  Returns a new reference, or NULL on error.
 *
 * FIX: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which can never fire: `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`,
 * whose value is 0 or 1 and therefore never negative.  A failure of
 * `__Pyx_NumKwargs_FASTCALL` (negative count with an exception set) was
 * silently ignored.  The comparison now happens inside the hint, matching
 * the analogous checks used for `__Pyx_ParseKeywords` below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_12from_data, "_py_anon_pod1.from_data(data)\n\nCreate an _py_anon_pod1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5725, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect positionals first. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5725, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 5725, __pyx_L3_error)
      /* Verify the required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 5725, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5725, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 5725, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.from_data(data)` (generated from
 * _nvml.pyx:5725-5732): looks up the module-global `_py_anon_pod1_dtype`
 * and forwards everything to the shared `__from_data` helper, which wraps
 * the single-element NumPy array in a new `_py_anon_pod1` instance.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":5732
 *             data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod1_dtype` holding the data.
 *         """
 *         return __from_data(data, "_py_anon_pod1_dtype", _py_anon_pod1_dtype, _py_anon_pod1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the `_py_anon_pod1_dtype` module global (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_py_anon_pod1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_py_anon_pod1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5725
 *         self._ptr[0].decThreshold = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5734
 *         return __from_data(data, "_py_anon_pod1_dtype", _py_anon_pod1_dtype, _py_anon_pod1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod1 instance wrapping the given pointer.
*/

/* Python wrapper for the static method
 * `_py_anon_pod1.from_ptr(intptr_t ptr, bint readonly=False, owner=None)`.
 * Unpacks up to three positional-or-keyword arguments, applies defaults
 * (`readonly=False`, `owner=None`), converts `ptr` to intptr_t, and
 * delegates to the implementation.  Returns a new reference, or NULL.
 *
 * FIX: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which can never fire: `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`,
 * whose value is 0 or 1 and therefore never negative.  A failure of
 * `__Pyx_NumKwargs_FASTCALL` (negative count with an exception set) was
 * silently ignored.  The comparison now happens inside the hint. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_14from_ptr, "_py_anon_pod1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an _py_anon_pod1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5734, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect positionals first. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5734, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5734, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5734, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 5734, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":5735
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an _py_anon_pod1 instance wrapping the given pointer.
 * 
 */
      /* Apply the `owner=None` default; only `ptr` (index 0) is required. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 5734, __pyx_L3_error) }
      }
    } else {
      /* Fast path: 1-3 positional arguments, no keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5734, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5734, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5734, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): PyLong_AsSsize_t is used to fill an intptr_t; this
     * assumes Py_ssize_t and intptr_t have the same width on supported
     * platforms — confirm against the rest of the generated module. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5735, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5735, __pyx_L3_error)
    } else {
      /* `readonly` default is False. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 5734, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":5734
 *         return __from_data(data, "_py_anon_pod1_dtype", _py_anon_pod1_dtype, _py_anon_pod1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `_py_anon_pod1.from_ptr(ptr, readonly=False, owner=None)`.
 *
 * Wraps an existing `_anon_pod1` C struct located at address `ptr`:
 *   - owner is None : a fresh struct is malloc'ed, the pointee is memcpy'ed
 *     into it, and the returned object owns (and will later free) the copy;
 *   - owner given   : the object aliases `ptr` directly and holds a strong
 *     reference to `owner` to keep the underlying memory alive.
 *
 * Params : __pyx_v_ptr      - raw address of the source struct (must be non-zero)
 *          __pyx_v_readonly - coerced `bint`; stored on the instance as-is
 *          __pyx_v_owner    - borrowed reference from the wrapper (may be Py_None)
 * Returns: new reference to a `_py_anon_pod1` instance, or NULL with a
 *          Python exception set (ValueError for ptr==0, MemoryError on malloc
 *          failure).  All temporaries are released on the error path. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":5743
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)
 */
  /* Reject a null pointer up front: a wrapper around address 0 is never valid. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5744
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via the
     * vectorcall fast path; __pyx_t_3 is the (absent) bound-method self slot. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5744, __pyx_L3_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5744, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5743
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":5745
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5745, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5746
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5747
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)
 *         if owner is None:
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 */
    /* Owning path: take a private, heap-allocated copy of the struct so the
     * object's lifetime is decoupled from the caller-supplied pointer. */
    __pyx_v_obj->_ptr = ((_anon_pod1 *)malloc((sizeof(_anon_pod1))));

    /* "cuda/bindings/_nvml.pyx":5748
 *         if owner is None:
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod1))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5749
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod1))
 *             obj._owner = None
 */
      /* NOTE: `MemoryError` is looked up as a module global here (the .pyx
       * source references the name), hence the PyMethod unpack dance below
       * rather than the direct PyExc_MemoryError fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5749, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5749, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 5749, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5748
 *         if owner is None:
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod1))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5750
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod1))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(_anon_pod1))));

    /* "cuda/bindings/_nvml.pyx":5751
 *                 raise MemoryError("Error allocating _py_anon_pod1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod1))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    /* Replace obj._owner (set by tp_new) with None; the INCREF/GIVEREF/
     * GOTREF/DECREF sequence is the RefNanny-audited attribute swap. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5752
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod1))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <_anon_pod1 *>ptr
 */
    /* _owned == 1 means the destructor must free() obj->_ptr. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5746
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod1 obj = _py_anon_pod1.__new__(_py_anon_pod1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod1 *>malloc(sizeof(_anon_pod1))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":5754
 *             obj._owned = True
 *         else:
 *             obj._ptr = <_anon_pod1 *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Borrowing path: alias the caller's memory and pin `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((_anon_pod1 *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5755
 *         else:
 *             obj._ptr = <_anon_pod1 *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":5756
 *             obj._ptr = <_anon_pod1 *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":5757
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":5758
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5734
 *         return __from_data(data, "_py_anon_pod1_dtype", _py_anon_pod1_dtype, _py_anon_pod1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* __pyx_v_obj is dropped here: on success the result already holds its
   * own reference, on error this releases the partially-built instance. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for `_py_anon_pod1.__reduce_cython__(self)`.
 * Validates that no positional or keyword arguments were passed, then
 * delegates to the implementation (which unconditionally raises TypeError
 * because instances holding a raw pointer cannot be pickled). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_16__reduce_cython__, "_py_anon_pod1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the non-fastcall build __pyx_args is a tuple; recover nargs from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject anything else. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.__reduce_cython__`: always raises
 * TypeError because the wrapped raw pointer (`self._ptr`) cannot be
 * serialized for pickling.  Returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for `_py_anon_pod1.__setstate_cython__(self, __pyx_state)`.
 * Unpacks the single positional/keyword argument `__pyx_state` and delegates
 * to the implementation (which unconditionally raises TypeError). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_18__setstate_cython__, "_py_anon_pod1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the non-fastcall build __pyx_args is a tuple; recover nargs from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the parenthesis was misplaced — `unlikely(__pyx_kwds_len) < 0`
     * compares the 0/1 result of __builtin_expect(!!(x), 0) against 0 and can
     * never be true, so a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL went undetected.  Match the form used by the
     * sibling wrappers: put the whole comparison inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then parse kwargs
       * and verify every required argument ended up with a value. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking error: release any argument references collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod1.__setstate_cython__`: always raises
 * TypeError because the wrapped raw pointer (`self._ptr`) cannot be
 * restored from pickled state.  Returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5761
 * 
 * 
 * cdef _get_vgpu_heterogeneous_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuHeterogeneousMode_v1_t pod = nvmlVgpuHeterogeneousMode_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing the C layout of
 * `nvmlVgpuHeterogeneousMode_v1_t`:
 *   names:   ['version', 'mode']
 *   formats: [numpy.uint32, numpy.uint32]
 *   offsets: byte offsets of the corresponding fields, computed from the
 *            addresses of a local stack instance
 *   itemsize: sizeof(nvmlVgpuHeterogeneousMode_v1_t)
 * Returns a new reference to the dtype object, or 0 with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_heterogeneous_mode_v1_dtype_offsets(void) {
  nvmlVgpuHeterogeneousMode_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuHeterogeneousMode_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_heterogeneous_mode_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":5762
 * 
 * cdef _get_vgpu_heterogeneous_mode_v1_dtype_offsets():
 *     cdef nvmlVgpuHeterogeneousMode_v1_t pod = nvmlVgpuHeterogeneousMode_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'mode'],
 */
  /* FIX: the temporary was copied without ever being initialized, which is
   * undefined behavior in C++ (copying an object with indeterminate value).
   * Value-initialize it first, matching the `nvmlVgpuHeterogeneousMode_v1_t()`
   * construction in the .pyx source.  Only the addresses of `pod`'s fields
   * are used below, so the observable offsets are unchanged. */
  __pyx_t_1 = nvmlVgpuHeterogeneousMode_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":5763
 * cdef _get_vgpu_heterogeneous_mode_v1_dtype_offsets():
 *     cdef nvmlVgpuHeterogeneousMode_v1_t pod = nvmlVgpuHeterogeneousMode_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up `_numpy.dtype` once; the call itself happens at the end after the
   * spec dict has been assembled. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":5764
 *     cdef nvmlVgpuHeterogeneousMode_v1_t pod = nvmlVgpuHeterogeneousMode_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'mode'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Spec dict with 4 entries: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 5764, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_mode) != (0)) __PYX_ERR(0, 5764, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 5764, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5765
 *     return _numpy.dtype({
 *         'names': ['version', 'mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 5765, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5765, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 5764, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5767
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *         ],
 */
  /* Field offsets are derived from address arithmetic on the stack `pod`;
   * equivalent to offsetof() but expressible from Cython. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5767, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":5768
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuHeterogeneousMode_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.mode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":5766
 *         'names': ['version', 'mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 5766, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5766, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 5764, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":5770
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuHeterogeneousMode_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuHeterogeneousMode_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5770, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 5764, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(spec) via the vectorcall fast path, unpacking a bound
   * method into (self, function) when applicable. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5763, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5761
 * 
 * 
 * cdef _get_vgpu_heterogeneous_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuHeterogeneousMode_v1_t pod = nvmlVgpuHeterogeneousMode_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_heterogeneous_mode_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5787
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for `VgpuHeterogeneousMode_v1.__init__(self)`.
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives an args tuple; count the positionals. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject anything else. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuHeterogeneousMode_v1.__init__ (pyx line 5787).
 * Allocates one zero-initialized nvmlVgpuHeterogeneousMode_v1_t via calloc,
 * raises MemoryError on allocation failure, and marks the instance as the
 * sole owner of the buffer (_owner=None, _owned=True, _readonly=False).
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE(review): if Python code re-invokes __init__ on a live instance, the
 * previously calloc'd buffer is overwritten without being freed (leak) —
 * presumably acceptable for this generated binding; fix in the .pyx if not. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":5788
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 */
  /* calloc (not malloc) so the NVML struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlVgpuHeterogeneousMode_v1_t *)calloc(1, (sizeof(nvmlVgpuHeterogeneousMode_v1_t))));

  /* "cuda/bindings/_nvml.pyx":5789
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5790
 *         self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError as a module global (it may be shadowed), call it
     * with the message string, and raise the resulting instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5790, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuHeterogeneo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5790, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5790, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5789
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":5791
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* _owner = None: no other Python object keeps this buffer alive. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":5792
 *             raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True: __dealloc__ will free() this buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":5793
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":5787
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5795
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuHeterogeneousMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for VgpuHeterogeneousMode_v1.__dealloc__:
 * casts self to the extension-type struct and delegates to the impl.
 * NOTE(review): __Pyx_KwValues_VARARGS is referenced with names
 * (__pyx_args/__pyx_nargs) that are not declared in this scope — this
 * compiles only because the macro presumably ignores its arguments
 * (expands to NULL in non-vectorcall builds); confirm against Cython's
 * utility code before touching. */
static void __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuHeterogeneousMode_v1.__dealloc__ (pyx line 5795).
 * Frees the underlying nvmlVgpuHeterogeneousMode_v1_t buffer, but only when
 * this instance owns it (_owned) and the pointer is non-NULL; borrowed
 * pointers (e.g. views into another object's buffer) are left alone. */
static void __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  nvmlVgpuHeterogeneousMode_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuHeterogeneousMode_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":5797
 *     def __dealloc__(self):
 *         cdef nvmlVgpuHeterogeneousMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: the NULL check only runs when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5798
 *         cdef nvmlVgpuHeterogeneousMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    /* Save the pointer, NULL the field first, then free — so _ptr is never
     * left dangling even momentarily. */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":5799
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":5800
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5797
 *     def __dealloc__(self):
 *         cdef nvmlVgpuHeterogeneousMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":5795
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuHeterogeneousMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":5802
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuHeterogeneousMode_v1.__repr__:
 * casts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuHeterogeneousMode_v1.__repr__ (pyx line 5802).
 * Builds the f-string
 *   f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * the five pieces (two literal fragments plus the fixed middle literal)
 * into one unicode object.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":5803
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") — the {__name__} interpolation. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) — the {hex(id(self))} interpolation. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal "<", __name__, literal ".VgpuHeterogeneousMode_v1 object at ",
   * the hex address, and literal ">"; the 1*2 + 36 terms are the precomputed
   * lengths of the literal fragments. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuHeterogeneousMode_v1_object;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 36 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5802
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5805
 *         return f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuHeterogeneousMode_v1.ptr:
 * casts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (pyx line 5805).
 * Returns the raw address of the wrapped nvmlVgpuHeterogeneousMode_v1_t
 * buffer as a Python int (intptr_t boxed via PyLong_FromSsize_t).
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5808
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* PyLong_FromSsize_t is safe here: intptr_t and Py_ssize_t have the same
   * width on supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5808, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5805
 *         return f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5810
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) accessor mirroring the Python `ptr` property: returns the
 * address of the wrapped nvmlVgpuHeterogeneousMode_v1_t buffer as an
 * intptr_t.  No Python objects are created and no errors can occur. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":5811 — return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":5813
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuHeterogeneousMode_v1.__int__:
 * casts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuHeterogeneousMode_v1.__int__ (pyx line 5813).
 * int(obj) yields the same raw buffer address as the `ptr` property.
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":5814
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int (same as the ptr getter). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5814, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5813
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5816
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuHeterogeneousMode_v1 other_
 *         if not isinstance(other, VgpuHeterogeneousMode_v1):
*/

/* Python wrapper */
/* Python-level wrapper for VgpuHeterogeneousMode_v1.__eq__:
 * casts self, passes `other` through, and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuHeterogeneousMode_v1.__eq__ (pyx line 5816).
 * Value equality: returns False when `other` is not a
 * VgpuHeterogeneousMode_v1, otherwise compares the two underlying structs
 * byte-for-byte with memcmp.
 * NOTE(review): if either _ptr is NULL (e.g. after a failed allocation),
 * memcmp on NULL is undefined behavior — presumably unreachable in normal
 * use, but worth guarding in the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":5818
 *     def __eq__(self, other):
 *         cdef VgpuHeterogeneousMode_v1 other_
 *         if not isinstance(other, VgpuHeterogeneousMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Type check also filters out None, so the cast below never sees it. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5819
 *         cdef VgpuHeterogeneousMode_v1 other_
 *         if not isinstance(other, VgpuHeterogeneousMode_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuHeterogeneousMode_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5818
 *     def __eq__(self, other):
 *         cdef VgpuHeterogeneousMode_v1 other_
 *         if not isinstance(other, VgpuHeterogeneousMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":5820
 *         if not isinstance(other, VgpuHeterogeneousMode_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuHeterogeneousMode_v1_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1))))) __PYX_ERR(0, 5820, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":5821
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuHeterogeneousMode_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct comparison; equal contents => True. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuHeterogeneousMode_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5816
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuHeterogeneousMode_v1 other_
 *         if not isinstance(other, VgpuHeterogeneousMode_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5823
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuHeterogeneousMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
*/

/* Python wrapper */
/* Python-level wrapper for VgpuHeterogeneousMode_v1.__setitem__:
 * casts self, passes key/val through, and delegates to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuHeterogeneousMode_v1.__setitem__ (pyx line 5823).
 * obj[0] = ndarray: mallocs a fresh nvmlVgpuHeterogeneousMode_v1_t, copies
 * the ndarray's bytes into it (via val.ctypes.data), takes ownership, and
 * mirrors the array's writeable flag into _readonly.  Any other key falls
 * back to setattr(self, key, val).  Returns 0 on success, -1 with a Python
 * exception set on failure.
 * FIX(review): the generated code overwrote an owned _ptr (the calloc'd
 * buffer from __init__) with a new malloc without freeing it, leaking one
 * struct per assignment; the owned buffer is now released first.  The
 * matching fix should also be made in cuda/bindings/_nvml.pyx so it is not
 * lost on regeneration. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":5824
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only look up numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5824, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5824, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5824, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 5824, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5825
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 */
    /* FIX: release any buffer this object already owns before replacing it;
     * previously the owned calloc'd struct from __init__ was leaked here.
     * Borrowed (non-owned) pointers are simply dropped, as before. */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlVgpuHeterogeneousMode_v1_t *)malloc((sizeof(nvmlVgpuHeterogeneousMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5826
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5827
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             self._owner = None
 */
      /* Look up and raise MemoryError("Error allocating ...") as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5827, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuHeterogeneo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5827, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 5827, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5826
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5828
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(struct) bytes from the ndarray's data pointer.  NOTE: the
     * caller is presumed to pass an array of the matching dtype/size — the
     * generated code does not validate val.nbytes here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5828, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5828, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5828, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuHeterogeneousMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5829
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5830
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    /* The copy is ours: __dealloc__ must free it. */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5831
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's read-only status. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5831, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5831, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 5831, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":5824
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":5833
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 5833, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":5823
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuHeterogeneousMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5835
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `version` property getter of
 * VgpuHeterogeneousMode_v1. It only downcasts `self` to the concrete
 * extension-object struct and delegates to the __get__ implementation.
 * NOTE(review): generated code — change cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuHeterogeneousMode_v1.version` getter:
 * converts self._ptr[0].version (an unsigned int) to a new Python int
 * and returns it (new reference), or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5838
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; errors jump to the traceback path. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5838, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5835
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5840
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `version` property setter.
 * Downcasts `self` and forwards `val` to the __set__ implementation;
 * returns 0 on success, -1 on error (standard setter protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuHeterogeneousMode_v1.version` setter:
 * raises ValueError if the instance is marked read-only, otherwise
 * converts `val` to unsigned int and stores it into self._ptr[0].version.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5842
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5843
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper; the leading NULL slot
     * in callargs enables __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuHeterogeneousMode_v1_in};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5843, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5843, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5842
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5844
 *         if self._readonly:
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python value to C unsigned int (OverflowError/TypeError propagate). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5844, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5840
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5846
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: The vGPU heterogeneous mode."""
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `mode` property getter.
 * Downcasts `self` and delegates to the __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuHeterogeneousMode_v1.mode` getter:
 * converts self._ptr[0].mode (unsigned int) to a new Python int
 * and returns it (new reference), or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5849
 *     def mode(self):
 *         """int: The vGPU heterogeneous mode."""
 *         return self._ptr[0].mode             # <<<<<<<<<<<<<<
 * 
 *     @mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; errors jump to the traceback path. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).mode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5846
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: The vGPU heterogeneous mode."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5851
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `mode` property setter.
 * Downcasts `self` and forwards `val` to the __set__ implementation;
 * returns 0 on success, -1 on error (standard setter protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuHeterogeneousMode_v1.mode` setter:
 * raises ValueError if the instance is marked read-only, otherwise
 * converts `val` to unsigned int and stores it into self._ptr[0].mode.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5853
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")
 *         self._ptr[0].mode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5854
 *     def mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].mode = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper; the leading NULL slot
     * in callargs enables __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuHeterogeneousMode_v1_in};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5854, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5854, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5853
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")
 *         self._ptr[0].mode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5855
 *         if self._readonly:
 *             raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only")
 *         self._ptr[0].mode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python value to C unsigned int (OverflowError/TypeError propagate). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5855, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).mode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5851
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5857
 *         self._ptr[0].mode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Cython-generated argument-unpacking wrapper for the static method
 * VgpuHeterogeneousMode_v1.from_data(data): accepts exactly one argument,
 * positionally or by the keyword `data`, then forwards to the impl.
 *
 * FIX: the keyword-length sanity check was `unlikely(__pyx_kwds_len) < 0`.
 * With the GCC/Clang definition unlikely(x) == __builtin_expect(!!(x), 0),
 * the macro yields 0/1, so `< 0` was always false and the error branch was
 * dead. The comparison now lives inside the macro.
 * NOTE(review): generated code — apply the same fix at the generator level
 * (Cython) or it will be undone on regeneration. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_12from_data, "VgpuHeterogeneousMode_v1.from_data(data)\n\nCreate an VgpuHeterogeneousMode_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_heterogeneous_mode_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside unlikely(): a negative length (error from the
     * keyword-count helper) must actually take the error path. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5857, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5857, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 5857, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 5857, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5857, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 5857, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuHeterogeneousMode_v1.from_data(data): looks up the
 * module-level dtype object `vgpu_heterogeneous_mode_v1_dtype` and delegates
 * to the shared __from_data helper with the dtype name, the dtype, and the
 * VgpuHeterogeneousMode_v1 type object. Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":5864
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_heterogeneous_mode_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_heterogeneous_mode_v1_dtype", vgpu_heterogeneous_mode_v1_dtype, VgpuHeterogeneousMode_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype by name from module globals (can raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_heterogeneous_mode_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5864, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_heterogeneous_mode_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5864, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5857
 *         self._ptr[0].mode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5866
 *         return __from_data(data, "vgpu_heterogeneous_mode_v1_dtype", vgpu_heterogeneous_mode_v1_dtype, VgpuHeterogeneousMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_14from_ptr, "VgpuHeterogeneousMode_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuHeterogeneousMode_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 5866, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 5866, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":5867
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 5866, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5866, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5867, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5867, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 5866, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":5866
 *         return __from_data(data, "vgpu_heterogeneous_mode_v1_dtype", vgpu_heterogeneous_mode_v1_dtype, VgpuHeterogeneousMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation body of the static method
 * VgpuHeterogeneousMode_v1.from_ptr(ptr, readonly=False, owner=None).
 *
 * Wraps a raw nvmlVgpuHeterogeneousMode_v1_t pointer in a Python object.
 * Behavior (as generated from the .pyx source quoted in the banners below):
 *   - ptr == 0            -> raises ValueError.
 *   - owner is None       -> malloc()s a private copy of the struct, memcpy()s
 *                            the pointee into it, and marks the object as
 *                            owning (_owned = 1) so dealloc will free it.
 *   - owner is not None   -> borrows the pointer as-is; `owner` is kept alive
 *                            via obj._owner to pin the underlying storage
 *                            (_owned = 0, nothing freed on dealloc).
 *   - obj._readonly records the `readonly` flag for later accessor checks.
 *
 * NOTE(review): in the owner-is-None path the memcpy reads
 * sizeof(nvmlVgpuHeterogeneousMode_v1_t) bytes from the caller-supplied
 * address; the caller must guarantee `ptr` points at a valid struct of that
 * type — presumably documented on the Python-level API; confirm in the .pyx.
 *
 * Generated code conventions: __pyx_t_N are scratch temporaries;
 * __PYX_ERR() records (file, line) into the locals below and jumps to
 * __pyx_L1_error for cleanup; RefNanny calls are no-ops in release builds.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  /* Error-location slots filled by __PYX_ERR and consumed by AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":5875
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)
 */
  /* Reject NULL pointers up front: a null struct pointer is never wrappable. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5876
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)
 *         if owner is None:
 */
    /* Build ValueError("ptr must not be null (0)") via the vectorcall fast
     * path; __pyx_t_4 == 1 plus ARGUMENTS_OFFSET signals "no self slot". */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5876, __pyx_L3_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5876, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5875
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":5877
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__, which
   * would otherwise calloc a fresh struct we would immediately replace). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5877, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":5878
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5879
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 */
    /* Owning path: deep-copy the struct so the wrapper outlives the caller's
     * buffer. */
    __pyx_v_obj->_ptr = ((nvmlVgpuHeterogeneousMode_v1_t *)malloc((sizeof(nvmlVgpuHeterogeneousMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5880
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5881
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called through the vectorcall fast path with the message. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5881, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuHeterogeneo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5881, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 5881, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5880
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5882
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the pointee; `ptr` must reference a valid struct (see note above). */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuHeterogeneousMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5883
 *                 raise MemoryError("Error allocating VgpuHeterogeneousMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    /* Standard attribute-store sequence: incref new value, drop old ref. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5884
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5878
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":5886
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Borrowing path: adopt the pointer directly and hold a reference to
   * `owner` so the memory it manages stays alive as long as this wrapper. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuHeterogeneousMode_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":5887
 *         else:
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":5888
 *             obj._ptr = <nvmlVgpuHeterogeneousMode_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":5889
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":5890
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Return a new reference to obj; the extra INCREF is balanced by the
   * XDECREF of __pyx_v_obj in the shared exit path below. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5866
 *         return __from_data(data, "vgpu_heterogeneous_mode_v1_dtype", vgpu_heterogeneous_mode_v1_dtype, VgpuHeterogeneousMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop any live temporaries, record a traceback frame, and
   * fall through to the common cleanup with __pyx_r == NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * CPython-facing wrapper for VgpuHeterogeneousMode_v1.__reduce_cython__(self).
 * The method takes no arguments beyond self, so the wrapper only validates
 * that zero positional and zero keyword arguments were supplied before
 * delegating to the implementation function below. Compiled either as a
 * METH_FASTCALL entry (args array + nargs) or a classic tuple/dict entry,
 * selected by CYTHON_METH_FASTCALL.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_16__reduce_cython__, "VgpuHeterogeneousMode_v1.__reduce_cython__(self)");
/* Method-table entry registered on the type; pairs the wrapper with its docstring. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover nargs from its size. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Reject any positional or keyword arguments: the signature is (self). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuHeterogeneousMode_v1.__reduce_cython__.
 * Unconditionally raises TypeError: instances hold a raw C pointer (_ptr)
 * and therefore cannot be pickled. Always returns NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Error-location slots written by __PYX_ERR, read by AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Raise TypeError with an interned message string from the module state. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * CPython-facing wrapper for
 * VgpuHeterogeneousMode_v1.__setstate_cython__(self, __pyx_state).
 * Parses exactly one required argument (`__pyx_state`, positional or
 * keyword) into values[0], then delegates to the implementation below
 * (which unconditionally raises TypeError — unpickling is unsupported).
 * values[] holds owned references; both the success and error exits
 * XDECREF every slot.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_18__setstate_cython__, "VgpuHeterogeneousMode_v1.__setstate_cython__(self, __pyx_state)");
/* Method-table entry registered on the type; pairs the wrapper with its docstring. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover nargs from its size. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: take up to 1 positional, then let
       * ParseKeywords fill the remaining slot and reject unknown names. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: release collected refs and report. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuHeterogeneousMode_v1.__setstate_cython__.
 * Unconditionally raises TypeError (the `__pyx_state` argument is ignored):
 * instances hold a raw C pointer (_ptr) and cannot be unpickled.
 * Always returns NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Error-location slots written by __PYX_ERR, read by AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with an interned message string from the module state. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuHeterogeneousMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5893
 * 
 * 
 * cdef _get_vgpu_placement_id_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementId_v1_t pod = nvmlVgpuPlacementId_v1_t()
 *     return _numpy.dtype({
*/

/*
 * Module-internal (cdef) helper: builds and returns the numpy structured
 * dtype describing nvmlVgpuPlacementId_v1_t, i.e.
 *   numpy.dtype({'names': ['version', 'placement_id'],
 *                'formats': [numpy.uint32, numpy.uint32],
 *                'offsets': [offsetof(version), offsetof(placementId)],
 *                'itemsize': sizeof(nvmlVgpuPlacementId_v1_t)})
 * Field offsets are computed at runtime from the addresses of the members
 * of a local `pod` instance, so the dtype always matches the compiler's
 * actual struct layout (padding included).
 *
 * NOTE(review): `pod` is assigned from temporary __pyx_t_1, which has no
 * visible initialization at that point in this generated code. This is
 * harmless here because only the *addresses* of pod's members are taken —
 * its values are never read — but confirm against the .pyx/codegen if this
 * helper is ever changed to read field contents.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_placement_id_v1_dtype_offsets(void) {
  nvmlVgpuPlacementId_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuPlacementId_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_placement_id_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":5894
 * 
 * cdef _get_vgpu_placement_id_v1_dtype_offsets():
 *     cdef nvmlVgpuPlacementId_v1_t pod = nvmlVgpuPlacementId_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'placement_id'],
 */
  /* `pod` exists solely so member addresses can be subtracted from its own
   * address below; its contents are never inspected (see NOTE above). */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":5895
 * cdef _get_vgpu_placement_id_v1_dtype_offsets():
 *     cdef nvmlVgpuPlacementId_v1_t pod = nvmlVgpuPlacementId_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'placement_id'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up _numpy.dtype (module global, then attribute). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5895, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5895, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":5896
 *     cdef nvmlVgpuPlacementId_v1_t pod = nvmlVgpuPlacementId_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'placement_id'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Assemble the 4-key dict argument: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 5896, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_placement_id) != (0)) __PYX_ERR(0, 5896, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 5896, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5897
 *     return _numpy.dtype({
 *         'names': ['version', 'placement_id'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* Both struct fields map to numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 5897, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5897, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 5896, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":5899
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placementId)) - (<intptr_t>&pod),
 *         ],
 */
  /* Runtime equivalent of offsetof(nvmlVgpuPlacementId_v1_t, version). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 5899, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":5900
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPlacementId_v1_t),
 */
  /* Runtime equivalent of offsetof(nvmlVgpuPlacementId_v1_t, placementId). */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placementId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 5900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":5898
 *         'names': ['version', 'placement_id'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementId)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5898, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 5898, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 5898, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 5896, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":5902
 *             (<intptr_t>&(pod.placementId)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPlacementId_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* itemsize = full struct size, so the dtype covers trailing padding too. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuPlacementId_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 5902, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 5896, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(arg_dict) via the vectorcall fast path; the
   * PyMethod_Check branch unpacks a bound method into (self, func). */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5895, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5893
 * 
 * 
 * cdef _get_vgpu_placement_id_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementId_v1_t pod = nvmlVgpuPlacementId_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release all live temporaries and return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_placement_id_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5919
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPlacementId_v1_t *>calloc(1, sizeof(nvmlVgpuPlacementId_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * tp_init slot wrapper for VgpuPlacementId_v1.__init__(self).
 * tp_init always receives a classic (args tuple, kwargs dict) pair, hence
 * the VARARGS helpers here rather than the FASTCALL ones used elsewhere.
 * The signature takes no arguments beyond self, so any positional or
 * keyword argument is rejected with -1 (exception set) before delegating
 * to the implementation function.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Reject any positional or keyword arguments: the signature is (self). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__init__: allocates a zero-filled
   nvmlVgpuPlacementId_v1_t owned by this wrapper object and initializes the
   ownership bookkeeping fields (_owner=None, _owned=True, _readonly=False).
   Raises MemoryError when the allocation fails.  Returns 0 on success,
   -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":5920
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPlacementId_v1_t *>calloc(1, sizeof(nvmlVgpuPlacementId_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPlacementId_v1")
 */
  /* calloc (not malloc) so every field of the struct starts zeroed. */
  __pyx_v_self->_ptr = ((nvmlVgpuPlacementId_v1_t *)calloc(1, (sizeof(nvmlVgpuPlacementId_v1_t))));

  /* "cuda/bindings/_nvml.pyx":5921
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPlacementId_v1_t *>calloc(1, sizeof(nvmlVgpuPlacementId_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPlacementId_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":5922
 *         self._ptr = <nvmlVgpuPlacementId_v1_t *>calloc(1, sizeof(nvmlVgpuPlacementId_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPlacementId_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError by name (module globals first, then builtins) and
       call it with the message string via the vectorcall fast path; the
       PyMethod unpacking below is Cython's generic bound-method fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5922, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPlacementId};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5922, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 5922, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5921
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPlacementId_v1_t *>calloc(1, sizeof(nvmlVgpuPlacementId_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPlacementId_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":5923
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPlacementId_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref the new value, decref the old
     slot contents, then assign.  _owner=None means no other Python object
     keeps the underlying buffer alive on this object's behalf. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":5924
 *             raise MemoryError("Error allocating VgpuPlacementId_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned=True: __dealloc__ is responsible for free()-ing _ptr. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":5925
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":5919
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPlacementId_v1_t *>calloc(1, sizeof(nvmlVgpuPlacementId_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5927
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuPlacementId_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time entry point for VgpuPlacementId_v1.__dealloc__; simply
   casts self to the concrete extension-type struct and forwards.  Note the
   __pyx_args/__pyx_nargs tokens are never evaluated here: in this build
   configuration __Pyx_KwValues_VARARGS is presumably a macro that discards
   its arguments (otherwise these names would be undeclared). */
static void __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuPlacementId_v1.__dealloc__ (pyx lines 5927-5932).
 *
 * Releases the underlying nvmlVgpuPlacementId_v1_t buffer, but only when
 * this wrapper object owns it; externally-owned (_owned false) or already
 * cleared (NULL) pointers are left untouched.  The pointer field is nulled
 * out before free() so the object never exposes a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  nvmlVgpuPlacementId_v1_t *__pyx_v_ptr = NULL;

  /* Guard clauses: nothing to do unless we own a live buffer. */
  if (!__pyx_v_self->_owned) {
    return;
  }
  if (__pyx_v_self->_ptr == NULL) {
    return;
  }

  /* Detach first, then release. */
  __pyx_v_ptr = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(__pyx_v_ptr);
}

/* "cuda/bindings/_nvml.pyx":5934
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr entry point for VgpuPlacementId_v1.__repr__; takes no arguments
   beyond self, so this only casts and forwards to the implementation.
   (__pyx_args/__pyx_nargs are never evaluated: __Pyx_KwValues_VARARGS is
   presumably a macro that discards its arguments in this configuration.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__repr__: builds the f-string
   f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>" by formatting
   the two dynamic pieces and joining them with the constant fragments.
   Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":5935
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") — the {__name__} placeholder. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) — the {hex(id(self))} placeholder; the extra
     __Pyx_PyUnicode_Unicode call guarantees an exact unicode object. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the 5 fragments (literal, name, literal, hex-id, literal).  The
     length argument pre-sums the compile-time literal lengths (1*2 and 30
     are generated constants for the constant fragments) plus the two
     dynamic lengths, so the join can allocate the result in one shot. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuPlacementId_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5934
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5937
 *         return f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter entry point for VgpuPlacementId_v1.ptr; casts self and
   forwards.  (__pyx_args/__pyx_nargs are never evaluated: presumably the
   __Pyx_KwValues_VARARGS macro discards its arguments here.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the raw address of
   the wrapped nvmlVgpuPlacementId_v1_t struct as a Python int (0 when the
   pointer has been cleared).  NOTE(review): converts via PyLong_FromSsize_t,
   which assumes intptr_t fits in Py_ssize_t — true on common platforms, but
   worth confirming for exotic targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5940
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5940, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5937
 *         return f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5942
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for VgpuPlacementId_v1 (pyx lines 5942-5943): exposes
 * the wrapped struct pointer as an integer address without creating any
 * Python objects.  Mirrors the Python-visible `ptr` property; yields 0
 * when the pointer has been cleared. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":5945
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int entry point for VgpuPlacementId_v1.__int__; casts self and
   forwards.  (__pyx_args/__pyx_nargs are never evaluated: presumably the
   __Pyx_KwValues_VARARGS macro discards its arguments here.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__int__: int(obj) yields the raw
   address of the wrapped struct, identical to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":5946
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> Py int; PyLong_FromSsize_t assumes intptr_t fits Py_ssize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5945
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5948
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPlacementId_v1 other_
 *         if not isinstance(other, VgpuPlacementId_v1):
*/

/* Python wrapper */
/* tp_richcompare-backed entry point for VgpuPlacementId_v1.__eq__; casts
   self and forwards both operands.  (__pyx_args/__pyx_nargs are never
   evaluated: presumably __Pyx_KwValues_VARARGS discards its arguments.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__eq__: value equality is defined
   as a byte-wise memcmp of the two wrapped structs.  Non-VgpuPlacementId_v1
   operands (including None) compare unequal rather than raising.
   NOTE(review): the memcmp assumes both _ptr fields are non-NULL; objects
   constructed through __init__ always satisfy this — confirm no code path
   compares an object whose pointer was cleared. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":5950
 *     def __eq__(self, other):
 *         cdef VgpuPlacementId_v1 other_
 *         if not isinstance(other, VgpuPlacementId_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance() compiled to a direct subtype check against the extension
     type — returns False for None and unrelated types. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":5951
 *         cdef VgpuPlacementId_v1 other_
 *         if not isinstance(other, VgpuPlacementId_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementId_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":5950
 *     def __eq__(self, other):
 *         cdef VgpuPlacementId_v1 other_
 *         if not isinstance(other, VgpuPlacementId_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":5952
 *         if not isinstance(other, VgpuPlacementId_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementId_v1_t)) == 0)
 * 
 */
  /* Typed-assignment cast; the TypeTest also admits None, but None was
     already filtered out by the isinstance check above. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1))))) __PYX_ERR(0, 5952, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":5953
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementId_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuPlacementId_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5948
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPlacementId_v1 other_
 *         if not isinstance(other, VgpuPlacementId_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5955
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementId_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript entry point for VgpuPlacementId_v1.__setitem__; casts
   self and forwards key/value.  (__pyx_args/__pyx_nargs are never
   evaluated: presumably __Pyx_KwValues_VARARGS discards its arguments.) */
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__setitem__.
 *
 * `obj[0] = <numpy.ndarray>` re-seats the wrapper on a freshly allocated
 * copy of the array's bytes and takes ownership of that copy; any other
 * key/value pair is forwarded to setattr(), so attribute-style stores such
 * as obj["version"] = v keep working.  Returns 0 on success, -1 with a
 * Python exception set on failure.
 *
 * FIX(review): the previous code overwrote self->_ptr with a new malloc()
 * without releasing a buffer the object already owned (e.g. the calloc()
 * made in __init__), leaking sizeof(nvmlVgpuPlacementId_v1_t) on every
 * obj[0] = array assignment.  The owned buffer is now freed before the
 * pointer is re-seated.  Since this file is Cython-generated, the same fix
 * should also be applied to the generating .pyx source.
 *
 * NOTE(review): the memcpy assumes `val` provides at least
 * sizeof(nvmlVgpuPlacementId_v1_t) contiguous readable bytes at
 * val.ctypes.data — confirm callers always pass a correctly-sized array. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":5956
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if self._ptr == NULL:
 */
  /* key == 0 (rich comparison, may raise) and isinstance(val, numpy.ndarray)
     with short-circuit evaluation. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 5956, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 5956, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5956, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 5956, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":5957
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 */
    /* FIX: release any buffer this object already owns before re-seating
       the pointer, otherwise the old allocation leaks (see header note).
       Non-owned pointers (held via _owner) are left untouched. */
    if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlVgpuPlacementId_v1_t *)malloc((sizeof(nvmlVgpuPlacementId_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5958
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":5959
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t))
 *             self._owner = None
 */
      /* Look up and call MemoryError("...") via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5959, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPlacementId};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5959, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 5959, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":5958
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":5960
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read the array's base address from val.ctypes.data (a Python int),
       then copy one struct's worth of bytes into the new buffer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5960, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5960, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5960, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuPlacementId_v1_t))));

    /* "cuda/bindings/_nvml.pyx":5961
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The buffer is a private copy: no external owner keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":5962
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":5963
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only-ness from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5963, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 5963, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 5963, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":5956
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":5965
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 5965, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":5955
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementId_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5967
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Property-getter entry point for VgpuPlacementId_v1.version; casts self
   and forwards.  (__pyx_args/__pyx_nargs are never evaluated: presumably
   the __Pyx_KwValues_VARARGS macro discards its arguments here.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementId_v1.version` getter (generated from
 * _nvml.pyx:5967-5970).  Reads the `version` field of the wrapped
 * nvmlVgpuPlacementId_v1_t struct through self->_ptr and boxes it as a
 * Python int.  Returns a new reference, or NULL with an exception set if
 * the unsigned-int -> PyLong conversion fails.
 * NOTE(review): _ptr is dereferenced unchecked; assumes it was set non-NULL
 * by the constructor/from_ptr paths — confirm against the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5970
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C `unsigned int` field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5970, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5967
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5972
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter trampoline for `VgpuPlacementId_v1.version`.
 * Casts self to the concrete extension-type struct, delegates to the
 * Cython-level implementation, and returns its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate; 0 on success, -1 with an exception set on failure. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementId_v1.version` setter (generated from
 * _nvml.pyx:5972-5976).  Raises ValueError when the instance is marked
 * read-only; otherwise converts `val` to unsigned int and stores it in the
 * wrapped struct's `version` field.  Returns 0 on success, -1 with an
 * exception set on failure (read-only instance or conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5974
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5975
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build ValueError(...) via the vectorcall fast path; __pyx_t_3 == 1
     * offsets the args array by one slot (PY_VECTORCALL_ARGUMENTS_OFFSET
     * convention: slot 0 is reserved for a bound self, unused here). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPlacementId_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5975, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5975, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5974
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5976
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int; (unsigned int)-1 doubles as the error sentinel,
   * so PyErr_Occurred() disambiguates a genuine UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5976, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5972
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5978
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_id(self):
 *         """int: Placement ID of the active vGPU instance."""
*/

/* Python wrapper */
/* CPython-level getter trampoline for `VgpuPlacementId_v1.placement_id`.
 * Casts self to the concrete extension-type struct and delegates to the
 * Cython-level implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast and run the real getter. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementId_v1.placement_id` getter (generated from
 * _nvml.pyx:5978-5981).  Reads the C struct's `placementId` field through
 * self->_ptr and boxes it as a Python int.  Returns a new reference, or NULL
 * with an exception set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":5981
 *     def placement_id(self):
 *         """int: Placement ID of the active vGPU instance."""
 *         return self._ptr[0].placementId             # <<<<<<<<<<<<<<
 * 
 *     @placement_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C `unsigned int` field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).placementId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5978
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_id(self):
 *         """int: Placement ID of the active vGPU instance."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.placement_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5983
 *         return self._ptr[0].placementId
 * 
 *     @placement_id.setter             # <<<<<<<<<<<<<<
 *     def placement_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter trampoline for `VgpuPlacementId_v1.placement_id`.
 * Casts self to the concrete extension-type struct, delegates to the
 * Cython-level implementation, and returns its 0 / -1 status. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate; 0 on success, -1 with an exception set on failure. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementId_v1.placement_id` setter (generated from
 * _nvml.pyx:5983-5987).  Raises ValueError when the instance is marked
 * read-only; otherwise converts `val` to unsigned int and stores it in the
 * wrapped struct's `placementId` field.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":5985
 *     @placement_id.setter
 *     def placement_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")
 *         self._ptr[0].placementId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":5986
 *     def placement_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].placementId = val
 * 
 */
    /* Build ValueError(...) via the vectorcall fast path; __pyx_t_3 == 1
     * offsets the args array (slot 0 reserved for a bound self, unused). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPlacementId_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5986, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 5986, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":5985
 *     @placement_id.setter
 *     def placement_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")
 *         self._ptr[0].placementId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":5987
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementId_v1 instance is read-only")
 *         self._ptr[0].placementId = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox to unsigned int; (unsigned int)-1 doubles as the error sentinel,
   * so PyErr_Occurred() disambiguates a genuine UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5987, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).placementId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":5983
 *         return self._ptr[0].placementId
 * 
 *     @placement_id.setter             # <<<<<<<<<<<<<<
 *     def placement_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.placement_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5989
 *         self._ptr[0].placementId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method `VgpuPlacementId_v1.from_data(data)`.
 * Accepts one positional-or-keyword argument `data`, unpacks it from the
 * FASTCALL args/kwargs, and delegates to the Cython-level implementation.
 * Returns a new reference or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check was `if (unlikely(__pyx_kwds_len) < 0)`.
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose value is always
 * 0 or 1, so the `< 0` comparison outside the macro could never be true and a
 * failed __Pyx_NumKwargs_FASTCALL would go undetected.  The comparison now
 * lives inside the macro argument. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12from_data, "VgpuPlacementId_v1.from_data(data)\n\nCreate an VgpuPlacementId_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_id_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside the macro: unlikely() clamps its value to 0/1,
     * so `unlikely(len) < 0` was a dead (never-true) error check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5989, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present; merge positionals then kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5989, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 5989, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 5989, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5989, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 5989, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any references collected into values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `VgpuPlacementId_v1.from_data(data)`
 * (generated from _nvml.pyx:5989-5996).  Looks up the module-level
 * `vgpu_placement_id_v1_dtype` object and forwards everything to the shared
 * helper __from_data together with the dtype name and the VgpuPlacementId_v1
 * type object.  Returns a new reference, or NULL with an exception set if the
 * global lookup or the helper fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":5996
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_id_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_placement_id_v1_dtype", vgpu_placement_id_v1_dtype, VgpuPlacementId_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the module-global dtype object by name (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_placement_id_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_placement_id_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5989
 *         self._ptr[0].placementId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":5998
 *         return __from_data(data, "vgpu_placement_id_v1_dtype", vgpu_placement_id_v1_dtype, VgpuPlacementId_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * `VgpuPlacementId_v1.from_ptr(intptr_t ptr, bint readonly=False, object owner=None)`.
 * Unpacks up to three positional-or-keyword arguments, applies the defaults
 * (readonly=False, owner=None), converts `ptr` to intptr_t and `readonly` to
 * a C truth value, then delegates to the Cython-level implementation.
 * Returns a new reference or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check was `if (unlikely(__pyx_kwds_len) < 0)`.
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose value is always
 * 0 or 1, so the `< 0` comparison outside the macro could never be true and a
 * failed __Pyx_NumKwargs_FASTCALL would go undetected.  The comparison now
 * lives inside the macro argument. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_14from_ptr, "VgpuPlacementId_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuPlacementId_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside the macro: unlikely() clamps its value to 0/1,
     * so `unlikely(len) < 0` was a dead (never-true) error check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 5998, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present; collect positionals, then
       * merge keyword values and apply the owner=None default. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5998, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5998, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5998, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 5998, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":5999
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuPlacementId_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 5998, __pyx_L3_error) }
      }
    } else {
      /* Fast path: 1-3 positional arguments, no keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 5998, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 5998, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 5998, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert `ptr` to intptr_t; -1 doubles as the error sentinel, so
     * PyErr_Occurred() disambiguates a genuine -1 address. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 5999, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 5999, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 5998, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any references collected into values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":5998
 *         return __from_data(data, "vgpu_placement_id_v1_dtype", vgpu_placement_id_v1_dtype, VgpuPlacementId_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6007
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6008
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6008, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6008, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6007
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":6009
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6009, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6010
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6011
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlVgpuPlacementId_v1_t *)malloc((sizeof(nvmlVgpuPlacementId_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6012
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementId_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6013
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementId_v1_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6013, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPlacementId};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6013, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 6013, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6012
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementId_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":6014
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementId_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuPlacementId_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6015
 *                 raise MemoryError("Error allocating VgpuPlacementId_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementId_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6016
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementId_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6010
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>malloc(sizeof(nvmlVgpuPlacementId_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":6018
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuPlacementId_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6019
 *         else:
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":6020
 *             obj._ptr = <nvmlVgpuPlacementId_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":6021
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":6022
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":5998
 *         return __from_data(data, "vgpu_placement_id_v1_dtype", vgpu_placement_id_v1_dtype, VgpuPlacementId_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Generated Python wrapper for VgpuPlacementId_v1.__reduce_cython__.
 * Validates that the call carries no positional or keyword arguments and
 * then delegates to the __pyx_pf_* implementation function below.
 * (Cython-generated; do not hand-edit logic — it is regenerated from
 * cuda/bindings/_nvml.pyx.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_16__reduce_cython__, "VgpuPlacementId_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the tuple-based (non-fastcall) ABI, recover the positional count
   * from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__(self) takes no extra arguments: reject any
   * positionals or keywords with a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__reduce_cython__.
 * Pickling is unsupported for this type (it wraps a raw C pointer in
 * self._ptr), so this unconditionally raises TypeError and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Always raises: TypeError with an explanatory message. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Generated Python wrapper for VgpuPlacementId_v1.__setstate_cython__.
 * Parses exactly one argument (__pyx_state, positional or keyword) and
 * delegates to the __pyx_pf_* implementation, which always raises
 * TypeError (pickling unsupported).
 * (Cython-generated; do not hand-edit logic — it is regenerated from
 * cuda/bindings/_nvml.pyx.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_18__setstate_cython__, "VgpuPlacementId_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* slot for the single __pyx_state argument */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the tuple-based (non-fastcall) ABI, recover the positional count
   * from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: take up to one positional, then resolve the
       * rest (including __pyx_state by name) via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementId_v1.__setstate_cython__.
 * Unpickling is unsupported for this type (it wraps a raw C pointer in
 * self._ptr), so this unconditionally raises TypeError and returns NULL;
 * the __pyx_state argument is ignored. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Always raises: TypeError with an explanatory message. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementId_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6025
 * 
 * 
 * cdef _get_vgpu_placement_list_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t()
 *     return _numpy.dtype({
*/

/* C implementation of the module-private Cython function
 * _get_vgpu_placement_list_v2_dtype_offsets().
 *
 * Builds and returns a numpy structured dtype describing the C struct
 * nvmlVgpuPlacementList_v2_t, using the dict form
 *   {'names': ..., 'formats': ..., 'offsets': ..., 'itemsize': ...}
 * The field offsets are computed at runtime by taking the address of each
 * member of a stack-allocated `pod` instance relative to the struct's own
 * address, so the dtype always matches the compiler's actual layout
 * (padding included). Returns a new reference, or NULL with an exception
 * set on failure.
 * (Cython-generated; do not hand-edit logic — it is regenerated from
 * cuda/bindings/_nvml.pyx.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_placement_list_v2_dtype_offsets(void) {
  nvmlVgpuPlacementList_v2_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuPlacementList_v2_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_placement_list_v2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6026
 * 
 * cdef _get_vgpu_placement_list_v2_dtype_offsets():
 *     cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'],
 */
  /* NOTE(review): __pyx_t_1 is copied into pod with no visible prior
   * assignment in this chunk (the C++ value-construction of
   * nvmlVgpuPlacementList_v2_t() appears elided). Harmless here: the
   * function only takes field ADDRESSES of pod, never reads its values —
   * but confirm against the Cython codegen if pod's contents ever matter. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6027
 * cdef _get_vgpu_placement_list_v2_dtype_offsets():
 *     cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 */
  /* Look up the callable numpy.dtype into __pyx_t_5. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6028
 *     cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the 4-entry spec dict (__pyx_t_4) and its 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6028, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6028, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 6028, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_placement_size) != (0)) __PYX_ERR(0, 6028, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_count) != (0)) __PYX_ERR(0, 6028, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement_ids);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement_ids);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_placement_ids) != (0)) __PYX_ERR(0, 6028, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_mode) != (0)) __PYX_ERR(0, 6028, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6028, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6029
 *     return _numpy.dtype({
 *         'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* Build the 'formats' list: numpy.uint32 x3, numpy.intp (pointer-sized,
   * for the placementIds pointer member), numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6029, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6029, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6029, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 6029, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 6029, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 6029, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6028, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6031
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 */
  /* Compute each field's byte offset as (address of member) - (address of
   * struct), then box as Python int. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6031, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6032
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placementSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 6032, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":6033
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.count)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":6034
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placementIds)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6034, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":6035
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPlacementList_v2_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.mode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6035, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":6030
 *         'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 */
  /* Assemble the five boxed offsets into the 'offsets' list. */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6030, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 6030, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 6030, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 6030, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 6030, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6028, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6037
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPlacementList_v2_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the full struct size, padding included. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuPlacementList_v2_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6028, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict); the unpack-methods branch rewrites a bound
   * method into (self, func) form for the vectorcall below. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6027, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6025
 * 
 * 
 * cdef _get_vgpu_placement_list_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_placement_list_v2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6055
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPlacementList_v2_t *>calloc(1, sizeof(nvmlVgpuPlacementList_v2_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Generated Python wrapper for VgpuPlacementList_v2.__init__.
 * Rejects any positional or keyword arguments (returning -1 with a
 * TypeError set), then delegates to the __pyx_pf_* implementation, which
 * calloc-allocates the underlying nvmlVgpuPlacementList_v2_t.
 * (Cython-generated; do not hand-edit logic — it is regenerated from
 * cuda/bindings/_nvml.pyx.) */
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* __init__ uses the tuple-based calling convention: count positionals
   * directly from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) takes no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__init__ (called by the tp_init
 * wrapper after argument checking).  Allocates a zero-initialized
 * nvmlVgpuPlacementList_v2_t owned by this instance and initializes the
 * bookkeeping fields (_owner=None, _owned=True, _readonly=False, _refs={}).
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6056
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPlacementList_v2_t *>calloc(1, sizeof(nvmlVgpuPlacementList_v2_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPlacementList_v2")
 */
  /* calloc zero-fills, so every struct field starts at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuPlacementList_v2_t *)calloc(1, (sizeof(nvmlVgpuPlacementList_v2_t))));

  /* "cuda/bindings/_nvml.pyx":6057
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPlacementList_v2_t *>calloc(1, sizeof(nvmlVgpuPlacementList_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPlacementList_v2")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6058
 *         self._ptr = <nvmlVgpuPlacementList_v2_t *>calloc(1, sizeof(nvmlVgpuPlacementList_v2_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPlacementList_v2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in the module globals and call it with the
     * message string, then raise the resulting exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6058, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPlacementLi};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6058, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6058, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6057
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPlacementList_v2_t *>calloc(1, sizeof(nvmlVgpuPlacementList_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPlacementList_v2")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":6059
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPlacementList_v2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap None into _owner, releasing whatever it previously referenced. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":6060
 *             raise MemoryError("Error allocating VgpuPlacementList_v2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  /* _owned == True means __dealloc__ will free() _ptr. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":6061
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":6062
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6062, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6055
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPlacementList_v2_t *>calloc(1, sizeof(nvmlVgpuPlacementList_v2_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6064
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuPlacementList_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for VgpuPlacementList_v2.__dealloc__: casts the
 * generic PyObject* to the extension-type struct and forwards to the
 * implementation.  NOTE(review): __Pyx_KwValues_VARARGS appears to be a
 * macro that discards its arguments, which is why the otherwise-undeclared
 * __pyx_args/__pyx_nargs compile here — confirm against the Cython utility
 * code earlier in the file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuPlacementList_v2.__dealloc__.
 * Releases the owned C struct exactly once: the instance's pointer is
 * detached (set to NULL) before free() is called, mirroring the pyx source
 *     if self._owned and self._ptr != NULL:
 *         ptr = self._ptr
 *         self._ptr = NULL
 *         free(ptr)
 * Borrowed (non-owned) pointers are deliberately left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  nvmlVgpuPlacementList_v2_t *__pyx_v_ptr = NULL;

  /* Only free memory this wrapper allocated itself (_owned truthy) and
   * only when there is something to free. */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    __pyx_v_ptr = __pyx_v_self->_ptr;
    /* Clear the field first so no other path can observe a freed pointer. */
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":6071
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot wrapper for VgpuPlacementList_v2.__repr__: casts self to the
 * extension-type struct and forwards to the implementation.  Returns a new
 * str reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__repr__: assembles the f-string
 *   f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>"
 * by formatting the two dynamic fragments and joining them with the three
 * constant fragments.  Returns a new str, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6072
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fragment 1: str(__name__), fetched from the module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Fragment 2: hex(id(self)) converted to str. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five fragments ("<", name, " ... object at ", hex, ">");
   * the extra arguments are precomputed length and max-char-value hints
   * for the unicode join. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuPlacementList_v2_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 32 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6071
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6074
 *         return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuPlacementList_v2.ptr: forwards to the
 * implementation, which returns the raw struct address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: boxes the address held in
 * self._ptr (possibly 0/NULL) as a Python int.  Returns a new reference,
 * or NULL with an exception set (only if the int allocation fails). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6077
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t and Py_ssize_t share a representation on supported platforms,
   * so the address round-trips through PyLong_FromSsize_t unchanged. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6074
 *         return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6079
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for VgpuPlacementList_v2._get_ptr: returns the raw
 * struct pointer as an integer address.  Touches no Python objects and
 * cannot raise, so none of the usual error scaffolding is required. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  /* pyx:6080  `return <intptr_t>(self._ptr)` */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":6082
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot wrapper for VgpuPlacementList_v2.__int__: forwards to the
 * implementation, which returns the raw struct address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__int__: identical contract to the
 * `ptr` getter — boxes self._ptr's address as a Python int.  Returns a new
 * reference, or NULL with an exception set on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":6083
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6083, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6082
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6085
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPlacementList_v2 other_
 *         if not isinstance(other, VgpuPlacementList_v2):
*/

/* Python wrapper */
/* Rich-comparison (Py_EQ) wrapper for VgpuPlacementList_v2.__eq__:
 * forwards self (cast to the extension-type struct) and the other operand
 * to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__eq__: equality is a bytewise
 * memcmp of the two underlying structs.
 * NOTE(review): returns False (not NotImplemented) for non-matching types,
 * so Python never tries the reflected comparison on the other operand —
 * confirm this is intentional.
 * NOTE(review): the memcmp assumes both _ptr values are non-NULL; callers
 * appear to guarantee allocation in __init__, but a NULL here would be
 * undefined behavior — confirm. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":6087
 *     def __eq__(self, other):
 *         cdef VgpuPlacementList_v2 other_
 *         if not isinstance(other, VgpuPlacementList_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6088
 *         cdef VgpuPlacementList_v2 other_
 *         if not isinstance(other, VgpuPlacementList_v2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementList_v2_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6087
 *     def __eq__(self, other):
 *         cdef VgpuPlacementList_v2 other_
 *         if not isinstance(other, VgpuPlacementList_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":6089
 *         if not isinstance(other, VgpuPlacementList_v2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementList_v2_t)) == 0)
 * 
 */
  /* Type-checked downcast so other_._ptr can be read directly below. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2))))) __PYX_ERR(0, 6089, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":6090
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementList_v2_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuPlacementList_v2_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6085
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPlacementList_v2 other_
 *         if not isinstance(other, VgpuPlacementList_v2):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6092
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPlacementList_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for VgpuPlacementList_v2.__setitem__: forwards
 * (self, key, val) to the implementation.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__setitem__.
 *
 * `obj[0] = <numpy.ndarray>` replaces the entire underlying struct with a
 * byte copy of the array's backing buffer; every other key is forwarded to
 * setattr(self, key, val) so the per-field properties keep working.
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * Fix over the generated code: the previously owned buffer (allocated by
 * calloc in __init__, or by an earlier bulk assignment here) is released
 * before the fresh malloc().  The original overwrote self->_ptr
 * unconditionally, leaking sizeof(nvmlVgpuPlacementList_v2_t) bytes on
 * every bulk assignment. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* pyx:6093  `if key == 0 and isinstance(val, _numpy.ndarray):` */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6093, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6093, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* Leak fix: free the buffer this instance owns before replacing it.
     * Non-owned (borrowed) pointers are left alone. */
    if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }

    /* pyx:6094  allocate a fresh struct-sized buffer for the copy */
    __pyx_v_self->_ptr = ((nvmlVgpuPlacementList_v2_t *)malloc((sizeof(nvmlVgpuPlacementList_v2_t))));

    /* pyx:6095-6096  raise MemoryError on allocation failure */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6096, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPlacementLi};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6096, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 6096, __pyx_L1_error)
    }

    /* pyx:6097  memcpy the ndarray's buffer (address from val.ctypes.data)
     * into the new struct.
     * NOTE(review): assumes val's buffer holds at least
     * sizeof(nvmlVgpuPlacementList_v2_t) readable bytes — confirm callers
     * always pass a correctly-sized array. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6097, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6097, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6097, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuPlacementList_v2_t))));

    /* pyx:6098  `self._owner = None` — drop any previous owner reference */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* pyx:6099  the copy is owned here and freed in __dealloc__ */
    __pyx_v_self->_owned = 1;

    /* pyx:6100  `self._readonly = not val.flags.writeable` */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6100, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6100, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 6100, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    goto __pyx_L3;
  }

  /* pyx:6102  `setattr(self, key, val)` — any non-bulk key goes through the
   * regular attribute machinery (the per-field property setters). */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 6102, __pyx_L1_error)
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6104
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* CPython-level wrapper for the `VgpuPlacementList_v2.version` property getter.
 * Casts `self` to the extension-type struct and delegates to the Cython
 * implementation function; no argument parsing is needed for a __get__ slot. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation; its return value (new reference or NULL
   * on error) is passed through unchanged. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementList_v2.version.__get__`.
 * Reads the `version` field (unsigned int) from the wrapped
 * nvmlVgpuPlacementList_v2_t struct via `self->_ptr` and returns it as a new
 * Python int reference, or NULL with an exception set on conversion failure.
 * NOTE(review): `_ptr` is assumed non-NULL here — presumably guaranteed by the
 * type's constructor; confirm against the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6107
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6104
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6109
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `VgpuPlacementList_v2.version` property setter.
 * Casts `self` and forwards the new value to the implementation function;
 * returns 0 on success, -1 on error (standard __set__ slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementList_v2.version.__set__`.
 * Raises ValueError if the instance is flagged read-only (`self->_readonly`),
 * otherwise converts `val` to a C unsigned int and stores it into the wrapped
 * struct's `version` field. Returns 0 on success, -1 with an exception set on
 * failure (read-only instance or non-integer/out-of-range value). */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6111
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6112
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPlacementList_v2_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6112, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6112, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6111
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6113
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python value to unsigned int; (unsigned int)-1 doubles as the
   * error sentinel, so PyErr_Occurred() disambiguates a real conversion error. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6113, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6109
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6115
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_size(self):
 *         """int: OUT: The number of slots occupied by the vGPU type."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementList_v2.placement_size.__get__`.
 * Reads the `placementSize` field (unsigned int) from the wrapped struct and
 * returns it as a new Python int reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6118
 *     def placement_size(self):
 *         """int: OUT: The number of slots occupied by the vGPU type."""
 *         return self._ptr[0].placementSize             # <<<<<<<<<<<<<<
 * 
 *     @placement_size.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).placementSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6118, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6115
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_size(self):
 *         """int: OUT: The number of slots occupied by the vGPU type."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.placement_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6120
 *         return self._ptr[0].placementSize
 * 
 *     @placement_size.setter             # <<<<<<<<<<<<<<
 *     def placement_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `VgpuPlacementList_v2.placement_size` property
 * setter. Casts `self` and forwards the new value; returns 0/-1 per the
 * __set__ slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementList_v2.placement_size.__set__`.
 * Raises ValueError if the instance is read-only, otherwise converts `val`
 * to a C unsigned int and stores it into the struct's `placementSize` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6122
 *     @placement_size.setter
 *     def placement_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].placementSize = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6123
 *     def placement_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].placementSize = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPlacementList_v2_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6123, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6123, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6122
 *     @placement_size.setter
 *     def placement_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].placementSize = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6124
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].placementSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel; PyErr_Occurred()
   * distinguishes a legitimate value from a failed conversion. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6124, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).placementSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6120
 *         return self._ptr[0].placementSize
 * 
 *     @placement_size.setter             # <<<<<<<<<<<<<<
 *     def placement_size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.placement_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6126
 *         self._ptr[0].placementSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementList_v2.placement_ids.__get__`.
 * Returns an empty Python list when the struct's `placementIds` pointer is
 * NULL. Otherwise it builds a cython.view.array of shape (count,), itemsize
 * sizeof(unsigned int), format "I", C-contiguous, with allocate_buffer=False,
 * points its `data` member at the existing C buffer (zero-copy view), and
 * returns `numpy.asarray(arr)` over that view.
 * NOTE(review): the returned ndarray aliases memory owned by the underlying
 * nvmlVgpuPlacementList_v2_t; its lifetime is presumably tied to `self` —
 * confirm against the .pyx/caller contract. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6129
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 *         if self._ptr[0].placementIds == NULL:             # <<<<<<<<<<<<<<
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 */
  __pyx_t_1 = ((__pyx_v_self->_ptr[0]).placementIds == NULL);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6130
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 *         if self._ptr[0].placementIds == NULL:
 *             return []             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(self._ptr[0].placementIds)
 */
    /* No backing buffer yet: return a fresh empty list. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6130, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6129
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 *         if self._ptr[0].placementIds == NULL:             # <<<<<<<<<<<<<<
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 */
  }

  /* "cuda/bindings/_nvml.pyx":6131
 *         if self._ptr[0].placementIds == NULL:
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(self._ptr[0].placementIds)
 *         return _numpy.asarray(arr)
 */
  /* Construct view.array(shape=(count,), itemsize=sizeof(unsigned int),
   * format="I", mode="c", allocate_buffer=False) via a keyword-only
   * vectorcall; allocate_buffer=False so the array does not own memory. */
  __pyx_t_3 = NULL;
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).count); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 6131, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_3, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6131, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_5, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 6131, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 6131, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 6131, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 6131, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_7, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 6131, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6131, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_2);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6132
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(self._ptr[0].placementIds)
 *         return _numpy.asarray(arr)
 * 
 */
  /* Zero-copy: point the view at the existing C buffer. */
  __pyx_v_arr->data = ((char *)(__pyx_v_self->_ptr[0]).placementIds);

  /* "cuda/bindings/_nvml.pyx":6133
 *         cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(self._ptr[0].placementIds)
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @placement_ids.setter
 */
  /* Wrap the view in a numpy ndarray via numpy.asarray (still zero-copy for a
   * buffer-protocol source). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_7 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_6 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_7);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_7);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_6 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, ((PyObject *)__pyx_v_arr)};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6133, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6126
 *         self._ptr[0].placementSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.placement_ids.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6135
 *         return _numpy.asarray(arr)
 * 
 *     @placement_ids.setter             # <<<<<<<<<<<<<<
 *     def placement_ids(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `VgpuPlacementList_v2.placement_ids` property
 * setter. Casts `self` and forwards the new value; returns 0/-1 per the
 * __set__ slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuPlacementList_v2.placement_ids.__set__`.
 * Raises ValueError if the instance is read-only. Otherwise:
 *   1. allocates an owning cython.view.array of shape (len(val),), itemsize
 *      sizeof(unsigned int), format "I", C-contiguous;
 *   2. copies `val` into it via `arr[:] = numpy.asarray(val, dtype=uint32)`;
 *   3. points the struct's `placementIds` at the array's buffer and sets
 *      `count = len(val)`;
 *   4. stores the array in `self._refs["placement_ids"]` so the Python object
 *      keeping the buffer alive is retained as long as `self` holds the ref.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6137
 *     @placement_ids.setter
 *     def placement_ids(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6138
 *     def placement_ids(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPlacementList_v2_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6138, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6138, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6137
 *     @placement_ids.setter
 *     def placement_ids(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":6139
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 */
  /* Construct an owning view.array of len(val) unsigned ints (no
   * allocate_buffer kwarg here, so the array allocates and owns its buffer). */
  __pyx_t_2 = NULL;
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 6139, __pyx_L1_error)
  __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 6139, __pyx_L1_error);
  __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6139, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 6139, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_5, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 6139, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 6139, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 6139, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6139, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6140
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)             # <<<<<<<<<<<<<<
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 *         self._ptr[0].count = len(val)
 */
  /* Copy the caller's values into the owned buffer via
   * numpy.asarray(val, dtype=numpy.uint32) and a full-slice assignment. */
  __pyx_t_7 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_6))) {
    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
    assert(__pyx_t_7);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_6);
    __Pyx_INCREF(__pyx_t_7);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_6, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_7, __pyx_v_val};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6140, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_2, __pyx_t_5, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 6140, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_6, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6140, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* arr[:] = <converted ndarray> */
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 6140, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6141
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)             # <<<<<<<<<<<<<<
 *         self._ptr[0].count = len(val)
 *         self._refs["placement_ids"] = arr
 */
  /* Expose the owned buffer to the C struct. */
  (__pyx_v_self->_ptr[0]).placementIds = ((unsigned int *)((intptr_t)__pyx_v_arr->data));

  /* "cuda/bindings/_nvml.pyx":6142
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 *         self._ptr[0].count = len(val)             # <<<<<<<<<<<<<<
 *         self._refs["placement_ids"] = arr
 * 
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 6142, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).count = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6143
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 *         self._ptr[0].count = len(val)
 *         self._refs["placement_ids"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Keep the array alive for as long as `self` does so placementIds stays
   * valid; `self._refs` is expected to be a dict. */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 6143, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_placement_ids, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 6143, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6135
 *         return _numpy.asarray(arr)
 * 
 *     @placement_ids.setter             # <<<<<<<<<<<<<<
 *     def placement_ids(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.placement_ids.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6145
 *         self._refs["placement_ids"] = arr
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE."""
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuPlacementList_v2.mode property getter.
 * Casts the generic PyObject* receiver to the concrete extension-type
 * struct and forwards to the generated impl function.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the impl; it owns error reporting and reference creation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for VgpuPlacementList_v2.mode.__get__: reads the C-level `mode`
 * field (unsigned int) from self._ptr[0] and boxes it into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6148
 *     def mode(self):
 *         """int: IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE."""
 *         return self._ptr[0].mode             # <<<<<<<<<<<<<<
 * 
 *     @mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int struct field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).mode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6145
 *         self._refs["placement_ids"] = arr
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6150
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuPlacementList_v2.mode property setter.
 * Casts the receiver to the concrete extension-type struct and forwards
 * the new value to the impl.  Returns 0 on success, -1 with an exception
 * set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for VgpuPlacementList_v2.mode.__set__: raises ValueError when the
 * instance is read-only, otherwise converts `val` to a C unsigned int and
 * stores it into self._ptr[0].mode.  Returns 0 on success, -1 with an
 * exception set on failure (read-only instance or int conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6152
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].mode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6153
 *     def mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].mode = val
 * 
 */
    /* Build and raise ValueError via the vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPlacementList_v2_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6153, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6152
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].mode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6154
 *         if self._readonly:
 *             raise ValueError("This VgpuPlacementList_v2 instance is read-only")
 *         self._ptr[0].mode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert Python int -> C unsigned int (raises OverflowError/TypeError
   * on bad input), then write the struct field. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6154, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).mode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6150
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6156
 *         self._ptr[0].mode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method VgpuPlacementList_v2.from_data(data).
 * Parses exactly one positional-or-keyword argument ("data") from the
 * FASTCALL/vectorcall argument array, then forwards to the impl function.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_12from_data, "VgpuPlacementList_v2.from_data(data)\n\nCreate an VgpuPlacementList_v2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_list_v2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` expands to `__builtin_expect(!!(len), 0) < 0`,
     * which is always false, so a negative (error) return from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  (Compare the correct
     * form used by the generated __reduce_cython__ wrapper.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6156, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: capture any positionals, then merge
       * keywords by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6156, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 6156, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 6156, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6156, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 6156, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references captured so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for VgpuPlacementList_v2.from_data: looks up the module-level
 * `vgpu_placement_list_v2_dtype` object and delegates to the shared
 * __from_data helper together with the dtype name string and the
 * VgpuPlacementList_v2 extension type.  Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":6163
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_list_v2_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from the module namespace (raises NameError if missing). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_placement_list_v2_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_placement_list_v2_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6156
 *         self._ptr[0].mode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6165
 *         return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for the static method
 * VgpuPlacementList_v2.from_ptr(ptr, readonly=False, owner=None).
 * Parses up to three positional-or-keyword arguments ("ptr", "readonly",
 * "owner"), converts ptr -> intptr_t and readonly -> C int truth value,
 * defaults owner to None, then forwards to the impl function.  Returns a
 * new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14from_ptr, "VgpuPlacementList_v2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuPlacementList_v2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` expands to `__builtin_expect(!!(len), 0) < 0`,
     * which is always false, so a negative (error) return from
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6165, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: capture positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6165, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6165, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6165, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 6165, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":6166
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuPlacementList_v2 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 6165, __pyx_L3_error) }
      }
    } else {
      /* Fast path: positionals only (1..3 accepted). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6165, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6165, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6165, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed objects to C-level argument types. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6166, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6166, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 6165, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references captured so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":6165
 *         return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl for VgpuPlacementList_v2.from_ptr: validates that ptr is non-null,
 * allocates a new VgpuPlacementList_v2 object, and either (owner is None)
 * mallocs its own nvmlVgpuPlacementList_v2_t and memcpy's the pointee in
 * (obj owns/frees the copy), or (owner given) aliases the caller's pointer
 * and keeps a reference to `owner` to keep the memory alive.  Finally sets
 * the readonly flag and an empty _refs dict.  Returns a new reference, or
 * NULL with an exception set (ValueError for null ptr, MemoryError on
 * allocation failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6174
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6175
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)
 *         if owner is None:
 */
    /* Reject null pointers with ValueError. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6175, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6175, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6174
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)
 */
  }

  /* "cuda/bindings/_nvml.pyx":6176
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
 */
  /* Allocate the wrapper object via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6176, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6177
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6178
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementList_v2")
 */
    /* No owner: take a private copy of the struct so the wrapper owns it. */
    __pyx_v_obj->_ptr = ((nvmlVgpuPlacementList_v2_t *)malloc((sizeof(nvmlVgpuPlacementList_v2_t))));

    /* "cuda/bindings/_nvml.pyx":6179
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPlacementList_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementList_v2_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6180
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementList_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementList_v2_t))
 *             obj._owner = None
 */
      /* malloc failed: raise MemoryError (looked up as a module global,
       * unwrapped if it is a bound method). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6180, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPlacementLi};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6180, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 6180, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6179
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPlacementList_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementList_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6181
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPlacementList_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementList_v2_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuPlacementList_v2_t))));

    /* "cuda/bindings/_nvml.pyx":6182
 *                 raise MemoryError("Error allocating VgpuPlacementList_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementList_v2_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6183
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPlacementList_v2_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6177
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>malloc(sizeof(nvmlVgpuPlacementList_v2_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":6185
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the caller's memory; the wrapper does not free it. */
    __pyx_v_obj->_ptr = ((nvmlVgpuPlacementList_v2_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6186
 *         else:
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":6187
 *             obj._ptr = <nvmlVgpuPlacementList_v2_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":6188
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":6189
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  /* Fresh dict to hold Python objects that back fields (keeps them alive). */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6190
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6165
 *         return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for VgpuPlacementList_v2.__reduce_cython__.  Rejects any
 * positional or keyword arguments, then delegates to the impl (which
 * always raises TypeError: the type is not picklable).  Returns NULL with
 * an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_16__reduce_cython__, "VgpuPlacementList_v2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__reduce_cython__.
 * Always raises TypeError: the wrapped nvml struct pointer (self._ptr)
 * has no meaningful Python serialization, so pickling is forbidden. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code: the raise above always jumps here, so this
   * function can only ever return NULL with an exception set. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Entry point for VgpuPlacementList_v2.__setstate_cython__.
 * Parses exactly one argument (__pyx_state, positional or keyword) and
 * delegates to the implementation, which unconditionally raises TypeError
 * because pointer-wrapping instances cannot be unpickled. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_18__setstate_cython__, "VgpuPlacementList_v2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the error check must cover the comparison, not just the
     * value: `unlikely(x)` normalizes x to 0/1, so the former
     * `unlikely(__pyx_kwds_len) < 0` could never be true and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL went undetected.
     * This matches the correct form used by the __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: accept at most one positional, then let
       * __Pyx_ParseKeywords fill in / validate the __pyx_state slot. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any argument references taken so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPlacementList_v2.__setstate_cython__.
 * Always raises TypeError: the wrapped nvml struct pointer (self._ptr)
 * cannot be reconstructed from pickled state, so unpickling is forbidden.
 * The __pyx_state argument is accepted but never inspected. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code: the raise above always jumps here, so this
   * function can only ever return NULL with an exception set. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPlacementList_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6193
 * 
 * 
 * cdef _get_vgpu_type_bar1info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype describing nvmlVgpuTypeBar1Info_v1_t:
 * field names, numpy formats, byte offsets (computed from a stack instance
 * via pointer arithmetic), and the total itemsize.  Returns a new reference
 * to the dtype object, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_bar1info_v1_dtype_offsets(void) {
  nvmlVgpuTypeBar1Info_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuTypeBar1Info_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_type_bar1info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6194
 * 
 * cdef _get_vgpu_type_bar1info_v1_dtype_offsets():
 *     cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'bar1size'],
 */
  /* BUG FIX: __pyx_t_1 was copied into pod without ever being assigned,
   * so an indeterminate struct value was read (undefined behavior, even
   * though only field addresses are used below).  Restore the
   * value-initialization that `nvmlVgpuTypeBar1Info_v1_t()` requires. */
  __pyx_t_1 = nvmlVgpuTypeBar1Info_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6195
 * cdef _get_vgpu_type_bar1info_v1_dtype_offsets():
 *     cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'bar1size'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6195, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6195, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6196
 *     cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'bar1size'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 6196, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bar1size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bar1size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_bar1size) != (0)) __PYX_ERR(0, 6196, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6197
 *     return _numpy.dtype({
 *         'names': ['version', 'bar1size'],
 *         'formats': [_numpy.uint32, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6197, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6197, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6199
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bar1Size)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6200
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Size)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuTypeBar1Info_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bar1Size)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":6198
 *         'names': ['version', 'bar1size'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bar1Size)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6198, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6198, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6202
 *             (<intptr_t>&(pod.bar1Size)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuTypeBar1Info_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuTypeBar1Info_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6202, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict); if the callable is a bound method,
   * unpack it so self becomes the first fast-call argument. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6195, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6193
 * 
 * 
 * cdef _get_vgpu_type_bar1info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_type_bar1info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6219
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init entry point for VgpuTypeBar1Info_v1.__init__.
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation, which allocates the wrapped nvml struct.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeBar1Info_v1.__init__.
 * Allocates one zero-filled nvmlVgpuTypeBar1Info_v1_t on the heap and
 * marks the instance as owning it (_owned = True, _owner = None,
 * _readonly = False).  Raises MemoryError if calloc fails.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6220
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 */
  __pyx_v_self->_ptr = ((nvmlVgpuTypeBar1Info_v1_t *)calloc(1, (sizeof(nvmlVgpuTypeBar1Info_v1_t))));

  /* "cuda/bindings/_nvml.pyx":6221
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6222
 *         self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeBar1Info_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(message).  MemoryError is looked up as a
     * module-level name here (not the C PyExc_MemoryError), so the fast-call
     * machinery below handles a possible bound-method callable. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6222, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeBar1Inf};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6222, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6222, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6221
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":6223
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref the new value, drop the old. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":6224
 *             raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":6225
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":6219
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6227
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuTypeBar1Info_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc helper wrapper for VgpuTypeBar1Info_v1.__dealloc__.
 * NOTE(review): __Pyx_KwValues_VARARGS is a macro that ignores its
 * arguments, so the __pyx_args/__pyx_nargs tokens below never survive
 * preprocessing even though no such locals are declared here. */
static void __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuTypeBar1Info_v1.__dealloc__.
 * Frees the wrapped struct only when this instance owns it (_owned) and
 * the pointer is non-NULL; _ptr is cleared before free() so the instance
 * never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  nvmlVgpuTypeBar1Info_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuTypeBar1Info_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":6229
 *     def __dealloc__(self):
 *         cdef nvmlVgpuTypeBar1Info_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6230
 *         cdef nvmlVgpuTypeBar1Info_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":6231
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":6232
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6229
 *     def __dealloc__(self):
 *         cdef nvmlVgpuTypeBar1Info_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":6227
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuTypeBar1Info_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":6234
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr entry point for VgpuTypeBar1Info_v1.__repr__; delegates directly
 * to the implementation below.  (__Pyx_KwValues_VARARGS ignores its
 * arguments, so the undeclared __pyx_args/__pyx_nargs tokens vanish during
 * preprocessing.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeBar1Info_v1.__repr__.
 * Builds the f-string f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>"
 * by joining five unicode pieces: "<", formatted __name__, the literal
 * class/middle text, str(hex(id(self))), and ">".
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6235
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ into a unicode object (t_2). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute str(hex(id(self))) into a unicode object (t_1). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the 5 parts; the length hint pre-sizes the result buffer
   * (2 one-char literals + 31-char middle literal + the two dynamic parts). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuTypeBar1Info_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 31 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6234
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6237
 *         return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter entry point for VgpuTypeBar1Info_v1.ptr; delegates to
 * the implementation below.  (__Pyx_KwValues_VARARGS ignores its
 * arguments, so the undeclared __pyx_args/__pyx_nargs tokens vanish
 * during preprocessing.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (generated from
 * _nvml.pyx:6240: `return <intptr_t>(self._ptr)`): converts the wrapped
 * struct pointer to a Python int and returns it (NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6240
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t and Py_ssize_t are assumed interchangeable here by the code
   * generator; the pointer value is boxed as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6240, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6237
 *         return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6242
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Generated from the cdef method `_get_ptr` (_nvml.pyx:6242): C-level
 * fast path that returns the raw struct pointer as an intptr_t, with no
 * Python-object boxing and no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":6243
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6242
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6245
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
/* Generated nb_int slot wrapper for VgpuTypeBar1Info_v1.__int__: casts
 * self to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__ (generated from _nvml.pyx:6246): identical
 * semantics to the `ptr` getter — boxes the wrapped struct pointer as a
 * Python int, so `int(obj)` yields the underlying address. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":6246
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6245
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6248
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeBar1Info_v1 other_
 *         if not isinstance(other, VgpuTypeBar1Info_v1):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* Generated wrapper for VgpuTypeBar1Info_v1.__eq__: casts self to the
 * extension-type struct, passes `other` through untyped, and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__ (generated from _nvml.pyx:6248-6253):
 * returns False if `other` is not a VgpuTypeBar1Info_v1, otherwise
 * compares the two wrapped structs bytewise with memcmp over
 * sizeof(nvmlVgpuTypeBar1Info_v1_t). Returns a new Python bool, or NULL
 * on error. NOTE(review): assumes both `_ptr` values are non-NULL and
 * point to full structs — not checked here; presumably guaranteed by the
 * constructors elsewhere in the module. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":6250
 *     def __eq__(self, other):
 *         cdef VgpuTypeBar1Info_v1 other_
 *         if not isinstance(other, VgpuTypeBar1Info_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6251
 *         cdef VgpuTypeBar1Info_v1 other_
 *         if not isinstance(other, VgpuTypeBar1Info_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6250
 *     def __eq__(self, other):
 *         cdef VgpuTypeBar1Info_v1 other_
 *         if not isinstance(other, VgpuTypeBar1Info_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":6252
 *         if not isinstance(other, VgpuTypeBar1Info_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0)
 * 
 */
  /* Type-checked downcast of `other` to the extension type (the TypeTest
   * also tolerates None, but None was already rejected by the isinstance
   * branch above). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1))))) __PYX_ERR(0, 6252, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":6253
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Structural (bytewise) equality of the two underlying C structs. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuTypeBar1Info_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6248
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeBar1Info_v1 other_
 *         if not isinstance(other, VgpuTypeBar1Info_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6255
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
/* Generated mp_ass_subscript wrapper for VgpuTypeBar1Info_v1.__setitem__:
 * casts self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__ (generated from _nvml.pyx:6255-6265).
 * Two behaviors, chosen by the key:
 *   - `obj[0] = ndarray`: allocate a fresh nvmlVgpuTypeBar1Info_v1_t,
 *     memcpy the array's buffer into it, and take ownership
 *     (_owner = None, _owned = True, _readonly mirrors the array's
 *     writeable flag);
 *   - anything else: fall back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray branch overwrites self->_ptr with a new
 * malloc() without freeing any previously owned allocation — looks like a
 * potential leak if used on an already-populated instance; confirm against
 * the type's lifecycle elsewhere in the module. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":6256
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, ndarray)`: the ndarray
   * lookup below only runs when the key compares equal to 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6256, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6256, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6256, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6256, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6257
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuTypeBar1Info_v1_t *)malloc((sizeof(nvmlVgpuTypeBar1Info_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6258
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6259
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             self._owner = None
 */
      /* Vectorcall of MemoryError("Error allocating VgpuTypeBar1Info_v1");
       * the builtin is looked up as a module global, so the unpack-method
       * branch below is effectively dead but kept by the generator. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6259, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeBar1Inf};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6259, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 6259, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6258
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6260
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the ndarray's buffer address as a Python int;
     * copy one full struct from it. NOTE(review): assumes the array buffer
     * is at least sizeof(nvmlVgpuTypeBar1Info_v1_t) bytes — presumably
     * guaranteed by the documented single-element dtype contract. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6260, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6260, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6260, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuTypeBar1Info_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6261
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6262
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6263
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 6263, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":6256
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":6265
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 6265, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":6255
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6267
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Generated getter-slot wrapper for VgpuTypeBar1Info_v1.version: casts
 * self to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter (generated from
 * _nvml.pyx:6270): returns self._ptr[0].version (an unsigned int field)
 * boxed as a Python int, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6270
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6270, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6267
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6272
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Generated setter-slot wrapper for VgpuTypeBar1Info_v1.version: casts
 * self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter (generated from
 * _nvml.pyx:6274-6276): raises ValueError when the instance is marked
 * read-only, otherwise converts `val` to unsigned int and stores it into
 * self._ptr[0].version. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6274
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6275
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Vectorcall of ValueError(msg) with the interned message constant. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeBar1Info_v1_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6275, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6275, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6274
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6276
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6276, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6272
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6278
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1size(self):
 *         """int: BAR1 size in megabytes."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Generated getter-slot wrapper for VgpuTypeBar1Info_v1.bar1size: casts
 * self to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `bar1size` property getter (generated from
 * _nvml.pyx:6281, "BAR1 size in megabytes"): returns the unsigned
 * long long field self._ptr[0].bar1Size boxed as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6281
 *     def bar1size(self):
 *         """int: BAR1 size in megabytes."""
 *         return self._ptr[0].bar1Size             # <<<<<<<<<<<<<<
 * 
 *     @bar1size.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).bar1Size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6281, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6278
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bar1size(self):
 *         """int: BAR1 size in megabytes."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.bar1size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6283
 *         return self._ptr[0].bar1Size
 * 
 *     @bar1size.setter             # <<<<<<<<<<<<<<
 *     def bar1size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Generated setter-slot wrapper for VgpuTypeBar1Info_v1.bar1size: casts
 * self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `bar1size` property setter (generated from
 * _nvml.pyx:6285-6287): raises ValueError when the instance is marked
 * read-only, otherwise converts `val` to unsigned long long and stores it
 * into self._ptr[0].bar1Size. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6285
 *     @bar1size.setter
 *     def bar1size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")
 *         self._ptr[0].bar1Size = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6286
 *     def bar1size(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bar1Size = val
 * 
 */
    /* Vectorcall of ValueError(msg) with the interned message constant. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeBar1Info_v1_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6286, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6286, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6285
 *     @bar1size.setter
 *     def bar1size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")
 *         self._ptr[0].bar1Size = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6287
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only")
 *         self._ptr[0].bar1Size = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 6287, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bar1Size = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6283
 *         return self._ptr[0].bar1Size
 * 
 *     @bar1size.setter             # <<<<<<<<<<<<<<
 *     def bar1size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.bar1size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6289
 *         self._ptr[0].bar1Size = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Fast-call argument-unpacking wrapper for the Python-visible staticmethod
 * VgpuTypeBar1Info_v1.from_data(data).  Accepts exactly one argument (positional
 * or keyword "data") and forwards it to the implementation function
 * __pyx_pf_..._12from_data below.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_12from_data, "VgpuTypeBar1Info_v1.from_data(data)\n\nCreate an VgpuTypeBar1Info_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_type_bar1info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive args as a tuple; recover the count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and thus never < 0, so
     * the previous form `unlikely(__pyx_kwds_len) < 0` could never detect a -1
     * error return from __Pyx_NumKwargs_FASTCALL.  This now matches the
     * correctly-parenthesized check in the __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6289, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: slot any positionals, then parse kwds. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6289, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 6289, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 6289, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6289, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 6289, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error while unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeBar1Info_v1.from_data(data): looks up the module
 * global `vgpu_type_bar1info_v1_dtype` and delegates to the shared __from_data
 * helper, passing the dtype name, the dtype object, and the wrapping class.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":6296
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_type_bar1info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_type_bar1info_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_type_bar1info_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6289
 *         self._ptr[0].bar1Size = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record the traceback frame. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6298
 *         return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Fast-call argument-unpacking wrapper for the Python-visible staticmethod
 * VgpuTypeBar1Info_v1.from_ptr(ptr, readonly=False, owner=None).  Converts
 * `ptr` to intptr_t and `readonly` to a C int, defaults `owner` to None, then
 * forwards to the implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_14from_ptr, "VgpuTypeBar1Info_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuTypeBar1Info_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive args as a tuple; recover the count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and thus never < 0, so
     * the previous form `unlikely(__pyx_kwds_len) < 0` could never detect a -1
     * error return from __Pyx_NumKwargs_FASTCALL.  This now matches the
     * correctly-parenthesized check in the __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6298, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: slot any positionals, then parse kwds. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 6298, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":6299
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer.
 * 
 */
      /* Apply default owner=None when the slot is still empty. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Verify the single required argument slot (ptr) was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 6298, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1..3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6298, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr to intptr_t; -1 with a pending exception signals failure. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6299, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6299, __pyx_L3_error)
    } else {
      /* Default readonly=False. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 6298, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error while unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":6298
 *         return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeBar1Info_v1.from_ptr(ptr, readonly, owner).
 * Rejects a null ptr, allocates a new wrapper object, then either copies the
 * pointed-to struct into newly malloc'd storage (owner is None -> _owned=True)
 * or aliases the caller's pointer and keeps a reference to `owner`
 * (_owned=False).  Returns the new wrapper, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6307
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6308
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6308, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6308, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6307
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":6309
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 */
  /* Direct tp_new call, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6309, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6310
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6311
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 */
    /* No owner: take a private copy so the wrapper controls the lifetime. */
    __pyx_v_obj->_ptr = ((nvmlVgpuTypeBar1Info_v1_t *)malloc((sizeof(nvmlVgpuTypeBar1Info_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6312
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6313
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name here (module globals may shadow the
       * builtin), then called via vectorcall with the message string. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6313, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeBar1Inf};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6313, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 6313, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6312
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6314
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuTypeBar1Info_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6315
 *                 raise MemoryError("Error allocating VgpuTypeBar1Info_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    /* Swap obj._owner to None with correct refcount bookkeeping. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6316
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6310
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":6318
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: alias the caller's memory; owner keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlVgpuTypeBar1Info_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6319
 *         else:
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":6320
 *             obj._ptr = <nvmlVgpuTypeBar1Info_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":6321
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":6322
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6298
 *         return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop temporaries and record the traceback frame. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Release the local `obj` reference; on success __pyx_r holds its own. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Fast-call wrapper for VgpuTypeBar1Info_v1.__reduce_cython__(self): rejects
 * any positional or keyword arguments, then delegates to the implementation
 * (which always raises TypeError — the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_16__reduce_cython__, "VgpuTypeBar1Info_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive args as a tuple; recover the count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Takes no arguments beyond self: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: unconditionally raises TypeError
 * because the wrapped C pointer (_ptr) cannot be pickled.  Always returns
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Fast-call argument-unpacking wrapper for
 * VgpuTypeBar1Info_v1.__setstate_cython__(self, __pyx_state).  Accepts exactly
 * one argument and forwards it to the implementation (which always raises
 * TypeError — the type is not picklable).  Returns NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_18__setstate_cython__, "VgpuTypeBar1Info_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive args as a tuple; recover the count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and thus never < 0, so
     * the previous form `unlikely(__pyx_kwds_len) < 0` could never detect a -1
     * error return from __Pyx_NumKwargs_FASTCALL.  This now matches the
     * correctly-parenthesized check in the __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: slot any positionals, then parse kwds. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error while unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeBar1Info_v1.__setstate_cython__.
 * Pickling is not supported for this wrapper type: the function
 * unconditionally raises TypeError and therefore always returns NULL
 * with a traceback entry added.  Both parameters are intentionally
 * unused (CYTHON_UNUSED). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Always raises; control flow unconditionally jumps to __pyx_L1_error. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeBar1Info_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6325
 * 
 * 
 * cdef _get_vgpu_process_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns the numpy structured dtype mirroring the C layout of
 * nvmlVgpuProcessUtilizationInfo_v1_t: a dict with 'names', 'formats',
 * per-field byte 'offsets' (computed via address arithmetic on a local
 * instance `pod`), and the struct's total size as 'itemsize', passed to
 * _numpy.dtype().  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_process_utilization_info_v1_dtype_offsets(void) {
  nvmlVgpuProcessUtilizationInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuProcessUtilizationInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  size_t __pyx_t_17;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_process_utilization_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6326
 * 
 * cdef _get_vgpu_process_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so pod holds
   * indeterminate bytes.  That appears harmless here because pod is only used
   * below for taking field addresses to compute offsets; its contents are
   * never read — confirm against the Cython codegen for value-initialized
   * C++ temporaries. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6327
 * cdef _get_vgpu_process_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.int8, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up numpy.dtype (t_5 = callable; t_3 becomes bound-self if a method). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6328
 *     cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int8, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* t_4 = the spec dict; t_6 = the 10-element 'names' list of interned
   * unicode field names, stored under key 'names'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_process_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_process_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_process_name) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_time_stamp) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_vgpu_instance) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_sm_util) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_mem_util) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_enc_util) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_dec_util) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_jpg_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_jpg_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_jpg_util) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ofa_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ofa_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_ofa_util) != (0)) __PYX_ERR(0, 6328, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6329
 *     return _numpy.dtype({
 *         'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.int8, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.processName)) - (<intptr_t>&pod),
 */
  /* Fetch the per-field numpy scalar types into t_7..t_16 (int8, uint64,
   * then eight uint32), then assemble them into the 'formats' list. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 6329, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6331
 *         'formats': [_numpy.int8, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.processName)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 */
  /* Each offset below is offsetof(field) computed as the byte distance
   * between the field's address and the base of `pod`. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.processName)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6331, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6332
 *         'offsets': [
 *             (<intptr_t>&(pod.processName)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 */
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":6333
 *             (<intptr_t>&(pod.processName)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuInstance)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 6333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":6334
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 6334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":6335
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.smUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 6335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":6336
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 6336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":6337
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.encUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 6337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":6338
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":6339
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.jpgUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":6340
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuProcessUtilizationInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ofaUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6340, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":6330
 *         'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.int8, _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.processName)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 */
  /* Pack the ten offset ints into the 'offsets' list (same field order as
   * 'names'/'formats') and store it in the spec dict. */
  __pyx_t_7 = PyList_New(10); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_16) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_15) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_14) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_13) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_11) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_10) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_9) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_8) != (0)) __PYX_ERR(0, 6330, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6342
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuProcessUtilizationInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the exact C struct size so numpy keeps any
   * trailing padding; then call numpy.dtype(spec) via the vectorcall path. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuProcessUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_17 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_17 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_17, (2-__pyx_t_17) | (__pyx_t_17*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6327, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6325
 * 
 * 
 * cdef _get_vgpu_process_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_process_utilization_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6364
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=vgpu_process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Argument-unpacking wrapper for VgpuProcessUtilizationInfo_v1.__init__(self, size=1).
 * Accepts at most one positional argument and/or the keyword 'size'; defaults
 * size to the interned int 1 when omitted.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)`.  On GCC/Clang
     * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which collapses
     * the value to 0/1, so the `< 0` comparison could never be true and a
     * negative (error) kwargs count went undetected.  The comparison now
     * lives inside the macro argument, as in the other checks above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6364, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords and
       * apply the default for 'size' if still unset. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 6364, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: at most one positional; default applies. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 6364, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6365
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=vgpu_process_utilization_info_v1_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuProcessUtilizationInfo_v1_t), \
*/
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_vgpu_process_utilization_info_v1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6365, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 6365, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6365, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6366
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=vgpu_process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlVgpuProcessUtilizationInfo_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) }"
*/
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6367
 *         arr = _numpy.empty(size, dtype=vgpu_process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuProcessUtilizationInfo_v1_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) }"
 * 
*/
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6367, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuProcessUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6367, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6367, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 6367, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":6368
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuProcessUtilizationInfo_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6368, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6368, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlVgpuProcessUtilizationInfo_v1_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6368, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6368, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 6367, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 6367, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":6364
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=vgpu_process_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6370
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python-level wrapper for VgpuProcessUtilizationInfo_v1.__repr__ (tp_repr slot).
 * Casts the generic PyObject* self to the concrete extension-type struct and
 * delegates to the implementation function (__pyx_pf_..._2__repr__) below.
 * Returns a new reference to a unicode object, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function, so
   * __Pyx_KwValues_VARARGS must be a macro that ignores its arguments here
   * (__repr__ takes no keyword arguments). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessUtilizationInfo_v1.__repr__ (pyx line 6370).
 * If the backing recarray self._data holds more than one element, produces
 *   "<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{size} object at {hex(id(self))}>"
 * otherwise
 *   "<{__name__}.VgpuProcessUtilizationInfo_v1 object at {hex(id(self))}>".
 * Returns a new unicode reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  /* Fragment arrays for the f-string joins: 7 pieces for the array repr,
   * 5 pieces for the scalar repr (literals interleaved with formatted values). */
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6371
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate self._data.size > 1 to choose the array vs. scalar repr form. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6371, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6371, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 6371, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":6372
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1 object at {hex(id(self))}>"
 */
    /* Array case: format __name__, self._data.size, and hex(id(self)) as
     * unicode fragments, then join them with the literal pieces. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_VgpuProcessUtilizationInfo_v1_A;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    /* The length expression pre-computes the total result length (literal
     * lengths plus the formatted fragments) so the join allocates once. */
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6371
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":6374
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar case: same scheme with fewer fragments (no size component). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_VgpuProcessUtilizationInfo_v1_o;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 41 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":6370
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  /* Error path: release any live temporaries and report NULL with traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6376
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuProcessUtilizationInfo_v1.ptr property getter.
 * Casts self to the concrete extension-type struct and delegates to the
 * implementation below; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (pyx line 6376):
 * returns self._data.ctypes.data — the buffer address of the backing numpy
 * array as a Python int. Two chained attribute lookups; any failure jumps
 * to the error path and returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6379
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* self._data.ctypes */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* (...).data */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6376
 *             return f"<{__name__}.VgpuProcessUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6381
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (cdef) variant of the `ptr` getter: returns self._data.ctypes.data
 * converted to intptr_t. On failure it returns 0 with a Python exception set —
 * callers must check for a pending exception, since 0 is not distinguishable
 * from a (theoretical) zero address by the return value alone.
 * NOTE(review): the conversion goes through PyLong_AsSsize_t, i.e. it assumes
 * intptr_t and Py_ssize_t are interchangeable here — standard for CPython
 * builds, but worth confirming on unusual platforms. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6382
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Convert the Python int address to a C integer; -1 may also be a valid
   * result, so PyErr_Occurred() disambiguates error from value. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6382, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6381
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6384
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* Python-level wrapper for VgpuProcessUtilizationInfo_v1.__int__ (nb_int slot).
 * Casts self to the concrete extension-type struct and delegates to the
 * implementation below; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessUtilizationInfo_v1.__int__ (pyx line 6384).
 * Raises TypeError when the backing recarray holds more than one element
 * (callers should use .ptr for arrays); otherwise returns the buffer address
 * self._data.ctypes.data as a Python int. Returns NULL with an exception set
 * on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":6385
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Guard: refuse int() conversion for multi-element arrays. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6385, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6385, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 6385, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":6386
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Build TypeError(message) via a vectorcall of the exception type with
     * one interned string argument, then raise it. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6386, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6386, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6385
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":6388
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single-element case: return the buffer address as a Python int. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6384
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6390
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuProcessUtilizationInfo_v1.__len__ (sq_length slot).
 * Casts self to the concrete extension-type struct and delegates to the
 * implementation below; per the length-slot protocol, a return of -1 with an
 * exception set signals an error. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessUtilizationInfo_v1.__len__ (pyx line 6390):
 * returns self._data.size (element count of the backing recarray) coerced to
 * Py_ssize_t. Returns -1 with an exception set on failure, as required by the
 * sequence length protocol. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":6391
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6391, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Index-coerce the size object; -1 may be a valid value, so PyErr_Occurred()
   * distinguishes the error case. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 6391, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6390
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6393
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuProcessUtilizationInfo_v1.__eq__.
 * Casts self to the concrete extension-type struct, forwards `other`
 * unchanged, and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessUtilizationInfo_v1.__eq__ (pyx line 6393).
 * Short-circuit chain: returns False if `other` is not a
 * VgpuProcessUtilizationInfo_v1, or the sizes differ, or the dtypes differ;
 * otherwise returns bool((self._data == other._data).all()), i.e. elementwise
 * equality of the backing recarrays reduced with .all().
 * NOTE(review): for non-matching types the pyx source returns False rather
 * than NotImplemented, so reflected comparison is never attempted. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":6394
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Cache self._data in a local to avoid repeated struct field loads. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6395
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Condition 1: type check (isinstance against the extension type). */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 2: self_data.size != other._data.size. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 3: self_data.dtype != other._data.dtype. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 6395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6396
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6395
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":6397
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Elementwise compare the recarrays, reduce with .all(), coerce to bool. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6397, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6397, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6397, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Double negation normalizes the truth value to exactly 0 or 1 for bool(). */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6393
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuProcessUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6399
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def process_name(self):
 *         """~_numpy.int8: (array of length 64).Name of process running within the vGPU VM."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing getter slot for VgpuProcessUtilizationInfo_v1.process_name.
 * Casts the generic PyObject* self to the extension-type struct and delegates
 * to the pf implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are names consumed by the macro expansion in this
   * build configuration; no keyword arguments are actually used here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements the Python property body:
 *     return self._data.process_name
 * A plain attribute read on the backing `_data` object (presumably a numpy
 * record array -- see the .pyx docstring); returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6402
 *     def process_name(self):
 *         """~_numpy.int8: (array of length 64).Name of process running within the vGPU VM."""
 *         return self._data.process_name             # <<<<<<<<<<<<<<
 * 
 *     @process_name.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_process_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6402, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6399
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def process_name(self):
 *         """~_numpy.int8: (array of length 64).Name of process running within the vGPU VM."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.process_name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6404
 *         return self._data.process_name
 * 
 *     @process_name.setter             # <<<<<<<<<<<<<<
 *     def process_name(self, val):
 *         self._data.process_name = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing setter slot for VgpuProcessUtilizationInfo_v1.process_name.
 * Casts self and delegates to the pf implementation; returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements `self._data.process_name = val`: a single attribute store on the
 * backing `_data` object. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6406
 *     @process_name.setter
 *     def process_name(self, val):
 *         self._data.process_name = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_process_name, __pyx_v_val) < (0)) __PYX_ERR(0, 6406, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6404
 *         return self._data.process_name
 * 
 *     @process_name.setter             # <<<<<<<<<<<<<<
 *     def process_name(self, val):
 *         self._data.process_name = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.process_name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6408
 *         self._data.process_name = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing getter slot for VgpuProcessUtilizationInfo_v1.time_stamp.
 * Casts self to the extension-type struct and delegates to the pf impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements the Python property body:
 *     if self._data.size == 1: return int(self._data.time_stamp[0])
 *     return self._data.time_stamp
 * i.e. a single-record struct unwraps to a Python int; otherwise the
 * underlying field object is returned as-is. New reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6411
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6411, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6411, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6412
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_stamp
 * 
 */
    /* Scalar path: fetch element 0 of the field, then coerce via int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6412, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6412, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6412, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6411
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
 */
  }

  /* "cuda/bindings/_nvml.pyx":6413
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp             # <<<<<<<<<<<<<<
 * 
 *     @time_stamp.setter
 */
  /* Array path: return the field object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6408
 *         self._data.process_name = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6415
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing setter slot for VgpuProcessUtilizationInfo_v1.time_stamp.
 * Casts self and delegates; 0 on success, -1 with exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements `self._data.time_stamp = val`: a single attribute store on the
 * backing `_data` object. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6417
 *     @time_stamp.setter
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp, __pyx_v_val) < (0)) __PYX_ERR(0, 6417, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6415
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6419
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing getter slot for VgpuProcessUtilizationInfo_v1.vgpu_instance.
 * Casts self to the extension-type struct and delegates to the pf impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements the Python property body:
 *     if self._data.size == 1: return int(self._data.vgpu_instance[0])
 *     return self._data.vgpu_instance
 * Single-record struct unwraps to a Python int; otherwise the field object
 * is returned as-is. New reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6422
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6422, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6423
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])             # <<<<<<<<<<<<<<
 *         return self._data.vgpu_instance
 * 
 */
    /* Scalar path: fetch element 0 of the field, then coerce via int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6423, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6423, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6423, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6422
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
 */
  }

  /* "cuda/bindings/_nvml.pyx":6424
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_instance.setter
 */
  /* Array path: return the field object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6424, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6419
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.vgpu_instance.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6426
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing setter slot for VgpuProcessUtilizationInfo_v1.vgpu_instance.
 * Casts self and delegates; 0 on success, -1 with exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements `self._data.vgpu_instance = val`: a single attribute store on
 * the backing `_data` object. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6428
 *     @vgpu_instance.setter
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance, __pyx_v_val) < (0)) __PYX_ERR(0, 6428, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6426
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.vgpu_instance.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6430
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process running within the vGPU VM."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing getter slot for VgpuProcessUtilizationInfo_v1.pid.
 * Casts self to the extension-type struct and delegates to the pf impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements the Python property body:
 *     if self._data.size == 1: return int(self._data.pid[0])
 *     return self._data.pid
 * Single-record struct unwraps to a Python int; otherwise the field object
 * is returned as-is. New reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6433
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process running within the vGPU VM."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6433, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6434
 *         """Union[~_numpy.uint32, int]: PID of process running within the vGPU VM."""
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
 */
    /* Scalar path: fetch element 0 of the field, then coerce via int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6434, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6434, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6434, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6433
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process running within the vGPU VM."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
 */
  }

  /* "cuda/bindings/_nvml.pyx":6435
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
 */
  /* Array path: return the field object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6430
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: PID of process running within the vGPU VM."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6437
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing setter slot for VgpuProcessUtilizationInfo_v1.pid.
 * Casts self and delegates; 0 on success, -1 with exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements `self._data.pid = val`: a single attribute store on the backing
 * `_data` object. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6439
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 6439, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6437
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6441
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing getter slot for VgpuProcessUtilizationInfo_v1.sm_util.
 * Casts self to the extension-type struct and delegates to the pf impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements the Python property body:
 *     if self._data.size == 1: return int(self._data.sm_util[0])
 *     return self._data.sm_util
 * Single-record struct unwraps to a Python int; otherwise the field object
 * is returned as-is. New reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6444
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6444, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6445
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.sm_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.sm_util
 * 
 */
    /* Scalar path: fetch element 0 of the field, then coerce via int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6444
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":6446
 *         if self._data.size == 1:
 *             return int(self._data.sm_util[0])
 *         return self._data.sm_util             # <<<<<<<<<<<<<<
 * 
 *     @sm_util.setter
 */
  /* Array path: return the field object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6441
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.sm_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6448
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing setter slot for VgpuProcessUtilizationInfo_v1.sm_util.
 * Casts self and delegates; 0 on success, -1 with exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * Implements `self._data.sm_util = val`: a single attribute store on the
 * backing `_data` object. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6450
 *     @sm_util.setter
 *     def sm_util(self, val):
 *         self._data.sm_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util, __pyx_v_val) < (0)) __PYX_ERR(0, 6450, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6448
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.sm_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6452
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
*/

/* Python wrapper */
/* NOTE(review): Cython-generated -- do not hand-edit; regenerate from _nvml.pyx.
 * CPython-facing getter slot for VgpuProcessUtilizationInfo_v1.mem_util.
 * Casts self to the extension-type struct and delegates to the pf impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `mem_util` getter: if `self._data.size == 1`, return the scalar
 * `int(self._data.mem_util[0])`; otherwise return the `mem_util` attribute
 * object unchanged. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6455
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util
 */
  /* Fetch self._data.size and compare it against the Python int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6455, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6456
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.mem_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.mem_util
 * 
 */
    /* Scalar path: mem_util[0] coerced through int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6456, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6456, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6456, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6455
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":6457
 *         if self._data.size == 1:
 *             return int(self._data.mem_util[0])
 *         return self._data.mem_util             # <<<<<<<<<<<<<<
 * 
 *     @mem_util.setter
 */
  /* Non-scalar path: return the mem_util attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6452
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."""
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback frame, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.mem_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6459
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `mem_util` property setter:
 * casts the receiver and delegates to the impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `mem_util` setter: performs `self._data.mem_util = val` via a
 * Python-level setattr. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6461
 *     @mem_util.setter
 *     def mem_util(self, val):
 *         self._data.mem_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* self._data.mem_util = val; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util, __pyx_v_val) < (0)) __PYX_ERR(0, 6461, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6459
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: append this frame to the active traceback, then return -1 */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.mem_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6463
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `enc_util` property getter:
 * casts the receiver and delegates to the impl. Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `enc_util` getter: if `self._data.size == 1`, return the scalar
 * `int(self._data.enc_util[0])`; otherwise return the `enc_util` attribute
 * object unchanged. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6466
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util
 */
  /* Fetch self._data.size and compare it against the Python int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6466, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6466, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6467
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.enc_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.enc_util
 * 
 */
    /* Scalar path: enc_util[0] coerced through int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6467, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6467, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6467, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6466
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":6468
 *         if self._data.size == 1:
 *             return int(self._data.enc_util[0])
 *         return self._data.enc_util             # <<<<<<<<<<<<<<
 * 
 *     @enc_util.setter
 */
  /* Non-scalar path: return the enc_util attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6468, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6463
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """Union[~_numpy.uint32, int]: Encoder Util Value."""
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback frame, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.enc_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6470
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `enc_util` property setter:
 * casts the receiver and delegates to the impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `enc_util` setter: performs `self._data.enc_util = val` via a
 * Python-level setattr. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6472
 *     @enc_util.setter
 *     def enc_util(self, val):
 *         self._data.enc_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* self._data.enc_util = val; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util, __pyx_v_val) < (0)) __PYX_ERR(0, 6472, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6470
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: append this frame to the active traceback, then return -1 */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.enc_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6474
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `dec_util` property getter:
 * casts the receiver and delegates to the impl. Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `dec_util` getter: if `self._data.size == 1`, return the scalar
 * `int(self._data.dec_util[0])`; otherwise return the `dec_util` attribute
 * object unchanged. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6477
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util
 */
  /* Fetch self._data.size and compare it against the Python int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6477, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6477, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6478
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.dec_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.dec_util
 * 
 */
    /* Scalar path: dec_util[0] coerced through int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6478, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6478, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6478, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6477
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":6479
 *         if self._data.size == 1:
 *             return int(self._data.dec_util[0])
 *         return self._data.dec_util             # <<<<<<<<<<<<<<
 * 
 *     @dec_util.setter
 */
  /* Non-scalar path: return the dec_util attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6474
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """Union[~_numpy.uint32, int]: Decoder Util Value."""
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback frame, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.dec_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6481
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `dec_util` property setter:
 * casts the receiver and delegates to the impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `dec_util` setter: performs `self._data.dec_util = val` via a
 * Python-level setattr. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6483
 *     @dec_util.setter
 *     def dec_util(self, val):
 *         self._data.dec_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* self._data.dec_util = val; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util, __pyx_v_val) < (0)) __PYX_ERR(0, 6483, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6481
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: append this frame to the active traceback, then return -1 */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.dec_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6485
 *         self._data.dec_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `jpg_util` property getter:
 * casts the receiver and delegates to the impl. Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `jpg_util` getter: if `self._data.size == 1`, return the scalar
 * `int(self._data.jpg_util[0])`; otherwise return the `jpg_util` attribute
 * object unchanged. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6488
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.jpg_util[0])
 *         return self._data.jpg_util
 */
  /* Fetch self._data.size and compare it against the Python int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6488, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6489
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.jpg_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.jpg_util
 * 
 */
    /* Scalar path: jpg_util[0] coerced through int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6489, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6489, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6489, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6488
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.jpg_util[0])
 *         return self._data.jpg_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":6490
 *         if self._data.size == 1:
 *             return int(self._data.jpg_util[0])
 *         return self._data.jpg_util             # <<<<<<<<<<<<<<
 * 
 *     @jpg_util.setter
 */
  /* Non-scalar path: return the jpg_util attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6490, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6485
 *         self._data.dec_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpg_util(self):
 *         """Union[~_numpy.uint32, int]: Jpeg Util Value."""
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback frame, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.jpg_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6492
 *         return self._data.jpg_util
 * 
 *     @jpg_util.setter             # <<<<<<<<<<<<<<
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `jpg_util` property setter:
 * casts the receiver and delegates to the impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `jpg_util` setter: performs `self._data.jpg_util = val` via a
 * Python-level setattr. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6494
 *     @jpg_util.setter
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* self._data.jpg_util = val; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util, __pyx_v_val) < (0)) __PYX_ERR(0, 6494, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6492
 *         return self._data.jpg_util
 * 
 *     @jpg_util.setter             # <<<<<<<<<<<<<<
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: append this frame to the active traceback, then return -1 */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.jpg_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6496
 *         self._data.jpg_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `ofa_util` property getter:
 * casts the receiver and delegates to the impl. Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `ofa_util` getter: if `self._data.size == 1`, return the scalar
 * `int(self._data.ofa_util[0])`; otherwise return the `ofa_util` attribute
 * object unchanged. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6499
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.ofa_util[0])
 *         return self._data.ofa_util
 */
  /* Fetch self._data.size and compare it against the Python int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6499, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6499, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6500
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 *         if self._data.size == 1:
 *             return int(self._data.ofa_util[0])             # <<<<<<<<<<<<<<
 *         return self._data.ofa_util
 * 
 */
    /* Scalar path: ofa_util[0] coerced through int(). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6500, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6500, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6500, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6499
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.ofa_util[0])
 *         return self._data.ofa_util
 */
  }

  /* "cuda/bindings/_nvml.pyx":6501
 *         if self._data.size == 1:
 *             return int(self._data.ofa_util[0])
 *         return self._data.ofa_util             # <<<<<<<<<<<<<<
 * 
 *     @ofa_util.setter
 */
  /* Non-scalar path: return the ofa_util attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6496
 *         self._data.jpg_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_util(self):
 *         """Union[~_numpy.uint32, int]: Ofa Util Value."""
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback frame, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.ofa_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6503
 *         return self._data.ofa_util
 * 
 *     @ofa_util.setter             # <<<<<<<<<<<<<<
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for the `ofa_util` property setter:
 * casts the receiver and delegates to the impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ofa_util.__set__`: performs the Python-level attribute
 * assignment `self._data.ofa_util = val` on the wrapped NumPy recarray held
 * in self->_data. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6505
 *     @ofa_util.setter
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util, __pyx_v_val) < (0)) __PYX_ERR(0, 6505, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6503
 *         return self._data.ofa_util
 * 
 *     @ofa_util.setter             # <<<<<<<<<<<<<<
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.ofa_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6507
 *         self._data.ofa_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* mp_subscript slot entry point for VgpuProcessUtilizationInfo_v1.__getitem__:
 * casts `self` to the extension-type struct and delegates to the
 * implementation below. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessUtilizationInfo_v1.__getitem__ (the original
 * .pyx source appears in the interleaved comments below). Behavior:
 *  - for an int key: bounds-check against self._data.size (raising IndexError
 *    for key >= size or key <= -(size+1)), normalize a negative index, and
 *    return VgpuProcessUtilizationInfo_v1.from_data(self._data[key:key+1]);
 *  - otherwise: index the underlying recarray directly; if the result is a
 *    numpy.recarray whose dtype equals vgpu_process_utilization_info_v1_dtype,
 *    re-wrap it via from_data, else return it as-is.
 * Returns a new reference, or NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":6510
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6511
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 6511, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":6512
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6512, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 6512, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":6513
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Short-circuit `or`: only evaluate the negative-bound test when the
       positive-bound test is false. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6514
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6514, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 6514, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6513
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":6515
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":6516
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":6515
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":6517
 *             if key_ < 0:
 *                 key_ += size
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_process_utilization_info_v1_dtype:
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6517, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6517, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6510
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* Non-int key (e.g. slice or field name): fall through to plain indexing of
     the wrapped recarray. */
  /* "cuda/bindings/_nvml.pyx":6518
 *                 key_ += size
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_process_utilization_info_v1_dtype:
 *             return VgpuProcessUtilizationInfo_v1.from_data(out)
 */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":6519
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             return VgpuProcessUtilizationInfo_v1.from_data(out)
 *         return out
 */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_vgpu_process_utilization_info_v1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 6519, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6520
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_process_utilization_info_v1_dtype:
 *             return VgpuProcessUtilizationInfo_v1.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6520, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6519
 *             return VgpuProcessUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             return VgpuProcessUtilizationInfo_v1.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":6521
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_process_utilization_info_v1_dtype:
 *             return VgpuProcessUtilizationInfo_v1.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6507
 *         self._data.ofa_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6523
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* mp_ass_subscript slot entry point for
 * VgpuProcessUtilizationInfo_v1.__setitem__: casts `self` to the
 * extension-type struct and delegates to the implementation below.
 * Returns 0 on success, -1 (with an exception set) on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__: forwards the assignment
 * `self._data[key] = val` to the wrapped NumPy recarray unchanged
 * (no bounds normalization, unlike __getitem__ above).
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":6524
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 6524, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6523
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6526
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for the staticmethod
 * VgpuProcessUtilizationInfo_v1.from_data(data): declares the method def and
 * docstring, parses exactly one positional-or-keyword argument ("data"), then
 * delegates to the implementation function. Returns a new reference, or NULL
 * with an exception set on bad arguments or downstream failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_14from_data, "VgpuProcessUtilizationInfo_v1.from_data(data)\n\nCreate an VgpuProcessUtilizationInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `vgpu_process_utilization_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): the parenthesization `unlikely(__pyx_kwds_len) < 0` looks
       off — compare `unlikely(__pyx_nargs < 0)` above. If `unlikely` maps its
       argument to 0/1 this comparison can never be true; confirm against this
       Cython version's definition of `unlikely` before assuming the negative
       length is checked. (Generated code — fix belongs upstream in Cython.) */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 6526, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6526, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 6526, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 6526, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6526, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 6526, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the staticmethod
 * VgpuProcessUtilizationInfo_v1.from_data(data): allocates a fresh instance
 * via the type's tp_new, validates that `data` is a NumPy ndarray
 * (TypeError), is 1-D (ValueError), and has dtype
 * vgpu_process_utilization_info_v1_dtype (ValueError), then stores
 * `data.view(numpy.recarray)` into obj._data and returns the new wrapper.
 * Returns a new reference, or NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":6533
 *             data (_numpy.ndarray): a 1D array of dtype `vgpu_process_utilization_info_v1_dtype` holding the data.
 *         """
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6533, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6534
 *         """
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6534, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6534, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 6534, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":6535
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6535, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6535, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6534
 *         """
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":6536
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6536, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 6536, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":6537
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype vgpu_process_utilization_info_v1_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6537, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6537, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6536
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":6538
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype vgpu_process_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_process_utilization_info_v1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6538, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 6538, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":6539
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype vgpu_process_utilization_info_v1_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_vgpu};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6539, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 6539, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6538
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype vgpu_process_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* Store a recarray view of the validated array; the view shares memory with
     `data` (no copy), so the wrapper aliases the caller's buffer. */
  /* "cuda/bindings/_nvml.pyx":6540
 *         if data.dtype != vgpu_process_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype vgpu_process_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6540, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6542
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6526
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6544
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * Fastcall wrapper for VgpuProcessUtilizationInfo_v1.from_ptr(ptr, size=1, readonly=False).
 * Unpacks positional and keyword arguments into values[], converts them to C
 * values (intptr_t ptr, size_t size, int readonly), applies defaults for the
 * optional arguments, and forwards to the C-level implementation
 * __pyx_pf_..._16from_ptr.  Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_16from_ptr, "VgpuProcessUtilizationInfo_v1.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an VgpuProcessUtilizationInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds borrowed-or-owned staged argument objects; NULL = not supplied. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall build: args arrive as a tuple; recover nargs from it. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: apply unlikely() to the whole comparison, not to the value.
     * Under the GCC-style `#define unlikely(x) __builtin_expect(!!(x), 0)` the
     * previous form `unlikely(__pyx_kwds_len) < 0` normalizes the value to 0/1
     * before the comparison, so the condition was always false and this error
     * path was dead.  `unlikely(__pyx_kwds_len < 0)` is correct under both the
     * GCC and the pass-through definitions of unlikely() (and matches the form
     * generated for __reduce_cython__ below). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6544, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: stage the positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6544, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6544, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6544, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 6544, __pyx_L3_error)
      /* After merging, the one required argument (ptr) must be present. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 6544, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call: 1 to 3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6544, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6544, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6544, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert Python objects to the C argument types; apply defaults when absent. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6545, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 6545, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6545, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":6545
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 6544, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release staged references and propagate the exception. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":6544
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * C-level implementation of VgpuProcessUtilizationInfo_v1.from_ptr.
 * Validates ptr != 0, allocates a new VgpuProcessUtilizationInfo_v1, wraps the
 * raw memory at `ptr` (size * sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) bytes)
 * in a memoryview, builds a NumPy ndarray over that buffer with the module's
 * vgpu_process_utilization_info_v1_dtype, stores its recarray view on the new
 * object, and returns it.  Returns NULL with an exception set on failure.
 * NOTE: the memory at `ptr` is NOT copied; the caller must keep it alive for
 * the lifetime of the returned object.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6553
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
*/
  /* Guard: a null pointer cannot be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6554
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6554, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6554, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6553
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":6555
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the wrapper instance directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6555, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6556
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag)
*/
  /* Select the buffer access flag: PyBUF_READ when readonly, PyBUF_WRITE otherwise. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6556, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6556, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6558
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  /* Convert the flag object back to a C int for PyMemoryView_FromMemory. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6558, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":6557
 *         cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype)
*/
  /* Zero-copy wrap of the caller's memory: size structs starting at ptr. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlVgpuProcessUtilizationInfo_v1_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6559
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* Call numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype)
   * via a vectorcall keyword builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_vgpu_process_utilization_info_v1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If ndarray resolved to a bound method, unpack self/function for vectorcall. */
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6559, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 6559, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 6559, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6559, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6560
 *             <char*>ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray): fastcall of the bound 'view' method. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6560, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Swap the recarray view into obj->_data, releasing the previous value. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6562
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6544
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: drop any live temporaries, record traceback, return NULL;
   * falls through into the common cleanup below (note: no goto before L0). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6360
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Getter wrapper for the read-only `_data` attribute (descriptor __get__).
 * Simply forwards `self` to the C-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * this line compiles only because __Pyx_KwValues_VARARGS is expected to be a
   * macro that discards its arguments — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level getter for `_data`: returns a new reference to self->_data
 * (the recarray view stored by from_ptr/from_data). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* INCREF before returning: the caller receives an owned reference. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Fastcall wrapper for the auto-generated __reduce_cython__ (pickle support).
 * Takes no arguments beyond self: rejects any positional or keyword argument,
 * then forwards to the C-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_18__reduce_cython__, "VgpuProcessUtilizationInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall build: recover nargs from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * C-level implementation of __reduce_cython__ (auto-generated pickle protocol).
 * Builds state = (self._data,) (plus the instance __dict__ when non-empty) and
 * returns either
 *   (__pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state)
 * when __setstate__ must be used, or
 *   (__pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state))
 * otherwise.  0xa75e18a (175497610) is the pickle checksum for this type layout.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  /* state = (self._data,) */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  /* Short-circuit `_dict is not None and _dict`. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    /* Instance has a non-empty __dict__: append it to state. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* NOTE(review): use_setstate is taken from the truthiness of a cached
     * constant tuple (always a non-empty tuple, hence always true), so the
     * setstate form below is always chosen on this branch.  This looks like a
     * generated-code quirk of this Cython version — confirm upstream. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
*/
    /* 3-tuple form: state is passed separately for __setstate__. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuProcessUtiliz); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(self, __pyx_state)
*/
  /*else*/ {
    /* 2-tuple form: state is embedded in the unpickle function's arguments. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuProcessUtiliz); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  /* Error path: drop temporaries and record traceback; falls into common cleanup. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(self, __pyx_state)
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_20__setstate_cython__, "VgpuProcessUtilizationInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_20__setstate_cython__};
/* Argument-unpacking wrapper for VgpuProcessUtilizationInfo_v1.__setstate_cython__:
 * accepts exactly one argument, positionally or via the keyword '__pyx_state',
 * and forwards it to the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the negative-result check must apply to __pyx_kwds_len itself.
     * The previous form `unlikely(__pyx_kwds_len) < 0` compared the 0/1 result
     * of unlikely() against 0 and could never be true, silently swallowing an
     * error return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      /* Verify every required argument slot was filled by either a positional
       * or a keyword argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessUtilizationInfo_v1.__setstate_cython__:
 * validates that __pyx_state is a tuple (and not None), then delegates the
 * actual field restoration to the generated
 * __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state helper.
 * Returns None on success, NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  /* Take an owned reference to the state object, then enforce the declared
   * C-level argument type: exact tuple, 'not None'. */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6565
 * 
 * 
 * cdef _get_vgpu_runtime_state_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuRuntimeState_v1_t pod = nvmlVgpuRuntimeState_v1_t()
 *     return _numpy.dtype({
*/

/* Builds the NumPy structured dtype describing nvmlVgpuRuntimeState_v1_t:
 * field names ['version', 'size_'], formats [uint32, uint64], byte offsets
 * computed from a stack instance's member addresses, and itemsize from
 * sizeof().  Returns the new dtype object, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_runtime_state_v1_dtype_offsets(void) {
  nvmlVgpuRuntimeState_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuRuntimeState_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_runtime_state_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6566
 * 
 * cdef _get_vgpu_runtime_state_v1_dtype_offsets():
 *     cdef nvmlVgpuRuntimeState_v1_t pod = nvmlVgpuRuntimeState_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'size_'],
 */
  /* BUG FIX: __pyx_t_1 was copied into __pyx_v_pod without ever being
   * assigned, reading an indeterminate value (undefined behavior).  Restore
   * the value-initialization that the pyx source
   * `pod = nvmlVgpuRuntimeState_v1_t()` calls for.  Only member ADDRESSES of
   * pod are used below, but the copy itself must not read garbage. */
  __pyx_t_1 = nvmlVgpuRuntimeState_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6567
 * cdef _get_vgpu_runtime_state_v1_dtype_offsets():
 *     cdef nvmlVgpuRuntimeState_v1_t pod = nvmlVgpuRuntimeState_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 */
  /* Look up _numpy.dtype once; the constructed dict is passed as its single
   * argument at the end of this function. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6568
 *     cdef nvmlVgpuRuntimeState_v1_t pod = nvmlVgpuRuntimeState_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'size_'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 */
  /* 'names': the Python-visible field names ('size_' avoids shadowing). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 6568, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_size_2);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_size_2);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_size_2) != (0)) __PYX_ERR(0, 6568, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6569
 *     return _numpy.dtype({
 *         'names': ['version', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': per-field scalar dtypes matching the C member types. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6569, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6571
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 *         ],
 */
  /* 'offsets': compiler-truth byte offsets, derived from member addresses of
   * the stack instance (robust against struct padding/packing). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6572
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuRuntimeState_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.size)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":6570
 *         'names': ['version', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6570, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6570, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6570, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6574
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuRuntimeState_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': total struct size, including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuRuntimeState_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall _numpy.dtype(spec_dict), unpacking a bound method if needed. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6565
 * 
 * 
 * cdef _get_vgpu_runtime_state_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuRuntimeState_v1_t pod = nvmlVgpuRuntimeState_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_runtime_state_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6591
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuRuntimeState_v1_t *>calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Argument-checking wrapper for VgpuRuntimeState_v1.__init__: rejects any
 * positional or keyword arguments, then calls the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: any positional or keyword argument is a
   * TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__init__: zero-allocates the wrapped
 * nvmlVgpuRuntimeState_v1_t via calloc, raises MemoryError on allocation
 * failure, and marks the buffer as owned (so __dealloc__ frees it) and
 * writable.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6592
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuRuntimeState_v1_t *>calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuRuntimeState_v1")
 */
  /* calloc (not malloc) so every struct field starts zeroed. */
  __pyx_v_self->_ptr = ((nvmlVgpuRuntimeState_v1_t *)calloc(1, (sizeof(nvmlVgpuRuntimeState_v1_t))));

  /* "cuda/bindings/_nvml.pyx":6593
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuRuntimeState_v1_t *>calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6594
 *         self._ptr = <nvmlVgpuRuntimeState_v1_t *>calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuRuntimeState_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating VgpuRuntimeState_v1"). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6594, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuRuntimeStat};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6594, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6594, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6593
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuRuntimeState_v1_t *>calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":6595
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the _owner attribute to None with proper refcount bookkeeping. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":6596
 *             raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":6597
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":6591
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuRuntimeState_v1_t *>calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6599
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuRuntimeState_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for VgpuRuntimeState_v1.__dealloc__: casts self
 * and calls the implementation.  Never raises. */
static void __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * __Pyx_KwValues_VARARGS is presumably a macro that discards its arguments
   * — confirm against the generated utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuRuntimeState_v1.__dealloc__ (pyx lines 6599-6604):
 * free the wrapped nvmlVgpuRuntimeState_v1_t buffer, but only when this
 * object owns it.  The pointer field is cleared before free() so the object
 * never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  nvmlVgpuRuntimeState_v1_t *__pyx_v_ptr;

  /* Guard clauses: nothing to do for borrowed or already-released buffers. */
  if (!__pyx_v_self->_owned) {
    return;
  }
  if (__pyx_v_self->_ptr == NULL) {
    return;
  }

  /* Detach first, then release, so _ptr is never left dangling. */
  __pyx_v_ptr = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(__pyx_v_ptr);
}

/* "cuda/bindings/_nvml.pyx":6606
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for VgpuRuntimeState_v1.__repr__: casts self and delegates
 * to the implementation.  Returns a new unicode object or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; this relies
   * on __Pyx_KwValues_VARARGS being a macro that ignores its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__repr__: builds the f-string
 * f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>" by joining
 * five pieces (literal "<", module name, literal infix, hex(id(self)),
 * literal ">") with precomputed total length and max char width. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6607
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ as a plain string. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five segments; 1 * 2 covers "<" and ">", 31 is the length of
   * ".VgpuRuntimeState_v1 object at ". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuRuntimeState_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 31 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6606
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6609
 *         return f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for VgpuRuntimeState_v1.ptr: casts self and
 * delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS ignoring its (undeclared
   * here) arguments — same pattern as the other no-arg wrappers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuRuntimeState_v1.ptr property: returns the raw
 * pointer address of the wrapped struct as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6612
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int (intptr_t fits in Py_ssize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6609
 *         return f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6614
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for VgpuRuntimeState_v1 (pyx lines 6614-6615): returns
 * the wrapped struct pointer as an integer address, without boxing. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":6617
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper for VgpuRuntimeState_v1.__int__: casts self and delegates
 * to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS ignoring its (undeclared
   * here) arguments — same pattern as the other no-arg wrappers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__int__: int(obj) yields the raw
 * pointer address of the wrapped struct, same value as the .ptr property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":6618
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int (intptr_t fits in Py_ssize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6617
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6620
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuRuntimeState_v1 other_
 *         if not isinstance(other, VgpuRuntimeState_v1):
*/

/* Python wrapper */
/* Python wrapper for VgpuRuntimeState_v1.__eq__.
 * Casts the generic PyObject* receiver to the extension type and forwards
 * both arguments to the implementation function (__pyx_pf_..._8__eq__). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__eq__.
 * Returns Py_False if `other` is not a VgpuRuntimeState_v1 instance;
 * otherwise compares the pointed-to nvmlVgpuRuntimeState_v1_t structs
 * byte-for-byte with memcmp and returns the resulting bool.
 * NOTE(review): memcmp dereferences both _ptr values — assumes neither
 * side holds a NULL _ptr; confirm against the type's constructors. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":6622
 *     def __eq__(self, other):
 *         cdef VgpuRuntimeState_v1 other_
 *         if not isinstance(other, VgpuRuntimeState_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  /* isinstance check compiled to a direct C type check. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6623
 *         cdef VgpuRuntimeState_v1 other_
 *         if not isinstance(other, VgpuRuntimeState_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuRuntimeState_v1_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6622
 *     def __eq__(self, other):
 *         cdef VgpuRuntimeState_v1 other_
 *         if not isinstance(other, VgpuRuntimeState_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":6624
 *         if not isinstance(other, VgpuRuntimeState_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuRuntimeState_v1_t)) == 0)
 * 
*/
  /* Assignment to the typed local re-verifies the type (None also passes
   * the TypeTest, per Cython's default semantics for typed assignment). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1))))) __PYX_ERR(0, 6624, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":6625
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuRuntimeState_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct comparison; equality means identical contents. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuRuntimeState_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6620
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuRuntimeState_v1 other_
 *         if not isinstance(other, VgpuRuntimeState_v1):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6627
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuRuntimeState_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
*/

/* Python wrapper */
/* Python wrapper for VgpuRuntimeState_v1.__setitem__.
 * Casts the receiver to the extension type and forwards key/val to the
 * implementation function (__pyx_pf_..._10__setitem__). Returns 0 on
 * success, -1 on error (standard sq_ass_item/mp_ass_subscript contract). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__setitem__.
 * Two behaviors, per the .pyx source shown in the interleaved comments:
 *   - obj[0] = ndarray: allocates a fresh nvmlVgpuRuntimeState_v1_t,
 *     copies the array's buffer into it, marks the object as owning the
 *     allocation, and records read-only status from the array's flags.
 *   - any other key: falls back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE(review): the malloc path overwrites self->_ptr without freeing a
 * previous owned allocation — potential leak on repeated obj[0] = arr;
 * confirm against the type's dealloc/ownership logic elsewhere in file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":6628
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit `and`: key == 0 first, then isinstance(val, numpy.ndarray). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6628, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6629
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
*/
    __pyx_v_self->_ptr = ((nvmlVgpuRuntimeState_v1_t *)malloc((sizeof(nvmlVgpuRuntimeState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6630
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6631
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t))
 *             self._owner = None
*/
      /* Build and raise MemoryError(...) via the generic fastcall path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6631, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuRuntimeStat};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6631, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 6631, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6630
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":6632
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* val.ctypes.data is the array's base address as a Python int; the
     * sizeof copy assumes the array buffer is at least that large —
     * presumably enforced by callers using the matching dtype. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6632, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6632, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6632, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuRuntimeState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6633
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6634
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6635
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Mirror the source array's writeability onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6635, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6635, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 6635, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":6628
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":6637
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 6637, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":6627
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuRuntimeState_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6639
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* Python wrapper for the VgpuRuntimeState_v1.version property getter.
 * Casts the receiver and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for VgpuRuntimeState_v1.version.
 * Reads self->_ptr[0].version (an unsigned int field) and returns it
 * boxed as a Python int. Assumes _ptr is non-NULL — TODO confirm the
 * type's constructors guarantee this before property access. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6642
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6639
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6644
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python wrapper for the VgpuRuntimeState_v1.version property setter.
 * Casts the receiver and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for VgpuRuntimeState_v1.version.
 * Raises ValueError if the instance is flagged read-only; otherwise
 * converts `val` to unsigned int and stores it in _ptr[0].version.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6646
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6647
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
*/
    /* Construct ValueError(message) via fastcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuRuntimeState_v1_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6647, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6647, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6646
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":6648
 *         if self._readonly:
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Conversion raises OverflowError/TypeError for out-of-range or
   * non-integer values; the error check distinguishes a real failure
   * from a legitimate result equal to (unsigned int)-1. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6648, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6644
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6650
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size_(self):
 *         """int: OUT: The runtime state size of the vGPU instance."""
*/

/* Python wrapper */
/* Python wrapper for the VgpuRuntimeState_v1.size_ property getter.
 * Casts the receiver and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for VgpuRuntimeState_v1.size_ (maps to the C field `size`).
 * Reads self->_ptr[0].size (an unsigned long long field) and returns it
 * boxed as a Python int. Assumes _ptr is non-NULL — TODO confirm. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6653
 *     def size_(self):
 *         """int: OUT: The runtime state size of the vGPU instance."""
 *         return self._ptr[0].size             # <<<<<<<<<<<<<<
 * 
 *     @size_.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6653, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6650
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size_(self):
 *         """int: OUT: The runtime state size of the vGPU instance."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.size_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6655
 *         return self._ptr[0].size
 * 
 *     @size_.setter             # <<<<<<<<<<<<<<
 *     def size_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python wrapper for the VgpuRuntimeState_v1.size_ property setter.
 * Casts the receiver and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for VgpuRuntimeState_v1.size_ (maps to the C field `size`).
 * Raises ValueError if the instance is flagged read-only; otherwise
 * converts `val` to unsigned long long and stores it in _ptr[0].size.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6657
 *     @size_.setter
 *     def size_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")
 *         self._ptr[0].size = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6658
 *     def size_(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].size = val
 * 
*/
    /* Construct ValueError(message) via fastcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuRuntimeState_v1_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6658, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6658, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6657
 *     @size_.setter
 *     def size_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")
 *         self._ptr[0].size = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":6659
 *         if self._readonly:
 *             raise ValueError("This VgpuRuntimeState_v1 instance is read-only")
 *         self._ptr[0].size = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* Conversion raises for out-of-range/non-integer input; the extra
   * PyErr_Occurred check disambiguates a legitimate ULLONG_MAX result. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 6659, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).size = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6655
 *         return self._ptr[0].size
 * 
 *     @size_.setter             # <<<<<<<<<<<<<<
 *     def size_(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.size_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6661
 *         self._ptr[0].size = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry for the static method
 * VgpuRuntimeState_v1.from_data(data). The wrapper uses METH_FASTCALL
 * when available, falling back to a tuple-based signature otherwise. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_12from_data, "VgpuRuntimeState_v1.from_data(data)\n\nCreate an VgpuRuntimeState_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_runtime_state_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_12from_data};
/* Python wrapper for VgpuRuntimeState_v1.from_data(data).
 * Parses exactly one positional-or-keyword argument named "data" and
 * forwards it to the implementation function. Returns a new reference,
 * or NULL with an exception set on argument-parsing failure.
 *
 * FIX(review): the keyword-count error check was written as
 *   if (unlikely(__pyx_kwds_len) < 0)
 * which expands to `__builtin_expect(!!(len), 0) < 0` — the !! collapses
 * the value to 0/1, so the `< 0` comparison is always false and the
 * error path for a failed __Pyx_NumKwargs_FASTCALL was unreachable.
 * The comparison now sits inside unlikely(), making the check live. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative length signals an error from the kwargs-count helper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6661, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6661, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 6661, __pyx_L3_error)
      /* Ensure the required argument was supplied by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 6661, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6661, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 6661, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method VgpuRuntimeState_v1.from_data(data)
 * (generated from cuda/bindings/_nvml.pyx:6661-6668).
 *
 * Looks up the module-level global `vgpu_runtime_state_v1_dtype` and delegates
 * to the C helper __from_data(), which wraps the given NumPy array in a new
 * VgpuRuntimeState_v1 instance.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":6668
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_runtime_state_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_runtime_state_v1_dtype", vgpu_runtime_state_v1_dtype, VgpuRuntimeState_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module globals; errors propagate to __pyx_L1_error. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_runtime_state_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6668, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_runtime_state_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6668, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6661
 *         self._ptr[0].size = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* Error path: drop any temporaries still alive and attach traceback info. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6670
 *         return __from_data(data, "vgpu_runtime_state_v1_dtype", vgpu_runtime_state_v1_dtype, VgpuRuntimeState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for VgpuRuntimeState_v1.from_ptr(ptr, readonly=False, owner=None).
 *
 * Accepts 1-3 positional arguments plus keywords via the fastcall protocol,
 * fills in the defaults (readonly=False, owner=None), converts `ptr` to
 * intptr_t and `readonly` to a C int, then calls the implementation function.
 *
 * BUGFIX: the keyword-count error check used to read
 *     if (unlikely(__pyx_kwds_len) < 0)
 * which compares the 0/1 result of unlikely() (i.e. __builtin_expect(!!(x),0))
 * against 0 and is therefore always false, silencing the negative-length
 * error path.  The comparison now happens inside unlikely(), matching the
 * intended semantics. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_14from_ptr, "VgpuRuntimeState_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuRuntimeState_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: compare inside unlikely() so a negative keyword count actually errors. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6670, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6670, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6670, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6670, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 6670, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":6671
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 6670, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6670, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6670, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6670, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr (Python int) to intptr_t; -1 with a pending exception signals failure. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6671, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6671, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 6670, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":6670
 *         return __from_data(data, "vgpu_runtime_state_v1_dtype", vgpu_runtime_state_v1_dtype, VgpuRuntimeState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.from_ptr(ptr, readonly, owner)
 * (generated from cuda/bindings/_nvml.pyx:6670-6694).
 *
 * Behavior as shown by the code below:
 *   - ptr == 0 raises ValueError("ptr must not be null (0)").
 *   - owner is None: malloc a fresh nvmlVgpuRuntimeState_v1_t, memcpy the
 *     struct pointed to by ptr into it, and mark obj._owned = True (the new
 *     object owns and must free the copy).
 *   - owner is not None: obj borrows ptr directly (no copy), stores a strong
 *     reference to owner to keep the backing memory alive, and marks
 *     obj._owned = False.
 * Returns the new instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6679
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6680
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6680, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6680, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6679
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":6681
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 */
  /* Allocate the instance directly via tp_new, bypassing Python-level __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6681, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6682
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6683
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 */
    __pyx_v_obj->_ptr = ((nvmlVgpuRuntimeState_v1_t *)malloc((sizeof(nvmlVgpuRuntimeState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6684
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuRuntimeState_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6685
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuRuntimeState_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name (module global may shadow the builtin). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6685, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuRuntimeStat};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6685, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 6685, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6684
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuRuntimeState_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6686
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuRuntimeState_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuRuntimeState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":6687
 *                 raise MemoryError("Error allocating VgpuRuntimeState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuRuntimeState_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6688
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuRuntimeState_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6682
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>malloc(sizeof(nvmlVgpuRuntimeState_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":6690
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuRuntimeState_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6691
 *         else:
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":6692
 *             obj._ptr = <nvmlVgpuRuntimeState_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":6693
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":6694
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6670
 *         return __from_data(data, "vgpu_runtime_state_v1_dtype", vgpu_runtime_state_v1_dtype, VgpuRuntimeState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for VgpuRuntimeState_v1.__reduce_cython__(self): takes no arguments
 * beyond self, so it only validates that neither positional args nor keywords
 * were supplied before dispatching to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_16__reduce_cython__, "VgpuRuntimeState_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Reject any positional or keyword arguments; __reduce_cython__ takes only self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__reduce_cython__: unconditionally
 * raises TypeError because the underlying C pointer (self._ptr) cannot be
 * pickled.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-unpacking wrapper for VgpuRuntimeState_v1.__setstate_cython__(self, __pyx_state).
 *
 * Unpacks exactly one positional or keyword argument (`__pyx_state`) via the
 * fastcall protocol and dispatches to the implementation (which always raises).
 *
 * BUGFIX: the keyword-count error check used to read
 *     if (unlikely(__pyx_kwds_len) < 0)
 * which compares the 0/1 result of unlikely() (i.e. __builtin_expect(!!(x),0))
 * against 0 and is therefore always false, silencing the negative-length
 * error path.  The comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_18__setstate_cython__, "VgpuRuntimeState_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: compare inside unlikely() so a negative keyword count actually errors. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuRuntimeState_v1.__setstate_cython__: unconditionally
 * raises TypeError because the underlying C pointer (self._ptr) cannot be
 * restored from pickled state.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuRuntimeState_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6697
 * 
 * 
 * cdef _get__py_anon_pod2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod2 pod = _anon_pod2()
 *     return _numpy.dtype({
*/

/* Build a numpy structured dtype describing the C layout of `_anon_pod2`:
 * field names ['avg_factor', 'timeslice'], both numpy.uint32, with byte
 * offsets computed from a local instance via pointer arithmetic (an
 * offsetof() equivalent) and itemsize = sizeof(_anon_pod2).
 * Returns the new dtype object, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod2_dtype_offsets(void) {
  _anon_pod2 __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  _anon_pod2 __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get__py_anon_pod2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6698
 * 
 * cdef _get__py_anon_pod2_dtype_offsets():
 *     cdef _anon_pod2 pod = _anon_pod2()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['avg_factor', 'timeslice'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so `pod`
   * holds indeterminate bytes.  It is only used below for taking member
   * addresses (offset computation), never read by value, so this appears
   * harmless — confirm against the Cython generator if regenerating. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6699
 * cdef _get__py_anon_pod2_dtype_offsets():
 *     cdef _anon_pod2 pod = _anon_pod2()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['avg_factor', 'timeslice'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up the `numpy.dtype` callable (held in __pyx_t_5). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6700
 *     cdef _anon_pod2 pod = _anon_pod2()
 *     return _numpy.dtype({
 *         'names': ['avg_factor', 'timeslice'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* The dtype spec dict (__pyx_t_4) gets four keys: names, formats,
   * offsets, itemsize.  First key: the field-name list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_avg_factor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_avg_factor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_avg_factor) != (0)) __PYX_ERR(0, 6700, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_timeslice);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_timeslice);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_timeslice) != (0)) __PYX_ERR(0, 6700, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6701
 *     return _numpy.dtype({
 *         'names': ['avg_factor', 'timeslice'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),
 */
  /* Second key: the per-field format list [numpy.uint32, numpy.uint32]. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6701, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6701, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6703
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 *         ],
 */
  /* Byte offset of the avgFactor member within the struct. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.avgFactor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6703, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6704
 *         'offsets': [
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(_anon_pod2),
 */
  /* Byte offset of the timeslice member within the struct. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeslice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":6702
 *         'names': ['avg_factor', 'timeslice'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 */
  /* Third key: the offsets list built from the two values above. */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6702, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6702, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6702, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6706
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(_anon_pod2),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* Fourth key: itemsize = total struct size, so the dtype spans the
   * whole C struct including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(_anon_pod2))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6706, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec).  If `dtype` was fetched as a bound method,
   * unpack it so the vectorcall below can pass `self` as the leading
   * argument (__pyx_t_9 becomes the argument-array start offset). */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6699, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6697
 * 
 * 
 * cdef _get__py_anon_pod2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod2 pod = _anon_pod2()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get__py_anon_pod2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6723
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod2 *>calloc(1, sizeof(_anon_pod2))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython tp_init slot wrapper for _py_anon_pod2.__init__.
 * The Cython signature takes no arguments, so any positional or keyword
 * input is rejected before delegating to the __pyx_pf_ implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size may fail; propagate as the slot's -1 error code. */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* No positional arguments are accepted. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* No keyword arguments are accepted either. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod2.__init__.
 * Allocates a zero-initialized _anon_pod2 on the C heap (calloc), raises
 * MemoryError if allocation fails, and initializes the wrapper state:
 * _owner = None, _owned = True (this object frees the struct in
 * __dealloc__), _readonly = False.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6724
 * 
 *     def __init__(self):
 *         self._ptr = <_anon_pod2 *>calloc(1, sizeof(_anon_pod2))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod2")
 */
  /* calloc zero-fills, so all struct fields start at 0. */
  __pyx_v_self->_ptr = ((_anon_pod2 *)calloc(1, (sizeof(_anon_pod2))));

  /* "cuda/bindings/_nvml.pyx":6725
 *     def __init__(self):
 *         self._ptr = <_anon_pod2 *>calloc(1, sizeof(_anon_pod2))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod2")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6726
 *         self._ptr = <_anon_pod2 *>calloc(1, sizeof(_anon_pod2))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating _py_anon_pod2") via
     * the generic vectorcall path (MemoryError is looked up by name, so
     * a module-level override would be honored). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6726, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6726, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6726, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6725
 *     def __init__(self):
 *         self._ptr = <_anon_pod2 *>calloc(1, sizeof(_anon_pod2))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod2")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":6727
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":6728
 *             raise MemoryError("Error allocating _py_anon_pod2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":6729
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":6723
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod2 *>calloc(1, sizeof(_anon_pod2))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6731
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod2 *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* CPython tp_dealloc-time wrapper for _py_anon_pod2.__dealloc__:
 * delegates straight to the __pyx_pf_ implementation.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this
 * function; this compiles only if __Pyx_KwValues_VARARGS expands without
 * evaluating its arguments in this configuration — confirm against the
 * Cython utility code if this file is regenerated. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of _py_anon_pod2.__dealloc__ (generated from
 * cuda/bindings/_nvml.pyx:6731).
 * Frees the heap-allocated _anon_pod2 only when this wrapper owns it
 * (_owned) and the pointer is non-NULL.  The pointer is stashed in a
 * local and cleared on the object *before* free() so the object never
 * exposes a dangling _ptr. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  /* if self._owned and self._ptr != NULL: */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* ptr = self._ptr; self._ptr = NULL; free(ptr) */
    _anon_pod2 *__pyx_v_doomed = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_doomed);
  }
}

/* "cuda/bindings/_nvml.pyx":6738
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod2 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython tp_repr slot wrapper for _py_anon_pod2.__repr__: delegates to
 * the __pyx_pf_ implementation.  Returns a new unicode object, or NULL
 * with an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; relies
 * on __Pyx_KwValues_VARARGS not evaluating its arguments — confirm
 * against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod2.__repr__.
 * Builds the f-string "<{__name__}._py_anon_pod2 object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * five fragments (two formatted values, three literal pieces) with
 * precomputed length and max-char hints for the unicode join.
 * Returns a new unicode object, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6739
 * 
 *     def __repr__(self):
 *         return f"<{__name__}._py_anon_pod2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module global __name__ as str (__pyx_t_2). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to str (__pyx_t_1). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + "._py_anon_pod2 object at " + hex-id + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_py_anon_pod2_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6738
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod2 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6741
 *         return f"<{__name__}._py_anon_pod2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for _py_anon_pod2.ptr: delegates to the
 * __pyx_pf_ implementation.  Returns a new int object, or NULL with an
 * exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; relies
 * on __Pyx_KwValues_VARARGS not evaluating its arguments — confirm
 * against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the _py_anon_pod2.ptr property getter.
 * Returns the raw _ptr address as a Python int.
 * NOTE(review): the intptr_t value goes through PyLong_FromSsize_t,
 * which assumes intptr_t and Py_ssize_t have the same width on the
 * target platform — true on common platforms, but confirm for exotic
 * targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6744
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6744, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6741
 *         return f"<{__name__}._py_anon_pod2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6746
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level helper for _py_anon_pod2._get_ptr (generated from
 * cuda/bindings/_nvml.pyx:6746): returns the wrapped struct's address
 * as an intptr_t.  Pure accessor — no Python objects, no error paths. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":6749
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython nb_int slot wrapper for _py_anon_pod2.__int__: delegates to
 * the __pyx_pf_ implementation.  Returns a new int object, or NULL with
 * an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; relies
 * on __Pyx_KwValues_VARARGS not evaluating its arguments — confirm
 * against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod2.__int__.
 * Converts the raw _ptr address to a Python int — same conversion as the
 * `ptr` property getter above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":6750
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int (assumes intptr_t fits Py_ssize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6750, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6749
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6752
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod2 other_
 *         if not isinstance(other, _py_anon_pod2):
*/

/* Python wrapper */
/* CPython __eq__ wrapper for _py_anon_pod2: delegates both operands to
 * the __pyx_pf_ implementation.  Returns a new bool object, or NULL with
 * an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; relies
 * on __Pyx_KwValues_VARARGS not evaluating its arguments — confirm
 * against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod2.__eq__.
 * Returns False when `other` is not a _py_anon_pod2; otherwise compares
 * the two wrapped structs bytewise with memcmp over sizeof(_anon_pod2).
 * NOTE(review): memcmp is called on both _ptr values unconditionally;
 * if either could be NULL (e.g. after __dealloc__ raced or a moved-out
 * instance) this would be undefined behavior — confirm the non-NULL
 * invariant holds for live instances. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":6754
 *     def __eq__(self, other):
 *         cdef _py_anon_pod2 other_
 *         if not isinstance(other, _py_anon_pod2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6755
 *         cdef _py_anon_pod2 other_
 *         if not isinstance(other, _py_anon_pod2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod2)) == 0)
 */
    /* Different type: not equal (no NotImplemented fallback here). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6754
 *     def __eq__(self, other):
 *         cdef _py_anon_pod2 other_
 *         if not isinstance(other, _py_anon_pod2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":6756
 *         if not isinstance(other, _py_anon_pod2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod2)) == 0)
 * 
 */
  /* Typed cast of `other` (the None branch of the test is unreachable
   * here: None already failed the isinstance check above). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2))))) __PYX_ERR(0, 6756, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":6757
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod2)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(_anon_pod2))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6752
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod2 other_
 *         if not isinstance(other, _py_anon_pod2):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6759
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod2)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
*/

/* Python wrapper */
/* CPython mp_ass_subscript wrapper for _py_anon_pod2.__setitem__:
 * delegates self/key/val to the __pyx_pf_ implementation.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; relies
 * on __Pyx_KwValues_VARARGS not evaluating its arguments — confirm
 * against the Cython utility code. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod2.__setitem__ (pyx lines 6759-6769):
 *   self[0] = <numpy.ndarray>  -> allocate a fresh _anon_pod2, memcpy the
 *                                 array's bytes into it, and mark the
 *                                 struct as owned by this wrapper;
 *   anything else              -> setattr(self, key, val).
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":6760
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if self._ptr == NULL:
 */
  /* Short-circuit "key == 0 and isinstance(val, numpy.ndarray)":
   * the isinstance check is only evaluated when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6760, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6760, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6761
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 */
    /* NOTE(review): any previously owned self->_ptr is overwritten here
     * without free() -- presumably callers only assign item 0 on a fresh
     * instance; TODO confirm against the .pyx source / dealloc path. */
    __pyx_v_self->_ptr = ((_anon_pod2 *)malloc((sizeof(_anon_pod2))));

    /* "cuda/bindings/_nvml.pyx":6762
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *                 memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod2))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6763
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod2))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating _py_anon_pod2")
       * via the module-state MemoryError global (vectorcall path). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6763, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6763, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 6763, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6762
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod2))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6764
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod2))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read val.ctypes.data (the ndarray's base address as a Python int)
     * and copy sizeof(_anon_pod2) bytes from it into the fresh struct.
     * NOTE(review): assumes the array holds at least sizeof(_anon_pod2)
     * contiguous bytes -- TODO confirm the dtype contract upstream. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6764, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6764, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6764, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(_anon_pod2))));

    /* "cuda/bindings/_nvml.pyx":6765
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod2))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* self._owner = None: the copied data has no external owner. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6766
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod2))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6767
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only-ness from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6767, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6767, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 6767, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":6760
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":6769
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 6769, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":6759
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod2)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6771
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def avg_factor(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-descriptor __get__ wrapper for _py_anon_pod2.avg_factor:
 * casts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for _py_anon_pod2.avg_factor (pyx line 6774): returns
 * self._ptr[0].avgFactor (an unsigned int field) as a Python int,
 * or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6774
 *     def avg_factor(self):
 *         """int: """
 *         return self._ptr[0].avgFactor             # <<<<<<<<<<<<<<
 * 
 *     @avg_factor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).avgFactor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6771
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def avg_factor(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.avg_factor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6776
 *         return self._ptr[0].avgFactor
 * 
 *     @avg_factor.setter             # <<<<<<<<<<<<<<
 *     def avg_factor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-descriptor __set__ wrapper for _py_anon_pod2.avg_factor:
 * casts self and delegates to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for _py_anon_pod2.avg_factor (pyx lines 6777-6780): raises
 * ValueError if the instance is marked read-only, otherwise converts
 * val to unsigned int and stores it into self._ptr[0].avgFactor.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6778
 *     @avg_factor.setter
 *     def avg_factor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod2 instance is read-only")
 *         self._ptr[0].avgFactor = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6779
 *     def avg_factor(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].avgFactor = val
 * 
 */
    /* Build and raise ValueError via a direct vectorcall on the
     * builtin PyExc_ValueError type object. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod2_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6779, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6779, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6778
 *     @avg_factor.setter
 *     def avg_factor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod2 instance is read-only")
 *         self._ptr[0].avgFactor = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6780
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod2 instance is read-only")
 *         self._ptr[0].avgFactor = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6780, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).avgFactor = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6776
 *         return self._ptr[0].avgFactor
 * 
 *     @avg_factor.setter             # <<<<<<<<<<<<<<
 *     def avg_factor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.avg_factor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6782
 *         self._ptr[0].avgFactor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timeslice(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-descriptor __get__ wrapper for _py_anon_pod2.timeslice:
 * casts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for _py_anon_pod2.timeslice (pyx line 6785): returns
 * self._ptr[0].timeslice (an unsigned int field) as a Python int,
 * or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6785
 *     def timeslice(self):
 *         """int: """
 *         return self._ptr[0].timeslice             # <<<<<<<<<<<<<<
 * 
 *     @timeslice.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).timeslice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6785, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6782
 *         self._ptr[0].avgFactor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timeslice(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.timeslice.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6787
 *         return self._ptr[0].timeslice
 * 
 *     @timeslice.setter             # <<<<<<<<<<<<<<
 *     def timeslice(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-descriptor __set__ wrapper for _py_anon_pod2.timeslice:
 * casts self and delegates to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for _py_anon_pod2.timeslice (pyx lines 6788-6791): raises
 * ValueError if the instance is marked read-only, otherwise converts
 * val to unsigned int and stores it into self._ptr[0].timeslice.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6789
 *     @timeslice.setter
 *     def timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod2 instance is read-only")
 *         self._ptr[0].timeslice = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6790
 *     def timeslice(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].timeslice = val
 * 
 */
    /* Build and raise ValueError via a direct vectorcall on the
     * builtin PyExc_ValueError type object. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod2_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6790, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6790, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6789
 *     @timeslice.setter
 *     def timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod2 instance is read-only")
 *         self._ptr[0].timeslice = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":6791
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod2 instance is read-only")
 *         self._ptr[0].timeslice = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6791, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).timeslice = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6787
 *         return self._ptr[0].timeslice
 * 
 *     @timeslice.setter             # <<<<<<<<<<<<<<
 *     def timeslice(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.timeslice.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6793
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry for the
 * static method _py_anon_pod2.from_data (pyx line 6793). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_12from_data, "_py_anon_pod2.from_data(data)\n\nCreate an _py_anon_pod2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_12from_data};
/* Argument-parsing wrapper for the static method
 * _py_anon_pod2.from_data(data) (pyx line 6793).  Accepts exactly one
 * positional-or-keyword argument ("data") under both the FASTCALL and
 * tuple calling conventions, then delegates to the implementation.
 * Returns the new object, or NULL with an exception set.
 *
 * Fix: the keyword-count error check was written as
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * where unlikely(x) yields the 0/1 value __builtin_expect(!!(x), 0),
 * so "< 0" could never be true and a negative count from
 * __Pyx_NumKwargs_FASTCALL went undetected.  The parenthesis now
 * encloses the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesis moved so the negative-count check can fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6793, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6793, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 6793, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 6793, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6793, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 6793, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod2.from_data (pyx line 6800): looks up
 * the module-level _py_anon_pod2_dtype and forwards to the shared
 * __from_data helper, which wraps the given NumPy array in a new
 * _py_anon_pod2 instance.  Returns the new object, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":6800
 *             data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod2_dtype` holding the data.
 *         """
 *         return __from_data(data, "_py_anon_pod2_dtype", _py_anon_pod2_dtype, _py_anon_pod2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_py_anon_pod2_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6800, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_py_anon_pod2_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6800, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6793
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod2 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6802
 *         return __from_data(data, "_py_anon_pod2_dtype", _py_anon_pod2_dtype, _py_anon_pod2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry for the
 * static method _py_anon_pod2.from_ptr (pyx line 6802). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_14from_ptr, "_py_anon_pod2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an _py_anon_pod2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_14from_ptr};
/* Argument-parsing wrapper for the static method
 * _py_anon_pod2.from_ptr(ptr, readonly=False, owner=None)
 * (pyx lines 6802-6803).  Unpacks one required argument (ptr, converted
 * via PyLong_AsSsize_t to intptr_t) and two optional ones (readonly as
 * a truth value defaulting to 0, owner defaulting to None) under both
 * the FASTCALL and tuple calling conventions, then delegates to the
 * implementation.  Returns the new object, or NULL with an exception.
 *
 * Fix: the keyword-count error check was written as
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * where unlikely(x) yields the 0/1 value __builtin_expect(!!(x), 0),
 * so "< 0" could never be true and a negative count from
 * __Pyx_NumKwargs_FASTCALL went undetected.  The parenthesis now
 * encloses the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesis moved so the negative-count check can fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6802, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6802, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6802, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6802, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 6802, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":6803
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an _py_anon_pod2 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 6802, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6802, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6802, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6802, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6803, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6803, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 6802, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":6802
 *         return __from_data(data, "_py_anon_pod2_dtype", _py_anon_pod2_dtype, _py_anon_pod2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod2 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `_py_anon_pod2.from_ptr(ptr, readonly=False, owner=None)`.
 * Wraps a raw `_anon_pod2 *` in a new `_py_anon_pod2` wrapper object:
 *   - ptr == 0 raises ValueError;
 *   - owner is None: the struct at `ptr` is copied into freshly malloc'd storage
 *     that the wrapper owns (`_owned = True`, freed in __dealloc__);
 *   - otherwise: the pointer is borrowed (`_owned = False`) and `owner` is kept
 *     referenced so the underlying memory stays alive.
 * Returns a new reference, or NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6811
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6812
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)
 *         if owner is None:
 */
    /* Vectorcall of the builtin ValueError type with the message string. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6812, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6812, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6811
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)
 */
  }

  /* "cuda/bindings/_nvml.pyx":6813
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 */
  /* Allocate the wrapper directly via tp_new; Python-level __init__ is bypassed. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6813, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6814
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6815
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)
 *         if owner is None:
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 */
    __pyx_v_obj->_ptr = ((_anon_pod2 *)malloc((sizeof(_anon_pod2))));

    /* "cuda/bindings/_nvml.pyx":6816
 *         if owner is None:
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod2))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6817
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod2))
 *             obj._owner = None
 */
      /* MemoryError is resolved by name (module global, falling back to the
       * builtin), then called via the vectorcall protocol. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6817, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6817, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 6817, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6816
 *         if owner is None:
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod2))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6818
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod2))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the caller's struct into the newly owned storage. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(_anon_pod2))));

    /* "cuda/bindings/_nvml.pyx":6819
 *                 raise MemoryError("Error allocating _py_anon_pod2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod2))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6820
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod2))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <_anon_pod2 *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6814
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod2 obj = _py_anon_pod2.__new__(_py_anon_pod2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod2 *>malloc(sizeof(_anon_pod2))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":6822
 *             obj._owned = True
 *         else:
 *             obj._ptr = <_anon_pod2 *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Borrow the caller's pointer; `owner` keeps the memory alive. */
    __pyx_v_obj->_ptr = ((_anon_pod2 *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6823
 *         else:
 *             obj._ptr = <_anon_pod2 *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":6824
 *             obj._ptr = <_anon_pod2 *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":6825
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":6826
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6802
 *         return __from_data(data, "_py_anon_pod2_dtype", _py_anon_pod2_dtype, _py_anon_pod2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod2 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for `_py_anon_pod2.__reduce_cython__`: prototype, docstring,
 * PyMethodDef table entry, and the METH_FASTCALL entry point.  The wrapper
 * rejects any positional or keyword argument, then delegates to the
 * argument-less implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_16__reduce_cython__, "_py_anon_pod2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the tp_call (non-fastcall) protocol the arg count must be read from
   * the tuple first. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod2.__reduce_cython__`.  Pickling is not
 * supported for this wrapper type (it holds a raw C pointer), so this
 * unconditionally raises TypeError and always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for `_py_anon_pod2.__setstate_cython__(self, __pyx_state)`:
 * prototype, docstring, PyMethodDef table entry, and the METH_FASTCALL entry
 * point.  Parses exactly one positional-or-keyword argument (`__pyx_state`),
 * then delegates to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_18__setstate_cython__, "_py_anon_pod2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must be inside unlikely().  The original
     * `if (unlikely(__pyx_kwds_len) < 0)` normalizes the operand to 0/1 via
     * __builtin_expect's !!(x), so the test was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL went undetected.  The
     * sibling wrappers (__reduce_cython__, __init__) use the correct form. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod2.__setstate_cython__`.  Unpickling is not
 * supported for this wrapper type (it holds a raw C pointer); the `__pyx_state`
 * argument is ignored and TypeError is raised unconditionally (returns NULL). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6829
 * 
 * 
 * cdef _get__py_anon_pod3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod3 pod = _anon_pod3()
 *     return _numpy.dtype({
*/

/* Builds the numpy dtype describing `_anon_pod3` for `_py_anon_pod3`:
 * calls `numpy.dtype({'names': ..., 'formats': ..., 'offsets': ..., 'itemsize': ...})`
 * with field offsets computed from the addresses of a local `pod` instance.
 * Returns a new reference to the dtype, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod3_dtype_offsets(void) {
  _anon_pod3 __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  _anon_pod3 __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get__py_anon_pod3_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6830
 * 
 * cdef _get__py_anon_pod3_dtype_offsets():
 *     cdef _anon_pod3 pod = _anon_pod3()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['timeslice'],
 */
  /* BUG FIX: __pyx_t_1 was copied into pod without ever being assigned — the
   * `_anon_pod3()` value-initialization corresponding to the .pyx line above
   * was missing.  Only the *addresses* of pod's fields are used below, but
   * copying an indeterminate object is undefined behavior in C++; value-
   * initialize the temporary before the copy. */
  __pyx_t_1 = _anon_pod3();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6831
 * cdef _get__py_anon_pod3_dtype_offsets():
 *     cdef _anon_pod3 pod = _anon_pod3()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['timeslice'],
 *         'formats': [_numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6831, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6831, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6832
 *     cdef _anon_pod3 pod = _anon_pod3()
 *     return _numpy.dtype({
 *         'names': ['timeslice'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6832, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6832, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_timeslice);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_timeslice);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_timeslice) != (0)) __PYX_ERR(0, 6832, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6832, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6833
 *     return _numpy.dtype({
 *         'names': ['timeslice'],
 *         'formats': [_numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6833, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6833, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6833, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6833, __pyx_L1_error);
  __pyx_t_7 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6832, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6835
 *         'formats': [_numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(_anon_pod3),
 */
  /* Field offset = address of the member minus address of the struct. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeslice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6835, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6834
 *         'names': ['timeslice'],
 *         'formats': [_numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6834, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6834, __pyx_L1_error);
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6832, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6837
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(_anon_pod3),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(_anon_pod3))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6832, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall `numpy.dtype` with the spec dict as its single argument. */
  __pyx_t_8 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_8 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6831, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6829
 * 
 * 
 * cdef _get__py_anon_pod3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod3 pod = _anon_pod3()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml._get__py_anon_pod3_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6854
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod3 *>calloc(1, sizeof(_anon_pod3))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod3.__init__`: zero-allocates an owned
 * `_anon_pod3` via calloc (raising MemoryError on failure) and initializes the
 * wrapper state (`_owner = None`, `_owned = True`, `_readonly = False`).
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6855
 * 
 *     def __init__(self):
 *         self._ptr = <_anon_pod3 *>calloc(1, sizeof(_anon_pod3))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod3")
 */
  /* calloc zero-fills, so every struct field starts at 0. */
  __pyx_v_self->_ptr = ((_anon_pod3 *)calloc(1, (sizeof(_anon_pod3))));

  /* "cuda/bindings/_nvml.pyx":6856
 *     def __init__(self):
 *         self._ptr = <_anon_pod3 *>calloc(1, sizeof(_anon_pod3))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod3")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6857
 *         self._ptr = <_anon_pod3 *>calloc(1, sizeof(_anon_pod3))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod3")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* MemoryError is resolved by name (module global, falling back to the
     * builtin), then called via the vectorcall protocol. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6857, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod3};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6857, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6857, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6856
 *     def __init__(self):
 *         self._ptr = <_anon_pod3 *>calloc(1, sizeof(_anon_pod3))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod3")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":6858
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod3")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":6859
 *             raise MemoryError("Error allocating _py_anon_pod3")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":6860
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":6854
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod3 *>calloc(1, sizeof(_anon_pod3))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6862
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod3 *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* CPython-level wrapper for _py_anon_pod3.__dealloc__: downcasts the generic
   PyObject* to the extension-type struct and delegates to the implementation
   function.  Returns void — tp_dealloc-level hooks cannot propagate errors. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
     they are only consumed inside the macro expansion, which presumably
     ignores them in VARARGS mode — generated code, do not hand-edit. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of _py_anon_pod3.__dealloc__ (.pyx lines 6862-6867):
   frees the heap-allocated _anon_pod3 struct, but only when this wrapper
   object owns it (_owned) and the pointer is non-NULL. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  _anon_pod3 *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  _anon_pod3 *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":6864
 *     def __dealloc__(self):
 *         cdef _anon_pod3 *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit evaluation of `self._owned and self._ptr != NULL`:
     if _owned is false, skip the pointer check entirely. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6865
 *         cdef _anon_pod3 *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":6866
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    /* Null the field before freeing so the object never holds a dangling
       pointer, as written in the .pyx source. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":6867
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6864
 *     def __dealloc__(self):
 *         cdef _anon_pod3 *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":6862
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod3 *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":6869
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod3 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython-level wrapper for _py_anon_pod3.__repr__: downcasts self and
   forwards to the implementation; returns a new unicode reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod3.__repr__ (.pyx line 6870): builds the
   f-string "<{__name__}._py_anon_pod3 object at {hex(id(self))}>" by joining
   five precomputed unicode fragments. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6870
 * 
 *     def __repr__(self):
 *         return f"<{__name__}._py_anon_pod3 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module global __name__ and format it as str ({__name__}). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6870, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6870, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) — id() via the builtin, then hex(), then str-conversion. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6870, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6870, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6870, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join five fragments: "<", __name__, "._py_anon_pod3 object at ",
     hex-id, ">".  Total-length and max-char hints were precomputed by
     Cython from the literal fragment lengths. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_py_anon_pod3_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6870, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6869
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod3 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6872
 *         return f"<{__name__}._py_anon_pod3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* CPython-level wrapper for the `ptr` property getter: downcasts self and
   forwards to the implementation; returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (.pyx line 6875): returns the
   raw _ptr address reinterpreted as a Python int (intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6875
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  /* PyLong_FromSsize_t: Cython maps intptr_t onto Py_ssize_t here. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6875, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6872
 *         return f"<{__name__}._py_anon_pod3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6877
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) fast path for _get_ptr (.pyx lines 6877-6878): returns the
   raw _ptr address as intptr_t with no Python-object overhead and no error
   path — cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":6878
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6877
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6880
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython-level wrapper for _py_anon_pod3.__int__: downcasts self and
   forwards to the implementation; returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod3.__int__ (.pyx line 6881): identical result
   to the `ptr` getter — the _ptr address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":6881
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  /* PyLong_FromSsize_t: Cython maps intptr_t onto Py_ssize_t here. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6881, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6880
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6883
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod3 other_
 *         if not isinstance(other, _py_anon_pod3):
*/

/* Python wrapper */
/* CPython-level wrapper for _py_anon_pod3.__eq__: downcasts self, forwards
   self and other to the implementation; returns a new bool reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod3.__eq__ (.pyx lines 6883-6888):
   returns False for non-_py_anon_pod3 operands, otherwise compares the two
   underlying _anon_pod3 structs byte-for-byte via memcmp. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":6885
 *     def __eq__(self, other):
 *         cdef _py_anon_pod3 other_
 *         if not isinstance(other, _py_anon_pod3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":6886
 *         cdef _py_anon_pod3 other_
 *         if not isinstance(other, _py_anon_pod3):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod3)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6885
 *     def __eq__(self, other):
 *         cdef _py_anon_pod3 other_
 *         if not isinstance(other, _py_anon_pod3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":6887
 *         if not isinstance(other, _py_anon_pod3):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod3)) == 0)
 * 
*/
  /* Typed assignment: TypeTest re-validates (or accepts None) per Cython
     semantics for `cdef _py_anon_pod3 other_ = other`. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3))))) __PYX_ERR(0, 6887, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":6888
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod3)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): raw-byte comparison of the structs; no NULL-check on either
     _ptr and struct padding bytes (if any) would participate — presumably
     safe because instances are calloc/memcpy-initialized, but confirm in the
     .pyx source if this ever misbehaves. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(_anon_pod3))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6883
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod3 other_
 *         if not isinstance(other, _py_anon_pod3):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6890
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod3)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
*/

/* Python wrapper */
/* CPython-level wrapper for _py_anon_pod3.__setitem__: downcasts self and
   forwards key/val to the implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod3.__setitem__ (.pyx lines 6890-6900):
   obj[0] = <ndarray> re-allocates the underlying struct and copies the
   array's bytes into it; any other key falls back to setattr(self, key, val). */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":6891
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if self._ptr == NULL:
*/
  /* Short-circuit `key == 0 and isinstance(val, _numpy.ndarray)`:
     the ndarray lookup only happens when key compares equal to 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 6891, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6891, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6892
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod3")
*/
    /* NOTE(review): the previous _ptr is overwritten without being freed —
       if this instance already owned an allocation, that memory leaks.
       The behavior comes straight from the .pyx source; a fix belongs there,
       not in this generated file. */
    __pyx_v_self->_ptr = ((_anon_pod3 *)malloc((sizeof(_anon_pod3))));

    /* "cuda/bindings/_nvml.pyx":6893
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod3))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6894
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod3))
 *             self._owner = None
*/
      /* Build and raise MemoryError("Error allocating _py_anon_pod3") via
         Cython's vectorcall helper (handles bound-method unpacking). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6894, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod3};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6894, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 6894, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6893
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod3))
*/
    }

    /* "cuda/bindings/_nvml.pyx":6895
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod3))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* val.ctypes.data yields the array's base address as a Python int;
       copy sizeof(_anon_pod3) bytes from it into the fresh struct.
       Assumes the array holds at least that many bytes — enforced by the
       caller/dtype, not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6895, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6895, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6895, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(_anon_pod3))));

    /* "cuda/bindings/_nvml.pyx":6896
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod3))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6897
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod3))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6898
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Mirror the source array's writeability: read-only array -> read-only pod. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6898, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6898, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 6898, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":6891
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":6900
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 6900, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":6890
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod3)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6902
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timeslice(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `timeslice` property getter: downcasts self
   and forwards to the implementation; returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `timeslice` property getter (.pyx line 6905):
   reads the struct field self._ptr[0].timeslice (unsigned int, per the
   conversion helper used) and boxes it as a Python int.
   No NULL-check on _ptr — presumes __init__/from_data allocated it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6905
 *     def timeslice(self):
 *         """int: """
 *         return self._ptr[0].timeslice             # <<<<<<<<<<<<<<
 * 
 *     @timeslice.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).timeslice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6902
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timeslice(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.timeslice.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6907
 *         return self._ptr[0].timeslice
 * 
 *     @timeslice.setter             # <<<<<<<<<<<<<<
 *     def timeslice(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `timeslice` property setter: downcasts self
   and forwards val to the implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs exist only inside the macro expansion (generated). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `timeslice` property setter (.pyx lines 6907-6911):
   raises ValueError when the instance is read-only, otherwise converts val
   to unsigned int and stores it into self._ptr[0].timeslice. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":6909
 *     @timeslice.setter
 *     def timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod3 instance is read-only")
 *         self._ptr[0].timeslice = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":6910
 *     def timeslice(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].timeslice = val
 * 
*/
    /* Build and raise ValueError(...) via the vectorcall helper; PyExc_ValueError
       is called directly (no module-global lookup needed for builtins here). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod3_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6910, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 6910, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6909
 *     @timeslice.setter
 *     def timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod3 instance is read-only")
 *         self._ptr[0].timeslice = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":6911
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod3 instance is read-only")
 *         self._ptr[0].timeslice = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* Conversion raises OverflowError/TypeError on out-of-range or non-int val;
     the (unsigned int)-1 sentinel is disambiguated via PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6911, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).timeslice = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":6907
 *         return self._ptr[0].timeslice
 * 
 *     @timeslice.setter             # <<<<<<<<<<<<<<
 *     def timeslice(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.timeslice.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6913
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod3 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/*
 * Python wrapper for the static method `_py_anon_pod3.from_data(data)`.
 * Unpacks the single `data` argument (positionally or by keyword) and
 * forwards it to the implementation `..._12from_data`.
 *
 * FIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; `unlikely(x)` yields the 0/1 result of
 * `__builtin_expect`, so that comparison is always false and the error path
 * was dead.  Corrected to `unlikely(__pyx_kwds_len < 0)`, matching the
 * correctly-parenthesized form used elsewhere in this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_12from_data, "_py_anon_pod3.from_data(data)\n\nCreate an _py_anon_pod3 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod3_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  /* Under the tuple-based (non-fastcall) protocol the arg count must be
   * computed from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` — always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6913, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge
       * keywords, then verify every required slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6913, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 6913, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 6913, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6913, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 6913, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `_py_anon_pod3.from_data(data)`: looks up the module
 * global `_py_anon_pod3_dtype` and delegates to the module-level helper
 * `__from_data` together with the `_py_anon_pod3` extension type.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":6920
 *             data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod3_dtype` holding the data.
 *         """
 *         return __from_data(data, "_py_anon_pod3_dtype", _py_anon_pod3_dtype, _py_anon_pod3)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the `_py_anon_pod3_dtype` module global (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_py_anon_pod3_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Direct C call into the shared `__from_data` helper. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_py_anon_pod3_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6913
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod3 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record a traceback frame. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6922
 *         return __from_data(data, "_py_anon_pod3_dtype", _py_anon_pod3_dtype, _py_anon_pod3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod3 instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * Python wrapper for the static method
 * `_py_anon_pod3.from_ptr(intptr_t ptr, bint readonly=False, owner=None)`.
 * Unpacks up to three arguments, converts `ptr` to intptr_t and `readonly`
 * to a C int, then forwards to the implementation `..._14from_ptr`.
 *
 * FIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; `unlikely(x)` yields the 0/1 result of
 * `__builtin_expect`, so that comparison is always false and the error path
 * was dead.  Corrected to `unlikely(__pyx_kwds_len < 0)`, matching the
 * correctly-parenthesized form used elsewhere in this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_14from_ptr, "_py_anon_pod3.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an _py_anon_pod3 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` — always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 6922, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first, then
       * merge keywords, default `owner` to None, and check required args. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6922, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6922, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6922, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 6922, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":6923
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an _py_anon_pod3 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 6922, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 6922, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 6922, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6922, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): PyLong_AsSsize_t converts via Py_ssize_t rather than a
     * dedicated intptr_t conversion; it also requires an exact int (no
     * __index__ fallback).  Equivalent on platforms where Py_ssize_t and
     * intptr_t share a width — TODO confirm this matches the intended
     * argument conversion. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 6923, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 6923, __pyx_L3_error)
    } else {
      /* `readonly` defaults to False. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 6922, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":6922
 *         return __from_data(data, "_py_anon_pod3_dtype", _py_anon_pod3_dtype, _py_anon_pod3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod3 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `_py_anon_pod3.from_ptr(ptr, readonly, owner)`.
 *
 * Behavior (as generated from the .pyx shown in the inline comments):
 *   - rejects a null `ptr` with ValueError;
 *   - owner is None  -> allocates a private `_anon_pod3` with malloc, copies
 *     the pointed-to data in (memcpy), and marks the instance as owning the
 *     allocation (`_owned = 1`, `_owner = None`);
 *   - owner is given -> wraps the raw pointer directly and holds a reference
 *     to `owner` to keep the underlying storage alive (`_owned = 0`);
 *   - stores the `readonly` flag and returns the new instance.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":6931
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":6932
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)
 *         if owner is None:
 */
    /* Vectorcall ValueError("ptr must not be null (0)") and raise it. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6932, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 6932, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":6931
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)
 */
  }

  /* "cuda/bindings/_nvml.pyx":6933
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod3(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6933, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":6934
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":6935
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)
 *         if owner is None:
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 */
    /* No owner: take a private copy of the struct so the caller's buffer
     * can go away.  Freed later via the `_owned` flag. */
    __pyx_v_obj->_ptr = ((_anon_pod3 *)malloc((sizeof(_anon_pod3))));

    /* "cuda/bindings/_nvml.pyx":6936
 *         if owner is None:
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod3))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":6937
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod3))
 *             obj._owner = None
 */
      /* malloc failed: look up MemoryError by name and raise it. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6937, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod3};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6937, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 6937, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":6936
 *         if owner is None:
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod3))
 */
    }

    /* "cuda/bindings/_nvml.pyx":6938
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod3))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the caller's data into the freshly allocated struct. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(_anon_pod3))));

    /* "cuda/bindings/_nvml.pyx":6939
 *                 raise MemoryError("Error allocating _py_anon_pod3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod3))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":6940
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod3))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <_anon_pod3 *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":6934
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod3 obj = _py_anon_pod3.__new__(_py_anon_pod3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod3 *>malloc(sizeof(_anon_pod3))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":6942
 *             obj._owned = True
 *         else:
 *             obj._ptr = <_anon_pod3 *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: borrow the pointer and keep `owner` alive instead of
     * copying. */
    __pyx_v_obj->_ptr = ((_anon_pod3 *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":6943
 *         else:
 *             obj._ptr = <_anon_pod3 *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":6944
 *             obj._ptr = <_anon_pod3 *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":6945
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":6946
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6922
 *         return __from_data(data, "_py_anon_pod3_dtype", _py_anon_pod3_dtype, _py_anon_pod3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod3 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record a traceback frame. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Drop the local reference to `obj`; on success __pyx_r already holds one. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * Python wrapper for `_py_anon_pod3.__reduce_cython__(self)`.
 * Rejects any positional or keyword arguments, then forwards to the
 * implementation (which unconditionally raises TypeError to block pickling).
 * Note: this wrapper uses the correctly-parenthesized keyword-length check
 * `unlikely(__pyx_kwds_len < 0)`.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_16__reduce_cython__, "_py_anon_pod3.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `_py_anon_pod3.__reduce_cython__`: always raises
 * TypeError because the wrapped C pointer (`self._ptr`) cannot be pickled.
 * Always returns NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* raise TypeError with the fixed message (type + value form). */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * Python wrapper for `_py_anon_pod3.__setstate_cython__(self, __pyx_state)`.
 * Unpacks the single `__pyx_state` argument and forwards to the
 * implementation (which unconditionally raises TypeError to block pickling).
 *
 * FIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; `unlikely(x)` yields the 0/1 result of
 * `__builtin_expect`, so that comparison is always false and the error path
 * was dead.  Corrected to `unlikely(__pyx_kwds_len < 0)`, matching the
 * correctly-parenthesized form used in `__reduce_cython__`'s wrapper.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_18__setstate_cython__, "_py_anon_pod3.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` — always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `_py_anon_pod3.__setstate_cython__`: always raises
 * TypeError because the wrapped C pointer (`self._ptr`) cannot be pickled;
 * the `__pyx_state` argument is ignored.  Always returns NULL with an
 * exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* raise TypeError with the fixed message (type + value form). */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6949
 * 
 * 
 * cdef _get_vgpu_scheduler_log_entry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLogEntry_t pod = nvmlVgpuSchedulerLogEntry_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype mirroring the C layout of
 * nvmlVgpuSchedulerLogEntry_t.
 *
 * Equivalent Cython source (cuda/bindings/_nvml.pyx:6949-6963):
 *   construct a dict with keys 'names', 'formats', 'offsets', 'itemsize'
 *   and call _numpy.dtype(...) on it.  Field offsets are computed at
 *   runtime by pointer arithmetic on a stack-allocated `pod` instance, so
 *   the dtype matches whatever padding/alignment this build's compiler
 *   chose for the struct.
 *
 * Returns: new reference to the numpy.dtype object, or NULL on error
 * (exception set). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_entry_dtype_offsets(void) {
  nvmlVgpuSchedulerLogEntry_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerLogEntry_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  size_t __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_log_entry_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":6950
 * 
 * cdef _get_vgpu_scheduler_log_entry_dtype_offsets():
 *     cdef nvmlVgpuSchedulerLogEntry_t pod = nvmlVgpuSchedulerLogEntry_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['timestamp', 'time_run_total', 'time_run', 'sw_runlist_id', 'target_time_slice', 'cumulative_preemption_time'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so `pod`
   * holds indeterminate bytes.  That appears harmless here — `pod` is
   * only used below for &pod field-address arithmetic, never read — but
   * this is codegen behavior to confirm against the Cython version, not
   * something to patch by hand in generated output. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":6951
 * cdef _get_vgpu_scheduler_log_entry_dtype_offsets():
 *     cdef nvmlVgpuSchedulerLogEntry_t pod = nvmlVgpuSchedulerLogEntry_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['timestamp', 'time_run_total', 'time_run', 'sw_runlist_id', 'target_time_slice', 'cumulative_preemption_time'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint64, _numpy.uint64],
 */
  /* Look up _numpy.dtype: __pyx_t_4 = module, __pyx_t_5 = bound `dtype`. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":6952
 *     cdef nvmlVgpuSchedulerLogEntry_t pod = nvmlVgpuSchedulerLogEntry_t()
 *     return _numpy.dtype({
 *         'names': ['timestamp', 'time_run_total', 'time_run', 'sw_runlist_id', 'target_time_slice', 'cumulative_preemption_time'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 */
  /* __pyx_t_4 = the spec dict (4 keys); __pyx_t_6 = the 'names' list of
   * 6 interned snake_case field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6952, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6952, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_timestamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_timestamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_timestamp) != (0)) __PYX_ERR(0, 6952, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_run_total);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_run_total);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_time_run_total) != (0)) __PYX_ERR(0, 6952, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_run);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_run);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_time_run) != (0)) __PYX_ERR(0, 6952, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sw_runlist_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sw_runlist_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_sw_runlist_id) != (0)) __PYX_ERR(0, 6952, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_target_time_slice);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_target_time_slice);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_target_time_slice) != (0)) __PYX_ERR(0, 6952, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cumulative_preemption_time);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cumulative_preemption_time);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_cumulative_preemption_time) != (0)) __PYX_ERR(0, 6952, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 6952, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6953
 *     return _numpy.dtype({
 *         'names': ['timestamp', 'time_run_total', 'time_run', 'sw_runlist_id', 'target_time_slice', 'cumulative_preemption_time'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint64, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 */
  /* 'formats' list: uint64 x3, uint32 (swRunlistId), uint64 x2 — each
   * fetched fresh from the _numpy module global. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Pack the six format objects into the 'formats' list; ownership is
   * transferred into the list (GIVEREF + temp cleared to 0). */
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 6953, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 6953, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 6953, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 6953, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 6953, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 6953, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 6952, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":6955
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint64, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeRunTotal)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeRun)) - (<intptr_t>&pod),
 */
  /* Each offset is the byte distance from &pod to &pod.<field>,
   * i.e. offsetof() computed via intptr_t subtraction. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timestamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":6956
 *         'offsets': [
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeRunTotal)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeRun)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.swRunlistId)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeRunTotal)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 6956, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":6957
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeRunTotal)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeRun)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.swRunlistId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.targetTimeSlice)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeRun)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 6957, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":6958
 *             (<intptr_t>&(pod.timeRunTotal)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeRun)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.swRunlistId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.targetTimeSlice)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cumulativePreemptionTime)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.swRunlistId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":6959
 *             (<intptr_t>&(pod.timeRun)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.swRunlistId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.targetTimeSlice)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.cumulativePreemptionTime)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.targetTimeSlice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 6959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":6960
 *             (<intptr_t>&(pod.swRunlistId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.targetTimeSlice)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cumulativePreemptionTime)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerLogEntry_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.cumulativePreemptionTime)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 6960, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":6954
 *         'names': ['timestamp', 'time_run_total', 'time_run', 'sw_runlist_id', 'target_time_slice', 'cumulative_preemption_time'],
 *         'formats': [_numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint64, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timeRunTotal)) - (<intptr_t>&pod),
 */
  /* Pack the six offset ints into the 'offsets' list (same ownership
   * transfer pattern as 'formats'). */
  __pyx_t_7 = PyList_New(6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6954, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 6954, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_12) != (0)) __PYX_ERR(0, 6954, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_11) != (0)) __PYX_ERR(0, 6954, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 6954, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_9) != (0)) __PYX_ERR(0, 6954, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_8) != (0)) __PYX_ERR(0, 6954, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 6952, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":6962
 *             (<intptr_t>&(pod.cumulativePreemptionTime)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerLogEntry_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the full struct size including any
   * trailing padding, so arrays of this dtype map 1:1 onto C arrays. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerLogEntry_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 6962, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 6952, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call dtype(spec_dict).  If `dtype` resolved to a bound method, the
   * CYTHON_UNPACK_METHODS branch unpacks self+function so the call can
   * go through the vectorcall fast path (__pyx_t_13 selects the arg
   * window offset: 1 = no self slot used, 0 = self in slot 0). */
  __pyx_t_13 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_13 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_13, (2-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6951, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6949
 * 
 * 
 * cdef _get_vgpu_scheduler_log_entry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLogEntry_t pod = nvmlVgpuSchedulerLogEntry_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop every live temporary, record the traceback frame,
   * and return NULL. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_log_entry_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6984
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=vgpu_scheduler_log_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Python wrapper for `VgpuSchedulerLogEntry.__init__(self, size=1)`.
 *
 * Parses the (args, kwargs) tuple/dict into the single optional `size`
 * argument, defaulting to the interned int 1 when omitted, then delegates
 * to the __pyx_pf_ implementation below.  Returns 0 on success, -1 with
 * an exception set on bad arguments or a failing body. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned slot for the parsed `size` argument */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 6984, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: take any positional `size` first,
       * then merge keywords; at most 1 positional is accepted. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 6984, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only fast path; default size=1 when no args given. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 6984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 6984, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any refs held in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerLogEntry.__init__(self, size=1)`.
 *
 * Cython source (cuda/bindings/_nvml.pyx:6984-6988):
 *   arr = _numpy.empty(size, dtype=vgpu_scheduler_log_entry_dtype)
 *   self._data = arr.view(_numpy.recarray)
 *   assert self._data.itemsize == sizeof(nvmlVgpuSchedulerLogEntry_t), ...
 *
 * Allocates an uninitialized numpy array of the module-level structured
 * dtype, stores its recarray view on self._data, and (when assertions
 * are enabled) verifies the dtype itemsize matches the C struct size.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":6985
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=vgpu_scheduler_log_entry_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuSchedulerLogEntry_t), \
 */
  /* Resolve _numpy.empty and the module global
   * `vgpu_scheduler_log_entry_dtype`, then vectorcall
   * empty(size, dtype=<dtype>) via the kwargs builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6985, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6985, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6985, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6985, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 6985, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6985, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6986
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=vgpu_scheduler_log_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlVgpuSchedulerLogEntry_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuSchedulerLogEntry_t) }"
 */
  /* self._data = arr.view(_numpy.recarray): fast method call on `arr`,
   * then swap the new view into the _data slot (give ref to the slot,
   * drop the old value). */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 6986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6986, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":6987
 *         arr = _numpy.empty(size, dtype=vgpu_scheduler_log_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuSchedulerLogEntry_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuSchedulerLogEntry_t) }"
 * 
 */
  /* Assertion (skipped under CYTHON_WITHOUT_ASSERTIONS or python -O):
   * dtype itemsize must equal sizeof(nvmlVgpuSchedulerLogEntry_t). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6987, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerLogEntry_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6987, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6987, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 6987, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":6988
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuSchedulerLogEntry_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuSchedulerLogEntry_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string message from 4 pieces (two literal fragments,
       * two formatted values) and raise AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6988, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 6988, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlVgpuSchedulerLogEntry_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6988, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6988, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 6987, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 6987, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":6984
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=vgpu_scheduler_log_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6990
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuSchedulerLogEntry_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuSchedulerLogEntry_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python wrapper for `VgpuSchedulerLogEntry.__repr__(self)`.
 *
 * __repr__ takes no arguments, so this wrapper only sets up the refnanny
 * context and delegates to the __pyx_pf_ implementation.
 * NOTE(review): the __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs) line
 * names identifiers that are not parameters here — presumably the macro
 * expands without evaluating its arguments in this Cython configuration;
 * confirm against the generated macro definitions before touching. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated body of VgpuSchedulerLogEntry.__repr__.
 * If the backing numpy recarray (self._data) holds more than one element it
 * returns "<{module}.VgpuSchedulerLogEntry_Array_{size} object at {hex(id)}>",
 * otherwise "<{module}.VgpuSchedulerLogEntry object at {hex(id)}>".
 * The f-string is lowered to a __Pyx_PyUnicode_Join over literal and formatted
 * parts; the integer literals passed to the join (29, 11, 33) are the
 * compile-time lengths of the string constants. Generated code -- regenerate
 * from _nvml.pyx rather than editing the logic here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];   /* join parts for the 7-piece array repr */
  PyObject *__pyx_t_7[5];   /* join parts for the 5-piece scalar repr */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":6991
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.VgpuSchedulerLogEntry_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6991, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 6991, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":6992
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuSchedulerLogEntry_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.VgpuSchedulerLogEntry object at {hex(id(self))}>"
 */
    __Pyx_XDECREF(__pyx_r);
    /* __pyx_t_1 = str(__name__), __pyx_t_4 = str(self._data.size),
     * __pyx_t_2 = str(hex(id(self))) -- the three formatted f-string fields */
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    /* Interleave literal constants with the formatted fields and join:
     * "<" + name + ".VgpuSchedulerLogEntry_Array_" + size + " object at " + hex + ">" */
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerLogEntry_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 29 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":6991
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.VgpuSchedulerLogEntry_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":6994
 *             return f"<{__name__}.VgpuSchedulerLogEntry_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.VgpuSchedulerLogEntry object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar (size == 1) repr: same shape, no _Array_{size} segment. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerLogEntry_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 6994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":6990
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuSchedulerLogEntry_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuSchedulerLogEntry_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6996
 *             return f"<{__name__}.VgpuSchedulerLogEntry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* NOTE(review): getset descriptor wrapper for the read-only `ptr` property.
 * Generated by Cython; delegates to the ___get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): body of the `ptr` property getter -- returns
 * self._data.ctypes.data, i.e. the base address of the backing numpy array
 * as a Python int. Pure attribute chaining; no conversion is done in C. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":6999
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":6996
 *             return f"<{__name__}.VgpuSchedulerLogEntry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7001
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* NOTE(review): C-level (cdef) variant of the `ptr` getter. Fetches
 * self._data.ctypes.data and converts it to intptr_t via PyLong_AsSsize_t
 * (assumes intptr_t and Py_ssize_t are interchangeable, which holds on the
 * platforms Cython targets -- TODO confirm for exotic ABIs). Because a cdef
 * function returning intptr_t cannot propagate an exception through its
 * return type, the error path records a traceback and returns 0; callers
 * must treat the Python error indicator as authoritative. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7002
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7002, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7001
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7004
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* NOTE(review): nb_int slot wrapper for VgpuSchedulerLogEntry.__int__;
 * Cython-generated, delegates to the _4__int__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): body of VgpuSchedulerLogEntry.__int__. Raises TypeError when
 * the backing array holds more than one element (int() of an array is
 * ambiguous; the message directs callers to .ptr), otherwise returns
 * self._data.ctypes.data -- the buffer's base address -- as a Python int.
 * Generated code; regenerate from _nvml.pyx rather than editing logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":7005
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7005, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7005, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 7005, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":7006
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Vectorcall TypeError(msg) with the interned message constant. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7006, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7006, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7005
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":7008
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7004
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7010
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* NOTE(review): mp_length/sq_length slot wrapper for __len__; returns the
 * Py_ssize_t from the _6__len__ implementation (−1 signals an error). */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): body of __len__ -- returns self._data.size (the element
 * count of the backing numpy recarray) converted to Py_ssize_t via the
 * index protocol; -1 with an exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":7011
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7011, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 7011, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7010
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7013
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* NOTE(review): rich-compare (==) wrapper for __eq__; Cython-generated,
 * delegates self and other to the _8__eq__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): body of VgpuSchedulerLogEntry.__eq__. Short-circuits to
 * False when `other` is not a VgpuSchedulerLogEntry, or when the two backing
 * arrays disagree on size or dtype; otherwise returns
 * bool((self._data == other._data).all()) -- elementwise numpy comparison
 * reduced with .all(). Generated code; regenerate from _nvml.pyx for changes. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":7014
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":7015
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuit chain: type check, then size !=, then dtype !=. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 7015, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7016
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7015
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":7017
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* (self_data == other._data) then .all() via a vectorcall method lookup;
   * double negation at the end coerces the truth value to exactly True/False. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7017, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7017, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7017, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7013
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuSchedulerLogEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7019
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timestamp(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* NOTE(review): getset wrapper for the `timestamp` property getter;
 * Cython-generated, delegates to the ___get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): body of the `timestamp` property getter. For a single-element
 * backing array it returns int(self._data.timestamp[0]) (a plain Python int);
 * for multi-element arrays it returns the whole self._data.timestamp field
 * (a numpy uint64 array per the pyx docstring "Union[~_numpy.uint64, int]"). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7022
 *     def timestamp(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.timestamp[0])
 *         return self._data.timestamp
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7022, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7023
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.timestamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.timestamp
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_timestamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7023, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7023, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7023, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7022
 *     def timestamp(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.timestamp[0])
 *         return self._data.timestamp
 */
  }

  /* "cuda/bindings/_nvml.pyx":7024
 *         if self._data.size == 1:
 *             return int(self._data.timestamp[0])
 *         return self._data.timestamp             # <<<<<<<<<<<<<<
 * 
 *     @timestamp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_timestamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7019
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timestamp(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.timestamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7026
 *         return self._data.timestamp
 * 
 *     @timestamp.setter             # <<<<<<<<<<<<<<
 *     def timestamp(self, val):
 *         self._data.timestamp = val
*/

/* Python wrapper */
/* NOTE(review): getset wrapper for the `timestamp` property setter;
 * Cython-generated, delegates to the _2__set__ implementation below.
 * Returns 0 on success, -1 on error (CPython setter convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.timestamp.__set__:
   performs the Python-level attribute store `self._data.timestamp = val`.
   Returns 0 on success; on failure adds a traceback entry and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7028
 *     @timestamp.setter
 *     def timestamp(self, val):
 *         self._data.timestamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_timestamp, __pyx_v_val) < (0)) __PYX_ERR(0, 7028, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7026
 *         return self._data.timestamp
 * 
 *     @timestamp.setter             # <<<<<<<<<<<<<<
 *     def timestamp(self, val):
 *         self._data.timestamp = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: record this frame in the Python traceback, signal failure */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.timestamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7030
 *         self._data.timestamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_run_total(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.time_run_total.__get__:
   casts self to the extension-type struct and delegates to the ___get__
   implementation; returns the new reference it produces (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.time_run_total.__get__:
   if self._data.size == 1, returns int(self._data.time_run_total[0]);
   otherwise returns self._data.time_run_total unchanged.
   (self->_data is presumably a numpy structured-array view — TODO confirm.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7033
 *     def time_run_total(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_run_total[0])
 *         return self._data.time_run_total
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* compare self._data.size against the interned int 1 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7033, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7034
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.time_run_total[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_run_total
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_run_total); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7034, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* element [0], then coerce to a Python int */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7034, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7034, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7033
 *     def time_run_total(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_run_total[0])
 *         return self._data.time_run_total
 */
  }

  /* "cuda/bindings/_nvml.pyx":7035
 *         if self._data.size == 1:
 *             return int(self._data.time_run_total[0])
 *         return self._data.time_run_total             # <<<<<<<<<<<<<<
 * 
 *     @time_run_total.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_run_total); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7035, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7030
 *         self._data.time_run_total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_run_total(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: drop temporaries, record traceback frame, return NULL */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.time_run_total.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7037
 *         return self._data.time_run_total
 * 
 *     @time_run_total.setter             # <<<<<<<<<<<<<<
 *     def time_run_total(self, val):
 *         self._data.time_run_total = val
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.time_run_total.__set__:
   casts self to the extension-type struct and delegates to the _2__set__
   implementation; propagates its 0/-1 result. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.time_run_total.__set__:
   performs the Python-level attribute store `self._data.time_run_total = val`.
   Returns 0 on success; on failure adds a traceback entry and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7039
 *     @time_run_total.setter
 *     def time_run_total(self, val):
 *         self._data.time_run_total = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_run_total, __pyx_v_val) < (0)) __PYX_ERR(0, 7039, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7037
 *         return self._data.time_run_total
 * 
 *     @time_run_total.setter             # <<<<<<<<<<<<<<
 *     def time_run_total(self, val):
 *         self._data.time_run_total = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: record this frame in the Python traceback, signal failure */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.time_run_total.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7041
 *         self._data.time_run_total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_run(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.time_run.__get__:
   casts self to the extension-type struct and delegates to the ___get__
   implementation; returns the new reference it produces (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.time_run.__get__:
   if self._data.size == 1, returns int(self._data.time_run[0]);
   otherwise returns self._data.time_run unchanged.
   (self->_data is presumably a numpy structured-array view — TODO confirm.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7044
 *     def time_run(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_run[0])
 *         return self._data.time_run
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* compare self._data.size against the interned int 1 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7044, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7045
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.time_run[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_run
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_run); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* element [0], then coerce to a Python int */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7044
 *     def time_run(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_run[0])
 *         return self._data.time_run
 */
  }

  /* "cuda/bindings/_nvml.pyx":7046
 *         if self._data.size == 1:
 *             return int(self._data.time_run[0])
 *         return self._data.time_run             # <<<<<<<<<<<<<<
 * 
 *     @time_run.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_run); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7046, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7041
 *         self._data.time_run_total = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_run(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: drop temporaries, record traceback frame, return NULL */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.time_run.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7048
 *         return self._data.time_run
 * 
 *     @time_run.setter             # <<<<<<<<<<<<<<
 *     def time_run(self, val):
 *         self._data.time_run = val
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.time_run.__set__:
   casts self to the extension-type struct and delegates to the _2__set__
   implementation; propagates its 0/-1 result. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.time_run.__set__:
   performs the Python-level attribute store `self._data.time_run = val`.
   Returns 0 on success; on failure adds a traceback entry and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7050
 *     @time_run.setter
 *     def time_run(self, val):
 *         self._data.time_run = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_run, __pyx_v_val) < (0)) __PYX_ERR(0, 7050, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7048
 *         return self._data.time_run
 * 
 *     @time_run.setter             # <<<<<<<<<<<<<<
 *     def time_run(self, val):
 *         self._data.time_run = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: record this frame in the Python traceback, signal failure */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.time_run.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7052
 *         self._data.time_run = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sw_runlist_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.sw_runlist_id.__get__:
   casts self to the extension-type struct and delegates to the ___get__
   implementation; returns the new reference it produces (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.sw_runlist_id.__get__:
   if self._data.size == 1, returns int(self._data.sw_runlist_id[0]);
   otherwise returns self._data.sw_runlist_id unchanged.
   (self->_data is presumably a numpy structured-array view — TODO confirm.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7055
 *     def sw_runlist_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sw_runlist_id[0])
 *         return self._data.sw_runlist_id
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* compare self._data.size against the interned int 1 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7055, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7056
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.sw_runlist_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.sw_runlist_id
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sw_runlist_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* element [0], then coerce to a Python int */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7055
 *     def sw_runlist_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sw_runlist_id[0])
 *         return self._data.sw_runlist_id
 */
  }

  /* "cuda/bindings/_nvml.pyx":7057
 *         if self._data.size == 1:
 *             return int(self._data.sw_runlist_id[0])
 *         return self._data.sw_runlist_id             # <<<<<<<<<<<<<<
 * 
 *     @sw_runlist_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sw_runlist_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7057, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7052
 *         self._data.time_run = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sw_runlist_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: drop temporaries, record traceback frame, return NULL */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.sw_runlist_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7059
 *         return self._data.sw_runlist_id
 * 
 *     @sw_runlist_id.setter             # <<<<<<<<<<<<<<
 *     def sw_runlist_id(self, val):
 *         self._data.sw_runlist_id = val
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.sw_runlist_id.__set__:
   casts self to the extension-type struct and delegates to the _2__set__
   implementation; propagates its 0/-1 result. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.sw_runlist_id.__set__:
   performs the Python-level attribute store `self._data.sw_runlist_id = val`.
   Returns 0 on success; on failure adds a traceback entry and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7061
 *     @sw_runlist_id.setter
 *     def sw_runlist_id(self, val):
 *         self._data.sw_runlist_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sw_runlist_id, __pyx_v_val) < (0)) __PYX_ERR(0, 7061, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7059
 *         return self._data.sw_runlist_id
 * 
 *     @sw_runlist_id.setter             # <<<<<<<<<<<<<<
 *     def sw_runlist_id(self, val):
 *         self._data.sw_runlist_id = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: record this frame in the Python traceback, signal failure */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.sw_runlist_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7063
 *         self._data.sw_runlist_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def target_time_slice(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.target_time_slice.__get__:
   casts self to the extension-type struct and delegates to the ___get__
   implementation; returns the new reference it produces (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.target_time_slice.__get__:
   if self._data.size == 1, returns int(self._data.target_time_slice[0]);
   otherwise returns self._data.target_time_slice unchanged.
   (self->_data is presumably a numpy structured-array view — TODO confirm.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7066
 *     def target_time_slice(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.target_time_slice[0])
 *         return self._data.target_time_slice
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7066, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* compare self._data.size against the interned int 1 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7066, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7067
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.target_time_slice[0])             # <<<<<<<<<<<<<<
 *         return self._data.target_time_slice
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_target_time_slice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7067, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* element [0], then coerce to a Python int */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7067, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7067, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7066
 *     def target_time_slice(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.target_time_slice[0])
 *         return self._data.target_time_slice
 */
  }

  /* "cuda/bindings/_nvml.pyx":7068
 *         if self._data.size == 1:
 *             return int(self._data.target_time_slice[0])
 *         return self._data.target_time_slice             # <<<<<<<<<<<<<<
 * 
 *     @target_time_slice.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_target_time_slice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7068, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7063
 *         self._data.sw_runlist_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def target_time_slice(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: drop temporaries, record traceback frame, return NULL */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.target_time_slice.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7070
 *         return self._data.target_time_slice
 * 
 *     @target_time_slice.setter             # <<<<<<<<<<<<<<
 *     def target_time_slice(self, val):
 *         self._data.target_time_slice = val
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.target_time_slice.__set__:
   casts self to the extension-type struct and delegates to the _2__set__
   implementation; propagates its 0/-1 result. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.target_time_slice.__set__:
   performs the Python-level attribute store `self._data.target_time_slice = val`.
   Returns 0 on success; on failure adds a traceback entry and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7072
 *     @target_time_slice.setter
 *     def target_time_slice(self, val):
 *         self._data.target_time_slice = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_target_time_slice, __pyx_v_val) < (0)) __PYX_ERR(0, 7072, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7070
 *         return self._data.target_time_slice
 * 
 *     @target_time_slice.setter             # <<<<<<<<<<<<<<
 *     def target_time_slice(self, val):
 *         self._data.target_time_slice = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* error path: record this frame in the Python traceback, signal failure */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.target_time_slice.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7074
 *         self._data.target_time_slice = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cumulative_preemption_time(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for VgpuSchedulerLogEntry.cumulative_preemption_time.__get__:
   casts self to the extension-type struct and delegates to the ___get__
   implementation; returns the new reference it produces (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.cumulative_preemption_time.__get__:
   if self._data.size == 1, returns int(self._data.cumulative_preemption_time[0]);
   otherwise returns self._data.cumulative_preemption_time unchanged.
   (self->_data is presumably a numpy structured-array view — TODO confirm.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7077
 *     def cumulative_preemption_time(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.cumulative_preemption_time[0])
 *         return self._data.cumulative_preemption_time
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* compare self._data.size against the interned int 1 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7077, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7078
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.cumulative_preemption_time[0])             # <<<<<<<<<<<<<<
 *         return self._data.cumulative_preemption_time
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_cumulative_preemption_time); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7078, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* element [0], then coerce to a Python int */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7078, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7078, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7077
 *     def cumulative_preemption_time(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.cumulative_preemption_time[0])
 *         return self._data.cumulative_preemption_time
 */
  }

  /* "cuda/bindings/_nvml.pyx":7079
 *         if self._data.size == 1:
 *             return int(self._data.cumulative_preemption_time[0])
 *         return self._data.cumulative_preemption_time             # <<<<<<<<<<<<<<
 * 
 *     @cumulative_preemption_time.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_cumulative_preemption_time); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7079, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7074
 *         self._data.target_time_slice = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cumulative_preemption_time(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: drop temporaries, record traceback frame, return NULL */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.cumulative_preemption_time.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7081
 *         return self._data.cumulative_preemption_time
 * 
 *     @cumulative_preemption_time.setter             # <<<<<<<<<<<<<<
 *     def cumulative_preemption_time(self, val):
 *         self._data.cumulative_preemption_time = val
*/

/* Python wrapper */
/* Python-level wrapper for the `cumulative_preemption_time` property setter.
 * Casts the generic PyObject* self to the extension-type struct and delegates
 * to the implementation function; returns 0 on success, -1 on error
 * (with a Python exception set by the callee). */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this presumably compiles only because __Pyx_KwValues_VARARGS is a macro
   * that discards its arguments in this build configuration — confirm against
   * the utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cumulative_preemption_time` setter (pyx line 7083):
 * performs `self._data.cumulative_preemption_time = val` on the wrapped
 * record array via a generic attribute store. Returns 0 on success; on
 * failure records a traceback entry and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7083
 *     @cumulative_preemption_time.setter
 *     def cumulative_preemption_time(self, val):
 *         self._data.cumulative_preemption_time = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  /* Delegate field assignment to the underlying recarray's __setattr__. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_cumulative_preemption_time, __pyx_v_val) < (0)) __PYX_ERR(0, 7083, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7081
 *         return self._data.cumulative_preemption_time
 * 
 *     @cumulative_preemption_time.setter             # <<<<<<<<<<<<<<
 *     def cumulative_preemption_time(self, val):
 *         self._data.cumulative_preemption_time = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.cumulative_preemption_time.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7085
 *         self._data.cumulative_preemption_time = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerLogEntry.__getitem__ (mp_subscript
 * slot). Casts self to the extension-type struct and delegates to the
 * implementation; returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in this wrapper's parameter
   * list; presumably __Pyx_KwValues_VARARGS expands to a form that ignores
   * its arguments here — confirm against the utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.__getitem__ (pyx lines 7085-7099).
 *
 * Behavior mirrored from the Cython source embedded below:
 *   - For an exact-or-subclass `int` key: bounds-check against
 *     self._data.size (raising IndexError), normalize negative indices,
 *     then return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1]).
 *   - For any other key: forward to self._data[key]; if the result is a
 *     numpy recarray whose dtype equals vgpu_scheduler_log_entry_dtype,
 *     re-wrap it via from_data, otherwise return it unchanged.
 *
 * Returns a new reference, or NULL with an exception set. Temporaries
 * __pyx_t_3/5/7 hold owned references and are released on the error path. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":7088
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* isinstance(key, int): PyLong_Check accepts int and its subclasses. */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7089
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    /* Convert the Python int to Py_ssize_t; overflow raises here. */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 7089, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":7090
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7090, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 7090, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":7091
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Short-circuit `or`: accept key_ in the range [-size, size-1]. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7092
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7092, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 7092, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7091
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":7093
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":7094
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* Normalize a negative index to its non-negative equivalent. */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":7093
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":7095
 *             if key_ < 0:
 *                 key_ += size
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_scheduler_log_entry_dtype:
 */
    /* Build the length-1 slice self._data[key_:key_+1] and wrap it via the
     * class-level from_data method. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7095, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7095, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7088
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":7096
 *                 key_ += size
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_scheduler_log_entry_dtype:
 *             return VgpuSchedulerLogEntry.from_data(out)
 */
  /* Non-int key (e.g. slice, field name): delegate to the recarray. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":7097
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_scheduler_log_entry_dtype:             # <<<<<<<<<<<<<<
 *             return VgpuSchedulerLogEntry.from_data(out)
 *         return out
 */
  /* Short-circuit `and`: only compare dtypes when out is a recarray. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 7097, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7098
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_scheduler_log_entry_dtype:
 *             return VgpuSchedulerLogEntry.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7098, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7097
 *             return VgpuSchedulerLogEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_scheduler_log_entry_dtype:             # <<<<<<<<<<<<<<
 *             return VgpuSchedulerLogEntry.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":7099
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_scheduler_log_entry_dtype:
 *             return VgpuSchedulerLogEntry.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7085
 *         self._data.cumulative_preemption_time = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  /* Error path: release any live temporaries, record traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7101
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerLogEntry.__setitem__ (mp_ass_subscript
 * slot, assignment case). Casts self and delegates; returns 0 on success,
 * -1 on error with an exception set by the callee. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * presumably __Pyx_KwValues_VARARGS ignores its arguments in this build —
   * confirm against the utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.__setitem__ (pyx line 7102):
 * forwards `self._data[key] = val` to the wrapped record array via a generic
 * item store. Returns 0 on success; on failure records a traceback entry
 * and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":7102
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 7102, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7101
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7104
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method VgpuSchedulerLogEntry.from_data(data).
 * Parses exactly one positional-or-keyword argument `data` (fastcall-with-
 * keywords calling convention when CYTHON_METH_FASTCALL is enabled, tuple/dict
 * otherwise) and forwards it to the implementation function.
 * Returns a new reference, or NULL with an exception set on parse failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14from_data, "VgpuSchedulerLogEntry.from_data(data)\n\nCreate an VgpuSchedulerLogEntry instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `vgpu_scheduler_log_entry_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison belongs inside unlikely().  The original
     * `if (unlikely(__pyx_kwds_len) < 0)` applies __builtin_expect's !!(x)
     * normalization first, yielding 0/1, so `< 0` was always false and a
     * negative (failed) keyword count from __Pyx_NumKwargs_FASTCALL was
     * silently treated as success. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7104, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7104, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 7104, __pyx_L3_error)
      /* Verify the required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 7104, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7104, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 7104, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Parse failure: drop any collected argument references before returning. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method VgpuSchedulerLogEntry.from_data(data)
 * (pyx lines 7104-7120):
 *   1. allocate a new VgpuSchedulerLogEntry via tp_new (bypassing __init__);
 *   2. validate data: must be a numpy.ndarray, 1-D, with dtype equal to
 *      vgpu_scheduler_log_entry_dtype (TypeError / ValueError otherwise);
 *   3. store data.view(numpy.recarray) into obj._data and return obj.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":7111
 *             data (_numpy.ndarray): a 1D array of dtype `vgpu_scheduler_log_entry_dtype` holding the data.
 *         """
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Direct tp_new call: constructs the object without running __init__. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7111, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":7112
 *         """
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7112, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7112, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 7112, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":7113
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7113, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7113, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7112
 *         """
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":7114
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:
 */
  /* Optimized `!= 1` comparison against the cached small-int constant. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7114, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 7114, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":7115
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:
 *             raise ValueError("data array must be of dtype vgpu_scheduler_log_entry_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7115, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7115, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7114
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":7116
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype vgpu_scheduler_log_entry_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* dtype mismatch check via rich comparison (numpy dtype __ne__). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7116, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 7116, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":7117
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:
 *             raise ValueError("data array must be of dtype vgpu_scheduler_log_entry_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_vgpu_2};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7117, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 7117, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7116
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype vgpu_scheduler_log_entry_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":7118
 *         if data.dtype != vgpu_scheduler_log_entry_dtype:
 *             raise ValueError("data array must be of dtype vgpu_scheduler_log_entry_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Create a recarray view (shares the caller's buffer — no copy) and
   * swap it into obj->_data, releasing the slot's previous reference. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7118, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7118, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7118, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7120
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7104
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* Error path: release all live temporaries, record traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7122
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * CPython entry point for VgpuSchedulerLogEntry.from_ptr(ptr, size=1,
 * readonly=False).  Unpacks positional/keyword arguments (fastcall or
 * tuple/dict convention depending on CYTHON_METH_FASTCALL), converts them
 * to C types (intptr_t, size_t, int) and delegates to the implementation
 * function __pyx_pf_..._16from_ptr.  Returns NULL with an exception set
 * on any argument-parsing or conversion failure.
 *
 * Fix (review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`.  unlikely() normalizes its operand to
 * 0/1, so that comparison could never be true and a failing
 * __Pyx_NumKwargs_FASTCALL (which can return -1 with an exception set)
 * would be silently ignored.  The comparison now lives inside the macro,
 * matching the correct pattern used by the __reduce_cython__ wrapper
 * elsewhere in this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_16from_ptr, "VgpuSchedulerLogEntry.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an VgpuSchedulerLogEntry instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison must be inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7122, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then let
       * __Pyx_ParseKeywords fill/validate the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 7122, __pyx_L3_error)
      /* 'ptr' (index 0) is required; raise if it was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 7122, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call: 1..3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7122, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert to C values; defaults applied when the slot is NULL.
     * NOTE(review): PyLong_AsSsize_t is used for intptr_t — Py_ssize_t and
     * intptr_t have the same width on supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7123, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 7123, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7123, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":7123
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 7122, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":7122
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerLogEntry.from_ptr(ptr, size, readonly).
 * Validates that ptr is non-null, allocates a new VgpuSchedulerLogEntry,
 * wraps the raw memory at `ptr` in a memoryview (read-only or writable per
 * `readonly`), builds a NumPy ndarray of `size` records over that buffer
 * using vgpu_scheduler_log_entry_dtype, and stores a recarray view of it in
 * obj._data.  Returns the new object, or NULL with an exception set.
 *
 * NOTE(review): the memoryview does NOT own or copy the memory at `ptr`;
 * the caller must keep that memory alive for the lifetime of the returned
 * object — presumably guaranteed by the callers in this module (verify).
 * Generated Cython code: refcount/temporary ordering is intentional and
 * must not be reordered.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7131
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 */
  /* Reject null pointers up front: wrapping address 0 would be UB later. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7132
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7132, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7132, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7131
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 */
  }

  /* "cuda/bindings/_nvml.pyx":7133
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate via tp_new directly, bypassing any Python-level __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7133, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7134
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuSchedulerLogEntry_t) * size, flag)
 */
  /* flag is kept as a Python int (cdef object in the .pyx source). */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7134, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7134, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7136
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuSchedulerLogEntry_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_scheduler_log_entry_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Convert the Python-int flag back to C int for the C API call below. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7136, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":7135
 *         cdef VgpuSchedulerLogEntry obj = VgpuSchedulerLogEntry.__new__(VgpuSchedulerLogEntry)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlVgpuSchedulerLogEntry_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_scheduler_log_entry_dtype)
 */
  /* Non-owning memoryview over `size` structs starting at `ptr`. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlVgpuSchedulerLogEntry_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7137
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuSchedulerLogEntry_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_scheduler_log_entry_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* Vectorcall of _numpy.ndarray(size, buffer=buf, dtype=...). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7137, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 7137, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 7137, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7137, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7138
 *             <char*>ptr, sizeof(nvmlVgpuSchedulerLogEntry_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_scheduler_log_entry_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* obj._data = data.view(_numpy.recarray): field access via recarray. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7138, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7138, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7138, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Replace any previous obj->_data reference (set by tp_new to None). */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7140
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7122
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":6980
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/*
 * Getter slot wrapper for the read-only `_data` attribute declared in the
 * .pyx source (cdef readonly object _data).  Simply forwards to the
 * implementation below.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; they
 * appear only inside the __Pyx_KwValues_VARARGS macro, which presumably
 * expands without evaluating its arguments in this configuration —
 * otherwise this would not compile.  Generated code; left as-is.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the `_data` attribute getter: returns the NumPy
 * recarray stored in self->_data with a new strong reference.  Cannot
 * fail, so there is no error path.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* Hand out a new reference to the stored attribute. */
  __pyx_r = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_r);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/*
 * CPython entry point for VgpuSchedulerLogEntry.__reduce_cython__(self).
 * Accepts no arguments beyond self: any positional or keyword argument is
 * rejected before delegating to the implementation.  Note that here the
 * keyword-count error check uses the correct `unlikely(__pyx_kwds_len < 0)`
 * form (the comparison is inside the macro).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_18__reduce_cython__, "VgpuSchedulerLogEntry.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Reject any positional arguments, then any keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerLogEntry.__reduce_cython__(self) — the
 * standard Cython pickle protocol: builds state = (self._data,), appends
 * the instance __dict__ when present and non-empty, and returns either
 *   (__pyx_unpickle_..., (type(self), 0xa75e18a, None), state)   [setstate]
 * or
 *   (__pyx_unpickle_..., (type(self), 0xa75e18a, state))
 * depending on use_setstate.
 *
 * Fix (review): the else-branch computed use_setstate by taking the truth
 * value of a stored constant tuple (always true) instead of evaluating
 * `self._data is not None` as the pickle fragment intends.  It now
 * compares self->_data against Py_None directly, so instances whose
 * _data is None pickle through the simpler no-setstate form.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self._data is not None
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self._data is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* Fixed: evaluate `self._data is not None` (identity check against
     * Py_None) instead of the truth of a constant tuple. */
    __pyx_t_2 = (__pyx_v_self->_data != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self._data is not None
 *     if use_setstate:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, state)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuSchedulerLogE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_VgpuSchedulerLogEntry__set_state(self, __pyx_state)
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuSchedulerLogE); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuSchedulerLogEntry__set_state(self, __pyx_state)
*/

/* Python wrapper */
/*
 * CPython entry point for VgpuSchedulerLogEntry.__setstate_cython__(self,
 * __pyx_state).  Accepts exactly one argument (positional or keyword
 * `__pyx_state`) and forwards it to the implementation function.
 *
 * Fix (review): same misparenthesized keyword-count check as in from_ptr —
 * `unlikely(__pyx_kwds_len) < 0` can never be true because unlikely()
 * normalizes its operand to 0/1.  The comparison is now inside the macro
 * so a failing __Pyx_NumKwargs_FASTCALL is reported.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_20__setstate_cython__, "VgpuSchedulerLogEntry.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison must be inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogEntry.__setstate_cython__(self, __pyx_state):
 * validates that __pyx_state is a tuple (rejecting None and any non-tuple with
 * TypeError) and forwards it to the generated
 * __pyx_unpickle_VgpuSchedulerLogEntry__set_state helper, which restores the
 * object's fields during unpickling. Returns None on success, NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_VgpuSchedulerLogEntry__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  /* Take an owned reference to the state and type-check it: must be an exact
   * tuple; None is additionally rejected because the helper's parameter is
   * declared 'not None'. */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  /* Delegate to the generated C-level unpickle helper; its return value is
   * discarded (only success/failure matters here). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuSchedulerLogEntry__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuSchedulerLogEntry__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogEntry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7143
 * 
 * 
 * cdef _get__py_anon_pod4_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod4 pod = _anon_pod4()
 *     return _numpy.dtype({
*/

/* Build the NumPy structured dtype describing the C struct _anon_pod4:
 * names ['avg_factor', 'frequency'], both numpy.uint32, with byte offsets
 * computed from the live struct layout (field address minus struct address)
 * and itemsize == sizeof(_anon_pod4), so the dtype exactly mirrors the C
 * memory layout including any padding. Returns a new reference to the dtype
 * object, or NULL with an exception set on failure.
 *
 * Fix vs. generated original: __pyx_t_1 was declared without an initializer
 * and then copied into __pyx_v_pod, propagating an indeterminate value.
 * Only the *addresses* of pod's fields are used below, so the contents never
 * mattered in practice, but the copy is now value-initialized to keep the
 * code well-defined and sanitizer-clean. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod4_dtype_offsets(void) {
  _anon_pod4 __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  _anon_pod4 __pyx_t_1 = {}; /* value-initialized (C++14 aggregate init); original left this indeterminate */
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get__py_anon_pod4_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":7144
 * 
 * cdef _get__py_anon_pod4_dtype_offsets():
 *     cdef _anon_pod4 pod = _anon_pod4()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['avg_factor', 'frequency'],
 */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":7145
 * cdef _get__py_anon_pod4_dtype_offsets():
 *     cdef _anon_pod4 pod = _anon_pod4()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['avg_factor', 'frequency'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up numpy.dtype (imported in this module as _numpy). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":7146
 *     cdef _anon_pod4 pod = _anon_pod4()
 *     return _numpy.dtype({
 *         'names': ['avg_factor', 'frequency'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* dtype spec dict with 4 keys: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_avg_factor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_avg_factor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_avg_factor) != (0)) __PYX_ERR(0, 7146, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_frequency);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_frequency);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_frequency) != (0)) __PYX_ERR(0, 7146, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 7146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7147
 *     return _numpy.dtype({
 *         'names': ['avg_factor', 'frequency'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 7147, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 7147, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 7146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7149
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.frequency)) - (<intptr_t>&pod),
 *         ],
 */
  /* Offsets: pointer arithmetic on the stack instance yields each field's
   * byte offset from the start of the struct. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.avgFactor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":7150
 *         'offsets': [
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.frequency)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(_anon_pod4),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.frequency)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":7148
 *         'names': ['avg_factor', 'frequency'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.avgFactor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.frequency)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 7148, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 7148, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 7146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":7152
 *             (<intptr_t>&(pod.frequency)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(_anon_pod4),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(_anon_pod4))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 7146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); bound-method unpacking avoids an extra tuple. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7143
 * 
 * 
 * cdef _get__py_anon_pod4_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod4 pod = _anon_pod4()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get__py_anon_pod4_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7169
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod4 *>calloc(1, sizeof(_anon_pod4))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod4.__init__: enforces the zero-argument
 * signature (rejecting any positional or keyword arguments with TypeError)
 * and delegates to the implementation function. Returns 0 on success, -1 on
 * error, per the tp_init protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe macro is only used when the build
   * config guarantees the tuple is well-formed. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject any positionals or
   * keywords outright. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod4.__init__: heap-allocates one zero-filled
 * _anon_pod4 via calloc and records ownership state on the wrapper
 * (_owner = None, _owned = True, _readonly = False), so __dealloc__ later
 * frees the allocation. Raises MemoryError if calloc fails. Returns 0 on
 * success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":7170
 * 
 *     def __init__(self):
 *         self._ptr = <_anon_pod4 *>calloc(1, sizeof(_anon_pod4))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod4")
 */
  /* calloc zero-initializes the struct, matching NumPy's view of "empty". */
  __pyx_v_self->_ptr = ((_anon_pod4 *)calloc(1, (sizeof(_anon_pod4))));

  /* "cuda/bindings/_nvml.pyx":7171
 *     def __init__(self):
 *         self._ptr = <_anon_pod4 *>calloc(1, sizeof(_anon_pod4))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod4")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7172
 *         self._ptr = <_anon_pod4 *>calloc(1, sizeof(_anon_pod4))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod4")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, call it with the message, and raise the result. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7172, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod4};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7172, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7172, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7171
 *     def __init__(self):
 *         self._ptr = <_anon_pod4 *>calloc(1, sizeof(_anon_pod4))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod4")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":7173
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod4")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No external owner: this wrapper owns the allocation itself. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":7174
 *             raise MemoryError("Error allocating _py_anon_pod4")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":7175
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":7169
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod4 *>calloc(1, sizeof(_anon_pod4))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7177
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod4 *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod4.__dealloc__: casts self to the
 * extension-type struct and delegates to the implementation. Cannot raise. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope;
   * presumably __Pyx_KwValues_VARARGS expands to a constant that discards
   * its arguments -- confirm against the macro definition in the preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of _py_anon_pod4.__dealloc__:
 *
 *     cdef _anon_pod4 *ptr
 *     if self._owned and self._ptr != NULL:
 *         ptr = self._ptr
 *         self._ptr = NULL
 *         free(ptr)
 *
 * Releases the heap allocation only when this wrapper owns it (_owned set in
 * __init__). The pointer is cleared before free() so the object never holds
 * a dangling pointer, even transiently. No-op for borrowed/NULL pointers. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  /* Same short-circuit semantics as the generated label-based form:
   * _ptr is only inspected when _owned is true. */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    _anon_pod4 *__pyx_v_ptr = __pyx_v_self->_ptr;  /* stash, then detach */
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":7184
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod4 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod4.__repr__: casts self and delegates.
 * Returns a new unicode reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably the
   * macro discards its arguments -- confirm against the preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod4.__repr__: builds the f-string
 * f"<{__name__}._py_anon_pod4 object at {hex(id(self))}>" by formatting the
 * module's __name__, hex(id(self)), and three constant fragments, then
 * joining them with Cython's precomputed-length unicode join. Returns a new
 * unicode reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":7185
 * 
 *     def __repr__(self):
 *         return f"<{__name__}._py_anon_pod4 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, '') -- the {__name__} placeholder. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) -- the {hex(id(self))} placeholder. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five fragments; the length/max-char hints let the join
   * preallocate the result buffer exactly. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_py_anon_pod4_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7184
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod4 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7187
 *         return f"<{__name__}._py_anon_pod4 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level getter wrapper for the 'ptr' property: casts self and
 * delegates. Returns a new int reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably the
   * macro discards its arguments -- confirm against the preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the 'ptr' property getter: returns the wrapped struct's
 * address as a Python int (may be 0 if _ptr is NULL). New reference, or NULL
 * on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7190
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): intptr_t is passed through PyLong_FromSsize_t; assumes
   * intptr_t and Py_ssize_t have the same width (true on the platforms this
   * module targets) -- confirm if porting. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7187
 *         return f"<{__name__}._py_anon_pod4 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7192
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for `cdef intptr_t _get_ptr(self)`: returns the wrapped
 * _anon_pod4 pointer as an integer address without any Python-object
 * conversion. May return 0 when _ptr is NULL. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod4__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  /* Single-expression body; the generated goto/label scaffolding is not
   * needed for a function with one return path. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":7195
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod4.__int__: casts self and delegates.
 * Returns a new int reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably the
   * macro discards its arguments -- confirm against the preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod4.__int__: int(obj) yields the wrapped
 * struct's address, identical to the 'ptr' property. New reference, or NULL
 * on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":7196
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): assumes intptr_t and Py_ssize_t share a width -- see the
   * 'ptr' getter above; confirm if porting. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7195
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7198
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod4 other_
 *         if not isinstance(other, _py_anon_pod4):
*/

/* Python wrapper */
/* Python-level wrapper for _py_anon_pod4.__eq__: casts self, forwards other,
 * and delegates. Returns a new bool reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; presumably the
   * macro discards its arguments -- confirm against the preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod4.__eq__: returns False when other is not a
 * _py_anon_pod4 (including None); otherwise compares the two wrapped structs
 * byte-for-byte with memcmp over sizeof(_anon_pod4). Returns a new bool
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":7200
 *     def __eq__(self, other):
 *         cdef _py_anon_pod4 other_
 *         if not isinstance(other, _py_anon_pod4):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7201
 *         cdef _py_anon_pod4 other_
 *         if not isinstance(other, _py_anon_pod4):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod4)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7200
 *     def __eq__(self, other):
 *         cdef _py_anon_pod4 other_
 *         if not isinstance(other, _py_anon_pod4):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":7202
 *         if not isinstance(other, _py_anon_pod4):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod4)) == 0)
 * 
 */
  /* The isinstance check above already ruled out None and wrong types, so
   * this TypeTest is effectively a formality required by the typed cast. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4))))) __PYX_ERR(0, 7202, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":7203
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod4)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): memcmp assumes both _ptr values are non-NULL; calling it
   * with a NULL pointer would be undefined behavior -- confirm the class
   * invariant that _ptr is always allocated before __eq__ can run. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(_anon_pod4))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7198
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod4 other_
 *         if not isinstance(other, _py_anon_pod4):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7205
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod4)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
*/

/* Python wrapper */
/* Slot-level Python wrapper for _py_anon_pod4.__setitem__ (mp_ass_subscript).
   Casts the generic PyObject* self to the extension-object struct and forwards
   key/val unchanged to the implementation function; no argument parsing is
   required for this slot signature. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* __Pyx_KwValues_VARARGS is a macro that ignores its operands here; kept for
     uniformity with the other generated wrappers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod4.__setitem__(key, val).
   If key == 0 and val is a numpy.ndarray: allocates a fresh _anon_pod4,
   copies sizeof(_anon_pod4) bytes from val.ctypes.data into it, and marks
   the instance as owning that memory (read-only iff the array is not
   writeable). Any other key falls through to setattr(self, key, val).
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":7206
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up _numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7206, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 7206, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7207
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 */
    /* NOTE(review): if _ptr already points at owned memory this overwrites it
       without freeing — presumably a leak on repeated assignment; the defect
       originates in the generated .pyx, confirm/fix upstream rather than here. */
    __pyx_v_self->_ptr = ((_anon_pod4 *)malloc((sizeof(_anon_pod4))));

    /* "cuda/bindings/_nvml.pyx":7208
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod4))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7209
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod4")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod4))
 *             self._owner = None
 */
      /* Build and raise MemoryError(...) via the vectorcall fast path; the
         PyMethod_Check branch handles a (shadowed) bound-method callable. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7209, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod4};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7209, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 7209, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7208
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod4))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7210
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod4))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data yields the array's base address as a Python int; it is
       converted to intptr_t and treated as a source buffer of at least
       sizeof(_anon_pod4) bytes (caller contract, not checked here). */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7210, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7210, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7210, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(_anon_pod4))));

    /* "cuda/bindings/_nvml.pyx":7211
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod4))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* Data was copied, so no Python object keeps the source alive: owner=None. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7212
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod4))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7213
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the numpy array's writeability onto the wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7213, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7213, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 7213, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":7206
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":7215
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-(0, ndarray) assignments are routed through normal attribute
       setting, so string keys hit the generated property setters. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 7215, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":7205
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod4)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7217
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def avg_factor(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ wrapper for the `avg_factor` property: casts self to the
   extension struct and delegates to the getter implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for _py_anon_pod4.avg_factor: boxes the C
   `unsigned int` field _ptr[0].avgFactor into a new Python int.
   Returns a new reference, or NULL with an exception set on failure.
   NOTE(review): _ptr is dereferenced unconditionally — presumably it is
   guaranteed non-NULL by construction; confirm against the .pyx class. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7220
 *     def avg_factor(self):
 *         """int: """
 *         return self._ptr[0].avgFactor             # <<<<<<<<<<<<<<
 * 
 *     @avg_factor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).avgFactor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7217
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def avg_factor(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.avg_factor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7222
 *         return self._ptr[0].avgFactor
 * 
 *     @avg_factor.setter             # <<<<<<<<<<<<<<
 *     def avg_factor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ wrapper for the `avg_factor` property: casts self to the
   extension struct and delegates to the setter implementation.
   Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for _py_anon_pod4.avg_factor: raises ValueError when
   the wrapper is flagged read-only, otherwise converts the Python value to
   C `unsigned int` (OverflowError/TypeError propagate from the conversion)
   and stores it in _ptr[0].avgFactor. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7224
 *     @avg_factor.setter
 *     def avg_factor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod4 instance is read-only")
 *         self._ptr[0].avgFactor = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7225
 *     def avg_factor(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod4 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].avgFactor = val
 * 
 */
    /* Instantiate ValueError with the interned message via vectorcall. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod4_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7225, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7225, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7224
 *     @avg_factor.setter
 *     def avg_factor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod4 instance is read-only")
 *         self._ptr[0].avgFactor = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7226
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod4 instance is read-only")
 *         self._ptr[0].avgFactor = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7226, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).avgFactor = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7222
 *         return self._ptr[0].avgFactor
 * 
 *     @avg_factor.setter             # <<<<<<<<<<<<<<
 *     def avg_factor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.avg_factor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7228
 *         self._ptr[0].avgFactor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def frequency(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ wrapper for the `frequency` property: casts self to the
   extension struct and delegates to the getter implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for _py_anon_pod4.frequency: boxes the C
   `unsigned int` field _ptr[0].frequency into a new Python int.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7231
 *     def frequency(self):
 *         """int: """
 *         return self._ptr[0].frequency             # <<<<<<<<<<<<<<
 * 
 *     @frequency.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).frequency); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7228
 *         self._ptr[0].avgFactor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def frequency(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.frequency.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7233
 *         return self._ptr[0].frequency
 * 
 *     @frequency.setter             # <<<<<<<<<<<<<<
 *     def frequency(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ wrapper for the `frequency` property: casts self to the
   extension struct and delegates to the setter implementation.
   Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for _py_anon_pod4.frequency: raises ValueError when
   the wrapper is flagged read-only, otherwise converts the Python value to
   C `unsigned int` (conversion errors propagate) and stores it in
   _ptr[0].frequency. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7235
 *     @frequency.setter
 *     def frequency(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod4 instance is read-only")
 *         self._ptr[0].frequency = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7236
 *     def frequency(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod4 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].frequency = val
 * 
 */
    /* Instantiate ValueError with the interned message via vectorcall. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod4_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7236, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7236, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7235
 *     @frequency.setter
 *     def frequency(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod4 instance is read-only")
 *         self._ptr[0].frequency = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7237
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod4 instance is read-only")
 *         self._ptr[0].frequency = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7237, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).frequency = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7233
 *         return self._ptr[0].frequency
 * 
 *     @frequency.setter             # <<<<<<<<<<<<<<
 *     def frequency(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.frequency.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7239
 *         self._ptr[0].frequency = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod4 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
   _py_anon_pod4.from_data(data): unpacks exactly one positional-or-keyword
   argument `data` and forwards it to the implementation function.
   Returns a new reference, or NULL with an exception set.

   Fix: the keyword-count error check previously read
       if (unlikely(__pyx_kwds_len) < 0) ...
   With the GCC/Clang definition `unlikely(x) == __builtin_expect(!!(x), 0)`,
   `!!(x)` is always 0 or 1, so the `< 0` comparison was always false and a
   -1 error return from __Pyx_NumKwargs_FASTCALL was silently ignored,
   letting execution continue with a negative count. The comparison now sits
   inside the macro argument, which is also equivalent under the plain
   `#define unlikely(x) (x)` fallback. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_12from_data, "_py_anon_pod4.from_data(data)\n\nCreate an _py_anon_pod4 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod4_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7239, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect positionals, then
         merge keywords and verify `data` was supplied exactly once. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7239, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 7239, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 7239, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7239, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 7239, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod4.from_data(data): looks up the module-level
   `_py_anon_pod4_dtype` and delegates to the shared __from_data helper, which
   builds a _py_anon_pod4 wrapping the given NumPy array. Returns a new
   reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":7246
 *             data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod4_dtype` holding the data.
 *         """
 *         return __from_data(data, "_py_anon_pod4_dtype", _py_anon_pod4_dtype, _py_anon_pod4)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_py_anon_pod4_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_py_anon_pod4_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7239
 *         self._ptr[0].frequency = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod4 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7248
 *         return __from_data(data, "_py_anon_pod4_dtype", _py_anon_pod4_dtype, _py_anon_pod4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod4 instance wrapping the given pointer.
*/

/* Python wrapper */
/* NOTE(review): Cython-generated fastcall wrapper for the static method
 * `_py_anon_pod4.from_ptr(ptr, readonly=False, owner=None)`.  It parses up
 * to three positional and/or keyword arguments, applies defaults, converts
 * them to C values, and forwards to the __pyx_pf_..._14from_ptr
 * implementation below.  Machine-generated code: regenerate from
 * cuda/bindings/_nvml.pyx rather than editing by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_14from_ptr, "_py_anon_pod4.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an _py_anon_pod4 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] = (ptr, readonly, owner): owned references filled in during
   * parsing; every exit path below releases whatever was stored here. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 7248, __pyx_L3_error)
    /* Slow path: keyword arguments present.  Collect positionals first
     * (cases fall through from the highest count), then merge keywords. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7248, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7248, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7248, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 7248, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":7249
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an _py_anon_pod4 instance wrapping the given pointer.
 * 
*/
      /* Apply default `owner=None`; then verify the sole required argument
       * (ptr, index 0) was supplied either positionally or by keyword. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 7248, __pyx_L3_error) }
      }
    } else {
      /* Fast path: positional arguments only (1 to 3 accepted). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7248, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7248, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7248, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      /* Default: owner = None. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): `ptr` is converted via PyLong_AsSsize_t; this assumes
     * intptr_t and Py_ssize_t have the same width (confirm if porting to an
     * unusual ABI).  -1 with a live exception signals conversion failure. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7249, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7249, __pyx_L3_error)
    } else {
      /* Default: readonly = False. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 7248, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument parsing failed: drop any argument references already taken,
   * record the traceback, and propagate NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":7248
 *         return __from_data(data, "_py_anon_pod4_dtype", _py_anon_pod4_dtype, _py_anon_pod4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod4 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod4.from_ptr`: wrap the memory at `ptr` in a
 * new _py_anon_pod4 instance.  If `owner` is None the struct is copied into
 * storage malloc'd and owned by the new object (_owned = True, freed by its
 * deallocator); otherwise the object aliases `ptr` directly and keeps a
 * reference to `owner` so the underlying memory stays alive
 * (_owned = False).  Raises ValueError for a null pointer and MemoryError if
 * allocation fails.  Machine-generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7257
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)
*/
  /* Guard: a null address is never valid input. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7258
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7258, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7258, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7257
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)
*/
  }

  /* "cuda/bindings/_nvml.pyx":7259
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
*/
  /* Allocate the instance via tp_new directly (bypasses __init__, which
   * would calloc a second buffer we'd immediately overwrite). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod4(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7259, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7260
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if obj._ptr == NULL:
*/
  /* No owner: deep-copy the struct so the new object owns its memory. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7261
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)
 *         if owner is None:
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod4")
*/
    __pyx_v_obj->_ptr = ((_anon_pod4 *)malloc((sizeof(_anon_pod4))));

    /* "cuda/bindings/_nvml.pyx":7262
 *         if owner is None:
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod4))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7263
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod4")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod4))
 *             obj._owner = None
*/
      /* NOTE(review): `MemoryError` is resolved as a module-global name here
       * (standard Cython lookup, falling back to the builtin). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7263, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod4};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7263, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 7263, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7262
 *         if owner is None:
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod4))
*/
    }

    /* "cuda/bindings/_nvml.pyx":7264
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod4))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct into the freshly owned buffer.  The caller
     * guarantees `ptr` addresses at least sizeof(_anon_pod4) readable bytes. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(_anon_pod4))));

    /* "cuda/bindings/_nvml.pyx":7265
 *                 raise MemoryError("Error allocating _py_anon_pod4")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod4))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7266
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod4))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <_anon_pod4 *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7260
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod4 obj = _py_anon_pod4.__new__(_py_anon_pod4)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod4 *>malloc(sizeof(_anon_pod4))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":7268
 *             obj._owned = True
 *         else:
 *             obj._ptr = <_anon_pod4 *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Owner supplied: alias the caller's memory and hold a reference to the
   * owner object to keep that memory alive for the object's lifetime. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((_anon_pod4 *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7269
 *         else:
 *             obj._ptr = <_anon_pod4 *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":7270
 *             obj._ptr = <_anon_pod4 *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":7271
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":7272
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7248
 *         return __from_data(data, "_py_anon_pod4_dtype", _py_anon_pod4_dtype, _py_anon_pod4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod4 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release temporaries, record traceback, return NULL.  The
   * partially-built `obj` (if any) is released below at L0. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Cython-generated wrapper for `_py_anon_pod4.__reduce_cython__`: rejects
 * any positional or keyword arguments, then delegates to the implementation
 * (which unconditionally raises TypeError -- the type is unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_16__reduce_cython__, "_py_anon_pod4.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod4.__reduce_cython__`: always raises
 * TypeError, because the wrapped raw C pointer (`self._ptr`) cannot be
 * meaningfully pickled.  Machine-generated by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Unconditional raise: control always transfers to L1_error. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Cython-generated wrapper for `_py_anon_pod4.__setstate_cython__`: parses
 * exactly one argument (`__pyx_state`, positional or keyword) and delegates
 * to the implementation, which unconditionally raises TypeError since the
 * type is unpicklable. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_18__setstate_cython__, "_py_anon_pod4.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] = __pyx_state: owned reference, released on all exit paths. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    /* Slow path: keyword arguments present. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify the single required argument was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument parsing failed: drop any argument refs already taken. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod4.__setstate_cython__`: always raises
 * TypeError (the state argument is ignored) because an instance holding a
 * raw C pointer cannot be reconstructed from pickled state.
 * Machine-generated by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod4_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Unconditional raise: control always transfers to L1_error. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod4.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7275
 * 
 * 
 * cdef _get__py_anon_pod5_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod5 pod = _anon_pod5()
 *     return _numpy.dtype({
*/

/* Build and return the numpy dtype describing the `_anon_pod5` C struct:
 * numpy.dtype({'names': ['timeslice'], 'formats': [numpy.uint32],
 * 'offsets': [offsetof(timeslice)], 'itemsize': sizeof(_anon_pod5)}).
 * Field offsets are derived from the addresses of a stack-allocated `pod`
 * rather than offsetof(), which is what the .pyx source expresses.
 * Machine-generated by Cython -- do not hand-edit. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod5_dtype_offsets(void) {
  _anon_pod5 __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  _anon_pod5 __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get__py_anon_pod5_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":7276
 * 
 * cdef _get__py_anon_pod5_dtype_offsets():
 *     cdef _anon_pod5 pod = _anon_pod5()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['timeslice'],
*/
  /* NOTE(review): `__pyx_t_1` corresponds to `_anon_pod5()` in the .pyx and
   * is only default-initialized above; for a POD its members are
   * indeterminate.  That is harmless here because only the *addresses* of
   * `pod` and its fields are taken below to compute offsets -- the member
   * values are never read. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":7277
 * cdef _get__py_anon_pod5_dtype_offsets():
 *     cdef _anon_pod5 pod = _anon_pod5()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['timeslice'],
 *         'formats': [_numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  /* Resolve numpy.dtype (the module was imported as `_numpy` in the .pyx). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":7278
 *     cdef _anon_pod5 pod = _anon_pod5()
 *     return _numpy.dtype({
 *         'names': ['timeslice'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32],
 *         'offsets': [
*/
  /* Assemble the 4-key dict-form dtype spec: names / formats / offsets /
   * itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7278, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7278, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_timeslice);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_timeslice);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_timeslice) != (0)) __PYX_ERR(0, 7278, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 7278, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7279
 *     return _numpy.dtype({
 *         'names': ['timeslice'],
 *         'formats': [_numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 7279, __pyx_L1_error);
  __pyx_t_7 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 7278, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7281
 *         'formats': [_numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(_anon_pod5),
*/
  /* Byte offset of `timeslice` within the struct, computed from addresses. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeslice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7281, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":7280
 *         'names': ['timeslice'],
 *         'formats': [_numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 7280, __pyx_L1_error);
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 7278, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":7283
 *             (<intptr_t>&(pod.timeslice)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(_anon_pod5),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(_anon_pod5))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7283, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 7278, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall helper (the PyMethod unpack
   * branch covers the unlikely case where `dtype` resolved to a bound
   * method). */
  __pyx_t_8 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_8 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7277, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7275
 * 
 * 
 * cdef _get__py_anon_pod5_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef _anon_pod5 pod = _anon_pod5()
 *     return _numpy.dtype({
*/

  /* function exit code */
  /* Error path: release all live temporaries and return 0 (NULL). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml._get__py_anon_pod5_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7300
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod5 *>calloc(1, sizeof(_anon_pod5))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Cython-generated tp_init wrapper for `_py_anon_pod5.__init__` (classic
 * varargs protocol, not fastcall, as required by the tp_init slot): rejects
 * any positional or keyword arguments, then delegates to the implementation
 * below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.__init__` (.pyx lines 7300-7306):
 * allocates a zero-initialized _anon_pod5 via calloc, raises MemoryError
 * on allocation failure, then initializes the ownership bookkeeping fields
 * (_owner = None, _owned = True, _readonly = False).
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5___init__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":7301
 * 
 *     def __init__(self):
 *         self._ptr = <_anon_pod5 *>calloc(1, sizeof(_anon_pod5))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod5")
 */
  __pyx_v_self->_ptr = ((_anon_pod5 *)calloc(1, (sizeof(_anon_pod5))));

  /* "cuda/bindings/_nvml.pyx":7302
 *     def __init__(self):
 *         self._ptr = <_anon_pod5 *>calloc(1, sizeof(_anon_pod5))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod5")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7303
 *         self._ptr = <_anon_pod5 *>calloc(1, sizeof(_anon_pod5))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod5")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in the module globals and call it with the
     * message via the vectorcall helper (the CYTHON_UNPACK_METHODS branch
     * unpacks a bound method into self + function first). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7303, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod5};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7303, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7303, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7302
 *     def __init__(self):
 *         self._ptr = <_anon_pod5 *>calloc(1, sizeof(_anon_pod5))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating _py_anon_pod5")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":7304
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating _py_anon_pod5")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: take a reference to None and drop whatever
   * reference _owner previously held. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":7305
 *             raise MemoryError("Error allocating _py_anon_pod5")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":7306
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":7300
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <_anon_pod5 *>calloc(1, sizeof(_anon_pod5))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7308
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef _anon_pod5 *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Cython-generated CPython entry point for `_py_anon_pod5.__dealloc__` (.pyx:7308).
 * NOTE(review): `__pyx_args` / `__pyx_nargs` are not declared in this scope;
 * presumably __Pyx_KwValues_VARARGS is a macro that discards its arguments in
 * this build configuration -- confirm against the Cython utility code. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of `_py_anon_pod5.__dealloc__` (.pyx lines 7308-7313):
 * releases the wrapped _anon_pod5 allocation if, and only if, this object
 * owns it.  The pointer field is cleared before free() so the struct is
 * never observable in a half-destroyed state. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  /* Nothing to do for borrowed or already-released storage. */
  if (!__pyx_v_self->_owned || __pyx_v_self->_ptr == NULL)
    return;

  /* Detach first, then free, matching the .pyx source's ordering. */
  _anon_pod5 *doomed = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(doomed);
}

/* "cuda/bindings/_nvml.pyx":7315
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod5 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Cython-generated CPython entry point for `_py_anon_pod5.__repr__` (.pyx:7315):
 * casts self to the extension-type struct and delegates to the implementation.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.__repr__` (.pyx:7316): builds the
 * f-string  "<{__name__}._py_anon_pod5 object at {hex(id(self))}>"  by
 * formatting the module's __name__, computing hex(id(self)), and joining
 * five pre-interned fragments with __Pyx_PyUnicode_Join.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":7316
 * 
 *     def __repr__(self):
 *         return f"<{__name__}._py_anon_pod5 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format __name__ into a unicode fragment. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) as a unicode fragment. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Five fragments: literal prefix, __name__, literal middle, hex id,
   * literal suffix; joined with a precomputed length/max-char estimate. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_py_anon_pod5_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7315
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}._py_anon_pod5 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7318
 *         return f"<{__name__}._py_anon_pod5 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Cython-generated getter entry point for the `ptr` property (.pyx:7318):
 * delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for the `ptr` property (.pyx:7321): returns the
 * address of the wrapped _anon_pod5 struct as a Python int (the intptr_t
 * value is boxed with PyLong_FromSsize_t).  Returns a new reference, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7321
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7321, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7318
 *         return f"<{__name__}._py_anon_pod5 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7323
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor generated for `_py_anon_pod5._get_ptr` (.pyx:7323-7324):
 * exposes the wrapped _anon_pod5* as an integer address for other cdef code
 * in this module.  No Python API involvement, cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod5__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":7326
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Cython-generated CPython entry point for `_py_anon_pod5.__int__` (.pyx:7326):
 * delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.__int__` (.pyx:7327): identical to the
 * `ptr` property getter -- boxes the wrapped struct's address as a Python
 * int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":7327
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7326
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7329
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod5 other_
 *         if not isinstance(other, _py_anon_pod5):
*/

/* Python wrapper */
/* Cython-generated CPython entry point for `_py_anon_pod5.__eq__` (.pyx:7329):
 * delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.__eq__` (.pyx lines 7329-7334): returns
 * False unless `other` is a _py_anon_pod5 instance; otherwise compares the
 * two underlying _anon_pod5 structs bytewise with memcmp.
 * NOTE(review): neither self._ptr nor other._ptr is NULL-checked before the
 * memcmp -- this mirrors the .pyx source; verify that compared instances
 * always hold a valid allocation. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":7331
 *     def __eq__(self, other):
 *         cdef _py_anon_pod5 other_
 *         if not isinstance(other, _py_anon_pod5):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7332
 *         cdef _py_anon_pod5 other_
 *         if not isinstance(other, _py_anon_pod5):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod5)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7331
 *     def __eq__(self, other):
 *         cdef _py_anon_pod5 other_
 *         if not isinstance(other, _py_anon_pod5):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":7333
 *         if not isinstance(other, _py_anon_pod5):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod5)) == 0)
 * 
 */
  /* Checked cast to the extension type (the TypeTest also tolerates None,
   * but None was already rejected by the isinstance check above). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5))))) __PYX_ERR(0, 7333, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":7334
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod5)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(_anon_pod5))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7329
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod5 other_
 *         if not isinstance(other, _py_anon_pod5):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7336
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod5)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
*/

/* Python wrapper */
/* Cython-generated CPython entry point for `_py_anon_pod5.__setitem__`
 * (.pyx:7336): delegates to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.__setitem__` (.pyx lines 7336-7346).
 * Special case: `obj[0] = <numpy.ndarray>` replaces the wrapped struct with
 * a fresh malloc'ed copy of the array's buffer (read via val.ctypes.data),
 * takes ownership, and mirrors the array's writeable flag into _readonly.
 * Any other key falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the numpy branch overwrites self._ptr without freeing a
 * previously owned allocation -- mirrors the .pyx source, but looks like a
 * potential leak; confirm upstream. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":7337
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, _numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7337, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 7337, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7338
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 */
    __pyx_v_self->_ptr = ((_anon_pod5 *)malloc((sizeof(_anon_pod5))));

    /* "cuda/bindings/_nvml.pyx":7339
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod5))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7340
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod5")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod5))
 *             self._owner = None
 */
      /* Look up and call MemoryError via the vectorcall helper. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7340, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod5};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7340, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 7340, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7339
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod5))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7341
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod5))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read the array's buffer address (val.ctypes.data) as an integer and
     * copy sizeof(_anon_pod5) bytes into the new allocation. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7341, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7341, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7341, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(_anon_pod5))));

    /* "cuda/bindings/_nvml.pyx":7342
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod5))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7343
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(_anon_pod5))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7344
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7344, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7344, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 7344, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":7337
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":7346
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 7346, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":7336
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(_anon_pod5)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7348
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timeslice(self):
 *         """int: """
*/

/* Python wrapper for the `timeslice` property getter of `_py_anon_pod5`
 * (generated from cuda/bindings/_nvml.pyx:7348).  It only casts `self` to
 * the extension-type struct and delegates to the `__get__` implementation
 * below; all real work (and error handling) happens there. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this
   * wrapper, so __Pyx_KwValues_VARARGS presumably expands without evaluating
   * its arguments — confirm against the macro in the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `timeslice` getter: reads the `timeslice` field
 * (an unsigned int, per the __Pyx_PyLong_From_unsigned_int conversion) from
 * the wrapped `_anon_pod5` struct at self->_ptr[0] and boxes it as a Python
 * int.  Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7351
 *     def timeslice(self):
 *         """int: """
 *         return self._ptr[0].timeslice             # <<<<<<<<<<<<<<
 * 
 *     @timeslice.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).timeslice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7351, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7348
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timeslice(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.timeslice.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7353
 *         return self._ptr[0].timeslice
 * 
 *     @timeslice.setter             # <<<<<<<<<<<<<<
 *     def timeslice(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `timeslice` property setter of `_py_anon_pod5`
 * (generated from cuda/bindings/_nvml.pyx:7353).  Casts `self` to the
 * extension-type struct and delegates to the `__set__` implementation;
 * returns 0 on success, -1 on error per the descriptor protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not in scope here, so
   * __Pyx_KwValues_VARARGS presumably expands without evaluating its
   * arguments — confirm against the macro in the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `timeslice` setter: raises ValueError when the
 * wrapper was created read-only (self->_readonly), otherwise converts `val`
 * to a C unsigned int and stores it into the wrapped `_anon_pod5` struct.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7355
 *     @timeslice.setter
 *     def timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod5 instance is read-only")
 *         self._ptr[0].timeslice = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7356
 *     def timeslice(self, val):
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod5 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].timeslice = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This__py_anon_pod5_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7356, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7356, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7355
 *     @timeslice.setter
 *     def timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This _py_anon_pod5 instance is read-only")
 *         self._ptr[0].timeslice = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7357
 *         if self._readonly:
 *             raise ValueError("This _py_anon_pod5 instance is read-only")
 *         self._ptr[0].timeslice = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert `val` to unsigned int; raises OverflowError/TypeError on bad input. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7357, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).timeslice = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7353
 *         return self._ptr[0].timeslice
 * 
 *     @timeslice.setter             # <<<<<<<<<<<<<<
 *     def timeslice(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.timeslice.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7359
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod5 instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method `_py_anon_pod5.from_data(data)`
 * (generated from cuda/bindings/_nvml.pyx:7359).  Parses the single
 * `data` argument (positional or keyword) and delegates to the
 * implementation function.  Returns a new reference or NULL on error.
 *
 * NOTE(review): this file is Cython-generated; the parenthesization fix
 * below should ultimately be upstreamed to the generator rather than
 * maintained by hand in generated output. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_12from_data, "_py_anon_pod5.from_data(data)\n\nCreate an _py_anon_pod5 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod5_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX(review): was `unlikely(__pyx_kwds_len) < 0`.  `unlikely(x)`
     * yields the 0/1 result of `__builtin_expect(!!(x), 0)`, so that
     * comparison is always false and a negative (error) kwargs count was
     * silently ignored.  Parenthesized to match the correct form used by
     * the `__reduce_cython__` wrapper elsewhere in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7359, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — merge positionals, then
       * let __Pyx_ParseKeywords fill the remaining slots by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7359, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 7359, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 7359, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7359, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 7359, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.from_data(data)`: looks up the module
 * global `_py_anon_pod5_dtype` and calls the module-level helper
 * `__from_data(data, "_py_anon_pod5_dtype", _py_anon_pod5_dtype,
 * _py_anon_pod5)` to build the wrapper.  Returns a new reference, or NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":7366
 *             data (_numpy.ndarray): a single-element array of dtype `_py_anon_pod5_dtype` holding the data.
 *         """
 *         return __from_data(data, "_py_anon_pod5_dtype", _py_anon_pod5_dtype, _py_anon_pod5)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module-level dtype object; fails if it is not defined. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_py_anon_pod5_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_py_anon_pod5_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7359
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod5 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7368
 *         return __from_data(data, "_py_anon_pod5_dtype", _py_anon_pod5_dtype, _py_anon_pod5)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod5 instance wrapping the given pointer.
*/

/* Python wrapper for the static method
 * `_py_anon_pod5.from_ptr(ptr, readonly=False, owner=None)` (generated from
 * cuda/bindings/_nvml.pyx:7368).  Parses up to three arguments, converts
 * `ptr` to intptr_t and `readonly` to a C bool, applies the `owner=None`
 * default, and delegates to the implementation.  Returns a new reference or
 * NULL on error.
 *
 * NOTE(review): this file is Cython-generated; the parenthesization fix
 * below should ultimately be upstreamed to the generator rather than
 * maintained by hand in generated output. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_14from_ptr, "_py_anon_pod5.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an _py_anon_pod5 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX(review): was `unlikely(__pyx_kwds_len) < 0`.  `unlikely(x)`
     * yields the 0/1 result of `__builtin_expect(!!(x), 0)`, so that
     * comparison is always false and a negative (error) kwargs count was
     * silently ignored.  Parenthesized to match the correct form used by
     * the `__reduce_cython__` wrapper elsewhere in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7368, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — gather positionals, then
       * let __Pyx_ParseKeywords resolve the rest by name. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7368, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7368, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7368, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 7368, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":7369
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an _py_anon_pod5 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 7368, __pyx_L3_error) }
      }
    } else {
      /* Fast path: 1..3 positional arguments, no keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7368, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7368, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7368, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts `ptr` via PyLong_AsSsize_t — generated code
     * assumes intptr_t and Py_ssize_t have the same width on all supported
     * platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7369, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7369, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 7368, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":7368
 *         return __from_data(data, "_py_anon_pod5_dtype", _py_anon_pod5_dtype, _py_anon_pod5)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod5 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `_py_anon_pod5.from_ptr(ptr, readonly, owner)`.
 * Rejects a null pointer with ValueError.  When `owner` is None the
 * pointed-to `_anon_pod5` is deep-copied into a freshly malloc'ed buffer
 * that the new object owns (`_owned = True`); otherwise the object aliases
 * the caller's memory and holds a reference to `owner` to keep it alive
 * (`_owned = False`).  `_readonly` gates the generated property setters.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7377
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7378
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7378, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7378, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7377
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)
 */
  }

  /* "cuda/bindings/_nvml.pyx":7379
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod5(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7379, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7380
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7381
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)
 *         if owner is None:
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 */
    /* Copying branch: the new object gets its own heap buffer. */
    __pyx_v_obj->_ptr = ((_anon_pod5 *)malloc((sizeof(_anon_pod5))));

    /* "cuda/bindings/_nvml.pyx":7382
 *         if owner is None:
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod5))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7383
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod5")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod5))
 *             obj._owner = None
 */
      /* NOTE(review): MemoryError is looked up as a module global here
       * (builtins fallback), not via PyExc_MemoryError — generated pattern. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7383, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating__py_anon_pod5};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7383, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 7383, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7382
 *         if owner is None:
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod5))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7384
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod5))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* NOTE(review): copies sizeof(_anon_pod5) bytes from the caller-supplied
     * address; the caller is responsible for it being valid and large
     * enough — this cannot be validated here. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(_anon_pod5))));

    /* "cuda/bindings/_nvml.pyx":7385
 *                 raise MemoryError("Error allocating _py_anon_pod5")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod5))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7386
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(_anon_pod5))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <_anon_pod5 *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7380
 *             raise ValueError("ptr must not be null (0)")
 *         cdef _py_anon_pod5 obj = _py_anon_pod5.__new__(_py_anon_pod5)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <_anon_pod5 *>malloc(sizeof(_anon_pod5))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":7388
 *             obj._owned = True
 *         else:
 *             obj._ptr = <_anon_pod5 *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Aliasing branch: wrap the caller's memory and keep `owner` alive. */
    __pyx_v_obj->_ptr = ((_anon_pod5 *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7389
 *         else:
 *             obj._ptr = <_anon_pod5 *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":7390
 *             obj._ptr = <_anon_pod5 *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":7391
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":7392
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7368
 *         return __from_data(data, "_py_anon_pod5_dtype", _py_anon_pod5_dtype, _py_anon_pod5)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod5 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for `_py_anon_pod5.__reduce_cython__` (pickling stub).
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation, which unconditionally raises TypeError. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_16__reduce_cython__, "_py_anon_pod5.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  /* Correctly-parenthesized error check on the kwargs count. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod5.__reduce_cython__.
 * This pod wraps a raw C pointer and cannot be pickled, so the method
 * unconditionally raises TypeError. There is no success path: control
 * always falls through the __PYX_ERR macro to the error label. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper prototype, docstring, and PyMethodDef table entry for
 * _py_anon_pod5.__setstate_cython__; the wrapper body follows below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_18__setstate_cython__, "_py_anon_pod5.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_18__setstate_cython__};
/* Python-visible wrapper for _py_anon_pod5.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one positional-or-keyword argument (__pyx_state) from either
 * the METH_FASTCALL ABI or the classic tuple/dict ABI, then forwards to the
 * implementation. Returns NULL with an exception set on argument errors.
 *
 * Fix vs. original: the keyword-count error check read
 *     if (unlikely(__pyx_kwds_len) < 0)
 * which is dead code — unlikely(x) expands to __builtin_expect(!!(x), 0),
 * yielding 0 or 1, so the comparison "< 0" can never be true and a negative
 * (error) return from __Pyx_NumKwargs_FASTCALL went undetected. The
 * parenthesization is corrected to match the sibling wrapper
 * (__reduce_cython__ above): unlikely(__pyx_kwds_len < 0). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall ABI: recover the positional count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIXED: parenthesization — the comparison must be inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: accept at most one positional, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of _py_anon_pod5.__setstate_cython__.
 * Like __reduce_cython__, pickling of this pointer-wrapping pod is
 * unsupported: the method unconditionally raises TypeError and the
 * __pyx_state argument is never examined. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13_py_anon_pod5_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml._py_anon_pod5.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7395
 * 
 * 
 * cdef _get_vgpu_scheduler_capabilities_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerCapabilities_t pod = nvmlVgpuSchedulerCapabilities_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy.dtype describing nvmlVgpuSchedulerCapabilities_t:
 * a dict with 'names', 'formats' (all uint32), per-field byte 'offsets'
 * (computed from a local stack instance via pointer arithmetic), and
 * 'itemsize' = sizeof(nvmlVgpuSchedulerCapabilities_t), passed to
 * _numpy.dtype(). Returns a new reference, or NULL with an exception set.
 *
 * Fix vs. original: __pyx_t_1 was declared but never assigned before being
 * copied into __pyx_v_pod — a read of an indeterminate value (the
 * nvmlVgpuSchedulerCapabilities_t() value-initialization requested by the
 * .pyx source was missing). Value-initialize it first; the field offsets do
 * not depend on the contents, so observable behavior is unchanged. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_capabilities_dtype_offsets(void) {
  nvmlVgpuSchedulerCapabilities_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerCapabilities_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  size_t __pyx_t_15;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_capabilities_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":7396
 * 
 * cdef _get_vgpu_scheduler_capabilities_dtype_offsets():
 *     cdef nvmlVgpuSchedulerCapabilities_t pod = nvmlVgpuSchedulerCapabilities_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['supported_schedulers', 'max_timeslice', 'min_timeslice', 'is_arr_mode_supported', 'max_frequency_for_arr', 'min_frequency_for_arr', 'max_avg_factor_for_arr', 'min_avg_factor_for_arr'],
*/
  /* FIXED: value-initialize the temporary before copying it into pod;
   * the original read __pyx_t_1 while it was still indeterminate. */
  __pyx_t_1 = nvmlVgpuSchedulerCapabilities_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":7397
 * cdef _get_vgpu_scheduler_capabilities_dtype_offsets():
 *     cdef nvmlVgpuSchedulerCapabilities_t pod = nvmlVgpuSchedulerCapabilities_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['supported_schedulers', 'max_timeslice', 'min_timeslice', 'is_arr_mode_supported', 'max_frequency_for_arr', 'min_frequency_for_arr', 'max_avg_factor_for_arr', 'min_avg_factor_for_arr'],
*/
  /* Look up _numpy.dtype to call once the argument dict is assembled. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* Build the 'names' list (snake_case field names exposed to Python). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7398, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7398, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_supported_schedulers);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_supported_schedulers);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_supported_schedulers) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_timeslice);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_timeslice);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_max_timeslice) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min_timeslice);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min_timeslice);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_min_timeslice) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_arr_mode_supported);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_arr_mode_supported);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_is_arr_mode_supported) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_frequency_for_arr);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_frequency_for_arr);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_max_frequency_for_arr) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min_frequency_for_arr);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min_frequency_for_arr);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_min_frequency_for_arr) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_avg_factor_for_arr);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_avg_factor_for_arr);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_max_avg_factor_for_arr) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min_avg_factor_for_arr);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min_avg_factor_for_arr);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_min_avg_factor_for_arr) != (0)) __PYX_ERR(0, 7398, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 7398, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Build the 'formats' list: every field is _numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 7399, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 7398, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Compute per-field byte offsets from the base of the stack instance
   * (&field - &pod); only addresses are taken, values are never read. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.supportedSchedulers)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxTimeslice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 7402, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.minTimeslice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 7403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isArrModeSupported)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 7404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxFrequencyForARR)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 7405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.minFrequencyForARR)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxAvgFactorForARR)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.minAvgFactorForARR)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* Assemble the 'offsets' list in field order. */
  __pyx_t_7 = PyList_New(8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_14) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_13) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_12) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_10) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_9) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_8) != (0)) __PYX_ERR(0, 7400, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 7398, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* 'itemsize' = total size of the C struct, including any padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerCapabilities_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7410, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 7398, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(dict); unpack bound methods when possible so the
   * self slot can be used for the vectorcall fast path. */
  __pyx_t_15 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_15 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_15, (2-__pyx_t_15) | (__pyx_t_15*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7397, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_capabilities_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7427
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerCapabilities_t *>calloc(1, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-visible wrapper for VgpuSchedulerCapabilities.__init__.
 * tp_init always uses the classic (args tuple, kwds dict) calling
 * convention. __init__ takes only self, so any positional or keyword
 * argument is rejected with a TypeError. Returns 0 on success, -1 on
 * failure with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* PyTuple_Size can fail (returns -1) in the non-fast-macro build. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerCapabilities.__init__.
 * Zero-allocates one nvmlVgpuSchedulerCapabilities_t on the heap and
 * records ownership: _ptr (the struct), _owner = None (no other object
 * keeps the memory alive), _owned = True (this instance frees it in
 * __dealloc__), _readonly = False. Raises MemoryError if calloc fails.
 * Returns 0 on success, -1 on error with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* calloc zero-initializes the struct, so all capability fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerCapabilities_t *)calloc(1, (sizeof(nvmlVgpuSchedulerCapabilities_t))));

  /* Allocation failure -> raise MemoryError("Error allocating VgpuSchedulerCapabilities"). */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* Look up the (possibly shadowed) MemoryError global and call it with
     * the interned message string via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7430, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerCa};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7430, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7430, __pyx_L1_error)

  }

  /* self._owner = None: swap in None with the usual incref-before-decref
   * dance so the previous value is released safely. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* self._owned = True: this instance is responsible for freeing _ptr. */
  __pyx_v_self->_owned = 1;

  /* self._readonly = False: writes through the accessors are permitted. */
  __pyx_v_self->_readonly = 0;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7435
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerCapabilities_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerCapabilities.__dealloc__.
 * Casts the generic PyObject* to the concrete extension-type struct and
 * forwards to the implementation below.  Cython-generated; any fix belongs
 * in cuda/bindings/_nvml.pyx, not in this file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are consumed inside the macro expansion */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__: frees the nvmlVgpuSchedulerCapabilities_t
 * allocation only when this object owns it (_owned) and the pointer is
 * non-NULL.  The pointer is copied aside and cleared BEFORE free() so the
 * object never holds a dangling pointer, even transiently. */
static void __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  nvmlVgpuSchedulerCapabilities_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuSchedulerCapabilities_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":7437
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerCapabilities_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* short-circuit `and`: skip the NULL test when not owned */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7438
 *         cdef nvmlVgpuSchedulerCapabilities_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":7439
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* clear before freeing so _ptr is never left dangling */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":7440
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7437
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerCapabilities_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":7435
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerCapabilities_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":7442
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerCapabilities object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerCapabilities.__repr__.
 * Forwards to the implementation after downcasting self; returns a new
 * reference (or NULL on error) from the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__: builds the f-string
 * "<{__name__}.VgpuSchedulerCapabilities object at {hex(id(self))}>"
 * by formatting the module __name__ and hex(id(self)) as unicode, then
 * joining five fragments with __Pyx_PyUnicode_Join.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":7443
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerCapabilities object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* join: "<" + __name__ + ".VgpuSchedulerCapabilities object at " + hex + ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerCapabilities_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7442
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerCapabilities object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7445
 *         return f"<{__name__}.VgpuSchedulerCapabilities object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the `ptr` property getter; forwards to the
 * implementation after downcasting self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property: returns the address held in _ptr
 * as a Python int (the pointer is cast through intptr_t and boxed with
 * PyLong_FromSsize_t).  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7448
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7445
 *         return f"<{__name__}.VgpuSchedulerCapabilities object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7450
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for `_get_ptr` (cdef method, no Python boxing):
 * returns the address stored in _ptr as an intptr_t.  Mirrors the `ptr`
 * property and __int__, which box the same value into a Python int.
 * (Direct return — the generated goto/label pair carried no behavior.) */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":7453
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerCapabilities.__int__; forwards to
 * the implementation after downcasting self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__: identical observable result to the `ptr`
 * property — the _ptr address boxed as a Python int.  Returns NULL with
 * an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":7454
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7453
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7456
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerCapabilities other_
 *         if not isinstance(other, VgpuSchedulerCapabilities):
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerCapabilities.__eq__; forwards both
 * self and other to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__: returns False for non-VgpuSchedulerCapabilities
 * operands, otherwise compares the two underlying C structs bytewise with
 * memcmp over sizeof(nvmlVgpuSchedulerCapabilities_t).
 * NOTE(review): neither _ptr is checked for NULL before memcmp — comparing
 * a default-constructed-then-dealloc'd or zero-initialized instance would
 * dereference NULL (UB).  If reachable, fix in _nvml.pyx, not here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":7458
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerCapabilities other_
 *         if not isinstance(other, VgpuSchedulerCapabilities):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7459
 *         cdef VgpuSchedulerCapabilities other_
 *         if not isinstance(other, VgpuSchedulerCapabilities):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerCapabilities_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7458
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerCapabilities other_
 *         if not isinstance(other, VgpuSchedulerCapabilities):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":7460
 *         if not isinstance(other, VgpuSchedulerCapabilities):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerCapabilities_t)) == 0)
 * 
 */
  /* typed assignment: TypeTest accepts None per Cython semantics, though the
   * isinstance guard above already rejected anything but the exact type */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities))))) __PYX_ERR(0, 7460, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":7461
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerCapabilities_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerCapabilities_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7456
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerCapabilities other_
 *         if not isinstance(other, VgpuSchedulerCapabilities):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7463
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerCapabilities_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerCapabilities.__setitem__; forwards
 * (self, key, val) to the implementation.  Returns 0 on success, -1 on
 * error per the mp_ass_subscript protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__.  Two paths:
 *   obj[0] = ndarray  -> malloc a fresh nvmlVgpuSchedulerCapabilities_t,
 *                        memcpy the ndarray's buffer into it, mark owned,
 *                        and set _readonly from the array's writeable flag;
 *   anything else     -> delegate to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE(review): the ndarray path overwrites self->_ptr without freeing a
 * previously owned allocation (e.g. the calloc from __init__) — looks like
 * a memory leak originating in the .pyx source; confirm and fix upstream
 * in cuda/bindings/_nvml.pyx, not in this generated file.
 * NOTE(review): the memcpy assumes val's buffer is contiguous and at least
 * sizeof(nvmlVgpuSchedulerCapabilities_t) bytes — not validated here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":7464
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if self._ptr == NULL:
 */
  /* short-circuit `and`: only look up _numpy.ndarray when key == 0 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7464, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 7464, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7465
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 */
    /* see NOTE(review) above: previous owned _ptr is not freed here */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerCapabilities_t *)malloc((sizeof(nvmlVgpuSchedulerCapabilities_t))));

    /* "cuda/bindings/_nvml.pyx":7466
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerCapabilities_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7467
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             self._owner = None
 */
      /* build and raise MemoryError("Error allocating VgpuSchedulerCapabilities") */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7467, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerCa};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7467, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 7467, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7466
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerCapabilities_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7468
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerCapabilities_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* fetch val.ctypes.data (an int address) and copy the struct bytes in */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7468, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7468, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7468, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerCapabilities_t))));

    /* "cuda/bindings/_nvml.pyx":7469
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7470
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7471
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* propagate the ndarray's read-only state to this wrapper */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7471, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7471, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 7471, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":7464
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":7473
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 7473, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":7463
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerCapabilities_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7475
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def supported_schedulers(self):
 *         """~_numpy.uint32: (array of length 3)."""
*/

/* Python wrapper */
/* Python-level wrapper for the `supported_schedulers` property getter;
 * forwards to the implementation after downcasting self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for VgpuSchedulerCapabilities.supported_schedulers.
 * Builds a Cython view.array of shape (3,), itemsize sizeof(unsigned int),
 * format "I", with allocate_buffer=False, points its data pointer directly
 * at the C struct field self._ptr[0].supportedSchedulers, and returns
 * numpy.asarray(arr) — presumably a zero-copy view over the struct's
 * memory (NOTE(review): the returned array's lifetime is therefore tied
 * to the underlying nvmlVgpuSchedulerCapabilities_t buffer — confirm
 * callers keep `self` alive). Error paths jump to __pyx_L1_error, which
 * releases temporaries and returns NULL with a traceback entry. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7478
 *     def supported_schedulers(self):
 *         """~_numpy.uint32: (array of length 3)."""
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].supportedSchedulers))
 *         return _numpy.asarray(arr)
*/
  /* Keyword-only vectorcall of view.array(shape=..., itemsize=..., format="I",
   * mode="c", allocate_buffer=False); allocate_buffer=False means the array
   * object owns no buffer until arr.data is assigned below. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7478, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7478, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[4], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 7478, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 7478, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 7478, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 7478, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 7478, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7478, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":7479
 *         """~_numpy.uint32: (array of length 3)."""
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].supportedSchedulers))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
*/
  /* Point the view at the struct field; no copy is made here. */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).supportedSchedulers));

  /* "cuda/bindings/_nvml.pyx":7480
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].supportedSchedulers))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @supported_schedulers.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If asarray resolved to a bound method, unpack it so self can be passed
   * positionally through the fastcall path. */
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7480, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7475
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def supported_schedulers(self):
 *         """~_numpy.uint32: (array of length 3)."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.supported_schedulers.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7482
 *         return _numpy.asarray(arr)
 * 
 *     @supported_schedulers.setter             # <<<<<<<<<<<<<<
 *     def supported_schedulers(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Cython-generated setter entry point for
 * VgpuSchedulerCapabilities.supported_schedulers: casts `self` and
 * delegates to the impl. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for VgpuSchedulerCapabilities.supported_schedulers.
 * Raises ValueError when the instance is flagged _readonly; otherwise
 * allocates a temporary (3,) view.array of unsigned int (this time WITH
 * its own buffer), slice-assigns numpy.asarray(val, dtype=uint32) into it
 * (which coerces the element type), then memcpy's
 * sizeof(unsigned int) * len(val) bytes into self._ptr[0].supportedSchedulers.
 * NOTE(review): the memcpy length comes from len(val), not from the fixed
 * field size; the preceding slice assignment into a (3,)-shaped array
 * presumably enforces len(val) == 3 before the copy — confirm against
 * Cython memoryview slice-assignment semantics.
 * Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7484
 *     @supported_schedulers.setter
 *     def supported_schedulers(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7485
 *     def supported_schedulers(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7485, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7485, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7484
 *     @supported_schedulers.setter
 *     def supported_schedulers(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":7486
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         memcpy(<void *>(&(self._ptr[0].supportedSchedulers)), <void *>(arr.data), sizeof(unsigned int) * len(val))
*/
  /* Unlike the getter, allocate_buffer is not passed, so this array owns a
   * scratch buffer that the coerced value is staged into. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7486, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7486, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[4], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 7486, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 7486, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 7486, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 7486, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7486, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":7487
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].supportedSchedulers)), <void *>(arr.data), sizeof(unsigned int) * len(val))
 * 
*/
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If asarray resolved to a bound method, unpack it for the fastcall path. */
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7487, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 7487, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7487, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* arr[:] = <coerced ndarray> — full-slice assignment into the scratch array. */
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 7487, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":7488
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         memcpy(<void *>(&(self._ptr[0].supportedSchedulers)), <void *>(arr.data), sizeof(unsigned int) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 7488, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).supportedSchedulers)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned int)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":7482
 *         return _numpy.asarray(arr)
 * 
 *     @supported_schedulers.setter             # <<<<<<<<<<<<<<
 *     def supported_schedulers(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.supported_schedulers.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7490
 *         memcpy(<void *>(&(self._ptr[0].supportedSchedulers)), <void *>(arr.data), sizeof(unsigned int) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_timeslice(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter entry point for VgpuSchedulerCapabilities.max_timeslice:
 * casts `self` and delegates to the impl. Returns new ref or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for max_timeslice: boxes the unsigned int field
 * self._ptr[0].maxTimeslice into a Python int and returns it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7493
 *     def max_timeslice(self):
 *         """int: """
 *         return self._ptr[0].maxTimeslice             # <<<<<<<<<<<<<<
 * 
 *     @max_timeslice.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).maxTimeslice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7493, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7490
 *         memcpy(<void *>(&(self._ptr[0].supportedSchedulers)), <void *>(arr.data), sizeof(unsigned int) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_timeslice(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.max_timeslice.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7495
 *         return self._ptr[0].maxTimeslice
 * 
 *     @max_timeslice.setter             # <<<<<<<<<<<<<<
 *     def max_timeslice(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Setter entry point for VgpuSchedulerCapabilities.max_timeslice:
 * casts `self` and delegates. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for max_timeslice: raises ValueError when _readonly,
 * otherwise converts `val` to unsigned int (OverflowError/TypeError on
 * bad input, reported via the __Pyx_PyLong_As_unsigned_int error check)
 * and stores it in self._ptr[0].maxTimeslice. Returns 0 / -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7497
 *     @max_timeslice.setter
 *     def max_timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxTimeslice = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7498
 *     def max_timeslice(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxTimeslice = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7498, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7498, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7497
 *     @max_timeslice.setter
 *     def max_timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxTimeslice = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7499
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxTimeslice = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7499, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxTimeslice = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7495
 *         return self._ptr[0].maxTimeslice
 * 
 *     @max_timeslice.setter             # <<<<<<<<<<<<<<
 *     def max_timeslice(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.max_timeslice.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7501
 *         self._ptr[0].maxTimeslice = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_timeslice(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter entry point for VgpuSchedulerCapabilities.min_timeslice:
 * casts `self` and delegates to the impl. Returns new ref or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for min_timeslice: boxes the unsigned int field
 * self._ptr[0].minTimeslice into a Python int and returns it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7504
 *     def min_timeslice(self):
 *         """int: """
 *         return self._ptr[0].minTimeslice             # <<<<<<<<<<<<<<
 * 
 *     @min_timeslice.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).minTimeslice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7501
 *         self._ptr[0].maxTimeslice = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_timeslice(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.min_timeslice.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7506
 *         return self._ptr[0].minTimeslice
 * 
 *     @min_timeslice.setter             # <<<<<<<<<<<<<<
 *     def min_timeslice(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Setter entry point for VgpuSchedulerCapabilities.min_timeslice:
 * casts `self` and delegates. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for min_timeslice: raises ValueError when _readonly,
 * otherwise converts `val` to unsigned int and stores it in
 * self._ptr[0].minTimeslice. Returns 0 / -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7508
 *     @min_timeslice.setter
 *     def min_timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minTimeslice = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7509
 *     def min_timeslice(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].minTimeslice = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7509, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7509, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7508
 *     @min_timeslice.setter
 *     def min_timeslice(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minTimeslice = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7510
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minTimeslice = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7510, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).minTimeslice = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7506
 *         return self._ptr[0].minTimeslice
 * 
 *     @min_timeslice.setter             # <<<<<<<<<<<<<<
 *     def min_timeslice(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.min_timeslice.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7512
 *         self._ptr[0].minTimeslice = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_arr_mode_supported(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter entry point for VgpuSchedulerCapabilities.is_arr_mode_supported:
 * casts `self` and delegates to the impl. Returns new ref or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for is_arr_mode_supported: boxes the unsigned int field
 * self._ptr[0].isArrModeSupported into a Python int and returns it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7515
 *     def is_arr_mode_supported(self):
 *         """int: """
 *         return self._ptr[0].isArrModeSupported             # <<<<<<<<<<<<<<
 * 
 *     @is_arr_mode_supported.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isArrModeSupported); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7512
 *         self._ptr[0].minTimeslice = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_arr_mode_supported(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.is_arr_mode_supported.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7517
 *         return self._ptr[0].isArrModeSupported
 * 
 *     @is_arr_mode_supported.setter             # <<<<<<<<<<<<<<
 *     def is_arr_mode_supported(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Setter entry point for VgpuSchedulerCapabilities.is_arr_mode_supported:
 * casts `self` and delegates. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of `VgpuSchedulerCapabilities.is_arr_mode_supported.__set__`
 * (_nvml.pyx:7517-7521): raises ValueError when the instance is read-only,
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped C
 * struct field `isArrModeSupported`. Returns 0 on success, -1 with a Python
 * exception set on failure. (Cython-generated; do not edit by hand.) */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7519
 *     @is_arr_mode_supported.setter
 *     def is_arr_mode_supported(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].isArrModeSupported = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7520
 *     def is_arr_mode_supported(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isArrModeSupported = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7520, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7520, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7519
 *     @is_arr_mode_supported.setter
 *     def is_arr_mode_supported(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].isArrModeSupported = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7521
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].isArrModeSupported = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 plus a live exception signals failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7521, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isArrModeSupported = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7517
 *         return self._ptr[0].isArrModeSupported
 * 
 *     @is_arr_mode_supported.setter             # <<<<<<<<<<<<<<
 *     def is_arr_mode_supported(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.is_arr_mode_supported.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7523
 *         self._ptr[0].isArrModeSupported = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_frequency_for_arr(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `max_frequency_for_arr` property getter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns a new reference or NULL on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `max_frequency_for_arr` property getter
 * (_nvml.pyx:7526): boxes the wrapped C struct field `maxFrequencyForARR`
 * (unsigned int) as a Python int. Returns a new reference, or NULL with an
 * exception set if boxing fails. (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7526
 *     def max_frequency_for_arr(self):
 *         """int: """
 *         return self._ptr[0].maxFrequencyForARR             # <<<<<<<<<<<<<<
 * 
 *     @max_frequency_for_arr.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).maxFrequencyForARR); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7523
 *         self._ptr[0].isArrModeSupported = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_frequency_for_arr(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.max_frequency_for_arr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7528
 *         return self._ptr[0].maxFrequencyForARR
 * 
 *     @max_frequency_for_arr.setter             # <<<<<<<<<<<<<<
 *     def max_frequency_for_arr(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `max_frequency_for_arr` property setter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns 0 on success, -1 on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of `VgpuSchedulerCapabilities.max_frequency_for_arr.__set__`
 * (_nvml.pyx:7528-7532): raises ValueError when the instance is read-only,
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped C
 * struct field `maxFrequencyForARR`. Returns 0 on success, -1 with a Python
 * exception set on failure. (Cython-generated; do not edit by hand.) */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7530
 *     @max_frequency_for_arr.setter
 *     def max_frequency_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxFrequencyForARR = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7531
 *     def max_frequency_for_arr(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxFrequencyForARR = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7531, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7531, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7530
 *     @max_frequency_for_arr.setter
 *     def max_frequency_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxFrequencyForARR = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7532
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxFrequencyForARR = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 plus a live exception signals failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7532, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxFrequencyForARR = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7528
 *         return self._ptr[0].maxFrequencyForARR
 * 
 *     @max_frequency_for_arr.setter             # <<<<<<<<<<<<<<
 *     def max_frequency_for_arr(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.max_frequency_for_arr.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7534
 *         self._ptr[0].maxFrequencyForARR = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_frequency_for_arr(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `min_frequency_for_arr` property getter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns a new reference or NULL on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `min_frequency_for_arr` property getter
 * (_nvml.pyx:7537): boxes the wrapped C struct field `minFrequencyForARR`
 * (unsigned int) as a Python int. Returns a new reference, or NULL with an
 * exception set if boxing fails. (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7537
 *     def min_frequency_for_arr(self):
 *         """int: """
 *         return self._ptr[0].minFrequencyForARR             # <<<<<<<<<<<<<<
 * 
 *     @min_frequency_for_arr.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).minFrequencyForARR); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7534
 *         self._ptr[0].maxFrequencyForARR = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_frequency_for_arr(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.min_frequency_for_arr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7539
 *         return self._ptr[0].minFrequencyForARR
 * 
 *     @min_frequency_for_arr.setter             # <<<<<<<<<<<<<<
 *     def min_frequency_for_arr(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `min_frequency_for_arr` property setter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns 0 on success, -1 on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of `VgpuSchedulerCapabilities.min_frequency_for_arr.__set__`
 * (_nvml.pyx:7539-7543): raises ValueError when the instance is read-only,
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped C
 * struct field `minFrequencyForARR`. Returns 0 on success, -1 with a Python
 * exception set on failure. (Cython-generated; do not edit by hand.) */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7541
 *     @min_frequency_for_arr.setter
 *     def min_frequency_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minFrequencyForARR = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7542
 *     def min_frequency_for_arr(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].minFrequencyForARR = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7542, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7542, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7541
 *     @min_frequency_for_arr.setter
 *     def min_frequency_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minFrequencyForARR = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7543
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minFrequencyForARR = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 plus a live exception signals failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7543, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).minFrequencyForARR = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7539
 *         return self._ptr[0].minFrequencyForARR
 * 
 *     @min_frequency_for_arr.setter             # <<<<<<<<<<<<<<
 *     def min_frequency_for_arr(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.min_frequency_for_arr.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7545
 *         self._ptr[0].minFrequencyForARR = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_avg_factor_for_arr(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `max_avg_factor_for_arr` property getter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns a new reference or NULL on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `max_avg_factor_for_arr` property getter
 * (_nvml.pyx:7548): boxes the wrapped C struct field `maxAvgFactorForARR`
 * (unsigned int) as a Python int. Returns a new reference, or NULL with an
 * exception set if boxing fails. (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7548
 *     def max_avg_factor_for_arr(self):
 *         """int: """
 *         return self._ptr[0].maxAvgFactorForARR             # <<<<<<<<<<<<<<
 * 
 *     @max_avg_factor_for_arr.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).maxAvgFactorForARR); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7545
 *         self._ptr[0].minFrequencyForARR = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_avg_factor_for_arr(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.max_avg_factor_for_arr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7550
 *         return self._ptr[0].maxAvgFactorForARR
 * 
 *     @max_avg_factor_for_arr.setter             # <<<<<<<<<<<<<<
 *     def max_avg_factor_for_arr(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `max_avg_factor_for_arr` property setter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns 0 on success, -1 on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of `VgpuSchedulerCapabilities.max_avg_factor_for_arr.__set__`
 * (_nvml.pyx:7550-7554): raises ValueError when the instance is read-only,
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped C
 * struct field `maxAvgFactorForARR`. Returns 0 on success, -1 with a Python
 * exception set on failure. (Cython-generated; do not edit by hand.) */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7552
 *     @max_avg_factor_for_arr.setter
 *     def max_avg_factor_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxAvgFactorForARR = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7553
 *     def max_avg_factor_for_arr(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxAvgFactorForARR = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7553, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7553, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7552
 *     @max_avg_factor_for_arr.setter
 *     def max_avg_factor_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxAvgFactorForARR = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7554
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].maxAvgFactorForARR = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 plus a live exception signals failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7554, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxAvgFactorForARR = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7550
 *         return self._ptr[0].maxAvgFactorForARR
 * 
 *     @max_avg_factor_for_arr.setter             # <<<<<<<<<<<<<<
 *     def max_avg_factor_for_arr(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.max_avg_factor_for_arr.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7556
 *         self._ptr[0].maxAvgFactorForARR = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_avg_factor_for_arr(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `min_avg_factor_for_arr` property getter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns a new reference or NULL on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `min_avg_factor_for_arr` property getter
 * (_nvml.pyx:7559): boxes the wrapped C struct field `minAvgFactorForARR`
 * (unsigned int) as a Python int. Returns a new reference, or NULL with an
 * exception set if boxing fails. (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7559
 *     def min_avg_factor_for_arr(self):
 *         """int: """
 *         return self._ptr[0].minAvgFactorForARR             # <<<<<<<<<<<<<<
 * 
 *     @min_avg_factor_for_arr.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).minAvgFactorForARR); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7556
 *         self._ptr[0].maxAvgFactorForARR = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_avg_factor_for_arr(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.min_avg_factor_for_arr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7561
 *         return self._ptr[0].minAvgFactorForARR
 * 
 *     @min_avg_factor_for_arr.setter             # <<<<<<<<<<<<<<
 *     def min_avg_factor_for_arr(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `min_avg_factor_for_arr` property setter:
 * casts the generic receiver to the extension-type struct and delegates to
 * the typed implementation. Returns 0 on success, -1 on error.
 * (Cython-generated code; regenerate from _nvml.pyx rather than editing.) */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of `VgpuSchedulerCapabilities.min_avg_factor_for_arr.__set__`
 * (_nvml.pyx:7561-7565): raises ValueError when the instance is read-only,
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped C
 * struct field `minAvgFactorForARR`. Returns 0 on success, -1 with a Python
 * exception set on failure. (Cython-generated; do not edit by hand.) */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7563
 *     @min_avg_factor_for_arr.setter
 *     def min_avg_factor_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minAvgFactorForARR = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7564
 *     def min_avg_factor_for_arr(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].minAvgFactorForARR = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerCapabilities_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7564, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7564, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7563
 *     @min_avg_factor_for_arr.setter
 *     def min_avg_factor_for_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minAvgFactorForARR = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7565
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerCapabilities instance is read-only")
 *         self._ptr[0].minAvgFactorForARR = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 plus a live exception signals failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7565, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).minAvgFactorForARR = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7561
 *         return self._ptr[0].minAvgFactorForARR
 * 
 *     @min_avg_factor_for_arr.setter             # <<<<<<<<<<<<<<
 *     def min_avg_factor_for_arr(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.min_avg_factor_for_arr.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7567
 *         self._ptr[0].minAvgFactorForARR = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-callable wrapper for VgpuSchedulerCapabilities.from_data(data).
 * Unpacks the single positional-or-keyword argument "data" (FASTCALL or
 * tuple/dict calling convention, selected by CYTHON_METH_FASTCALL) and
 * forwards it to the implementation function. Returns a new reference, or
 * NULL with an exception set on argument-parsing failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_12from_data, "VgpuSchedulerCapabilities.from_data(data)\n\nCreate an VgpuSchedulerCapabilities instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_capabilities_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: unlikely() must wrap the whole comparison. With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` evaluated `(!!(len)) < 0`, which is never
     * true — a -1 error return from __Pyx_NumKwargs_FASTCALL was ignored.
     * (Compare the correctly parenthesized check in the no-arg wrappers.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7567, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7567, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 7567, __pyx_L3_error)
      /* all parameters are required: verify none is still unset */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 7567, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7567, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 7567, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references held in values[] before propagating */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerCapabilities.from_data(data): looks up the
 * module-global `vgpu_scheduler_capabilities_dtype` and delegates to the
 * shared __from_data helper, passing the dtype name, the dtype object and the
 * VgpuSchedulerCapabilities extension type. Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;  /* dtype fetched from module globals */
  PyObject *__pyx_t_2 = NULL;  /* result of __from_data */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":7574
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_capabilities_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_capabilities_dtype", vgpu_scheduler_capabilities_dtype, VgpuSchedulerCapabilities)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_capabilities_dtyp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_capabilities_dtyp, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7567
 *         self._ptr[0].minAvgFactorForARR = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: release temporaries and record the traceback frame */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7576
 *         return __from_data(data, "vgpu_scheduler_capabilities_dtype", vgpu_scheduler_capabilities_dtype, VgpuSchedulerCapabilities)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-callable wrapper for
 * VgpuSchedulerCapabilities.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks 1-3 positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C int via truth testing, defaults `owner` to None, and
 * forwards to the implementation. Returns a new reference, or NULL with an
 * exception set on argument-parsing failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_14from_ptr, "VgpuSchedulerCapabilities.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerCapabilities instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: unlikely() must wrap the whole comparison. With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` evaluated `(!!(len)) < 0`, which is never
     * true — a -1 error return from __Pyx_NumKwargs_FASTCALL was ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7576, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 7576, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":7577
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given pointer.
 * 
 */
      /* apply the `owner=None` default; only `ptr` (index 0) is required */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 7576, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7576, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* note: converts via Py_ssize_t; Py_ssize_t and intptr_t are assumed
     * interchangeable here — true on all mainstream platforms */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7577, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7577, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 7576, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references held in values[] before propagating */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":7576
 *         return __from_data(data, "vgpu_scheduler_capabilities_dtype", vgpu_scheduler_capabilities_dtype, VgpuSchedulerCapabilities)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerCapabilities.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer; then either copies the pointed-to struct into a
 * freshly malloc'd buffer (owner is None -> _owned = True, freed by the
 * object) or wraps the caller's pointer directly and keeps a reference to
 * `owner` to keep the memory alive (_owned = False). Returns the new
 * VgpuSchedulerCapabilities instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7585
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7586
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)
 *         if owner is None:
 */
    /* build and raise ValueError("ptr must not be null (0)") via vectorcall */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7586, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7586, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7585
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)
 */
  }

  /* "cuda/bindings/_nvml.pyx":7587
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 */
  /* allocate the instance directly via tp_new, bypassing any __init__ */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7587, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7588
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7589
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 */
    /* no owner given: take a private copy of the struct so the object does
     * not dangle when the caller's memory goes away */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerCapabilities_t *)malloc((sizeof(nvmlVgpuSchedulerCapabilities_t))));

    /* "cuda/bindings/_nvml.pyx":7590
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerCapabilities_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7591
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name in module globals (may be shadowed),
       * then raised with the allocation-failure message */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7591, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerCa};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7591, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 7591, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7590
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerCapabilities_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7592
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerCapabilities_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* caller's `ptr` must point at a valid nvmlVgpuSchedulerCapabilities_t;
     * not verifiable here — the user contract from the docstring */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerCapabilities_t))));

    /* "cuda/bindings/_nvml.pyx":7593
 *                 raise MemoryError("Error allocating VgpuSchedulerCapabilities")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7594
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7588
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerCapabilities obj = VgpuSchedulerCapabilities.__new__(VgpuSchedulerCapabilities)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>malloc(sizeof(nvmlVgpuSchedulerCapabilities_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":7596
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* owner provided: wrap the pointer directly; `owner` keeps it alive */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerCapabilities_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7597
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":7598
 *             obj._ptr = <nvmlVgpuSchedulerCapabilities_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":7599
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":7600
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7576
 *         return __from_data(data, "vgpu_scheduler_capabilities_dtype", vgpu_scheduler_capabilities_dtype, VgpuSchedulerCapabilities)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* on success __pyx_r holds its own reference, so dropping obj is safe */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python-callable wrapper for VgpuSchedulerCapabilities.__reduce_cython__().
 * Rejects any positional or keyword arguments, then forwards `self` to the
 * implementation (which always raises TypeError — the type is unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_16__reduce_cython__, "VgpuSchedulerCapabilities.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* method takes no arguments beyond self: reject positionals and keywords */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: unconditionally raises TypeError
 * because the wrapped C pointer (_ptr) cannot be pickled. Always returns
 * NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python-callable wrapper for
 * VgpuSchedulerCapabilities.__setstate_cython__(__pyx_state).
 * Unpacks the single positional-or-keyword argument and forwards to the
 * implementation (which always raises TypeError — the type is unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_18__setstate_cython__, "VgpuSchedulerCapabilities.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: unlikely() must wrap the whole comparison. With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` evaluated `(!!(len)) < 0`, which is never
     * true — a -1 error return from __Pyx_NumKwargs_FASTCALL was ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* all parameters are required: verify none is still unset */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references held in values[] before propagating */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerCapabilities.__setstate_cython__.
 * This type is intentionally unpicklable: the body unconditionally raises
 * TypeError ("self._ptr cannot be converted to a Python object for pickling")
 * and therefore always returns NULL. The __pyx_state argument is accepted but
 * never used (CYTHON_UNUSED). Generated by Cython from a "(tree fragment)". */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message; control always jumps to the
   * error label below, so the "success" path of this function is unreachable. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerCapabilities.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7603
 * 
 * 
 * cdef _get_vgpu_license_expiry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuLicenseExpiry_t pod = nvmlVgpuLicenseExpiry_t()
 *     return _numpy.dtype({
*/

/* cdef helper _get_vgpu_license_expiry_dtype_offsets().
 * Builds and returns a numpy.dtype describing the in-memory layout of the C
 * struct nvmlVgpuLicenseExpiry_t, using the dict form of the dtype
 * constructor: {'names': [...], 'formats': [...], 'offsets': [...],
 * 'itemsize': sizeof(...)}. Field offsets are computed by taking the address
 * of each member of a local `pod` instance and subtracting the struct's base
 * address. Returns NULL (with a Python exception set) on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_license_expiry_dtype_offsets(void) {
  nvmlVgpuLicenseExpiry_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuLicenseExpiry_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_license_expiry_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":7604
 * 
 * cdef _get_vgpu_license_expiry_dtype_offsets():
 *     cdef nvmlVgpuLicenseExpiry_t pod = nvmlVgpuLicenseExpiry_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy, so pod's
   * field VALUES are indeterminate. That is harmless here: only the field
   * ADDRESSES of pod are used below (offset arithmetic), never its contents.
   * Generated code — confirm against Cython's output if regenerating. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":7605
 * cdef _get_vgpu_license_expiry_dtype_offsets():
 *     cdef nvmlVgpuLicenseExpiry_t pod = nvmlVgpuLicenseExpiry_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 */
  /* Look up the module-global `_numpy`, then its `dtype` attribute (the
   * callable invoked at the end of this function). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":7606
 *     cdef nvmlVgpuLicenseExpiry_t pod = nvmlVgpuLicenseExpiry_t()
 *     return _numpy.dtype({
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 *         'offsets': [
 */
  /* Build the 4-key spec dict and its 'names' list (7 interned field names). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7606, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7606, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_year);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_year);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_year) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_month);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_month);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_month) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_day);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_day);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_day) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_hour);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_hour);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_hour) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_min) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sec);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sec);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_sec) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_status);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_status);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_status) != (0)) __PYX_ERR(0, 7606, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 7606, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7607
 *     return _numpy.dtype({
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 */
  /* Fetch the seven numpy scalar types for the 'formats' list; _numpy is
   * re-looked-up for each attribute access, as generated. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 7607, __pyx_L1_error);
  /* Ownership of t_7..t_13 was transferred into the list; clear the temps
   * without DECREF, per the GIVEREF transfers above. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 7606, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7609
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 */
  /* Each offset below is (address of member) - (address of struct),
   * i.e. the byte offset of that field inside nvmlVgpuLicenseExpiry_t. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.year)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":7610
 *         'offsets': [
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.month)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 7610, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":7611
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.day)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 7611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":7612
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hour)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 7612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":7613
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 */
  /* Note: the C member is `min` while the Python-facing field name is
   * 'min_' (see the 'names' list above). */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.min)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7613, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":7614
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sec)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7614, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":7615
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuLicenseExpiry_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.status)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":7608
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list (same field order as 'names'/'formats'). */
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7608, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 7608, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 7606, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":7617
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuLicenseExpiry_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the full C struct size, including padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuLicenseExpiry_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7617, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 7606, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict). If `dtype` resolved to a bound method the
   * CYTHON_UNPACK_METHODS branch unpacks self+function so the vectorcall can
   * pass `self` as the first positional argument (__pyx_t_14 selects the
   * argument window). */
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7605, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7603
 * 
 * 
 * cdef _get_vgpu_license_expiry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuLicenseExpiry_t pod = nvmlVgpuLicenseExpiry_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release any live temporaries, record a traceback frame for
   * this cdef function, and return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_license_expiry_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7634
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuLicenseExpiry_t *>calloc(1, sizeof(nvmlVgpuLicenseExpiry_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuLicenseExpiry.__init__.
 * Validates that the call carries zero positional and zero keyword arguments
 * (raising TypeError otherwise), then dispatches to the implementation
 * function. Returns 0 on success, -1 on error, per the tp_init protocol. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe macro is used only when the build
   * asserts tuple accesses cannot fail. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject any positionals... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  /* ...and any keywords. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseExpiry.__init__.
 * Zero-allocates one nvmlVgpuLicenseExpiry_t with calloc and stores it in
 * self->_ptr; raises MemoryError if the allocation fails. On success sets
 * _owner = None, _owned = True (so __dealloc__ frees the buffer) and
 * _readonly = False. Returns 0 on success, -1 with an exception set on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":7635
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuLicenseExpiry_t *>calloc(1, sizeof(nvmlVgpuLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuLicenseExpiry")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuLicenseExpiry_t *)calloc(1, (sizeof(nvmlVgpuLicenseExpiry_t))));

  /* "cuda/bindings/_nvml.pyx":7636
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuLicenseExpiry_t *>calloc(1, sizeof(nvmlVgpuLicenseExpiry_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuLicenseExpiry")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7637
 *         self._ptr = <nvmlVgpuLicenseExpiry_t *>calloc(1, sizeof(nvmlVgpuLicenseExpiry_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuLicenseExpiry")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up the (module-global) MemoryError and call it with the message
     * string via the vectorcall fast path, then raise the resulting
     * exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7637, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuLicenseExpi};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7637, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7637, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7636
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuLicenseExpiry_t *>calloc(1, sizeof(nvmlVgpuLicenseExpiry_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuLicenseExpiry")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":7638
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuLicenseExpiry")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (refcount-balanced:
   * the new None reference is taken before the old value is released). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":7639
 *             raise MemoryError("Error allocating VgpuLicenseExpiry")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":7640
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":7634
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuLicenseExpiry_t *>calloc(1, sizeof(nvmlVgpuLicenseExpiry_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7642
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuLicenseExpiry_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuLicenseExpiry.__dealloc__ (tp_dealloc hook).
 * Simply casts self and forwards to the implementation; cannot fail.
 * NOTE(review): the __Pyx_KwValues_VARARGS(...) call below names
 * __pyx_args/__pyx_nargs, which are not declared in this function. This
 * presumably compiles only because the macro discards its arguments in this
 * build configuration — confirm against the macro definition if modifying. */
/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuLicenseExpiry.__dealloc__.
 * Frees the underlying nvmlVgpuLicenseExpiry_t buffer, but only if this
 * object owns it (_owned) and the pointer is non-NULL. The pointer is copied
 * to a local and self->_ptr is cleared BEFORE free() so the object never
 * holds a dangling pointer. No Python API is used; cannot raise. */
static void __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  nvmlVgpuLicenseExpiry_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuLicenseExpiry_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":7644
 *     def __dealloc__(self):
 *         cdef nvmlVgpuLicenseExpiry_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7645
 *         cdef nvmlVgpuLicenseExpiry_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":7646
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the member first so the object is never left pointing at
     * freed memory. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":7647
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7644
 *     def __dealloc__(self):
 *         cdef nvmlVgpuLicenseExpiry_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":7642
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuLicenseExpiry_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":7649
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuLicenseExpiry object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuLicenseExpiry.__repr__ (tp_repr slot).
 * Casts self and forwards to the implementation; returns a new unicode
 * reference or NULL on error.
 * NOTE(review): as in the __dealloc__ wrapper, __Pyx_KwValues_VARARGS is
 * invoked with undeclared names — presumably the macro discards its
 * arguments in this configuration; confirm before editing. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseExpiry.__repr__.
 * Builds the f-string "<{__name__}.VgpuLicenseExpiry object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * the five pieces (two literals, two dynamic parts, closing literal) with
 * Cython's optimized unicode-join helper. Returns a new unicode reference,
 * or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":7650
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuLicenseExpiry object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module-global __name__ as str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)), then coerce to unicode for joining. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the 5 segments with a precomputed length estimate and max char
   * value (the constants cover the literal segments). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuLicenseExpiry_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 29 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7649
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuLicenseExpiry object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7652
 *         return f"<{__name__}.VgpuLicenseExpiry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuLicenseExpiry.ptr property getter.
 * Casts self and forwards to the implementation; returns a new reference or
 * NULL on error.
 * NOTE(review): __Pyx_KwValues_VARARGS is again called with undeclared
 * names; presumably the macro ignores its arguments here — verify against
 * the macro definition before changing. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuLicenseExpiry.ptr` (cuda/bindings/_nvml.pyx:7652):
 * returns the address held in self._ptr as a Python int (a new reference),
 * or NULL with an exception set if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7655
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw pointer value as a Python int via a signed-size conversion. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7655, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7652
 *         return f"<{__name__}.VgpuLicenseExpiry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7657
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level helper `_get_ptr` (cuda/bindings/_nvml.pyx:7657): expose the raw
 * struct pointer held in self._ptr as an integer address. No Python objects
 * are created and no error can occur, so this is a plain cast-and-return. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":7660
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython slot wrapper for VgpuLicenseExpiry.__int__: downcasts self and
 * delegates to the generated implementation. Returns a new reference or
 * NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros,
   * not parameters of this wrapper. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `int(VgpuLicenseExpiry)` (cuda/bindings/_nvml.pyx:7660):
 * identical behavior to the `ptr` property getter — boxes the self._ptr
 * address as a Python int. Returns a new reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":7661
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw pointer value as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7660
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7663
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseExpiry other_
 *         if not isinstance(other, VgpuLicenseExpiry):
*/

/* Python wrapper */
/* CPython slot wrapper for VgpuLicenseExpiry.__eq__: downcasts self and
 * forwards both operands to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseExpiry.__eq__ (cuda/bindings/_nvml.pyx:7663):
 * returns False for non-VgpuLicenseExpiry operands, otherwise compares the
 * two underlying nvmlVgpuLicenseExpiry_t structs byte-for-byte with memcmp.
 * NOTE(review): the pyx source returns False (not NotImplemented) for foreign
 * types, so reflected comparison is never attempted — intentional per source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":7665
 *     def __eq__(self, other):
 *         cdef VgpuLicenseExpiry other_
 *         if not isinstance(other, VgpuLicenseExpiry):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7666
 *         cdef VgpuLicenseExpiry other_
 *         if not isinstance(other, VgpuLicenseExpiry):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseExpiry_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7665
 *     def __eq__(self, other):
 *         cdef VgpuLicenseExpiry other_
 *         if not isinstance(other, VgpuLicenseExpiry):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":7667
 *         if not isinstance(other, VgpuLicenseExpiry):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseExpiry_t)) == 0)
 * 
 */
  /* Typed assignment: TypeTest also accepts None, but the isinstance guard
   * above already filtered None out, so other_ is a live instance here. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry))))) __PYX_ERR(0, 7667, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":7668
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseExpiry_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Raw byte comparison of the two backing structs; equality means every
   * field (including any padding bytes) matches. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuLicenseExpiry_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7668, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7663
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseExpiry other_
 *         if not isinstance(other, VgpuLicenseExpiry):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7670
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseExpiry_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
*/

/* Python wrapper */
/* CPython slot wrapper for VgpuLicenseExpiry.__setitem__: downcasts self and
 * forwards key/val to the generated implementation. Returns 0 on success,
 * -1 on error (mp_ass_subscript convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseExpiry.__setitem__ (cuda/bindings/_nvml.pyx:7670).
 * Two paths per the pyx source:
 *   - obj[0] = ndarray: malloc a fresh nvmlVgpuLicenseExpiry_t, memcpy the
 *     array's bytes into it (via val.ctypes.data), take ownership, and mirror
 *     the array's writeable flag into self._readonly.
 *   - anything else: fall back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray path overwrites self._ptr without freeing a
 * previously owned allocation — possible leak if called twice; confirm
 * against the type's dealloc/ownership rules elsewhere in the file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":7671
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7671, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7671, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7671, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 7671, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7672
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuLicenseExpiry_t *)malloc((sizeof(nvmlVgpuLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7673
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseExpiry_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7674
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseExpiry_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating VgpuLicenseExpiry")
       * through Cython's vectorcall helper. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7674, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuLicenseExpi};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7674, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 7674, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7673
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseExpiry_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7675
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (the array's base address as a Python int),
     * convert it to intptr_t, then copy sizeof(struct) bytes from it. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7675, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7675, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7675, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7676
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseExpiry_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-contained: drop any previous owner reference. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7677
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseExpiry_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7678
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the ndarray's writeable flag: read-only array -> read-only struct. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7678, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7678, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 7678, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":7671
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":7680
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-(0, ndarray) subscript: treat the key as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 7680, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":7670
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseExpiry_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7682
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def year(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-facing wrapper for the `year` property getter: downcasts self and
 * delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuLicenseExpiry.year` getter (pyx:7682): reads the
 * `year` field (unsigned int) from the backing struct and boxes it as a
 * Python int. Returns a new reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7685
 *     def year(self):
 *         """int: """
 *         return self._ptr[0].year             # <<<<<<<<<<<<<<
 * 
 *     @year.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).year); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7682
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def year(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.year.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7687
 *         return self._ptr[0].year
 * 
 *     @year.setter             # <<<<<<<<<<<<<<
 *     def year(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing wrapper for the `year` property setter: downcasts self and
 * forwards the new value. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuLicenseExpiry.year` setter (pyx:7687): raises
 * ValueError if the instance is flagged read-only, otherwise converts the
 * Python value to unsigned int and stores it into the backing struct.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7689
 *     @year.setter
 *     def year(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].year = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7690
 *     def year(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].year = val
 * 
 */
    /* Instantiate and raise ValueError via Cython's vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7690, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7690, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7689
 *     @year.setter
 *     def year(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].year = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7691
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].year = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Overflow/type errors from the conversion propagate as Python exceptions. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7691, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).year = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7687
 *         return self._ptr[0].year
 * 
 *     @year.setter             # <<<<<<<<<<<<<<
 *     def year(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.year.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7693
 *         self._ptr[0].year = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def month(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-facing wrapper for the `month` property getter: downcasts self and
 * delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuLicenseExpiry.month` getter (pyx:7693): reads the
 * `month` field (unsigned short — narrower than `year`) from the backing
 * struct and boxes it as a Python int. New reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7696
 *     def month(self):
 *         """int: """
 *         return self._ptr[0].month             # <<<<<<<<<<<<<<
 * 
 *     @month.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).month); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7696, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7693
 *         self._ptr[0].year = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def month(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.month.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7698
 *         return self._ptr[0].month
 * 
 *     @month.setter             # <<<<<<<<<<<<<<
 *     def month(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing wrapper for the `month` property setter: downcasts self and
 * forwards the new value. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuLicenseExpiry.month` setter (pyx:7698): raises
 * ValueError if the instance is flagged read-only, otherwise converts the
 * Python value to unsigned short and stores it into the backing struct.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7700
 *     @month.setter
 *     def month(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].month = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7701
 *     def month(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].month = val
 * 
 */
    /* Instantiate and raise ValueError via Cython's vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7701, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7701, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7700
 *     @month.setter
 *     def month(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].month = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7702
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].month = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Overflow/type errors from the conversion propagate as Python exceptions. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7702, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).month = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7698
 *         return self._ptr[0].month
 * 
 *     @month.setter             # <<<<<<<<<<<<<<
 *     def month(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.month.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7704
 *         self._ptr[0].month = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def day(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-facing wrapper for the `day` property getter: downcasts self and
 * delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are presumably preamble macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7707
 *     def day(self):
 *         """int: """
 *         return self._ptr[0].day             # <<<<<<<<<<<<<<
 * 
 *     @day.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).day); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7707, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7704
 *         self._ptr[0].month = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def day(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.day.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7709
 *         return self._ptr[0].day
 * 
 *     @day.setter             # <<<<<<<<<<<<<<
 *     def day(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter for the `VgpuLicenseExpiry.day` property.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation: raises ValueError if the instance is read-only,
 * otherwise converts `val` to unsigned short and stores it in
 * `self._ptr[0].day`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7711
 *     @day.setter
 *     def day(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].day = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7712
 *     def day(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].day = val
 * 
 */
    /* Construct and raise ValueError via the vectorcall fast path;
     * __pyx_t_3 = 1 offsets past the unused "self" slot in callargs. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7712, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7712, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7711
 *     @day.setter
 *     def day(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].day = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7713
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].day = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned short)-1 doubles as the conversion-error sentinel, so a
   * PyErr_Occurred() check disambiguates a legitimate 0xFFFF value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7713, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).day = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7709
 *         return self._ptr[0].day
 * 
 *     @day.setter             # <<<<<<<<<<<<<<
 *     def day(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.day.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7715
 *         self._ptr[0].day = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def hour(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter for the `VgpuLicenseExpiry.hour` property:
 * casts the receiver and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Returns `self._ptr[0].hour` (unsigned short) boxed as a new Python int,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7718
 *     def hour(self):
 *         """int: """
 *         return self._ptr[0].hour             # <<<<<<<<<<<<<<
 * 
 *     @hour.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).hour); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7718, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7715
 *         self._ptr[0].day = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def hour(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.hour.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7720
 *         return self._ptr[0].hour
 * 
 *     @hour.setter             # <<<<<<<<<<<<<<
 *     def hour(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter for the `VgpuLicenseExpiry.hour` property.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned short and stores it in `self._ptr[0].hour`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7722
 *     @hour.setter
 *     def hour(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].hour = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7723
 *     def hour(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].hour = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7723, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7723, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7722
 *     @hour.setter
 *     def hour(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].hour = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7724
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].hour = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* -1 is the conversion-error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7724, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).hour = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7720
 *         return self._ptr[0].hour
 * 
 *     @hour.setter             # <<<<<<<<<<<<<<
 *     def hour(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.hour.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7726
 *         self._ptr[0].hour = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter for the `VgpuLicenseExpiry.min_` property.
 * The Python name carries a trailing underscore while the underlying C
 * struct field is `min` (see the .pyx source lines quoted below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Returns `self._ptr[0].min` (unsigned short) boxed as a new Python int,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7729
 *     def min_(self):
 *         """int: """
 *         return self._ptr[0].min             # <<<<<<<<<<<<<<
 * 
 *     @min_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).min); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7729, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7726
 *         self._ptr[0].hour = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.min_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7731
 *         return self._ptr[0].min
 * 
 *     @min_.setter             # <<<<<<<<<<<<<<
 *     def min_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter for the `VgpuLicenseExpiry.min_` property
 * (backed by the C struct field `min`). Returns 0 on success, -1 with a
 * Python exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned short and stores it in `self._ptr[0].min`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7733
 *     @min_.setter
 *     def min_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].min = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7734
 *     def min_(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].min = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7734, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7734, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7733
 *     @min_.setter
 *     def min_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].min = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7735
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].min = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* -1 is the conversion-error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7735, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).min = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7731
 *         return self._ptr[0].min
 * 
 *     @min_.setter             # <<<<<<<<<<<<<<
 *     def min_(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.min_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7737
 *         self._ptr[0].min = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sec(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter for the `VgpuLicenseExpiry.sec` property:
 * casts the receiver and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Returns `self._ptr[0].sec` (unsigned short) boxed as a new Python int,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7740
 *     def sec(self):
 *         """int: """
 *         return self._ptr[0].sec             # <<<<<<<<<<<<<<
 * 
 *     @sec.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).sec); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7737
 *         self._ptr[0].min = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sec(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.sec.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7742
 *         return self._ptr[0].sec
 * 
 *     @sec.setter             # <<<<<<<<<<<<<<
 *     def sec(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter for the `VgpuLicenseExpiry.sec` property.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned short and stores it in `self._ptr[0].sec`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7744
 *     @sec.setter
 *     def sec(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].sec = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7745
 *     def sec(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sec = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7745, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7745, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7744
 *     @sec.setter
 *     def sec(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].sec = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7746
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].sec = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* -1 is the conversion-error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7746, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sec = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7742
 *         return self._ptr[0].sec
 * 
 *     @sec.setter             # <<<<<<<<<<<<<<
 *     def sec(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.sec.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7748
 *         self._ptr[0].sec = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def status(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter for the `VgpuLicenseExpiry.status` property.
 * Unlike the date/time fields, `status` is an unsigned char in the
 * wrapped struct (note __Pyx_PyLong_From_unsigned_char below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Returns `self._ptr[0].status` (unsigned char) boxed as a new Python
 * int, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7751
 *     def status(self):
 *         """int: """
 *         return self._ptr[0].status             # <<<<<<<<<<<<<<
 * 
 *     @status.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).status); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7748
 *         self._ptr[0].sec = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def status(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.status.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7753
 *         return self._ptr[0].status
 * 
 *     @status.setter             # <<<<<<<<<<<<<<
 *     def status(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter for the `VgpuLicenseExpiry.status` property
 * (unsigned char field). Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned char and stores it in `self._ptr[0].status`. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7755
 *     @status.setter
 *     def status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].status = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7756
 *     def status(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].status = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7756, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7756, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7755
 *     @status.setter
 *     def status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].status = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7757
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseExpiry instance is read-only")
 *         self._ptr[0].status = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned char)-1 (0xFF) is the conversion-error sentinel, so
   * PyErr_Occurred() distinguishes a real 0xFF from a failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 7757, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).status = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7753
 *         return self._ptr[0].status
 * 
 *     @status.setter             # <<<<<<<<<<<<<<
 *     def status(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.status.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7759
 *         self._ptr[0].status = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuLicenseExpiry instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method VgpuLicenseExpiry.from_data(data).
 * Unpacks exactly one positional-or-keyword argument ("data") from the
 * FASTCALL/tuple calling convention and forwards it to the implementation
 * function __pyx_pf_..._12from_data below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_12from_data, "VgpuLicenseExpiry.from_data(data)\n\nCreate an VgpuLicenseExpiry instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_license_expiry_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must sit inside unlikely().  unlikely(x)
       expands to __builtin_expect(!!(x), 0), whose !!(x) result is only 0 or 1,
       so the previous `unlikely(__pyx_kwds_len) < 0` was always false and a
       failing __Pyx_NumKwargs_FASTCALL() would have gone undetected.
       (Compare the correct form used in __reduce_cython__'s wrapper.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7759, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7759, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 7759, __pyx_L3_error)
      /* verify every required argument was supplied either positionally or by keyword */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 7759, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7759, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 7759, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseExpiry.from_data(data): delegates to the
 * module-level helper __from_data(), passing the dtype name, the module-global
 * `vgpu_license_expiry_dtype` object, and the VgpuLicenseExpiry type so the
 * helper can validate the array and construct the wrapping instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":7766
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_license_expiry_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_license_expiry_dtype", vgpu_license_expiry_dtype, VgpuLicenseExpiry)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* look up the module global holding the expected dtype object */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_license_expiry_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_license_expiry_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7759
 *         self._ptr[0].status = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuLicenseExpiry instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7768
 *         return __from_data(data, "vgpu_license_expiry_dtype", vgpu_license_expiry_dtype, VgpuLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseExpiry instance wrapping the given pointer.
*/

/* Python wrapper for the static method
 * VgpuLicenseExpiry.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks 1..3 positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C int, applies the defaults, and forwards to the
 * implementation function __pyx_pf_..._14from_ptr below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_14from_ptr, "VgpuLicenseExpiry.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuLicenseExpiry instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must sit inside unlikely().  unlikely(x)
       expands to __builtin_expect(!!(x), 0), whose !!(x) result is only 0 or 1,
       so the previous `unlikely(__pyx_kwds_len) < 0` was always false and a
       failing __Pyx_NumKwargs_FASTCALL() would have gone undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7768, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 7768, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":7769
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuLicenseExpiry instance wrapping the given pointer.
 * 
*/
      /* default for `owner` is None */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* verify the one required argument (`ptr`) was supplied */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 7768, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7768, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7769, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7769, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 7768, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":7768
 *         return __from_data(data, "vgpu_license_expiry_dtype", vgpu_license_expiry_dtype, VgpuLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseExpiry instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseExpiry.from_ptr(ptr, readonly, owner):
 * wraps a raw nvmlVgpuLicenseExpiry_t pointer in a new VgpuLicenseExpiry
 * object.  Rejects a null pointer with ValueError.  When `owner` is None the
 * struct is COPIED into freshly malloc'd storage which the new object owns
 * (_owned = True; presumably freed in the type's dealloc, not visible here);
 * otherwise the pointer is borrowed and `owner` is retained in obj._owner to
 * keep the backing memory alive (_owned = False). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7777
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7778
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)
 *         if owner is None:
*/
    /* build and raise ValueError("ptr must not be null (0)") via vectorcall */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7778, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7778, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7777
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)
*/
  }

  /* "cuda/bindings/_nvml.pyx":7779
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
*/
  /* allocate the instance directly via tp_new, bypassing __init__ */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7779, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7780
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7781
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
*/
    /* no owner: take a private copy of the struct */
    __pyx_v_obj->_ptr = ((nvmlVgpuLicenseExpiry_t *)malloc((sizeof(nvmlVgpuLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7782
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseExpiry_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7783
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseExpiry_t))
 *             obj._owner = None
*/
      /* malloc failed: raise MemoryError("Error allocating VgpuLicenseExpiry");
       * MemoryError is looked up as a (module) global, so a shadowing binding
       * would be honored */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7783, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuLicenseExpi};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7783, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 7783, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7782
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseExpiry_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":7784
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* copy the caller's struct into the newly owned buffer */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7785
 *                 raise MemoryError("Error allocating VgpuLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseExpiry_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7786
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseExpiry_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7780
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseExpiry obj = VgpuLicenseExpiry.__new__(VgpuLicenseExpiry)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>malloc(sizeof(nvmlVgpuLicenseExpiry_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":7788
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* owner provided: borrow the pointer and retain the owner */
    __pyx_v_obj->_ptr = ((nvmlVgpuLicenseExpiry_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7789
 *         else:
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":7790
 *             obj._ptr = <nvmlVgpuLicenseExpiry_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":7791
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":7792
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7768
 *         return __from_data(data, "vgpu_license_expiry_dtype", vgpu_license_expiry_dtype, VgpuLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseExpiry instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for VgpuLicenseExpiry.__reduce_cython__(self): rejects any
 * positional or keyword arguments, then delegates to the implementation,
 * which unconditionally raises TypeError (instances are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_16__reduce_cython__, "VgpuLicenseExpiry.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* takes no arguments beyond self: reject positionals and keywords */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because the
 * wrapped C pointer (self._ptr) cannot be serialized for pickling. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for VgpuLicenseExpiry.__setstate_cython__(self, __pyx_state).
 * Unpacks the single `__pyx_state` argument and forwards to the
 * implementation, which unconditionally raises TypeError (not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_18__setstate_cython__, "VgpuLicenseExpiry.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must sit inside unlikely().  unlikely(x)
       expands to __builtin_expect(!!(x), 0), whose !!(x) result is only 0 or 1,
       so the previous `unlikely(__pyx_kwds_len) < 0` was always false and a
       failing __Pyx_NumKwargs_FASTCALL() would have gone undetected.
       (__reduce_cython__'s wrapper above already uses the correct form.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* verify the one required argument was supplied */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: always raises TypeError because the
 * wrapped C pointer (self._ptr) cannot be restored from a pickled state. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseExpiry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7795
 * 
 * 
 * cdef _get_grid_license_expiry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicenseExpiry_t pod = nvmlGridLicenseExpiry_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing nvmlGridLicenseExpiry_t:
 * field names/formats plus byte offsets computed with &pod.field - &pod arithmetic
 * on a stack instance, and 'itemsize' = sizeof(nvmlGridLicenseExpiry_t).
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_grid_license_expiry_dtype_offsets(void) {
  nvmlGridLicenseExpiry_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGridLicenseExpiry_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_grid_license_expiry_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":7796
 * 
 * cdef _get_grid_license_expiry_dtype_offsets():
 *     cdef nvmlGridLicenseExpiry_t pod = nvmlGridLicenseExpiry_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 */
  /* NOTE(review): __pyx_t_1 is copied into pod without any visible initialization
   * in this chunk; only the *addresses* of pod's fields are used below, so the
   * indeterminate contents do not affect the result — but confirm the generated
   * value-initialization of the C++ temporary was not dropped upstream. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":7797
 * cdef _get_grid_license_expiry_dtype_offsets():
 *     cdef nvmlGridLicenseExpiry_t pod = nvmlGridLicenseExpiry_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 */
  /* Look up _numpy.dtype (t_5 holds the callable for the final call). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7797, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7797, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":7798
 *     cdef nvmlGridLicenseExpiry_t pod = nvmlGridLicenseExpiry_t()
 *     return _numpy.dtype({
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 *         'offsets': [
 */
  /* Build the spec dict (t_4) and its 'names' list of 7 field-name strings. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7798, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7798, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_year);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_year);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_year) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_month);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_month);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_month) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_day);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_day);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_day) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_hour);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_hour);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_hour) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_min) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sec);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sec);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_sec) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_status);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_status);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_status) != (0)) __PYX_ERR(0, 7798, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 7798, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7799
 *     return _numpy.dtype({
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 */
  /* Fetch the 7 numpy scalar types (uint32, 5x uint16, uint8) into t_7..t_13,
   * then pack them into the 'formats' list. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 7799, __pyx_L1_error);
  /* References were transferred into the list; clear the temporaries. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 7798, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7801
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 */
  /* Each offset is the field address minus the struct base address. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.year)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":7802
 *         'offsets': [
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.month)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 7802, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":7803
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.day)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 7803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":7804
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hour)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 7804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":7805
 *             (<intptr_t>&(pod.day)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.min)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7805, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":7806
 *             (<intptr_t>&(pod.hour)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sec)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":7807
 *             (<intptr_t>&(pod.min)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGridLicenseExpiry_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.status)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":7800
 *         'names': ['year', 'month', 'day', 'hour', 'min_', 'sec', 'status'],
 *         'formats': [_numpy.uint32, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint16, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.year)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.month)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list from the 7 offset ints. */
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7800, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 7800, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 7798, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":7809
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGridLicenseExpiry_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGridLicenseExpiry_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7809, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 7798, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall path; t_14 == 0 selects the
   * unpacked bound-method calling convention when dtype is a PyMethod. */
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7797, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7795
 * 
 * 
 * cdef _get_grid_license_expiry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicenseExpiry_t pod = nvmlGridLicenseExpiry_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop every live temporary, record a traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_grid_license_expiry_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7826
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGridLicenseExpiry_t *>calloc(1, sizeof(nvmlGridLicenseExpiry_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for GridLicenseExpiry.__init__: rejects any positional or
 * keyword arguments, then forwards to the implementation. Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: fail fast on any positionals or keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.__init__: zero-allocates one
 * nvmlGridLicenseExpiry_t on the heap, raises MemoryError if calloc fails,
 * and initializes the ownership flags (_owner=None, _owned=True,
 * _readonly=False) so __dealloc__ knows to free the buffer. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":7827
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGridLicenseExpiry_t *>calloc(1, sizeof(nvmlGridLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GridLicenseExpiry")
 */
  /* calloc zero-fills the struct so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlGridLicenseExpiry_t *)calloc(1, (sizeof(nvmlGridLicenseExpiry_t))));

  /* "cuda/bindings/_nvml.pyx":7828
 *     def __init__(self):
 *         self._ptr = <nvmlGridLicenseExpiry_t *>calloc(1, sizeof(nvmlGridLicenseExpiry_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GridLicenseExpiry")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7829
 *         self._ptr = <nvmlGridLicenseExpiry_t *>calloc(1, sizeof(nvmlGridLicenseExpiry_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GridLicenseExpiry")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating GridLicenseExpiry")
     * through the vectorcall path, then jump to the error exit. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7829, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GridLicenseExpi};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7829, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7829, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7828
 *     def __init__(self):
 *         self._ptr = <nvmlGridLicenseExpiry_t *>calloc(1, sizeof(nvmlGridLicenseExpiry_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GridLicenseExpiry")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":7830
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GridLicenseExpiry")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":7831
 *             raise MemoryError("Error allocating GridLicenseExpiry")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":7832
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":7826
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGridLicenseExpiry_t *>calloc(1, sizeof(nvmlGridLicenseExpiry_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7834
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGridLicenseExpiry_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level tp_dealloc hook wrapper: forwards to the implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS names __pyx_args/__pyx_nargs which are
 * not parameters here — the macro ignores its arguments (expands without
 * evaluating them), so this compiles; it is generated boilerplate. */
static void __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of GridLicenseExpiry.__dealloc__: free the heap-allocated
 * nvmlGridLicenseExpiry_t, but only when this object owns it (_owned set by
 * __init__) and the pointer is still live. Non-owned views leave the buffer
 * to its real owner. */
static void __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  if (!__pyx_v_self->_owned) {
    return;
  }
  if (__pyx_v_self->_ptr == NULL) {
    return;
  }
  /* Detach the pointer from the object before freeing so the field never
   * dangles, then release the buffer. */
  nvmlGridLicenseExpiry_t *__pyx_v_ptr = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(__pyx_v_ptr);
}

/* "cuda/bindings/_nvml.pyx":7841
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GridLicenseExpiry object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for GridLicenseExpiry.__repr__: no argument handling needed
 * (unary slot); forwards to the implementation and returns its result.
 * The __Pyx_KwValues_VARARGS line is generated boilerplate whose macro
 * ignores its (otherwise-undeclared) arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.__repr__: builds the f-string
 * "<{__name__}.GridLicenseExpiry object at {hex(id(self))}>" by formatting
 * the module's __name__, computing hex(id(self)), and joining 5 unicode
 * pieces with a precomputed length/max-char hint. Returns a new str, or
 * NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":7842
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GridLicenseExpiry object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, '') — the {__name__} interpolation. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) — the {hex(id(self))} interpolation. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the 5 pieces: "<", name, ".GridLicenseExpiry object at ", hex, ">".
   * The size argument pre-sums the literal lengths (2 + 29 = 31 chars of
   * literals) plus both interpolated lengths; the last argument is the
   * max-char hint for the unicode kind. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GridLicenseExpiry_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 29 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7841
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GridLicenseExpiry object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7844
 *         return f"<{__name__}.GridLicenseExpiry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper for the GridLicenseExpiry.ptr property getter: forwards
 * to the implementation. The __Pyx_KwValues_VARARGS line is generated
 * boilerplate whose macro ignores its (otherwise-undeclared) arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicenseExpiry.ptr property: returns the raw
 * struct pointer's address as a Python int (via intptr_t). Returns a new
 * reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7847
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address; PyLong_FromSsize_t is safe because intptr_t
   * and Py_ssize_t have the same width on supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7844
 *         return f"<{__name__}.GridLicenseExpiry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7849
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

/* C-level (cdef) accessor: returns self->_ptr as an intptr_t with no
 * Python-object creation; cannot raise, so there is no error label.
 * Generated by Cython -- do not hand-edit. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_17GridLicenseExpiry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":7850
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7849
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7852
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython nb_int slot wrapper for GridLicenseExpiry.__int__: casts self to
 * the extension-type struct and delegates to the implementation function.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
 * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in this
 * build configuration -- confirm against Cython's utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.__int__: identical in effect to the
 * ptr property getter -- converts self->_ptr to a Python int.
 * Generated by Cython from cuda/bindings/_nvml.pyx:7852 -- do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":7853
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer address boxed as a Python int (assumes intptr_t == Py_ssize_t width). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7853, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7852
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7855
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GridLicenseExpiry other_
 *         if not isinstance(other, GridLicenseExpiry):
*/

/* Python wrapper */
/* CPython richcompare (==) wrapper for GridLicenseExpiry.__eq__: casts self
 * and forwards `other` unchanged to the implementation function.
 * Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.__eq__: returns False for non-
 * GridLicenseExpiry operands, otherwise compares the two wrapped structs
 * byte-for-byte with memcmp over sizeof(nvmlGridLicenseExpiry_t).
 * NOTE(review): byte-wise equality also compares any padding bytes in the
 * struct -- presumably acceptable since instances are malloc'd/memcpy'd
 * whole; verify in _nvml.pyx if spurious inequality is ever observed.
 * The isinstance check above the cast also rejects None, so the later
 * pointer dereference cannot see a None operand.
 * Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":7857
 *     def __eq__(self, other):
 *         cdef GridLicenseExpiry other_
 *         if not isinstance(other, GridLicenseExpiry):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":7858
 *         cdef GridLicenseExpiry other_
 *         if not isinstance(other, GridLicenseExpiry):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicenseExpiry_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":7857
 *     def __eq__(self, other):
 *         cdef GridLicenseExpiry other_
 *         if not isinstance(other, GridLicenseExpiry):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":7859
 *         if not isinstance(other, GridLicenseExpiry):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicenseExpiry_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry))))) __PYX_ERR(0, 7859, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":7860
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicenseExpiry_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGridLicenseExpiry_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7860, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7855
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GridLicenseExpiry other_
 *         if not isinstance(other, GridLicenseExpiry):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7862
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicenseExpiry_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
*/

/* Python wrapper */
/* CPython mp_ass_subscript slot wrapper for GridLicenseExpiry.__setitem__:
 * casts self and forwards key/val to the implementation function.
 * Returns 0 on success, -1 on error (CPython slot convention).
 * Generated by Cython -- do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.__setitem__.
 * obj[0] = <numpy.ndarray>: mallocs a fresh nvmlGridLicenseExpiry_t, copies
 * sizeof(nvmlGridLicenseExpiry_t) bytes from the array's buffer (address
 * read from val.ctypes.data), marks the copy as owned, and mirrors the
 * array's writeable flag into _readonly.  Any other key/val combination is
 * forwarded to setattr(self, key, val).
 * NOTE(review): if self->_ptr already held an owned allocation it is
 * overwritten here without free() -- presumably acceptable given how the
 * generator uses this path; verify ownership handling in _nvml.pyx.
 * NOTE(review): the caller is expected to pass an ndarray whose buffer is
 * at least sizeof(nvmlGridLicenseExpiry_t) bytes -- no size check here.
 * Generated by Cython -- do not hand-edit. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":7863
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if self._ptr == NULL:
 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 7863, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 7863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 7863, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7864
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 */
    __pyx_v_self->_ptr = ((nvmlGridLicenseExpiry_t *)malloc((sizeof(nvmlGridLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7865
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicenseExpiry_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7866
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicenseExpiry")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicenseExpiry_t))
 *             self._owner = None
 */
      /* Vectorcall of MemoryError("Error allocating GridLicenseExpiry"):
       * looks up MemoryError from module globals, unpacks a bound method if
       * present, then raises the constructed exception instance. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7866, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GridLicenseExpi};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7866, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 7866, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7865
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicenseExpiry_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":7867
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the ndarray buffer address as a Python int;
     * converted to intptr_t and used as the memcpy source. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7867, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGridLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7868
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicenseExpiry_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7869
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicenseExpiry_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7870
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7870, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7870, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 7870, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":7863
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":7872
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 7872, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":7862
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicenseExpiry_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7874
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def year(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter slot wrapper for GridLicenseExpiry.year: casts self and
 * delegates to the implementation.  Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicenseExpiry.year property getter: reads the
 * `year` field (unsigned int in the C struct) through self->_ptr and boxes
 * it as a Python int.  Dereferences _ptr without a NULL check -- the
 * wrapper class presumably guarantees _ptr is valid after construction.
 * Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7877
 *     def year(self):
 *         """int: """
 *         return self._ptr[0].year             # <<<<<<<<<<<<<<
 * 
 *     @year.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).year); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7877, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7874
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def year(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.year.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7879
 *         return self._ptr[0].year
 * 
 *     @year.setter             # <<<<<<<<<<<<<<
 *     def year(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for GridLicenseExpiry.year: casts self,
 * forwards the new value, returns 0/-1 per the slot convention.
 * Generated by Cython -- do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicenseExpiry.year property setter: raises
 * ValueError if the instance is flagged read-only, otherwise converts val
 * to unsigned int (OverflowError/TypeError on bad input) and stores it in
 * the wrapped struct's `year` field.  Generated by Cython -- do not hand-edit. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7881
 *     @year.setter
 *     def year(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].year = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7882
 *     def year(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].year = val
 * 
 */
    /* Vectorcall of ValueError(msg), then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7882, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7882, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7881
 *     @year.setter
 *     def year(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].year = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7883
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].year = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7883, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).year = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7879
 *         return self._ptr[0].year
 * 
 *     @year.setter             # <<<<<<<<<<<<<<
 *     def year(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.year.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7885
 *         self._ptr[0].year = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def month(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter slot wrapper for GridLicenseExpiry.month: casts self and
 * delegates to the implementation.  Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicenseExpiry.month property getter: reads the
 * `month` field (unsigned short in the C struct) through self->_ptr and
 * boxes it as a Python int.  Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7888
 *     def month(self):
 *         """int: """
 *         return self._ptr[0].month             # <<<<<<<<<<<<<<
 * 
 *     @month.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).month); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7885
 *         self._ptr[0].year = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def month(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.month.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7890
 *         return self._ptr[0].month
 * 
 *     @month.setter             # <<<<<<<<<<<<<<
 *     def month(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for GridLicenseExpiry.month: casts self,
 * forwards the new value, returns 0/-1 per the slot convention.
 * Generated by Cython -- do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicenseExpiry.month property setter: raises
 * ValueError when the instance is read-only, otherwise converts val to
 * unsigned short and stores it in the wrapped struct's `month` field.
 * Generated by Cython -- do not hand-edit. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7892
 *     @month.setter
 *     def month(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].month = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7893
 *     def month(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].month = val
 * 
 */
    /* Vectorcall of ValueError(msg), then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7893, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7893, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7892
 *     @month.setter
 *     def month(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].month = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":7894
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].month = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7894, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).month = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7890
 *         return self._ptr[0].month
 * 
 *     @month.setter             # <<<<<<<<<<<<<<
 *     def month(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.month.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7896
 *         self._ptr[0].month = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def day(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter slot wrapper for GridLicenseExpiry.day: casts self and
 * delegates to the implementation.  Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicenseExpiry.day property getter: reads the
 * `day` field (unsigned short in the C struct) through self->_ptr and
 * boxes it as a Python int.  Generated by Cython -- do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7899
 *     def day(self):
 *         """int: """
 *         return self._ptr[0].day             # <<<<<<<<<<<<<<
 * 
 *     @day.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).day); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7899, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7896
 *         self._ptr[0].month = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def day(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.day.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7901
 *         return self._ptr[0].day
 * 
 *     @day.setter             # <<<<<<<<<<<<<<
 *     def day(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.day property setter.
 * Casts self to the extension-type struct and delegates; returns 0 on
 * success, -1 on error (standard descriptor __set__ convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.day.__set__ (_nvml.pyx:7903-7905):
 * raises ValueError when the instance is flagged read-only, otherwise
 * converts `val` to `unsigned short` and stores it into self._ptr[0].day.
 * The `(unsigned short)-1 && PyErr_Occurred()` pattern distinguishes a
 * legitimate 0xFFFF value from a conversion error.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7903
 *     @day.setter
 *     def day(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].day = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7904
 *     def day(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].day = val
 * 
*/
    /* Build ValueError(...) via the vectorcall fast path, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7904, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7904, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7903
 *     @day.setter
 *     def day(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].day = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7905
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].day = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7905, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).day = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7901
 *         return self._ptr[0].day
 * 
 *     @day.setter             # <<<<<<<<<<<<<<
 *     def day(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.day.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7907
 *         self._ptr[0].day = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def hour(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.hour property getter:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.hour.__get__ (_nvml.pyx:7910):
 * boxes the C `unsigned short` field self._ptr[0].hour into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7910
 *     def hour(self):
 *         """int: """
 *         return self._ptr[0].hour             # <<<<<<<<<<<<<<
 * 
 *     @hour.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).hour); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7907
 *         self._ptr[0].day = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def hour(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.hour.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7912
 *         return self._ptr[0].hour
 * 
 *     @hour.setter             # <<<<<<<<<<<<<<
 *     def hour(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.hour property setter:
 * casts self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.hour.__set__ (_nvml.pyx:7914-7916):
 * raises ValueError when the instance is read-only, otherwise converts `val`
 * to `unsigned short` and stores it into self._ptr[0].hour.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7914
 *     @hour.setter
 *     def hour(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].hour = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7915
 *     def hour(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].hour = val
 * 
*/
    /* Build ValueError(...) via the vectorcall fast path, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7915, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7915, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7914
 *     @hour.setter
 *     def hour(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].hour = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7916
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].hour = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7916, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).hour = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7912
 *         return self._ptr[0].hour
 * 
 *     @hour.setter             # <<<<<<<<<<<<<<
 *     def hour(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.hour.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7918
 *         self._ptr[0].hour = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.min_ property getter
 * (Python name `min_` maps onto the C struct field `min`):
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.min_.__get__ (_nvml.pyx:7921):
 * boxes the C `unsigned short` field self._ptr[0].min into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7921
 *     def min_(self):
 *         """int: """
 *         return self._ptr[0].min             # <<<<<<<<<<<<<<
 * 
 *     @min_.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).min); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7921, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7918
 *         self._ptr[0].hour = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.min_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7923
 *         return self._ptr[0].min
 * 
 *     @min_.setter             # <<<<<<<<<<<<<<
 *     def min_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.min_ property setter:
 * casts self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.min_.__set__ (_nvml.pyx:7925-7927):
 * raises ValueError when the instance is read-only, otherwise converts `val`
 * to `unsigned short` and stores it into self._ptr[0].min.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7925
 *     @min_.setter
 *     def min_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].min = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7926
 *     def min_(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].min = val
 * 
*/
    /* Build ValueError(...) via the vectorcall fast path, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7926, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7926, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7925
 *     @min_.setter
 *     def min_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].min = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7927
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].min = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7927, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).min = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7923
 *         return self._ptr[0].min
 * 
 *     @min_.setter             # <<<<<<<<<<<<<<
 *     def min_(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.min_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7929
 *         self._ptr[0].min = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sec(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.sec property getter:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.sec.__get__ (_nvml.pyx:7932):
 * boxes the C `unsigned short` field self._ptr[0].sec into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7932
 *     def sec(self):
 *         """int: """
 *         return self._ptr[0].sec             # <<<<<<<<<<<<<<
 * 
 *     @sec.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_short((__pyx_v_self->_ptr[0]).sec); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7932, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7929
 *         self._ptr[0].min = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sec(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.sec.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7934
 *         return self._ptr[0].sec
 * 
 *     @sec.setter             # <<<<<<<<<<<<<<
 *     def sec(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.sec property setter:
 * casts self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.sec.__set__ (_nvml.pyx:7936-7938):
 * raises ValueError when the instance is read-only, otherwise converts `val`
 * to `unsigned short` and stores it into self._ptr[0].sec.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned short __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7936
 *     @sec.setter
 *     def sec(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].sec = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7937
 *     def sec(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sec = val
 * 
*/
    /* Build ValueError(...) via the vectorcall fast path, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7937, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7937, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7936
 *     @sec.setter
 *     def sec(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].sec = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7938
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].sec = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_short(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned short)-1) && PyErr_Occurred())) __PYX_ERR(0, 7938, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sec = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7934
 *         return self._ptr[0].sec
 * 
 *     @sec.setter             # <<<<<<<<<<<<<<
 *     def sec(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.sec.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7940
 *         self._ptr[0].sec = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def status(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.status property getter:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.status.__get__ (_nvml.pyx:7943):
 * boxes the C `unsigned char` field self._ptr[0].status (note: narrower than
 * the other unsigned-short fields) into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":7943
 *     def status(self):
 *         """int: """
 *         return self._ptr[0].status             # <<<<<<<<<<<<<<
 * 
 *     @status.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).status); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7940
 *         self._ptr[0].sec = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def status(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.status.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7945
 *         return self._ptr[0].status
 * 
 *     @status.setter             # <<<<<<<<<<<<<<
 *     def status(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.status property setter:
 * casts self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicenseExpiry.status.__set__ (_nvml.pyx:7947-7949):
 * raises ValueError when the instance is read-only, otherwise converts `val`
 * to `unsigned char` (narrower than the other fields) and stores it into
 * self._ptr[0].status.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":7947
 *     @status.setter
 *     def status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].status = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":7948
 *     def status(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].status = val
 * 
*/
    /* Build ValueError(...) via the vectorcall fast path, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicenseExpiry_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7948, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 7948, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7947
 *     @status.setter
 *     def status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].status = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":7949
 *         if self._readonly:
 *             raise ValueError("This GridLicenseExpiry instance is read-only")
 *         self._ptr[0].status = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 7949, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).status = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":7945
 *         return self._ptr[0].status
 * 
 *     @status.setter             # <<<<<<<<<<<<<<
 *     def status(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.status.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7951
 *         self._ptr[0].status = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicenseExpiry instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* CPython-level wrapper for the GridLicenseExpiry.from_data staticmethod.
 * Parses exactly one positional-or-keyword argument ("data") from the
 * fastcall/vararg calling convention, then delegates to the implementation.
 * The PyMethodDef below registers it with METH_FASTCALL|METH_KEYWORDS. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_12from_data, "GridLicenseExpiry.from_data(data)\n\nCreate an GridLicenseExpiry instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `grid_license_expiry_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): the parenthesis placement compares the 0/1 result of
     * unlikely(...) against 0, so this `< 0` branch can never fire; it
     * matches upstream Cython 3 codegen, flagged here rather than edited
     * since the file is regenerated from _nvml.pyx. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 7951, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7951, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 7951, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 7951, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7951, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 7951, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GridLicenseExpiry.from_data(data) implementation: wraps a single-element
 * NumPy array of dtype `grid_license_expiry_dtype` in a new GridLicenseExpiry
 * instance by delegating to the module-level __from_data helper (which
 * performs the dtype/shape validation).
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":7958
 *             data (_numpy.ndarray): a single-element array of dtype `grid_license_expiry_dtype` holding the data.
 *         """
 *         return __from_data(data, "grid_license_expiry_dtype", grid_license_expiry_dtype, GridLicenseExpiry)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module-global `grid_license_expiry_dtype` object (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_grid_license_expiry_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Delegate validation and wrapping to the shared __from_data helper. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_grid_license_expiry_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7951
 *         self._ptr[0].status = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicenseExpiry instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7960
 *         return __from_data(data, "grid_license_expiry_dtype", grid_license_expiry_dtype, GridLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicenseExpiry instance wrapping the given pointer.
*/

/* Python wrapper */
/* GridLicenseExpiry.from_ptr(ptr, readonly=False, owner=None): parses the
 * FASTCALL (or tuple) calling convention, converts `ptr` to intptr_t and
 * `readonly` to a C int, defaults `owner` to None, and forwards to the
 * __pyx_pf_..._14from_ptr implementation.
 * Fix vs. generated code: the keyword-count error check below previously read
 * `unlikely(__pyx_kwds_len) < 0`, which normalizes the value to 0/1 via `!!`
 * before comparing, so a failing __Pyx_NumKwargs_FASTCALL (returning -1 with
 * an exception set) was never detected.  The parenthesization now matches the
 * correct form used elsewhere in this file: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_14from_ptr, "GridLicenseExpiry.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GridLicenseExpiry instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: compare the raw count, not the `!!`-normalized unlikely() result. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 7960, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional args (falling through from highest arity), then
       * let __Pyx_ParseKeywords fill the remaining slots from keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7960, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7960, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7960, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 7960, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":7961
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GridLicenseExpiry instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default; verify the required `ptr` was given. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 7960, __pyx_L3_error) }
      }
    } else {
      /* No keyword arguments: positional-only fast path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 7960, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 7960, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 7960, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python-level args to C values for the implementation call. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 7961, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 7961, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 7960, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":7960
 *         return __from_data(data, "grid_license_expiry_dtype", grid_license_expiry_dtype, GridLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicenseExpiry instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GridLicenseExpiry.from_ptr implementation.
 * Ownership model: if `owner` is None the struct pointed to by `ptr` is
 * deep-copied into freshly malloc'd storage and the instance owns (and will
 * free) it; otherwise the instance aliases `ptr` directly and keeps a
 * reference to `owner` to keep the underlying memory alive.
 * Raises ValueError for a null `ptr` and MemoryError on allocation failure.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":7969
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":7970
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)
 *         if owner is None:
*/
    /* Vectorcall-style construction of ValueError("ptr must not be null (0)"). */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7970, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 7970, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":7969
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)
*/
  }

  /* "cuda/bindings/_nvml.pyx":7971
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
*/
  /* Allocate the instance via tp_new directly, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicenseExpiry(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7971, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":7972
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":7973
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
*/
    /* No owner: take a private copy of the struct so the caller's memory
     * need not outlive this object. */
    __pyx_v_obj->_ptr = ((nvmlGridLicenseExpiry_t *)malloc((sizeof(nvmlGridLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7974
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicenseExpiry_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":7975
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicenseExpiry")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicenseExpiry_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called; the method-unpacking branch handles a bound method. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7975, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GridLicenseExpi};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7975, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 7975, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":7974
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicenseExpiry_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":7976
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicenseExpiry_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct; `ptr` must point to a valid
     * nvmlGridLicenseExpiry_t (caller's responsibility). */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGridLicenseExpiry_t))));

    /* "cuda/bindings/_nvml.pyx":7977
 *                 raise MemoryError("Error allocating GridLicenseExpiry")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicenseExpiry_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":7978
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicenseExpiry_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>ptr
*/
    /* _owned=True: the instance will free() the copy on deallocation. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":7972
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicenseExpiry obj = GridLicenseExpiry.__new__(GridLicenseExpiry)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>malloc(sizeof(nvmlGridLicenseExpiry_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":7980
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's memory and hold a reference to
     * `owner` so it stays alive at least as long as this instance. */
    __pyx_v_obj->_ptr = ((nvmlGridLicenseExpiry_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":7981
 *         else:
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":7982
 *             obj._ptr = <nvmlGridLicenseExpiry_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":7983
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":7984
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7960
 *         return __from_data(data, "grid_license_expiry_dtype", grid_license_expiry_dtype, GridLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicenseExpiry instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Drop the local reference to obj; on success __pyx_r already holds one. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* GridLicenseExpiry.__reduce_cython__ wrapper: accepts no arguments (rejects
 * any positionals or keywords) and delegates to the implementation, which
 * always raises TypeError to make instances unpicklable. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_16__reduce_cython__, "GridLicenseExpiry.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument signature: reject any positional or keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __reduce_cython__ implementation: unconditionally raises TypeError because
 * the wrapped C pointer (`self._ptr`) cannot be pickled.  Always returns NULL
 * with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* GridLicenseExpiry.__setstate_cython__(self, __pyx_state) wrapper: parses
 * exactly one argument and delegates to the implementation (which always
 * raises TypeError; instances are unpicklable).
 * Fix vs. generated code: the keyword-count error check below previously read
 * `unlikely(__pyx_kwds_len) < 0`, which normalizes the value to 0/1 via `!!`
 * before comparing, so a failing __Pyx_NumKwargs_FASTCALL (returning -1 with
 * an exception set) was never detected.  The parenthesization now matches the
 * correct form used elsewhere in this file: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_18__setstate_cython__, "GridLicenseExpiry.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: compare the raw count, not the `!!`-normalized unlikely() result. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positionals, then fill remaining slots from keywords and
       * verify the single required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setstate_cython__ implementation: unconditionally raises TypeError because
 * the wrapped C pointer (`self._ptr`) cannot be restored from pickled state.
 * The `__pyx_state` argument is accepted but never used.  Always returns NULL
 * with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17GridLicenseExpiry_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicenseExpiry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":7987
 * 
 * 
 * cdef _get_vgpu_type_id_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeIdInfo_v1_t pod = nvmlVgpuTypeIdInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Build a structured numpy dtype describing the in-memory layout of
 * nvmlVgpuTypeIdInfo_v1_t: field names, element formats, byte offsets
 * (computed from member addresses of a local POD instance) and total
 * itemsize, so numpy views over the raw struct memory line up with the
 * native C fields.  Returns a new reference to the dtype, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_id_info_v1_dtype_offsets(void) {
  nvmlVgpuTypeIdInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_type_id_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":7988
 * 
 * cdef _get_vgpu_type_id_info_v1_dtype_offsets():
 *     cdef nvmlVgpuTypeIdInfo_v1_t pod = nvmlVgpuTypeIdInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_count', 'vgpu_type_ids'],
 */
  /* FIX: the previous code copied an uninitialized temporary
   * (nvmlVgpuTypeIdInfo_v1_t __pyx_t_1; ... __pyx_v_pod = __pyx_t_1;),
   * i.e. a read of an indeterminate object.  Only member addresses of `pod`
   * are used below, but the copy itself is flagged by -Wuninitialized and
   * MemorySanitizer.  Value-initialize the POD directly instead (this
   * translation unit is compiled as C++14, so T() zero-initializes). */
  __pyx_v_pod = nvmlVgpuTypeIdInfo_v1_t();

  /* "cuda/bindings/_nvml.pyx":7989
 * cdef _get_vgpu_type_id_info_v1_dtype_offsets():
 *     cdef nvmlVgpuTypeIdInfo_v1_t pod = nvmlVgpuTypeIdInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'vgpu_count', 'vgpu_type_ids'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 */
  /* Look up _numpy.dtype from the module globals. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7989, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7989, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":7990
 *     cdef nvmlVgpuTypeIdInfo_v1_t pod = nvmlVgpuTypeIdInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_count', 'vgpu_type_ids'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 */
  /* dtype spec dict: 'names', 'formats', 'offsets', 'itemsize'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 7990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 7990, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_vgpu_count) != (0)) __PYX_ERR(0, 7990, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_type_ids);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_type_ids);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_vgpu_type_ids) != (0)) __PYX_ERR(0, 7990, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 7990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7991
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_count', 'vgpu_type_ids'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 7991, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 7991, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 7991, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 7990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":7993
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeIds)) - (<intptr_t>&pod),
 */
  /* Offsets computed from member addresses (equivalent to offsetof). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 7993, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":7994
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuTypeIds)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 7994, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":7995
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeIds)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuTypeIdInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuTypeIds)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 7995, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":7992
 *         'names': ['version', 'vgpu_count', 'vgpu_type_ids'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7992, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 7992, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 7992, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 7992, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 7990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":7997
 *             (<intptr_t>&(pod.vgpuTypeIds)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuTypeIdInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuTypeIdInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 7997, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 7990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall _numpy.dtype(spec); if it is a bound method, unpack it so the
   * self slot of __pyx_callargs is used (__pyx_t_10 becomes 0). */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7989, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":7987
 * 
 * 
 * cdef _get_vgpu_type_id_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeIdInfo_v1_t pod = nvmlVgpuTypeIdInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_type_id_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8014
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>calloc(1, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuTypeIdInfo_v1.__init__: verifies that the call
 * carries no positional or keyword arguments, then dispatches to the
 * implementation function.  Returns 0 on success, -1 with an exception set
 * on failure (tp_init protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments; the unchecked PyTuple_GET_SIZE is used only
     when the build assumes tuple sizes are safe to read directly. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) takes no extra arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeIdInfo_v1.__init__: allocates a zero-filled
 * nvmlVgpuTypeIdInfo_v1_t with calloc, marks the wrapper as owning that
 * allocation (_owned = True, freed later in __dealloc__), with no external
 * owner object and read-write access.  Raises MemoryError if calloc fails. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8015
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>calloc(1, sizeof(nvmlVgpuTypeIdInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 */
  /* calloc zero-initializes the struct, matching NVML's expectation of
     zeroed reserved/unset fields. */
  __pyx_v_self->_ptr = ((nvmlVgpuTypeIdInfo_v1_t *)calloc(1, (sizeof(nvmlVgpuTypeIdInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":8016
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>calloc(1, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8017
 *         self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>calloc(1, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeIdInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
       via the vectorcall protocol. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8017, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeIdInfo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8017, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8017, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8016
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>calloc(1, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":8018
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No external object keeps this buffer alive. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":8019
 *             raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* __dealloc__ will free() the buffer because we allocated it here. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":8020
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":8014
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>calloc(1, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8022
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuTypeIdInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuTypeIdInfo_v1.__dealloc__: thin shim that
 * casts self to the extension struct and forwards to the implementation. */
static void __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
     this compiles only because __Pyx_KwValues_VARARGS is a macro that
     expands without evaluating its arguments — verify against the macro
     definition in the utility-code section. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuTypeIdInfo_v1.__dealloc__ (from _nvml.pyx:8022).
 *
 * Releases the wrapped nvmlVgpuTypeIdInfo_v1_t buffer, but only when this
 * wrapper allocated it itself (_owned) and it has not already been released
 * (_ptr != NULL).  The pointer field is cleared before free() so a re-entrant
 * or repeated teardown cannot double-free the same allocation.  No Python
 * objects are manipulated here, so no refnanny bookkeeping is required. */
static void __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":8024-8027
   *     if self._owned and self._ptr != NULL:
   *         ptr = self._ptr
   *         self._ptr = NULL
   *         free(ptr)
   */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlVgpuTypeIdInfo_v1_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;   /* clear first: guards against double-free */
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":8029
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuTypeIdInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuTypeIdInfo_v1.__repr__: casts self and
 * forwards to the implementation; returns a new unicode reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __Pyx_KwValues_VARARGS is a macro that discards its
     (nonexistent here) arguments — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeIdInfo_v1.__repr__: formats the f-string
 * f"<{__name__}.VgpuTypeIdInfo_v1 object at {hex(id(self))}>" by joining
 * five pieces (two formatted values and three constant fragments) with a
 * precomputed total length and max char value for the unicode builder. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8030
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuTypeIdInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: fetch the module global and str()-format it. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id() -> hex() -> str(). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + module name + ".VgpuTypeIdInfo_v1 object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuTypeIdInfo_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 29 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8029
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuTypeIdInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8032
 *         return f"<{__name__}.VgpuTypeIdInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuTypeIdInfo_v1.ptr property getter:
 * forwards to the implementation; returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): macro discards its arguments; no args/nargs exist here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuTypeIdInfo_v1.ptr property getter: returns the
 * address held in self._ptr as a Python int (via intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8035
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t and Py_ssize_t are interchangeable here for building the int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8035, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8032
 *         return f"<{__name__}.VgpuTypeIdInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8037
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for VgpuTypeIdInfo_v1._get_ptr (from _nvml.pyx:8037):
 * hands back the wrapped struct's address as an intptr_t.  Pure C — no
 * Python objects, no error paths, so the goto/label scaffolding of the
 * generated original collapses to a single return. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":8038 — return <intptr_t>(self._ptr) */
  return (intptr_t)(__pyx_v_self->_ptr);
}

/* "cuda/bindings/_nvml.pyx":8040
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuTypeIdInfo_v1.__int__: forwards to the
 * implementation; returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): macro discards its arguments; no args/nargs exist here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeIdInfo_v1.__int__: int(obj) yields the wrapped
 * pointer address, same value as the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8041
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8041, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8040
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8043
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeIdInfo_v1 other_
 *         if not isinstance(other, VgpuTypeIdInfo_v1):
*/

/* Python wrapper */
/* Python-level wrapper for VgpuTypeIdInfo_v1.__eq__: forwards self and the
 * comparison operand to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): macro discards its arguments; no args/nargs exist here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeIdInfo_v1.__eq__: returns False for any operand
 * that is not a VgpuTypeIdInfo_v1, otherwise compares the two wrapped POD
 * structs bytewise with memcmp over sizeof(nvmlVgpuTypeIdInfo_v1_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8045
 *     def __eq__(self, other):
 *         cdef VgpuTypeIdInfo_v1 other_
 *         if not isinstance(other, VgpuTypeIdInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8046
 *         cdef VgpuTypeIdInfo_v1 other_
 *         if not isinstance(other, VgpuTypeIdInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeIdInfo_v1_t)) == 0)
 */
    /* Different type: not equal (returns False rather than NotImplemented). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8045
 *     def __eq__(self, other):
 *         cdef VgpuTypeIdInfo_v1 other_
 *         if not isinstance(other, VgpuTypeIdInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":8047
 *         if not isinstance(other, VgpuTypeIdInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeIdInfo_v1_t)) == 0)
 * 
 */
  /* Typed cast of `other` (type already verified above; TypeTest is the
     generated safety net for the cdef assignment). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1))))) __PYX_ERR(0, 8047, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8048
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeIdInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuTypeIdInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8048, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8043
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeIdInfo_v1 other_
 *         if not isinstance(other, VgpuTypeIdInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8050
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeIdInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeIdInfo_v1.__setitem__ (generated from _nvml.pyx:8050).
 * If key == 0 and val is a numpy.ndarray: mallocs a fresh nvmlVgpuTypeIdInfo_v1_t,
 * memcpy's the array's raw buffer (via val.ctypes.data) into it, clears _owner,
 * marks the struct as owned (_owned = True), and mirrors the array's writeable
 * flag into _readonly. Any other key falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8051
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the numpy isinstance check runs only when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8051, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8051, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8051, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 8051, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8052
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 */
    /* NOTE(review): _ptr is overwritten without freeing any previously owned
     * allocation — mirrors the .pyx source; presumably callers only assign
     * once, or the old buffer is released elsewhere. TODO confirm against
     * the class's __dealloc__/ownership logic in _nvml.pyx. */
    __pyx_v_self->_ptr = ((nvmlVgpuTypeIdInfo_v1_t *)malloc((sizeof(nvmlVgpuTypeIdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8053
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8054
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called via the vectorcall fast path to build the exception. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8054, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeIdInfo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8054, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 8054, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8053
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":8055
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeIdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; it is
     * converted to intptr_t and treated as a buffer of at least
     * sizeof(nvmlVgpuTypeIdInfo_v1_t) bytes — the caller is trusted to pass
     * a correctly-sized/dtype'd single-element array. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8055, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8055, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8055, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuTypeIdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8056
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* Data was copied, so no Python object owns the buffer: drop the old
     * _owner reference and replace it with None. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8057
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8058
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only-ness from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8058, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8058, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 8058, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":8051
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":8060
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat key as an attribute name (routes to the property
     * setters defined on this class, e.g. version / vgpu_type_ids). */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 8060, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":8050
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeIdInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8062
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter (_nvml.pyx:8062):
 * reads the `version` field of the wrapped nvmlVgpuTypeIdInfo_v1_t struct
 * and returns it as a Python int. Returns NULL with an exception set on
 * failure (e.g. int allocation failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8065
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): _ptr is dereferenced unconditionally; presumably it is
   * always non-NULL once the object is constructed — confirm in __cinit__. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8062
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8067
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter (_nvml.pyx:8067):
 * raises ValueError when the instance is flagged read-only, otherwise
 * converts val to unsigned int and stores it into the wrapped struct's
 * `version` field. Returns 0 on success, -1 with an exception set on
 * error (read-only instance, or val not convertible / out of range). */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8069
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8070
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path;
     * PyExc_ValueError is called directly (no global lookup needed). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeIdInfo_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8070, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8070, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8069
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8071
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8071, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8067
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8073
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_type_ids(self):
 *         """int: OUT: List of vGPU type IDs."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `vgpu_type_ids` property getter (_nvml.pyx:8073):
 * returns the struct's `vgpuTypeIds` pointer reinterpreted as an intptr_t
 * Python int (a raw address, not the pointed-to list contents). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8076
 *     def vgpu_type_ids(self):
 *         """int: OUT: List of vGPU type IDs."""
 *         return <intptr_t>(self._ptr[0].vgpuTypeIds)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_type_ids.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer is exposed as a signed Ssize_t-sized integer (intptr_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).vgpuTypeIds)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8076, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8073
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_type_ids(self):
 *         """int: OUT: List of vGPU type IDs."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.vgpu_type_ids.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8078
 *         return <intptr_t>(self._ptr[0].vgpuTypeIds)
 * 
 *     @vgpu_type_ids.setter             # <<<<<<<<<<<<<<
 *     def vgpu_type_ids(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `vgpu_type_ids` property setter (_nvml.pyx:8078):
 * raises ValueError when the instance is read-only, otherwise interprets
 * val as a raw address (intptr_t) and stores it into the struct's
 * `vgpuTypeIds` pointer field — no ownership or validity check is done on
 * the address. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8080
 *     @vgpu_type_ids.setter
 *     def vgpu_type_ids(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8081
 *     def vgpu_type_ids(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeIdInfo_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8081, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8081, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8080
 *     @vgpu_type_ids.setter
 *     def vgpu_type_ids(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8082
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeIdInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = PyLong_AsSsize_t(__pyx_v_val); if (unlikely((__pyx_t_4 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8082, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuTypeIds = ((nvmlVgpuTypeId_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":8078
 *         return <intptr_t>(self._ptr[0].vgpuTypeIds)
 * 
 *     @vgpu_type_ids.setter             # <<<<<<<<<<<<<<
 *     def vgpu_type_ids(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.vgpu_type_ids.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8084
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Fastcall wrapper for the static method VgpuTypeIdInfo_v1.from_data(data):
 * parses exactly one positional-or-keyword argument ("data"), raising the
 * usual TypeError for a wrong argument count or unexpected keywords, then
 * delegates to the Cython implementation. All `values[]` slots are released
 * on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_12from_data, "VgpuTypeIdInfo_v1.from_data(data)\n\nCreate an VgpuTypeIdInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_type_id_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 8084, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then merge
       * keywords and check nothing required is still missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8084, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8084, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8084, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8084, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8084, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeIdInfo_v1.from_data (_nvml.pyx:8084):
 * looks up the module-level dtype object `vgpu_type_id_info_v1_dtype` and
 * delegates to the shared helper __from_data(data, dtype_name, dtype, cls)
 * to build a VgpuTypeIdInfo_v1 wrapping the given NumPy array. Returns the
 * new instance, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8091
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_type_id_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_type_id_info_v1_dtype", vgpu_type_id_info_v1_dtype, VgpuTypeIdInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_type_id_info_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8091, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_type_id_info_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8091, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8084
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8093
 *         return __from_data(data, "vgpu_type_id_info_v1_dtype", vgpu_type_id_info_v1_dtype, VgpuTypeIdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * NOTE(review): Cython-generated argument-unpacking wrapper for the static
 * method VgpuTypeIdInfo_v1.from_ptr(ptr, readonly=False, owner=None).
 * It accepts either the METH_FASTCALL calling convention (C array of args +
 * kwnames) or the classic tuple/dict convention, depending on
 * CYTHON_METH_FASTCALL, converts the arguments to C values, and forwards to
 * the implementation __pyx_pf_..._14from_ptr below.  This file is generated
 * by Cython 3.2.2 from cuda/bindings/_nvml.pyx; fix defects in the .pyx
 * source, not here.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_14from_ptr, "VgpuTypeIdInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuTypeIdInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] hold the parsed (ptr, readonly, owner) argument objects;
   * every non-NULL slot is released with Py_XDECREF on all exit paths. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only __pyx_kwds_len, not the full
     * comparison.  Harmless because unlikely(x) evaluates to x, but it
     * differs from the usual unlikely(len < 0) pattern seen elsewhere. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 8093, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positional args first
       * (fallthrough switch), then merge keywords via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8093, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8093, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8093, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8093, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":8094
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default, then verify the one required
       * argument (`ptr`) was supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8093, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path (no keyword arguments given). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8093, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8093, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8093, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python objects to C values.  NOTE(review): `ptr` is converted
     * with PyLong_AsSsize_t, relying on intptr_t and Py_ssize_t having the
     * same width on supported platforms — presumably guaranteed by Cython's
     * target assumptions; verify if porting to an unusual ABI. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8094, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8094, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8093, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error path during unpacking: drop any argument references collected so
   * far, record a traceback frame, and propagate NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":8093
 *         return __from_data(data, "vgpu_type_id_info_v1_dtype", vgpu_type_id_info_v1_dtype, VgpuTypeIdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * NOTE(review): Cython-generated implementation of
 * VgpuTypeIdInfo_v1.from_ptr(ptr, readonly, owner).  Behavior visible in
 * this body:
 *   - raises ValueError if ptr == 0;
 *   - owner is None  -> mallocs a private nvmlVgpuTypeIdInfo_v1_t, memcpys
 *     the struct pointed to by ptr into it, and marks the object as owning
 *     the allocation (_owned = True, _owner = None);
 *   - owner given    -> stores ptr directly (no copy), keeps a reference to
 *     owner so the underlying memory outlives this wrapper (_owned = False);
 *   - records the readonly flag and returns the new VgpuTypeIdInfo_v1.
 * Machine-generated by Cython 3.2.2 — edit the .pyx source, not this file.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8102
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)
*/
  /* Reject a null pointer up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8103
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8103, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8103, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8102
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":8104
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
*/
  /* Allocate the wrapper object via tp_new (no __init__ runs here). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8104, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8105
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if obj._ptr == NULL:
*/
  /* Copy mode: no owner was supplied, so take a private copy of the data. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8106
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlVgpuTypeIdInfo_v1_t *)malloc((sizeof(nvmlVgpuTypeIdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8107
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeIdInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8108
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             obj._owner = None
*/
      /* NOTE(review): `MemoryError` is looked up as a module-global name
       * here (not the PyExc_MemoryError C slot), matching how the .pyx
       * spelled it; behavior is the same unless the module shadows it. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8108, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeIdInfo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8108, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 8108, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8107
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeIdInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":8109
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeIdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Caller-provided ptr must point to a readable nvmlVgpuTypeIdInfo_v1_t;
     * this copies it wholesale into the private allocation. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuTypeIdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8110
 *                 raise MemoryError("Error allocating VgpuTypeIdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8111
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8105
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeIdInfo_v1 obj = VgpuTypeIdInfo_v1.__new__(VgpuTypeIdInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>malloc(sizeof(nvmlVgpuTypeIdInfo_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":8113
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Wrap mode: alias the caller's memory and hold a reference to `owner`
   * so it stays alive for the lifetime of this wrapper. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuTypeIdInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8114
 *         else:
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":8115
 *             obj._ptr = <nvmlVgpuTypeIdInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":8116
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":8117
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8093
 *         return __from_data(data, "vgpu_type_id_info_v1_dtype", vgpu_type_id_info_v1_dtype, VgpuTypeIdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Exception path: release temporaries, record traceback, return NULL;
   * __pyx_L0 is the shared normal/error exit that drops the local ref. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * NOTE(review): Cython-generated wrapper for the auto-injected
 * VgpuTypeIdInfo_v1.__reduce_cython__ method.  It only validates that no
 * positional or keyword arguments were passed, then dispatches to the
 * implementation below, which unconditionally raises TypeError (the class
 * is not picklable because it wraps a raw C pointer).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_16__reduce_cython__, "VgpuTypeIdInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * NOTE(review): implementation of VgpuTypeIdInfo_v1.__reduce_cython__.
 * Unconditionally raises TypeError — objects wrapping a raw `_ptr` cannot
 * be pickled.  `self` is intentionally unused.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  /* Only exit is the error path: attach a traceback frame and return NULL. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * NOTE(review): Cython-generated wrapper for the auto-injected
 * VgpuTypeIdInfo_v1.__setstate_cython__(self, __pyx_state) method.  It
 * unpacks the single required `__pyx_state` argument (positionally or by
 * keyword) and dispatches to the implementation below, which always raises
 * TypeError — the class is not unpicklable.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_18__setstate_cython__, "VgpuTypeIdInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] holds the parsed __pyx_state argument; released on exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only __pyx_kwds_len, not the
     * comparison — harmless since unlikely(x) evaluates to x. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only call path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Unpacking error path: release collected argument refs and return NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * NOTE(review): implementation of VgpuTypeIdInfo_v1.__setstate_cython__.
 * Unconditionally raises TypeError — the wrapped `_ptr` cannot be restored
 * from pickled state.  Both `self` and `__pyx_state` are intentionally
 * unused.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  /* Only exit is the error path: attach a traceback frame and return NULL. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeIdInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8120
 * 
 * 
 * cdef _get_vgpu_type_max_instance_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeMaxInstance_v1_t pod = nvmlVgpuTypeMaxInstance_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_max_instance_v1_dtype_offsets(void) {
  nvmlVgpuTypeMaxInstance_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuTypeMaxInstance_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_type_max_instance_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":8121
 * 
 * cdef _get_vgpu_type_max_instance_v1_dtype_offsets():
 *     cdef nvmlVgpuTypeMaxInstance_v1_t pod = nvmlVgpuTypeMaxInstance_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_type_id', 'max_instance_per_gi'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":8122
 * cdef _get_vgpu_type_max_instance_v1_dtype_offsets():
 *     cdef nvmlVgpuTypeMaxInstance_v1_t pod = nvmlVgpuTypeMaxInstance_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'vgpu_type_id', 'max_instance_per_gi'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":8123
 *     cdef nvmlVgpuTypeMaxInstance_v1_t pod = nvmlVgpuTypeMaxInstance_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_type_id', 'max_instance_per_gi'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 8123, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_type_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_type_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_vgpu_type_id) != (0)) __PYX_ERR(0, 8123, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_instance_per_gi);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_instance_per_gi);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_max_instance_per_gi) != (0)) __PYX_ERR(0, 8123, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 8123, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8124
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_type_id', 'max_instance_per_gi'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 8124, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8124, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 8124, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 8123, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8126
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxInstancePerGI)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":8127
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.maxInstancePerGI)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuTypeId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8127, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":8128
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxInstancePerGI)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuTypeMaxInstance_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxInstancePerGI)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8128, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":8125
 *         'names': ['version', 'vgpu_type_id', 'max_instance_per_gi'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 8125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 8125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 8125, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 8123, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":8130
 *             (<intptr_t>&(pod.maxInstancePerGI)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuTypeMaxInstance_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuTypeMaxInstance_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8130, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 8123, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8122, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8120
 * 
 * 
 * cdef _get_vgpu_type_max_instance_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeMaxInstance_v1_t pod = nvmlVgpuTypeMaxInstance_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_type_max_instance_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8147
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init slot thunk for VgpuTypeMaxInstance_v1.__init__: the Python-level
 * __init__ takes no arguments, so this wrapper rejects any positional or
 * keyword arguments (returning -1 with an exception set) before forwarding
 * to the Cython implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size may fail (returns < 0 with an exception set); propagate per tp_init contract. */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Zero-arg signature: any positional arg is a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeMaxInstance_v1.__init__: calloc's one
 * zero-initialized nvmlVgpuTypeMaxInstance_v1_t, raising MemoryError on
 * allocation failure, then marks the instance as owning that storage
 * (_owner = None, _owned = True, _readonly = False). Returns 0 on success,
 * -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8148
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 */
  /* calloc (not malloc) so the POD starts zero-filled. */
  __pyx_v_self->_ptr = ((nvmlVgpuTypeMaxInstance_v1_t *)calloc(1, (sizeof(nvmlVgpuTypeMaxInstance_v1_t))));

  /* "cuda/bindings/_nvml.pyx":8149
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8150
 *         self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Looks up MemoryError by name (module/builtins), calls it with the
     * message string, and raises the result. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8150, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeMaxInst};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8150, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8150, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8149
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":8151
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None with full refcount bookkeeping (drop previous owner). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":8152
 *             raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":8153
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":8147
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8155
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuTypeMaxInstance_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-path thunk: casts self to the extension-type struct and
 * delegates to the Cython __dealloc__ implementation.
 * NOTE(review): `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)` names
 * identifiers that are not parameters of this wrapper; this compiles only
 * because the macro expansion discards its arguments — confirm against the
 * __Pyx_KwValues_VARARGS utility-code definition if build flags change. */
static void __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  nvmlVgpuTypeMaxInstance_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuTypeMaxInstance_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":8157
 *     def __dealloc__(self):
 *         cdef nvmlVgpuTypeMaxInstance_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8158
 *         cdef nvmlVgpuTypeMaxInstance_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":8159
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":8160
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8157
 *     def __dealloc__(self):
 *         cdef nvmlVgpuTypeMaxInstance_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":8155
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuTypeMaxInstance_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":8162
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot thunk: casts self and delegates to the __repr__ implementation.
 * NOTE(review): the __Pyx_KwValues_VARARGS call references __pyx_args/__pyx_nargs,
 * which are not declared here; it compiles because the macro discards its
 * arguments — verify against the macro definition if compile flags change. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeMaxInstance_v1.__repr__: builds the f-string
 * "<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>" by
 * formatting the module __name__, computing hex(id(self)), and joining the
 * five pieces with __Pyx_PyUnicode_Join using precomputed length and
 * max-char-value hints. Returns a new unicode reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8163
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__); t_1 = str(hex(id(self))). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Literal + formatted pieces, in display order. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuTypeMaxInstance_v1_object_a;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 34 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8162
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8165
 *         return f"<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter thunk for VgpuTypeMaxInstance_v1.ptr: casts self and
 * delegates. NOTE(review): __Pyx_KwValues_VARARGS here references
 * __pyx_args/__pyx_nargs which are not in scope; compiles only because the
 * macro discards its arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the raw backing
 * pointer address as a Python int (via PyLong_FromSsize_t on the intptr_t
 * cast). Returns NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8168
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8165
 *         return f"<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8170
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for the backing pointer (pyx line 8170): the cdef
 * `_get_ptr` method, callable without Python-object boxing. Simply returns
 * self._ptr as an intptr_t; cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":8173
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot thunk for VgpuTypeMaxInstance_v1.__int__: casts self and
 * delegates. NOTE(review): __Pyx_KwValues_VARARGS references undeclared
 * __pyx_args/__pyx_nargs; compiles only because the macro discards its
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeMaxInstance_v1.__int__: int(obj) yields the raw
 * backing pointer address, identical in value to the `ptr` property.
 * Returns a new Python int, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8174
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8174, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8173
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8176
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeMaxInstance_v1 other_
 *         if not isinstance(other, VgpuTypeMaxInstance_v1):
*/

/* Python wrapper */
/* __eq__ slot thunk: casts self and forwards `other` unchanged.
 * NOTE(review): __Pyx_KwValues_VARARGS references undeclared
 * __pyx_args/__pyx_nargs; compiles only because the macro discards its
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeMaxInstance_v1.__eq__: returns False for any
 * `other` that is not a VgpuTypeMaxInstance_v1 instance; otherwise compares
 * the two backing structs byte-for-byte with memcmp over
 * sizeof(nvmlVgpuTypeMaxInstance_v1_t). Returns a new Py_True/Py_False
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8178
 *     def __eq__(self, other):
 *         cdef VgpuTypeMaxInstance_v1 other_
 *         if not isinstance(other, VgpuTypeMaxInstance_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8179
 *         cdef VgpuTypeMaxInstance_v1 other_
 *         if not isinstance(other, VgpuTypeMaxInstance_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeMaxInstance_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8178
 *     def __eq__(self, other):
 *         cdef VgpuTypeMaxInstance_v1 other_
 *         if not isinstance(other, VgpuTypeMaxInstance_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":8180
 *         if not isinstance(other, VgpuTypeMaxInstance_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeMaxInstance_v1_t)) == 0)
 * 
 */
  /* Typed assignment to the cdef local; the TypeTest is the generated cast
   * check for `other_ = other` (the isinstance guard above already passed). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1))))) __PYX_ERR(0, 8180, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8181
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeMaxInstance_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuTypeMaxInstance_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8176
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeMaxInstance_v1 other_
 *         if not isinstance(other, VgpuTypeMaxInstance_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8183
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeMaxInstance_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript slot thunk for __setitem__: casts self and forwards key
 * and value unchanged. NOTE(review): __Pyx_KwValues_VARARGS references
 * undeclared __pyx_args/__pyx_nargs; compiles only because the macro
 * discards its arguments. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeMaxInstance_v1.__setitem__ (generated from
 * _nvml.pyx:8183).  `obj[0] = <numpy.ndarray>` deep-copies the array's
 * element into freshly malloc'd storage owned by this wrapper; any other
 * (key, val) pair is forwarded to setattr(self, key, val).
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8184
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8184, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Look up `_numpy.ndarray` dynamically on each call. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 8184, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8185
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 */
    /* NOTE(review): any previously held self->_ptr is overwritten here without
     * a free; presumably ownership of the old buffer is handled elsewhere
     * (e.g. __dealloc__/_owner) -- TODO confirm against the .pyx source. */
    __pyx_v_self->_ptr = ((nvmlVgpuTypeMaxInstance_v1_t *)malloc((sizeof(nvmlVgpuTypeMaxInstance_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8186
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8187
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
       * via the vectorcall fast path; __pyx_t_6 toggles between bound-method
       * and plain-function calling conventions. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8187, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeMaxInst};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8187, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 8187, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8186
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":8188
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read val.ctypes.data (the ndarray's base address as a Python int),
     * convert it to intptr_t and memcpy one struct's worth of bytes from it.
     * Assumes the caller passes a correctly sized/typed array -- no bounds
     * check is possible here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8188, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8188, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8188, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuTypeMaxInstance_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8189
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-contained, so no external owner keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8190
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    /* This wrapper now owns (and must eventually free) the malloc'd copy. */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8191
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8191, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8191, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 8191, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":8184
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":8193
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback path: treat the key as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 8193, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":8183
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuTypeMaxInstance_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8195
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* Property getter slot wrapper for VgpuTypeMaxInstance_v1.version:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* version getter implementation: boxes self._ptr[0].version (unsigned int)
 * into a new Python int. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8198
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): _ptr is dereferenced without a NULL check; presumably the
   * constructor guarantees it is always set -- TODO confirm in the .pyx. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8195
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8200
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for VgpuTypeMaxInstance_v1.version:
 * casts self and delegates. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* version setter implementation: raises ValueError when the wrapper is
 * read-only, otherwise converts val to unsigned int and stores it into
 * self._ptr[0].version. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8202
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8203
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct ValueError(msg) via a vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeMaxInstance_v1_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8203, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8203, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8202
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8204
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Raises OverflowError/TypeError on out-of-range or non-integer values. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8204, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8200
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8206
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self):
 *         """int: IN: Handle to vGPU type."""
*/

/* Python wrapper */
/* Property getter slot wrapper for VgpuTypeMaxInstance_v1.vgpu_type_id:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* vgpu_type_id getter implementation: widens self._ptr[0].vgpuTypeId
 * (nvmlVgpuTypeId_t) to unsigned int and boxes it as a Python int.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8209
 *     def vgpu_type_id(self):
 *         """int: IN: Handle to vGPU type."""
 *         return <unsigned int>(self._ptr[0].vgpuTypeId)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_type_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int(((unsigned int)(__pyx_v_self->_ptr[0]).vgpuTypeId)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8206
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self):
 *         """int: IN: Handle to vGPU type."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.vgpu_type_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8211
 *         return <unsigned int>(self._ptr[0].vgpuTypeId)
 * 
 *     @vgpu_type_id.setter             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for VgpuTypeMaxInstance_v1.vgpu_type_id:
 * casts self and delegates. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* vgpu_type_id setter implementation: raises ValueError when the wrapper is
 * read-only, otherwise converts val to unsigned int and stores it (cast to
 * nvmlVgpuTypeId_t) into self._ptr[0].vgpuTypeId.
 * Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8213
 *     @vgpu_type_id.setter
 *     def vgpu_type_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8214
 *     def vgpu_type_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 * 
 */
    /* Construct ValueError(msg) via a vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeMaxInstance_v1_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8214, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8214, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8213
 *     @vgpu_type_id.setter
 *     def vgpu_type_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8215
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Raises OverflowError/TypeError on out-of-range or non-integer values. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8215, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuTypeId = ((nvmlVgpuTypeId_t)((unsigned int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":8211
 *         return <unsigned int>(self._ptr[0].vgpuTypeId)
 * 
 *     @vgpu_type_id.setter             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.vgpu_type_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8217
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_instance_per_gi(self):
 *         """int: OUT: Maximum number of vGPU instances per GPU instance."""
*/

/* Python wrapper */
/* Property getter slot wrapper for VgpuTypeMaxInstance_v1.max_instance_per_gi:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* max_instance_per_gi getter implementation: boxes
 * self._ptr[0].maxInstancePerGI (unsigned int) into a new Python int.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8220
 *     def max_instance_per_gi(self):
 *         """int: OUT: Maximum number of vGPU instances per GPU instance."""
 *         return self._ptr[0].maxInstancePerGI             # <<<<<<<<<<<<<<
 * 
 *     @max_instance_per_gi.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).maxInstancePerGI); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8217
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_instance_per_gi(self):
 *         """int: OUT: Maximum number of vGPU instances per GPU instance."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.max_instance_per_gi.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8222
 *         return self._ptr[0].maxInstancePerGI
 * 
 *     @max_instance_per_gi.setter             # <<<<<<<<<<<<<<
 *     def max_instance_per_gi(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for VgpuTypeMaxInstance_v1.max_instance_per_gi:
 * casts self and delegates. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* max_instance_per_gi setter implementation: raises ValueError when the
 * wrapper is read-only, otherwise converts val to unsigned int and stores it
 * into self._ptr[0].maxInstancePerGI.
 * Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8224
 *     @max_instance_per_gi.setter
 *     def max_instance_per_gi(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].maxInstancePerGI = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8225
 *     def max_instance_per_gi(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxInstancePerGI = val
 * 
 */
    /* Construct ValueError(msg) via a vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuTypeMaxInstance_v1_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8225, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8225, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8224
 *     @max_instance_per_gi.setter
 *     def max_instance_per_gi(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].maxInstancePerGI = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8226
 *         if self._readonly:
 *             raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only")
 *         self._ptr[0].maxInstancePerGI = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Raises OverflowError/TypeError on out-of-range or non-integer values. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8226, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxInstancePerGI = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8222
 *         return self._ptr[0].maxInstancePerGI
 * 
 *     @max_instance_per_gi.setter             # <<<<<<<<<<<<<<
 *     def max_instance_per_gi(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.max_instance_per_gi.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8228
 *         self._ptr[0].maxInstancePerGI = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS argument-parsing wrapper for the static method
 * VgpuTypeMaxInstance_v1.from_data(data). Unpacks exactly one positional or
 * keyword argument named "data" and delegates to the __pyx_pf_* impl.
 * Returns a new reference, or NULL with an exception set.
 *
 * Fix: the keyword-count error check read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * Since unlikely(x) expands to __builtin_expect(!!(x), 0) and yields 0 or 1,
 * that comparison was always false and the negative-length error path was
 * dead code. The parenthesization is corrected so a failing
 * __Pyx_NumKwargs_FASTCALL actually reports the error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12from_data, "VgpuTypeMaxInstance_v1.from_data(data)\n\nCreate an VgpuTypeMaxInstance_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_type_max_instance_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)` -- always false because
     * unlikely() normalizes its operand to 0/1, so the error was swallowed. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8228, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: accept at most one positional, then let
       * __Pyx_ParseKeywords fill the rest and reject unknown names. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8228, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8228, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8228, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8228, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8228, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Release any argument references collected before the failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python static method VgpuTypeMaxInstance_v1.from_data.
 * Looks up the module-level global `vgpu_type_max_instance_v1_dtype` and
 * delegates to the module helper __from_data(), which wraps the given NumPy
 * array in a new VgpuTypeMaxInstance_v1 instance.
 * Returns a new reference on success, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;            /* result slot; NULL until the return value is built */
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;          /* owned ref to the dtype module global */
  PyObject *__pyx_t_2 = NULL;          /* owned ref to the __from_data() result */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8235
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_type_max_instance_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_type_max_instance_v1_dtype", vgpu_type_max_instance_v1_dtype, VgpuTypeMaxInstance_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from the module globals; errors jump to cleanup. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_type_max_instance_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* __from_data(data, name-string, dtype, wrapping extension type). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_type_max_instance_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Transfer ownership of the result into __pyx_r. */
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8228
 *         self._ptr[0].maxInstancePerGI = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;                     /* error path: drop temporaries, record traceback */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8237
 *         return __from_data(data, "vgpu_type_max_instance_v1_dtype", vgpu_type_max_instance_v1_dtype, VgpuTypeMaxInstance_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Forward declaration of the Python-visible fastcall wrapper for
 * VgpuTypeMaxInstance_v1.from_ptr; the definition follows below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring exposed as VgpuTypeMaxInstance_v1.from_ptr.__doc__. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_14from_ptr, "VgpuTypeMaxInstance_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuTypeMaxInstance_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
/* Method-table entry binding the name "from_ptr" to the wrapper above. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_14from_ptr};
/* Argument-unpacking wrapper for VgpuTypeMaxInstance_v1.from_ptr.
 * Parses (ptr, readonly=False, owner=None) from the fastcall positional
 * arguments and keyword dict, converts them to C values (intptr_t, int,
 * PyObject*) and forwards them to the _14from_ptr implementation.
 * Returns a new reference on success, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};       /* owned refs for ptr, readonly, owner */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the closing parenthesis previously sat after
     * `unlikely(__pyx_kwds_len)`, producing `__builtin_expect(!!(len), 0) < 0`,
     * which is always false (the expect result is 0 or 1) and silently dropped
     * the negative-length error path.  The comparison now happens inside
     * unlikely(), matching the correct form used elsewhere in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8237, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8237, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8237, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8237, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8237, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":8238
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default, then verify required args are bound. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8237, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path (1..3 arguments). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8237, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8237, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8237, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts via PyLong_AsSsize_t, assuming
     * sizeof(intptr_t) == sizeof(Py_ssize_t) — true on CPython's supported
     * platforms, but worth confirming if ported. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8238, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8238, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);   /* readonly defaults to False */
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8237, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any collected argument refs and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":8237
 *         return __from_data(data, "vgpu_type_max_instance_v1_dtype", vgpu_type_max_instance_v1_dtype, VgpuTypeMaxInstance_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuTypeMaxInstance_v1.from_ptr.
 * Wraps a raw nvmlVgpuTypeMaxInstance_v1_t pointer in a new extension-type
 * instance.  Ownership semantics visible in the code below:
 *   - owner is None  -> the struct is COPIED into a freshly malloc'd buffer
 *                       and the new object owns (and will free) it (_owned=1);
 *   - owner is given -> the object aliases the caller's memory and keeps a
 *                       reference to `owner` to keep it alive (_owned=0).
 * Raises ValueError for a null pointer and MemoryError on allocation failure.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8246
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)
*/
  /* Reject null pointers up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8247
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8247, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8247, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8246
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":8248
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
*/
  /* Allocate the instance directly via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8248, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8249
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8250
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
*/
    /* No owner: copy the struct so its lifetime is independent of the caller. */
    __pyx_v_obj->_ptr = ((nvmlVgpuTypeMaxInstance_v1_t *)malloc((sizeof(nvmlVgpuTypeMaxInstance_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8251
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8252
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             obj._owner = None
*/
      /* malloc failed: raise MemoryError (looked up from module globals so
       * a module-level override of the name would be honored). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8252, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuTypeMaxInst};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8252, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 8252, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8251
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":8253
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuTypeMaxInstance_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8254
 *                 raise MemoryError("Error allocating VgpuTypeMaxInstance_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8255
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8249
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":8257
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's buffer; `owner` keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlVgpuTypeMaxInstance_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8258
 *         else:
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":8259
 *             obj._ptr = <nvmlVgpuTypeMaxInstance_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":8260
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":8261
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8237
 *         return __from_data(data, "vgpu_type_max_instance_v1_dtype", vgpu_type_max_instance_v1_dtype, VgpuTypeMaxInstance_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Drop the local ref to obj; on success __pyx_r already holds its own. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Forward declaration of the wrapper for __reduce_cython__ (pickling stub). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for __reduce_cython__. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_16__reduce_cython__, "VgpuTypeMaxInstance_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_16__reduce_cython__};
/* Argument-checking wrapper for __reduce_cython__: rejects any positional or
 * keyword arguments, then forwards to the implementation (which always raises
 * TypeError — instances wrapping raw pointers are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Method takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: unconditionally raises TypeError,
 * because the wrapped raw pointer (self._ptr) cannot be pickled.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Forward declaration of the wrapper for __setstate_cython__ (pickling stub). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for __setstate_cython__. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_18__setstate_cython__, "VgpuTypeMaxInstance_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_18__setstate_cython__};
/* Argument-unpacking wrapper for __setstate_cython__(self, __pyx_state).
 * Accepts exactly one argument (positionally or by keyword) and forwards it
 * to the implementation, which always raises TypeError (not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};           /* owned ref for the __pyx_state argument */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the closing parenthesis previously sat after
     * `unlikely(__pyx_kwds_len)`, producing `__builtin_expect(!!(len), 0) < 0`,
     * which is always false and silently dropped the negative-length error
     * path.  The comparison now happens inside unlikely(), matching the
     * correct form used elsewhere in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any collected argument refs and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: unconditionally raises TypeError,
 * because the wrapped raw pointer (self._ptr) cannot be restored from a
 * pickle state.  Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuTypeMaxInstance_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8264
 * 
 * 
 * cdef _get_active_vgpu_instance_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t pod = nvmlActiveVgpuInstanceInfo_v1_t()
 *     return _numpy.dtype({
*/

/* C-level helper (cdef, not Python-callable): builds and returns a numpy
 * structured dtype describing the in-memory layout of
 * nvmlActiveVgpuInstanceInfo_v1_t.  The dtype dict carries 'names',
 * 'formats', 'offsets' (byte offsets computed via pointer arithmetic on a
 * stack-local instance, i.e. a hand-rolled offsetof) and 'itemsize'.
 * Returns a new reference on success, 0 with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_active_vgpu_instance_info_v1_dtype_offsets(void) {
  nvmlActiveVgpuInstanceInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlActiveVgpuInstanceInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_active_vgpu_instance_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":8265
 * 
 * cdef _get_active_vgpu_instance_info_v1_dtype_offsets():
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t pod = nvmlActiveVgpuInstanceInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_count', 'vgpu_instances'],
 */
  /* NOTE(review): __pyx_t_1 is never explicitly written before this copy;
   * this presumably relies on C++-mode default construction of the
   * temporary for `nvmlActiveVgpuInstanceInfo_v1_t()`.  Only the ADDRESSES
   * of pod's members are used below (to compute field offsets), never
   * their values — confirm against the Cython code generator if touched. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":8266
 * cdef _get_active_vgpu_instance_info_v1_dtype_offsets():
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t pod = nvmlActiveVgpuInstanceInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'vgpu_count', 'vgpu_instances'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 */
  /* Look up numpy.dtype (t_5 holds the callable for the final call). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":8267
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t pod = nvmlActiveVgpuInstanceInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_count', 'vgpu_instances'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 */
  /* t_4 = the 4-entry dtype spec dict; first fill in the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 8267, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_vgpu_count) != (0)) __PYX_ERR(0, 8267, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_instances);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_instances);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_vgpu_instances) != (0)) __PYX_ERR(0, 8267, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 8267, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8268
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_count', 'vgpu_instances'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': numpy scalar types for each field (uint32, uint32, intp). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 8268, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8268, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 8268, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 8267, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8270
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstances)) - (<intptr_t>&pod),
 */
  /* Byte offset of each field: (&pod.field - &pod), an offsetof idiom. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8270, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":8271
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuInstances)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8271, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":8272
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstances)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlActiveVgpuInstanceInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuInstances)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8272, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":8269
 *         'names': ['version', 'vgpu_count', 'vgpu_instances'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuCount)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8269, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 8269, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 8269, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 8269, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 8267, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":8274
 *             (<intptr_t>&(pod.vgpuInstances)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlActiveVgpuInstanceInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': total struct size, including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlActiveVgpuInstanceInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 8267, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(spec) via the vectorcall fast path; if dtype turned
   * out to be a bound method, unpack it so self is passed positionally. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8266, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8264
 * 
 * 
 * cdef _get_active_vgpu_instance_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t pod = nvmlActiveVgpuInstanceInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_active_vgpu_instance_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8291
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>calloc(1, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for ActiveVgpuInstanceInfo_v1.__init__: rejects any
 * positional or keyword arguments, then dispatches to the implementation.
 * Returns 0 on success, -1 on error (standard tp_init contract). */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unchecked PyTuple_GET_SIZE variant is only
   * used when the build assumes tuple sizes are safe to read directly. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: raise for any positional or keyword. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __init__ implementation: heap-allocates a zero-filled
 * nvmlActiveVgpuInstanceInfo_v1_t, marks this wrapper as the owner of that
 * allocation (so __dealloc__ will free it), and initializes the bookkeeping
 * fields (_owner=None, _owned=True, _readonly=False).
 * Raises MemoryError if the allocation fails.  Returns 0 / -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8292
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>calloc(1, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 */
  /* calloc (not malloc) so the struct starts zero-initialized. */
  __pyx_v_self->_ptr = ((nvmlActiveVgpuInstanceInfo_v1_t *)calloc(1, (sizeof(nvmlActiveVgpuInstanceInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":8293
 *     def __init__(self):
 *         self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>calloc(1, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8294
 *         self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>calloc(1, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(...) via the vectorcall fast path;
     * MemoryError is looked up as a module global (it may be shadowed). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8294, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ActiveVgpuInsta};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8294, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8294, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8293
 *     def __init__(self):
 *         self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>calloc(1, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":8295
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":8296
 *             raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":8297
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":8291
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>calloc(1, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8299
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-side wrapper for ActiveVgpuInstanceInfo_v1.__dealloc__;
 * forwards directly to the implementation. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * function; this only compiles because __Pyx_KwValues_VARARGS is
   * presumably a macro that does not expand its arguments — confirm
   * against the Cython 3.2 runtime header if this ever fails to build. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  nvmlActiveVgpuInstanceInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlActiveVgpuInstanceInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":8301
 *     def __dealloc__(self):
 *         cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8302
 *         cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":8303
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":8304
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8301
 *     def __dealloc__(self):
 *         cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":8299
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":8306
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ActiveVgpuInstanceInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for ActiveVgpuInstanceInfo_v1.__repr__; forwards to the
 * implementation and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — see Cython runtime. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __repr__ implementation: assembles the f-string
 * f"<{__name__}.ActiveVgpuInstanceInfo_v1 object at {hex(id(self))}>"
 * by formatting __name__ and hex(id(self)) and joining five unicode
 * fragments with a single optimized join (lengths/maxchar precomputed).
 * Returns a new str reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8307
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ActiveVgpuInstanceInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) via format-simple (no format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", name, ".ActiveVgpuInstanceInfo_v1 object at ", hex, ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ActiveVgpuInstanceInfo_v1_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8306
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ActiveVgpuInstanceInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8309
 *         return f"<{__name__}.ActiveVgpuInstanceInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the `ptr` property; forwards to the implementation
 * and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — see Cython runtime. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property implementation: returns the raw self._ptr address as a
 * Python int (intptr_t -> PyLong).  New reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8312
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8312, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8309
 *         return f"<{__name__}.ActiveVgpuInstanceInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8314
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast-path accessor ("cuda/bindings/_nvml.pyx":8314-8315):
 * returns the raw struct pointer as an intptr_t, with no Python-object
 * creation and no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":8317
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for ActiveVgpuInstanceInfo_v1.__int__; forwards to the
 * implementation and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — see Cython runtime. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __int__ implementation: same contract as the `ptr` property getter —
 * returns the raw self._ptr address as a Python int.  New reference, or
 * NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8318
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8318, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8317
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8320
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ActiveVgpuInstanceInfo_v1 other_
 *         if not isinstance(other, ActiveVgpuInstanceInfo_v1):
*/

/* Python wrapper */
/* Rich-comparison (==) wrapper for ActiveVgpuInstanceInfo_v1.__eq__;
 * forwards self and other to the implementation and returns its new
 * reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — see Cython runtime. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ActiveVgpuInstanceInfo_v1.__eq__:
 *   - returns Py_False if `other` is not an ActiveVgpuInstanceInfo_v1;
 *   - otherwise compares the two underlying nvmlActiveVgpuInstanceInfo_v1_t
 *     structs byte-for-byte with memcmp and returns a Python bool.
 * Error paths unwind via the goto __pyx_L1_error chain (returns NULL). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8322
 *     def __eq__(self, other):
 *         cdef ActiveVgpuInstanceInfo_v1 other_
 *         if not isinstance(other, ActiveVgpuInstanceInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Type check: anything that is not (a subclass of) this extension type
   * compares unequal without raising. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8323
 *         cdef ActiveVgpuInstanceInfo_v1 other_
 *         if not isinstance(other, ActiveVgpuInstanceInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlActiveVgpuInstanceInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8322
 *     def __eq__(self, other):
 *         cdef ActiveVgpuInstanceInfo_v1 other_
 *         if not isinstance(other, ActiveVgpuInstanceInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":8324
 *         if not isinstance(other, ActiveVgpuInstanceInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlActiveVgpuInstanceInfo_v1_t)) == 0)
 * 
 */
  /* Re-check/cast to the typed struct; the isinstance test above means this
   * cannot fail here (None was already rejected), but the generated guard stays. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1))))) __PYX_ERR(0, 8324, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8325
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlActiveVgpuInstanceInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Byte-wise struct comparison; equal bytes => Python True. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlActiveVgpuInstanceInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8320
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ActiveVgpuInstanceInfo_v1 other_
 *         if not isinstance(other, ActiveVgpuInstanceInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8327
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlActiveVgpuInstanceInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
*/

/* Python wrapper */
/* Python-level __setitem__ slot wrapper for ActiveVgpuInstanceInfo_v1.
 * Downcasts self and forwards to the typed implementation; returns 0 on
 * success, -1 on error per the sq_ass_item/mp_ass_subscript protocol. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ActiveVgpuInstanceInfo_v1.__setitem__:
 *   obj[0] = ndarray  -> mallocs a fresh nvmlActiveVgpuInstanceInfo_v1_t,
 *                        deep-copies the array's buffer into it (the struct
 *                        takes ownership: _owned = True, _owner = None), and
 *                        marks the wrapper read-only iff the array is not
 *                        writeable;
 *   obj[key] = val    -> any other key falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the malloc path overwrites self._ptr without freeing a
 * previously owned allocation — mirrors the .pyx source; if that is a leak,
 * the fix belongs in cuda/bindings/_nvml.pyx, not this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8328
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: first `key == 0`, then isinstance(val, numpy.ndarray)
   * (numpy is looked up lazily from module globals). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8328, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 8328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8329
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 */
    /* Allocate a fresh struct; ownership is recorded below via _owned. */
    __pyx_v_self->_ptr = ((nvmlActiveVgpuInstanceInfo_v1_t *)malloc((sizeof(nvmlActiveVgpuInstanceInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8330
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8331
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating ...") via vectorcall. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8331, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ActiveVgpuInsta};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8331, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 8331, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8330
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":8332
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array buffer address as a Python int; copy one
     * struct's worth of bytes from it. Assumes the caller passed an array of
     * the matching dtype/size — TODO confirm that validation happens upstream. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8332, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlActiveVgpuInstanceInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8333
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is independent of the source array: no owner object to keep alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8334
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8335
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's writeability to the wrapper's readonly flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8335, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8335, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 8335, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":8328
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":8337
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat any other key as an attribute name. */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 8337, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":8327
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlActiveVgpuInstanceInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8339
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* Property getter slot wrapper for ActiveVgpuInstanceInfo_v1.version:
 * downcasts self and forwards to the typed getter implementation. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* version getter: returns self._ptr[0].version (an unsigned int struct
 * field) boxed as a Python int, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8342
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8339
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8344
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for ActiveVgpuInstanceInfo_v1.version:
 * downcasts self and forwards to the typed setter (0 on success, -1 on error). */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* version setter: raises ValueError if the wrapper is read-only, otherwise
 * converts `val` to unsigned int (OverflowError/TypeError propagate from the
 * conversion helper) and stores it into self._ptr[0].version. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8346
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8347
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ActiveVgpuInstanceInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8347, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8347, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8346
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8348
 *         if self._readonly:
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8348, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8344
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8350
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instances(self):
 *         """int: IN/OUT: list of active vGPU instances."""
*/

/* Python wrapper */
/* Property getter slot wrapper for ActiveVgpuInstanceInfo_v1.vgpu_instances:
 * downcasts self and forwards to the typed getter implementation. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* vgpu_instances getter: exposes the nvmlVgpuInstance_t* pointer field as a
 * Python int (its address reinterpreted via intptr_t) — the setter below
 * performs the inverse cast. Returns NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8353
 *     def vgpu_instances(self):
 *         """int: IN/OUT: list of active vGPU instances."""
 *         return <intptr_t>(self._ptr[0].vgpuInstances)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_instances.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).vgpuInstances)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8350
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instances(self):
 *         """int: IN/OUT: list of active vGPU instances."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.vgpu_instances.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8355
 *         return <intptr_t>(self._ptr[0].vgpuInstances)
 * 
 *     @vgpu_instances.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instances(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter slot wrapper for ActiveVgpuInstanceInfo_v1.vgpu_instances:
 * downcasts self and forwards to the typed setter (0 on success, -1 on error). */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* vgpu_instances setter: raises ValueError if the wrapper is read-only,
 * otherwise interprets `val` as an integer address and stores it into the
 * struct's nvmlVgpuInstance_t* field. The caller is responsible for passing
 * a valid pointer value; no validation is done here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8357
 *     @vgpu_instances.setter
 *     def vgpu_instances(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8358
 *     def vgpu_instances(self, val):
 *         if self._readonly:
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val
 * 
 */
    /* Construct ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ActiveVgpuInstanceInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8358, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8358, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8357
 *     @vgpu_instances.setter
 *     def vgpu_instances(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8359
 *         if self._readonly:
 *             raise ValueError("This ActiveVgpuInstanceInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Integer -> pointer round-trip via intptr_t (mirrors the getter). */
  __pyx_t_4 = PyLong_AsSsize_t(__pyx_v_val); if (unlikely((__pyx_t_4 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8359, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuInstances = ((nvmlVgpuInstance_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":8355
 *         return <intptr_t>(self._ptr[0].vgpuInstances)
 * 
 *     @vgpu_instances.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instances(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.vgpu_instances.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8361
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for the staticmethod ActiveVgpuInstanceInfo_v1.from_data(data):
 * parses exactly one positional-or-keyword argument ("data") and forwards to
 * the implementation. Returns NULL with an exception set on bad arguments.
 *
 * Fix: the keyword-count error check was written as
 *   if (unlikely(__pyx_kwds_len) < 0) ...
 * Under GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0), which
 * normalizes its argument to 0 or 1, so `unlikely(len) < 0` can never be true
 * and the error path was dead. The comparison now sits inside the macro,
 * matching the correctly written `if (unlikely(__pyx_nargs < 0))` check above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_12from_data, "ActiveVgpuInstanceInfo_v1.from_data(data)\n\nCreate an ActiveVgpuInstanceInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `active_vgpu_instance_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Bug fix: comparison moved inside unlikely(); `unlikely(x) < 0` is
     * always false because __builtin_expect(!!(x), 0) yields only 0 or 1. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8361, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8361, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8361, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8361, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8361, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8361, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected values and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated C body for the Python static method
 * ``ActiveVgpuInstanceInfo_v1.from_data(data)`` (_nvml.pyx:8361-8368).
 * Looks up the module global ``active_vgpu_instance_info_v1_dtype`` and
 * forwards ``data``, the dtype-name string, the dtype object and the
 * extension type object to the module-level helper ``__from_data``.
 * Returns the helper's result (a new reference) or NULL with a Python
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8368
 *             data (_numpy.ndarray): a single-element array of dtype `active_vgpu_instance_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "active_vgpu_instance_info_v1_dtype", active_vgpu_instance_info_v1_dtype, ActiveVgpuInstanceInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype from module globals; errors propagate via __pyx_L1_error. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_active_vgpu_instance_info_v1_dty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_active_vgpu_instance_info_v1_dty, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8361
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8370
 *         return __from_data(data, "active_vgpu_instance_info_v1_dtype", active_vgpu_instance_info_v1_dtype, ActiveVgpuInstanceInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given pointer.
 */

/* Forward declaration, docstring, and PyMethodDef entry for the
 * ``from_ptr`` static-method wrapper defined below.  The method table entry
 * is presumably attached to the ActiveVgpuInstanceInfo_v1 type during module
 * init elsewhere in this generated file (not visible in this chunk). */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14from_ptr, "ActiveVgpuInstanceInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ActiveVgpuInstanceInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14from_ptr};
/* Argument-unpacking wrapper for ActiveVgpuInstanceInfo_v1.from_ptr(ptr, readonly=False, owner=None).
 * Handles both the METH_FASTCALL and tuple calling conventions, fills ``values[]``
 * from positionals and keywords, converts ``ptr`` to intptr_t and ``readonly`` to a
 * C int (defaulting to 0), then forwards to the implementation function.  Returns
 * NULL with an exception set on bad arguments.
 *
 * Fix: the keyword-count error check previously read
 * ``if (unlikely(__pyx_kwds_len) < 0)``.  Since ``unlikely(x)`` expands to
 * ``__builtin_expect(!!(x), 0)``, the value is normalized to 0/1 before the
 * comparison, so the check could never fire.  The parenthesis is moved inside so a
 * negative length from __Pyx_NumKwargs_FASTCALL is actually detected, matching the
 * correctly written check used by the __reduce_cython__ wrapper in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison must be inside unlikely() so the negative-length check is effective. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8370, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8370, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8370, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8370, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8370, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":8371
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8370, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8370, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8370, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8370, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8371, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8371, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8370, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":8370
 *         return __from_data(data, "active_vgpu_instance_info_v1_dtype", active_vgpu_instance_info_v1_dtype, ActiveVgpuInstanceInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ActiveVgpuInstanceInfo_v1.from_ptr (_nvml.pyx:8370-8394).
 * Raises ValueError if ``ptr`` is 0.  With ``owner is None``, malloc()s a fresh
 * nvmlActiveVgpuInstanceInfo_v1_t, memcpy()s the struct at ``ptr`` into it, and
 * marks the object as owning the buffer (``_owned = 1``; presumably released by
 * the type's dealloc — not visible in this chunk).  Otherwise the object aliases
 * ``ptr`` directly and holds a reference to ``owner`` to keep the memory alive
 * (``_owned = 0``).  ``_readonly`` is set from the caller's flag either way.
 * Raises MemoryError if malloc fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8379
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8380
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8380, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8380, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8379
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":8381
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 */
  /* tp_new call (bypasses __init__): allocate an uninitialized wrapper object. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8381, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8382
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8383
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 */
    /* Copy path: no owner supplied, so the data at ptr is duplicated into owned storage. */
    __pyx_v_obj->_ptr = ((nvmlActiveVgpuInstanceInfo_v1_t *)malloc((sizeof(nvmlActiveVgpuInstanceInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8384
 *         if owner is None:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8385
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8385, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ActiveVgpuInsta};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8385, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 8385, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8384
 *         if owner is None:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":8386
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlActiveVgpuInstanceInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8387
 *                 raise MemoryError("Error allocating ActiveVgpuInstanceInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8388
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8382
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ActiveVgpuInstanceInfo_v1 obj = ActiveVgpuInstanceInfo_v1.__new__(ActiveVgpuInstanceInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>malloc(sizeof(nvmlActiveVgpuInstanceInfo_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":8390
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Alias path: wrap ptr in place and keep `owner` alive via a strong reference. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlActiveVgpuInstanceInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8391
 *         else:
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":8392
 *             obj._ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":8393
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":8394
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8370
 *         return __from_data(data, "active_vgpu_instance_info_v1_dtype", active_vgpu_instance_info_v1_dtype, ActiveVgpuInstanceInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Forward declaration, docstring, and PyMethodDef entry for the generated
 * ``__reduce_cython__`` wrapper below, which unconditionally raises TypeError
 * to block pickling of the pointer-holding ActiveVgpuInstanceInfo_v1 object. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_16__reduce_cython__, "ActiveVgpuInstanceInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_16__reduce_cython__};
/* Wrapper for ActiveVgpuInstanceInfo_v1.__reduce_cython__(self): rejects any
 * positional or keyword arguments, then forwards to the implementation (which
 * always raises TypeError).  Note the negative-length check here is correctly
 * written as ``unlikely(__pyx_kwds_len < 0)``. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: unconditionally raises TypeError,
 * making ActiveVgpuInstanceInfo_v1 unpicklable (its raw ``_ptr`` cannot be
 * serialized).  Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

/* Forward declaration, docstring, and PyMethodDef entry for the generated
 * ``__setstate_cython__`` wrapper below, the unpickling counterpart of
 * ``__reduce_cython__``; it likewise always raises TypeError. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_18__setstate_cython__, "ActiveVgpuInstanceInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_18__setstate_cython__};
/* Argument-unpacking wrapper for ActiveVgpuInstanceInfo_v1.__setstate_cython__(self, __pyx_state).
 * Accepts exactly one argument (positional or the keyword ``__pyx_state``) and
 * forwards to the implementation function, which always raises TypeError.
 *
 * Fix: the keyword-count error check previously read
 * ``if (unlikely(__pyx_kwds_len) < 0)``.  Since ``unlikely(x)`` expands to
 * ``__builtin_expect(!!(x), 0)``, the value is normalized to 0/1 before the
 * comparison, so the check could never fire.  The parenthesis is moved inside so
 * a negative length from __Pyx_NumKwargs_FASTCALL is actually detected, matching
 * the correctly written check used by the __reduce_cython__ wrapper above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison must be inside unlikely() so the negative-length check is effective. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: unconditionally raises TypeError,
 * the unpickling counterpart of __reduce_cython__ (the ``__pyx_state``
 * argument is ignored).  Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8397
 * 
 * 
 * cdef _get_vgpu_creatable_placement_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t pod = nvmlVgpuCreatablePlacementInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Build and return the numpy dtype describing nvmlVgpuCreatablePlacementInfo_v1_t:
 * field names, formats, byte offsets (computed from a stack instance `pod`),
 * and total itemsize.  Returns a new reference to the dtype object, or NULL
 * with a Python exception set on failure.
 *
 * FIX(review): the generated code read the temporary `__pyx_t_1` without ever
 * initializing it (the assignment from `nvmlVgpuCreatablePlacementInfo_v1_t()`
 * was missing), so copying it into `pod` read an uninitialized struct — UB.
 * Only the *addresses* of pod's fields are used below, never their values,
 * so zero-filling the temporary is behavior-safe and removes the UB. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_creatable_placement_info_v1_dtype_offsets(void) {
  nvmlVgpuCreatablePlacementInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuCreatablePlacementInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_creatable_placement_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":8398
 * 
 * cdef _get_vgpu_creatable_placement_info_v1_dtype_offsets():
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t pod = nvmlVgpuCreatablePlacementInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_type_id', 'count', 'placement_ids', 'placement_size'],
 */
  /* Zero the temporary before copying it into pod (see FIX note above). */
  memset(&__pyx_t_1, 0, sizeof(__pyx_t_1));
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":8399
 * cdef _get_vgpu_creatable_placement_info_v1_dtype_offsets():
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t pod = nvmlVgpuCreatablePlacementInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'vgpu_type_id', 'count', 'placement_ids', 'placement_size'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 */
  /* Look up _numpy.dtype (module attribute fetched at call time). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":8400
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t pod = nvmlVgpuCreatablePlacementInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_type_id', 'count', 'placement_ids', 'placement_size'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 *         'offsets': [
 */
  /* dtype spec dict: 'names' list (5 field names, module-interned strings). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 8400, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_type_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_type_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_vgpu_type_id) != (0)) __PYX_ERR(0, 8400, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_count) != (0)) __PYX_ERR(0, 8400, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement_ids);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement_ids);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_placement_ids) != (0)) __PYX_ERR(0, 8400, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_placement_size) != (0)) __PYX_ERR(0, 8400, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 8400, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8401
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_type_id', 'count', 'placement_ids', 'placement_size'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' list: uint32 x3, intp (pointer-sized, for placement_ids), uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 8401, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8401, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 8401, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 8401, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 8401, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 8400, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* 'offsets' list: each entry is the byte offset of a field, computed as
   * (address of field) - (address of pod).  Only addresses are used, so the
   * contents of pod never matter. */

  /* "cuda/bindings/_nvml.pyx":8403
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":8404
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuTypeId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 8404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":8405
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.count)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":8406
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placementIds)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":8407
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementIds)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuCreatablePlacementInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placementSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":8402
 *         'names': ['version', 'vgpu_type_id', 'count', 'placement_ids', 'placement_size'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuTypeId)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8402, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 8402, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 8402, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 8402, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 8402, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 8402, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 8400, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":8409
 *             (<intptr_t>&(pod.placementSize)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuCreatablePlacementInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': total struct size, so padding is preserved in the dtype. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8409, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 8400, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall fast path; if dtype turned out
   * to be a bound method, unpack it so self is passed as the first argument. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8399, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8397
 * 
 * 
 * cdef _get_vgpu_creatable_placement_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t pod = nvmlVgpuCreatablePlacementInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_creatable_placement_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8427
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>calloc(1, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuCreatablePlacementInfo_v1.__init__.
 * Validates that the call carries no positional and no keyword arguments
 * (the .pyx signature is `def __init__(self)`), then dispatches to the
 * implementation function.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-ASSUME_SAFE_SIZE path must check for -1. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments: reject any positionals... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ...and any keywords. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuCreatablePlacementInfo_v1.__init__:
 * zero-allocates the backing nvmlVgpuCreatablePlacementInfo_v1_t struct
 * (raising MemoryError on allocation failure) and initializes the
 * bookkeeping fields: _owner = None, _owned = True (this instance frees
 * the struct in __dealloc__), _readonly = False, _refs = {}.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8428
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>calloc(1, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 */
  /* calloc zero-fills the struct, so all NVML fields start at 0/NULL. */
  __pyx_v_self->_ptr = ((nvmlVgpuCreatablePlacementInfo_v1_t *)calloc(1, (sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":8429
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>calloc(1, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8430
 *         self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>calloc(1, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(message).  MemoryError is looked up as a
     * module global (builtin fallback), then called via the vectorcall fast
     * path, unpacking a bound method if one is encountered. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8430, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuCreatablePl};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8430, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8430, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8429
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>calloc(1, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":8431
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference (set by tp_new) for None. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":8432
 *             raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":8433
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":8434
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  /* Fresh empty dict replaces whatever _refs previously pointed to. */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8427
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>calloc(1, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8436
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuCreatablePlacementInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuCreatablePlacementInfo_v1.__dealloc__;
 * forwards straight to the implementation function.
 * NOTE(review): `__pyx_args` and `__pyx_nargs` are not parameters of this
 * wrapper; this presumably compiles only because __Pyx_KwValues_VARARGS
 * expands without evaluating its arguments — confirm against the Cython
 * utility-code definition for this build. */
static void __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuCreatablePlacementInfo_v1.__dealloc__.
 * Frees the backing nvmlVgpuCreatablePlacementInfo_v1_t only when this
 * instance owns it (_owned set and _ptr non-NULL).  The pointer is moved
 * into a local and _ptr is cleared *before* free(), so the object never
 * exposes a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":8438-8441 */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlVgpuCreatablePlacementInfo_v1_t *__pyx_detached = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_detached);
  }
}

/* "cuda/bindings/_nvml.pyx":8443
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuCreatablePlacementInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuCreatablePlacementInfo_v1.__repr__;
 * forwards to the implementation and returns its result.
 * NOTE(review): `__pyx_args` and `__pyx_nargs` are not parameters of this
 * wrapper; this presumably compiles only because __Pyx_KwValues_VARARGS
 * expands without evaluating its arguments — confirm against the Cython
 * utility-code definition for this build. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuCreatablePlacementInfo_v1.__repr__:
 * builds the f-string "<{__name__}.VgpuCreatablePlacementInfo_v1 object at
 * {hex(id(self))}>" by joining 5 unicode pieces (2 literal fragments, the
 * module name, a fixed middle segment, and hex(id(self))).
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8444
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuCreatablePlacementInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch module __name__ and format it into a str ({} with no format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) — id() then builtin hex(), coerced to str for the join. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the 5 pieces; the length/max-char hint pre-sizes the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuCreatablePlacementInfo_v1_o;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 41 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8443
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuCreatablePlacementInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8446
 *         return f"<{__name__}.VgpuCreatablePlacementInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the `ptr` property getter; forwards to the
 * implementation and returns its result.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; this
 * presumably compiles only because __Pyx_KwValues_VARARGS expands without
 * evaluating its arguments — confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the raw struct
 * pointer `_ptr` as a Python int (via intptr_t).  Returns a new reference,
 * or NULL with an exception set if the int cannot be created. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8449
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int (PyLong_FromSsize_t is width-compatible). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8446
 *         return f"<{__name__}.VgpuCreatablePlacementInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8451
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (`cdef intptr_t _get_ptr(self)`): expose the raw
 * nvmlVgpuCreatablePlacementInfo_v1_t pointer as an integer address.
 * Pure read — no Python API, no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":8452 — return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":8454
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuCreatablePlacementInfo_v1.__int__;
 * forwards to the implementation and returns its result.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; this
 * presumably compiles only because __Pyx_KwValues_VARARGS expands without
 * evaluating its arguments — confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuCreatablePlacementInfo_v1.__int__
 * (cuda/bindings/_nvml.pyx:8454): boxes the wrapped struct pointer value
 * as a Python int.  Returns a new reference, or NULL with an exception set
 * if the int allocation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8455
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* PyLong_FromSsize_t is the generator's boxing choice for intptr_t;
     presumably Py_ssize_t and intptr_t share a width on supported
     platforms — TODO confirm for exotic targets. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8454
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8457
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuCreatablePlacementInfo_v1 other_
 *         if not isinstance(other, VgpuCreatablePlacementInfo_v1):
*/

/* Python wrapper */
/* Python slot wrapper for VgpuCreatablePlacementInfo_v1.__eq__: downcasts
 * self and forwards (self, other) to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* kwvalues is unused for a binary slot; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuCreatablePlacementInfo_v1.__eq__
 * (cuda/bindings/_nvml.pyx:8457).  Returns Py_False when `other` is not an
 * instance of the same extension type, otherwise compares the two wrapped
 * structs bytewise with memcmp over sizeof(nvmlVgpuCreatablePlacementInfo_v1_t).
 * Returns a new reference to Py_True/Py_False, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8459
 *     def __eq__(self, other):
 *         cdef VgpuCreatablePlacementInfo_v1 other_
 *         if not isinstance(other, VgpuCreatablePlacementInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8460
 *         cdef VgpuCreatablePlacementInfo_v1 other_
 *         if not isinstance(other, VgpuCreatablePlacementInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuCreatablePlacementInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8459
 *     def __eq__(self, other):
 *         cdef VgpuCreatablePlacementInfo_v1 other_
 *         if not isinstance(other, VgpuCreatablePlacementInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":8461
 *         if not isinstance(other, VgpuCreatablePlacementInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuCreatablePlacementInfo_v1_t)) == 0)
 * 
 */
  /* Checked downcast of `other` into the typed local (None would also pass
     __Pyx_TypeTest, but the isinstance guard above already filtered it). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1))))) __PYX_ERR(0, 8461, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8462
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuCreatablePlacementInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Bytewise equality over the whole struct; any compiler padding bytes are
     included in the comparison. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8457
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuCreatablePlacementInfo_v1 other_
 *         if not isinstance(other, VgpuCreatablePlacementInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8464
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuCreatablePlacementInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
*/

/* Python wrapper */
/* Python slot wrapper for VgpuCreatablePlacementInfo_v1.__setitem__:
 * downcasts self and forwards (self, key, val) to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* kwvalues is unused for this slot; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuCreatablePlacementInfo_v1.__setitem__(key, val)
 * (cuda/bindings/_nvml.pyx:8464).
 *
 * Special case `obj[0] = <numpy.ndarray>`: allocates a fresh
 * nvmlVgpuCreatablePlacementInfo_v1_t, memcpy's the array's buffer into it,
 * and marks the instance as owning that allocation (_owner=None, _owned=True,
 * _readonly mirroring the array's writeable flag).  Any other key is routed
 * to setattr(self, key, val).  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8465
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8465, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8465, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8465, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 8465, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8466
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 */
    /* NOTE(review): any pointer previously held in self->_ptr is overwritten
       here without being freed — this mirrors the .pyx source; confirm
       upstream whether repeated `obj[0] = arr` assignments can leak. */
    __pyx_v_self->_ptr = ((nvmlVgpuCreatablePlacementInfo_v1_t *)malloc((sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8467
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8468
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError(msg) via the vectorcall helper; the
         offset dance below keeps slot 0 free for bound-method unpacking. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8468, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuCreatablePl};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8468, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 8468, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8467
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":8469
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data yields the array's base address as a Python int; it is
       unboxed and reinterpreted as a pointer for the struct-sized copy.
       Assumes the caller's array buffer holds at least
       sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) bytes — not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8469, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8469, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8469, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8470
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* Swap self._owner to None with correct refcounting (new ref in, old out). */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8471
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8472
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate numpy's writeable flag: a read-only source array makes this
       wrapper read-only too. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8472, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8472, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 8472, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":8465
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":8474
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 8474, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":8464
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuCreatablePlacementInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8476
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuCreatablePlacementInfo_v1.version:
 * downcasts self and forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused for a getter; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for the `version` property (cuda/bindings/_nvml.pyx:8476):
 * reads the struct's `version` field and boxes it as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8479
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Dereferences self->_ptr unconditionally — assumes _ptr is valid here. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8476
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8481
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for VgpuCreatablePlacementInfo_v1.version:
 * downcasts self and forwards (self, val) to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused for a setter; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for the `version` property (cuda/bindings/_nvml.pyx:8481):
 * raises ValueError if the wrapper is read-only, otherwise unboxes `val`
 * as unsigned int and stores it into the struct's `version` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8483
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8484
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError(msg) via vectorcall; __pyx_t_3 == 1 leaves
       slot 0 free for __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuCreatablePlacementInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8484, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8484, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8483
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8485
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox with overflow checking, then write through _ptr (assumed valid). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8485, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8481
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8487
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self):
 *         """int: IN: Handle to vGPU type."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuCreatablePlacementInfo_v1.vgpu_type_id:
 * downcasts self and forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused for a getter; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for the `vgpu_type_id` property (cuda/bindings/_nvml.pyx:8487):
 * reads the struct's `vgpuTypeId` field (nvmlVgpuTypeId_t, cast to
 * unsigned int in the .pyx) and boxes it as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8490
 *     def vgpu_type_id(self):
 *         """int: IN: Handle to vGPU type."""
 *         return <unsigned int>(self._ptr[0].vgpuTypeId)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_type_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Dereferences self->_ptr unconditionally — assumes _ptr is valid here. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int(((unsigned int)(__pyx_v_self->_ptr[0]).vgpuTypeId)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8490, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8487
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self):
 *         """int: IN: Handle to vGPU type."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.vgpu_type_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8492
 *         return <unsigned int>(self._ptr[0].vgpuTypeId)
 * 
 *     @vgpu_type_id.setter             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for VgpuCreatablePlacementInfo_v1.vgpu_type_id:
 * downcasts self and forwards (self, val) to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused for a setter; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for the `vgpu_type_id` property (cuda/bindings/_nvml.pyx:8492):
 * raises ValueError if the wrapper is read-only, otherwise unboxes `val`
 * as unsigned int and stores it as nvmlVgpuTypeId_t into `vgpuTypeId`.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8494
 *     @vgpu_type_id.setter
 *     def vgpu_type_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8495
 *     def vgpu_type_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 * 
 */
    /* Build and raise ValueError(msg) via vectorcall; __pyx_t_3 == 1 leaves
       slot 0 free for __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuCreatablePlacementInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8495, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8495, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8494
 *     @vgpu_type_id.setter
 *     def vgpu_type_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8496
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox with overflow checking, then write through _ptr (assumed valid). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8496, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuTypeId = ((nvmlVgpuTypeId_t)((unsigned int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":8492
 *         return <unsigned int>(self._ptr[0].vgpuTypeId)
 * 
 *     @vgpu_type_id.setter             # <<<<<<<<<<<<<<
 *     def vgpu_type_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.vgpu_type_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8498
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """int: IN/OUT: Count of the placement IDs."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuCreatablePlacementInfo_v1.count:
 * downcasts self and forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused for a getter; kept for generator uniformity. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `count` property getter (pyx line 8501):
 * returns self._ptr[0].count as a new Python int object.
 * Returns a new reference on success, NULL with an exception set on failure
 * (the only failure path is the unsigned-int -> PyLong conversion). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8501
 *     def count(self):
 *         """int: IN/OUT: Count of the placement IDs."""
 *         return self._ptr[0].count             # <<<<<<<<<<<<<<
 * 
 *     @count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a Python int; jumps to L1_error on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8498
 *         self._ptr[0].vgpuTypeId = <nvmlVgpuTypeId_t><unsigned int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """int: IN/OUT: Count of the placement IDs."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8503
 *         return self._ptr[0].count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         if self._readonly:
*/

/* Python wrapper (setter thunk) for VgpuCreatablePlacementInfo_v1.count.__set__:
 * casts self to the extension-type struct and forwards the new value to the
 * implementation below. Returns 0 on success, -1 with an exception set on
 * failure (standard descriptor __set__ protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `count` property setter (pyx lines 8503-8507):
 *   - raises ValueError if the instance was created read-only (self._readonly);
 *   - otherwise converts `val` to a C unsigned int and stores it in
 *     self._ptr[0].count.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8505
 *     @count.setter
 *     def count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].count = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8506
 *     def count(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].count = val
 * 
 */
    /* Build and raise ValueError("This VgpuCreatablePlacementInfo_v1 instance
     * is read-only") via the vectorcall fast path, then jump to error exit. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuCreatablePlacementInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8506, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8506, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8505
 *     @count.setter
 *     def count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].count = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8507
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         self._ptr[0].count = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python value to unsigned int; -1 with a pending exception
   * signals conversion failure (overflow, wrong type). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8507, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).count = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8503
 *         return self._ptr[0].count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8509
 *         self._ptr[0].count = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
*/

/* Python wrapper (getter thunk) for
 * VgpuCreatablePlacementInfo_v1.placement_ids.__get__: casts self to the
 * extension-type struct and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `placement_ids` property getter (pyx lines 8511-8516):
 *   - returns an empty Python list when self._ptr[0].placementIds is NULL;
 *   - otherwise wraps the existing C buffer (placementSize unsigned ints) in a
 *     cython.view.array created with allocate_buffer=False — the array ALIASES
 *     the C memory rather than copying it — and returns numpy.asarray(arr).
 * NOTE(review): the returned ndarray shares memory with the underlying NVML
 * struct; its validity is tied to the lifetime of that buffer (managed
 * elsewhere in this file — confirm against the owner/_refs machinery).
 * Returns a new reference on success, NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8512
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 *         if self._ptr[0].placementIds == NULL:             # <<<<<<<<<<<<<<
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].placementSize,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 */
  __pyx_t_1 = ((__pyx_v_self->_ptr[0]).placementIds == NULL);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8513
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 *         if self._ptr[0].placementIds == NULL:
 *             return []             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(self._ptr[0].placementSize,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(self._ptr[0].placementIds)
 */
    /* No backing buffer: return a fresh empty list instead of an array. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8513, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8512
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 *         if self._ptr[0].placementIds == NULL:             # <<<<<<<<<<<<<<
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].placementSize,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 */
  }

  /* "cuda/bindings/_nvml.pyx":8514
 *         if self._ptr[0].placementIds == NULL:
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].placementSize,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(self._ptr[0].placementIds)
 *         return _numpy.asarray(arr)
 */
  /* Build the keyword arguments (shape tuple, itemsize, format="I", mode="c",
   * allocate_buffer=False) and call view.array via the vectorcall builder. */
  __pyx_t_3 = NULL;
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).placementSize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 8514, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_3, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8514, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_5, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 8514, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 8514, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 8514, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 8514, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_7, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 8514, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8514, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_2);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8515
 *             return []
 *         cdef view.array arr = view.array(shape=(self._ptr[0].placementSize,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(self._ptr[0].placementIds)             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the (buffer-less) view.array directly at the C placementIds buffer;
   * this is a zero-copy alias, not a copy. */
  __pyx_v_arr->data = ((char *)(__pyx_v_self->_ptr[0]).placementIds);

  /* "cuda/bindings/_nvml.pyx":8516
 *         cdef view.array arr = view.array(shape=(self._ptr[0].placementSize,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(self._ptr[0].placementIds)
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @placement_ids.setter
 */
  /* Look up numpy.asarray and call it on the view; the resulting ndarray
   * shares the same memory. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_7 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_6 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_7);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_7);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_6 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, ((PyObject *)__pyx_v_arr)};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8516, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8509
 *         self._ptr[0].count = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement_ids(self):
 *         """int: IN/OUT: Placement IDs for the vGPU type."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.placement_ids.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8518
 *         return _numpy.asarray(arr)
 * 
 *     @placement_ids.setter             # <<<<<<<<<<<<<<
 *     def placement_ids(self, val):
 *         if self._readonly:
*/

/* Python wrapper (setter thunk) for
 * VgpuCreatablePlacementInfo_v1.placement_ids.__set__: casts self to the
 * extension-type struct and forwards the new value to the implementation
 * below. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `placement_ids` property setter (pyx lines 8518-8526):
 *   - raises ValueError if the instance is read-only (self._readonly);
 *   - allocates a new cython.view.array of len(val) unsigned ints ("I", C
 *     order, with its own buffer);
 *   - copies numpy.asarray(val, dtype=numpy.uint32) into it via slice assign;
 *   - points self._ptr[0].placementIds at the array's buffer and sets
 *     self._ptr[0].placementSize = len(val);
 *   - stores the array in self._refs["placement_ids"] so the Python object
 *     (and thus the C buffer the struct now points into) stays alive.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8520
 *     @placement_ids.setter
 *     def placement_ids(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8521
 *     def placement_ids(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 */
    /* Raise ValueError for read-only instances and bail to the error exit. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuCreatablePlacementInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8521, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8521, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8520
 *     @placement_ids.setter
 *     def placement_ids(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":8522
 *         if self._readonly:
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 */
  /* Allocate a fresh, owning view.array of len(val) unsigned ints (format "I",
   * C-contiguous); unlike the getter, this one allocates its own buffer. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8522, __pyx_L1_error)
  __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 8522, __pyx_L1_error);
  __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8522, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 8522, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_5, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 8522, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 8522, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 8522, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8522, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8523
 *             raise ValueError("This VgpuCreatablePlacementInfo_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)             # <<<<<<<<<<<<<<
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 *         self._ptr[0].placementSize = len(val)
 */
  /* Convert `val` with numpy.asarray(..., dtype=numpy.uint32) and copy it into
   * the freshly allocated buffer via full-slice assignment arr[:] = ... */
  __pyx_t_7 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_6))) {
    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
    assert(__pyx_t_7);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_6);
    __Pyx_INCREF(__pyx_t_7);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_6, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_7, __pyx_v_val};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8523, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_2, __pyx_t_5, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 8523, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_6, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8523, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 8523, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8524
 *         cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)             # <<<<<<<<<<<<<<
 *         self._ptr[0].placementSize = len(val)
 *         self._refs["placement_ids"] = arr
 */
  /* Point the NVML struct at the Python-owned buffer (zero-copy handoff). */
  (__pyx_v_self->_ptr[0]).placementIds = ((unsigned int *)((intptr_t)__pyx_v_arr->data));

  /* "cuda/bindings/_nvml.pyx":8525
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint32)
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 *         self._ptr[0].placementSize = len(val)             # <<<<<<<<<<<<<<
 *         self._refs["placement_ids"] = arr
 * 
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8525, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).placementSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":8526
 *         self._ptr[0].placementIds = <unsigned int*><intptr_t>(arr.data)
 *         self._ptr[0].placementSize = len(val)
 *         self._refs["placement_ids"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Keep the view.array alive in self._refs so the buffer that placementIds
   * now points into is not freed while the struct references it. */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 8526, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_placement_ids, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 8526, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":8518
 *         return _numpy.asarray(arr)
 * 
 *     @placement_ids.setter             # <<<<<<<<<<<<<<
 *     def placement_ids(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.placement_ids.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8528
 *         self._refs["placement_ids"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method VgpuCreatablePlacementInfo_v1.from_data:
 * FASTCALL argument unpacking for the single required argument `data`
 * (accepted positionally or by keyword), then delegation to the
 * implementation function. Raises TypeError on a bad argument count or an
 * unexpected keyword. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12from_data, "VgpuCreatablePlacementInfo_v1.from_data(data)\n\nCreate an VgpuCreatablePlacementInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_creatable_placement_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the `< 0`
     * OUTSIDE the unlikely() macro; whether this still tests kwds_len < 0
     * depends on how `unlikely` is defined earlier in this generated file —
     * confirm against the macro (if it is `__builtin_expect(!!(x), 0)` the
     * condition can never be true). Generated code; fix in the generator. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 8528, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional `data`, then parse keywords and
       * verify all required arguments were supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8528, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8528, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8528, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8528, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8528, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method from_data (pyx line 8535): looks up the
 * module-level `vgpu_creatable_placement_info_v1_dtype` global and forwards to
 * the shared __from_data helper together with the dtype name and the
 * VgpuCreatablePlacementInfo_v1 extension type, returning the wrapper
 * instance the helper builds. Returns a new reference on success, NULL with
 * an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8535
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_creatable_placement_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_creatable_placement_info_v1_dtype", vgpu_creatable_placement_info_v1_dtype, VgpuCreatablePlacementInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from the module globals, then call the helper. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_creatable_placement_info_v1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_creatable_placement_info_v1, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8528
 *         self._refs["placement_ids"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8537
 *         return __from_data(data, "vgpu_creatable_placement_info_v1_dtype", vgpu_creatable_placement_info_v1_dtype, VgpuCreatablePlacementInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for VgpuCreatablePlacementInfo_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks positional/keyword arguments under either calling convention
 * (METH_FASTCALL arg array, or legacy tuple+dict), converts `ptr` to intptr_t
 * and `readonly` to a C int, then forwards to the C-level implementation
 * __pyx_pf_..._14from_ptr.
 *
 * NOTE(review): this is Cython-generated code that has been hand-patched —
 * the only behavioral change is the `__pyx_kwds_len` error-check fix marked
 * below.  Regenerating from the .pyx will overwrite the fix. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_14from_ptr, "VgpuCreatablePlacementInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuCreatablePlacementInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)` — `unlikely()`
     * reduces its argument to 0/1 via `!!`, so the comparison could never be
     * true and a negative (error) result from __Pyx_NumKwargs_FASTCALL was
     * silently ignored, leaving an exception set while execution continued.
     * Parenthesization now matches the correct form used elsewhere in this
     * file (see the __reduce_cython__ wrapper). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8537, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8537, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8537, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8537, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8537, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":8538
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* `ptr` (values[0]) is the only required argument; raise if missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8537, __pyx_L3_error) }
      }
    } else {
      /* No keywords: positional-only fast path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8537, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8537, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8537, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8538, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8538, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8537, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":8537
 *         return __from_data(data, "vgpu_creatable_placement_info_v1_dtype", vgpu_creatable_placement_info_v1_dtype, VgpuCreatablePlacementInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuCreatablePlacementInfo_v1.from_ptr.
 * Behavior (mirrors the interleaved .pyx source quoted below):
 *   - ptr == 0            -> raise ValueError;
 *   - owner is None       -> malloc a private copy of the struct, memcpy the
 *                            caller's data into it, and mark the wrapper as
 *                            owning the allocation (_owned = True, _owner = None);
 *   - owner provided      -> alias the caller's memory directly and hold a
 *                            reference to `owner` so the memory outlives this
 *                            wrapper (_owned = False);
 *   - finally store the readonly flag and a fresh empty _refs dict, and
 *     return the new instance.
 * NOTE(review): Cython-generated; code kept byte-identical, comments only.
 * The refnanny GOTREF/GIVEREF sequences and field-swap ordering are
 * deliberate and must not be reordered. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8546
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8547
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)
 *         if owner is None:
*/
    /* Vectorcall ValueError(msg); __pyx_t_4 is the args offset (1 = no self). */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8547, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8547, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8546
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":8548
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
*/
  /* Direct tp_new call: allocates the instance without running __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8548, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8549
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8550
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
*/
    /* No owner: copy semantics — allocate private storage for the struct. */
    __pyx_v_obj->_ptr = ((nvmlVgpuCreatablePlacementInfo_v1_t *)malloc((sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8551
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8552
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called via the generic fastcall path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8552, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuCreatablePl};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8552, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 8552, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8551
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":8553
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* (void) cast discards memcpy's return value to silence warnings. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":8554
 *                 raise MemoryError("Error allocating VgpuCreatablePlacementInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    /* Standard refnanny field swap: incref new value, drop old, assign. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8555
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8549
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuCreatablePlacementInfo_v1 obj = VgpuCreatablePlacementInfo_v1.__new__(VgpuCreatablePlacementInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>malloc(sizeof(nvmlVgpuCreatablePlacementInfo_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":8557
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's memory; `owner` keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlVgpuCreatablePlacementInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8558
 *         else:
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":8559
 *             obj._ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":8560
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":8561
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8562
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8537
 *         return __from_data(data, "vgpu_creatable_placement_info_v1_dtype", vgpu_creatable_placement_info_v1_dtype, VgpuCreatablePlacementInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path releases temporaries; __pyx_L0 then drops the local `obj`
   * reference (the returned reference, if any, was taken separately above). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for VgpuCreatablePlacementInfo_v1.__reduce_cython__(self).
 * Takes no arguments: rejects any positional args or keywords, then delegates
 * to the implementation (which unconditionally raises TypeError — instances
 * wrap a raw C pointer and cannot be pickled).
 * NOTE(review): Cython-generated; code kept byte-identical, comments only. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_16__reduce_cython__, "VgpuCreatablePlacementInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error.
   * Note the correctly parenthesized kwds_len check here. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __reduce_cython__ implementation: always raises TypeError, because the
 * wrapped raw C pointer (self._ptr) cannot be converted to a Python object
 * for pickling.  `self` is intentionally unused.
 * NOTE(review): Cython-generated; code kept byte-identical, comments only. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-parsing wrapper for VgpuCreatablePlacementInfo_v1.__setstate_cython__(self, __pyx_state).
 * Accepts exactly one argument (`__pyx_state`, positional or keyword) and
 * delegates to the implementation, which unconditionally raises TypeError.
 *
 * NOTE(review): this is Cython-generated code that has been hand-patched —
 * the only behavioral change is the `__pyx_kwds_len` error-check fix marked
 * below.  Regenerating from the .pyx will overwrite the fix. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_18__setstate_cython__, "VgpuCreatablePlacementInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)` — `unlikely()`
     * reduces its argument to 0/1 via `!!`, so the comparison could never be
     * true and a negative (error) result from __Pyx_NumKwargs_FASTCALL was
     * silently ignored.  Parenthesization now matches the correct form used
     * elsewhere in this file (see the __reduce_cython__ wrapper). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setstate_cython__ implementation: always raises TypeError, because the
 * wrapped raw C pointer (self._ptr) cannot be restored from pickled state.
 * Both `self` and `__pyx_state` are intentionally unused.
 * NOTE(review): Cython-generated; code kept byte-identical, comments only. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8565
 * 
 * 
 * cdef _get_hwbc_entry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlHwbcEntry_t pod = nvmlHwbcEntry_t()
 *     return _numpy.dtype({
*/

/* Builds the NumPy structured dtype describing nvmlHwbcEntry_t: field names
 * ('hwbc_id', 'firmware_version'), per-field NumPy formats, byte offsets
 * computed by pointer arithmetic on a stack POD instance, and the struct's
 * total itemsize.  Returns a new reference to the numpy.dtype, or NULL with
 * a Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_hwbc_entry_dtype_offsets(void) {
  nvmlHwbcEntry_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlHwbcEntry_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_hwbc_entry_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":8566
 * 
 * cdef _get_hwbc_entry_dtype_offsets():
 *     cdef nvmlHwbcEntry_t pod = nvmlHwbcEntry_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['hwbc_id', 'firmware_version'],
 */
  /* FIX: value-initialize the temporary before copying it into pod.  The
   * previous code copied __pyx_t_1 while still indeterminate (the generated
   * "__pyx_t_1 = nvmlHwbcEntry_t();" line was missing), which is undefined
   * behavior in C++ even though pod is only used for address arithmetic. */
  __pyx_t_1 = nvmlHwbcEntry_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":8567
 * cdef _get_hwbc_entry_dtype_offsets():
 *     cdef nvmlHwbcEntry_t pod = nvmlHwbcEntry_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['hwbc_id', 'firmware_version'],
 *         'formats': [_numpy.uint32, _numpy.int8],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up numpy.dtype from the module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":8568
 *     cdef nvmlHwbcEntry_t pod = nvmlHwbcEntry_t()
 *     return _numpy.dtype({
 *         'names': ['hwbc_id', 'firmware_version'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [
 */
  /* Build the 4-key dtype spec dict: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_hwbc_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_hwbc_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_hwbc_id) != (0)) __PYX_ERR(0, 8568, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_firmware_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_firmware_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_firmware_version) != (0)) __PYX_ERR(0, 8568, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 8568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8569
 *     return _numpy.dtype({
 *         'names': ['hwbc_id', 'firmware_version'],
 *         'formats': [_numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.hwbcId)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 8569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8569, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 8568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8571
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.hwbcId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 *         ],
 */
  /* offsetof-style computation: member address minus struct base address. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hwbcId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":8572
 *         'offsets': [
 *             (<intptr_t>&(pod.hwbcId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlHwbcEntry_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.firmwareVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":8570
 *         'names': ['hwbc_id', 'firmware_version'],
 *         'formats': [_numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hwbcId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8570, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 8570, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8570, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 8568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":8574
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlHwbcEntry_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlHwbcEntry_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 8568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via the fastcall protocol; if dtype turned
   * out to be a bound method, unpack it so self goes in the argument slot. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8565
 * 
 * 
 * cdef _get_hwbc_entry_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlHwbcEntry_t pod = nvmlHwbcEntry_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_hwbc_entry_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8596
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=hwbc_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Python-level wrapper for HwbcEntry.__init__(self, size=1).
 * Parses positional and keyword arguments into values[0] (defaulting to the
 * interned int 1), then forwards to the implementation function.  Returns 0
 * on success, -1 with an exception set on failure. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
     * unlikely() with 0 and can never be true; this appears to match upstream
     * Cython-generated output — confirm against the Cython version in use. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 8596, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: take any positional 'size' first, then
       * let __Pyx_ParseKeywords fill/validate the rest. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8596, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 8596, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: size defaults to 1 when omitted. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8596, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 8596, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any refs captured in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of HwbcEntry.__init__(self, size=1):
 *   arr = numpy.empty(size, dtype=hwbc_entry_dtype)
 *   self._data = arr.view(numpy.recarray)
 *   assert self._data.itemsize == sizeof(nvmlHwbcEntry_t)
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8597
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=hwbc_entry_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlHwbcEntry_t), \
 */
  /* Look up numpy.empty and the module-level hwbc_entry_dtype, then call
   * empty(size, dtype=...) through the vectorcall keyword builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_hwbc_entry_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8597, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 8597, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8597, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8598
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=hwbc_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlHwbcEntry_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlHwbcEntry_t) }"
 */
  /* Call arr.view(numpy.recarray) and store the result in self._data,
   * swapping out the previous reference. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8598, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8599
 *         arr = _numpy.empty(size, dtype=hwbc_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlHwbcEntry_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlHwbcEntry_t) }"
 * 
 */
  /* Sanity check (skipped when Python assertions are disabled): the dtype's
   * itemsize must equal sizeof(nvmlHwbcEntry_t) so the recarray overlays the
   * C struct layout exactly. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8599, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlHwbcEntry_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8599, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8599, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 8599, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":8600
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlHwbcEntry_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlHwbcEntry_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string assertion message, then raise AssertionError. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8600, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8600, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlHwbcEntry_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8600, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8600, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 8599, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 8599, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":8596
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=hwbc_entry_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8602
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlHwbcEntry_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.HwbcEntry_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python-level wrapper for HwbcEntry.__repr__ (unary tp_repr slot): casts
 * self to the extension-type struct and delegates to the implementation. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this unary
   * wrapper; this only compiles because __Pyx_KwValues_VARARGS is presumably
   * a macro that discards its arguments here — confirm against the macro
   * definition in the utility-code section. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of HwbcEntry.__repr__:
 *   if self._data.size > 1:  "<{__name__}.HwbcEntry_Array_{size} object at {hex(id(self))}>"
 *   else:                    "<{__name__}.HwbcEntry object at {hex(id(self))}>"
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8603
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.HwbcEntry_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Compare self._data.size (a Python object) against the interned int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8603, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8603, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 8603, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":8604
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.HwbcEntry_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.HwbcEntry object at {hex(id(self))}>"
 */
    /* Array repr: format __name__, the size, and hex(id(self)), then join
     * the 7 pieces into one unicode string with a precomputed length. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_HwbcEntry_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 17 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8603
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.HwbcEntry_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":8606
 *             return f"<{__name__}.HwbcEntry_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.HwbcEntry object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar repr: same construction without the size segment (5 pieces). */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_HwbcEntry_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 21 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":8602
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlHwbcEntry_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.HwbcEntry_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8608
 *             return f"<{__name__}.HwbcEntry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the HwbcEntry.ptr property getter: casts self to
 * the extension-type struct and delegates to the implementation. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are discarded by the macro expansion here; the
   * assignment only initializes the unused kwvalues slot. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the HwbcEntry.ptr property: returns
 * self._data.ctypes.data — the NumPy array's buffer address as a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8611
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Chained attribute access: self._data -> .ctypes -> .data */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8608
 *             return f"<{__name__}.HwbcEntry object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8613
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level helper HwbcEntry._get_ptr: fetches self._data.ctypes.data and
 * converts it to intptr_t.  On error, returns 0 with a Python exception set
 * (callers must check PyErr_Occurred, since 0 is the error sentinel). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_9HwbcEntry__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8614
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8614, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8614, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Convert the Python int address to intptr_t via PyLong_AsSsize_t;
   * -1 with an exception pending signals conversion failure. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8614, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8613
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8616
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* CPython-facing entry point for HwbcEntry.__int__ (nb_int slot): casts the
 * PyObject* self to the extension-type struct and delegates to the generated
 * implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of HwbcEntry.__int__: raises TypeError when the wrapped
 * array holds more than one record (`self._data.size > 1`); otherwise
 * returns `self._data.ctypes.data` (the buffer address) as a Python int.
 * Generated by Cython; do not hand-edit the logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8617
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* evaluate `self._data.size > 1` via Python rich comparison */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8617, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8617, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 8617, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":8618
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* build and raise TypeError(message) via the vectorcall fast path */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8618, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8618, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8617
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":8620
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* return self._data.ctypes.data as a new reference */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8616
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8622
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* CPython-facing entry point for HwbcEntry.__len__ (sq_length/mp_length
 * slot): casts self to the extension-type struct and delegates to the
 * generated implementation. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of HwbcEntry.__len__: returns `self._data.size` converted
 * to Py_ssize_t; on error returns -1 per the CPython length-slot protocol.
 * Generated by Cython; do not hand-edit the logic. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":8623
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  /* fetch self._data.size and convert it through __index__ to Py_ssize_t */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 8623, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8622
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  /* error path: -1 signals failure to the interpreter */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8625
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* CPython-facing entry point for HwbcEntry.__eq__: casts self to the
 * extension-type struct and delegates (self, other) to the generated
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of HwbcEntry.__eq__: returns False when `other` is not a
 * HwbcEntry instance, or when the two underlying arrays differ in `size` or
 * `dtype`; otherwise returns `bool((self._data == other._data).all())`
 * (element-wise comparison reduced with `.all()`).  The three conditions are
 * short-circuit-evaluated exactly as in the Python source.
 * Generated by Cython; do not hand-edit the logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8626
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* self_data = self._data (own a reference for the rest of the function) */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8627
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* condition 1: not isinstance(other, HwbcEntry) */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* condition 2: self_data.size != other._data.size */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* condition 3: self_data.dtype != other._data.dtype */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 8627, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8628
 *         cdef object self_data = self._data
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8627
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":8629
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (self_data == other._data) elementwise, then .all(), then bool() */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8629, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* double negation normalizes the truth value to 0/1 for PyBool */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8625
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, HwbcEntry)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  /* error path: drop all live temporaries, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8631
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def hwbc_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* CPython-facing getter for the `hwbc_id` property: casts self to the
 * extension-type struct and delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `hwbc_id` property getter: when the wrapped array
 * holds a single record (`self._data.size == 1`) return the scalar as a
 * Python int (`int(self._data.hwbc_id[0])`); otherwise return the
 * `hwbc_id` field array itself.  Generated by Cython; do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8634
 *     def hwbc_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.hwbc_id[0])
 *         return self._data.hwbc_id
 */
  /* optimized `== 1` comparison against a cached small-int constant */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8634, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8634, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8635
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.hwbc_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.hwbc_id
 * 
 */
    /* single-record case: index element 0 and coerce through int() */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_hwbc_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8635, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8635, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8635, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8634
 *     def hwbc_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.hwbc_id[0])
 *         return self._data.hwbc_id
 */
  }

  /* "cuda/bindings/_nvml.pyx":8636
 *         if self._data.size == 1:
 *             return int(self._data.hwbc_id[0])
 *         return self._data.hwbc_id             # <<<<<<<<<<<<<<
 * 
 *     @hwbc_id.setter
 */
  /* multi-record case: hand back the field array unchanged */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_hwbc_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8631
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def hwbc_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  /* error path: drop temporaries, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.hwbc_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8638
 *         return self._data.hwbc_id
 * 
 *     @hwbc_id.setter             # <<<<<<<<<<<<<<
 *     def hwbc_id(self, val):
 *         self._data.hwbc_id = val
*/

/* Python wrapper */
/* CPython-facing setter for the `hwbc_id` property: casts self to the
 * extension-type struct and delegates (self, val) to the generated
 * implementation.  Returns 0 on success, -1 on error per descriptor
 * protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `hwbc_id` property setter: performs
 * `self._data.hwbc_id = val` (a plain setattr on the wrapped array object).
 * Returns 0 on success, -1 on error.  Generated by Cython; do not
 * hand-edit. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":8640
 *     @hwbc_id.setter
 *     def hwbc_id(self, val):
 *         self._data.hwbc_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_hwbc_id, __pyx_v_val) < (0)) __PYX_ERR(0, 8640, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":8638
 *         return self._data.hwbc_id
 * 
 *     @hwbc_id.setter             # <<<<<<<<<<<<<<
 *     def hwbc_id(self, val):
 *         self._data.hwbc_id = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.hwbc_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8642
 *         self._data.hwbc_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_version(self):
 *         """~_numpy.int8: (array of length 32)."""
*/

/* Python wrapper */
/* CPython-facing getter for the `firmware_version` property: casts self to
 * the extension-type struct and delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `firmware_version` property getter: returns the
 * `firmware_version` field of the wrapped array (per the .pyx docstring, an
 * int8 array of length 32).  Unlike `hwbc_id`, no scalar fast path exists --
 * the field is always returned as-is.  Generated by Cython; do not
 * hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8645
 *     def firmware_version(self):
 *         """~_numpy.int8: (array of length 32)."""
 *         return self._data.firmware_version             # <<<<<<<<<<<<<<
 * 
 *     @firmware_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_firmware_version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8645, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8642
 *         self._data.hwbc_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_version(self):
 *         """~_numpy.int8: (array of length 32)."""
 */

  /* function exit code */
  /* error path: drop temporary, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.firmware_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8647
 *         return self._data.firmware_version
 * 
 *     @firmware_version.setter             # <<<<<<<<<<<<<<
 *     def firmware_version(self, val):
 *         self._data.firmware_version = val
*/

/* Python wrapper */
/* CPython-facing setter for the `firmware_version` property: casts self to
 * the extension-type struct and delegates (self, val) to the generated
 * implementation.  Returns 0 on success, -1 on error per descriptor
 * protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `firmware_version` property setter: performs
 * `self._data.firmware_version = val` (a plain setattr on the wrapped array
 * object).  Returns 0 on success, -1 on error.  Generated by Cython; do not
 * hand-edit. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":8649
 *     @firmware_version.setter
 *     def firmware_version(self, val):
 *         self._data.firmware_version = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_firmware_version, __pyx_v_val) < (0)) __PYX_ERR(0, 8649, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":8647
 *         return self._data.firmware_version
 * 
 *     @firmware_version.setter             # <<<<<<<<<<<<<<
 *     def firmware_version(self, val):
 *         self._data.firmware_version = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.firmware_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8651
 *         self._data.firmware_version = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* CPython-facing entry point for HwbcEntry.__getitem__ (mp_subscript slot):
 * casts self to the extension-type struct and delegates (self, key) to the
 * generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8654
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8655
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
*/
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 8655, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":8656
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
*/
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8656, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 8656, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":8657
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8658
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
*/
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8658, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 8658, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8657
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    }

    /* "cuda/bindings/_nvml.pyx":8659
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return HwbcEntry.from_data(self._data[key_:key_+1])
*/
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":8660
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return HwbcEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
*/
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":8659
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return HwbcEntry.from_data(self._data[key_:key_+1])
*/
    }

    /* "cuda/bindings/_nvml.pyx":8661
 *             if key_ < 0:
 *                 key_ += size
 *             return HwbcEntry.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == hwbc_entry_dtype:
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8661, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8661, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8654
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  }

  /* "cuda/bindings/_nvml.pyx":8662
 *                 key_ += size
 *             return HwbcEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == hwbc_entry_dtype:
 *             return HwbcEntry.from_data(out)
*/
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8662, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8663
 *             return HwbcEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == hwbc_entry_dtype:             # <<<<<<<<<<<<<<
 *             return HwbcEntry.from_data(out)
 *         return out
*/
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_hwbc_entry_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 8663, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8664
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == hwbc_entry_dtype:
 *             return HwbcEntry.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8664, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8663
 *             return HwbcEntry.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == hwbc_entry_dtype:             # <<<<<<<<<<<<<<
 *             return HwbcEntry.from_data(out)
 *         return out
*/
  }

  /* "cuda/bindings/_nvml.pyx":8665
 *         if isinstance(out, _numpy.recarray) and out.dtype == hwbc_entry_dtype:
 *             return HwbcEntry.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8651
 *         self._data.firmware_version = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8667
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/*
 * CPython `mp_ass_subscript`-style wrapper for `HwbcEntry.__setitem__(key, val)`.
 * Casts `self` to the concrete extension-object struct and delegates to the
 * implementation function `__pyx_pf_..._12__setitem__`; returns 0 on success,
 * -1 on error (exception already set by the implementation).
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this
   * function; the VARARGS variant of this macro expands without evaluating
   * its arguments (generated-code artifact), and the result is unused. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `HwbcEntry.__setitem__`: forwards the assignment to the
 * wrapped NumPy recarray via `self._data[key] = val` (PyObject_SetItem).
 * Returns 0 on success; on failure returns -1 with the Python exception set
 * and a traceback frame appended for the Cython source location.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  /* Error-location bookkeeping consumed by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":8668
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Delegate item assignment to the underlying recarray object. */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 8668, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":8667
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8670
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an HwbcEntry instance wrapping the given NumPy array.
*/

/* Python wrapper */
/*
 * Argument-parsing wrapper for the static method `HwbcEntry.from_data(data)`.
 * Accepts the single positional-or-keyword argument `data`, then delegates to
 * the implementation `__pyx_pf_..._14from_data`. Supports both the FASTCALL
 * (vectorcall) and classic tuple/dict calling conventions, selected at
 * compile time by CYTHON_METH_FASTCALL.
 *
 * FIX(review): the keyword-count error check below was
 * `if (unlikely(__pyx_kwds_len) < 0)`, which can never be true because
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)` (a 0/1 value).
 * Corrected to `unlikely(__pyx_kwds_len < 0)` so a negative (error) return
 * from __Pyx_NumKwargs_* is propagated instead of silently ignored.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_14from_data, "HwbcEntry.from_data(data)\n\nCreate an HwbcEntry instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `hwbc_entry_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic convention: derive nargs from the argument tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Propagate errors from the keyword-count query (was dead code before:
     * `unlikely(__pyx_kwds_len) < 0` is always false). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8670, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge
       * keywords and verify all required arguments were provided. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8670, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8670, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8670, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8670, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8670, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of `HwbcEntry.from_data(data)`.
 *
 * Validates that `data` is a 1-D NumPy ndarray of dtype `hwbc_entry_dtype`,
 * allocates a new HwbcEntry via tp_new (bypassing __init__), stores
 * `data.view(numpy.recarray)` into its `_data` slot, and returns the new
 * object. Raises TypeError for a non-ndarray, ValueError for wrong
 * dimensionality or dtype. On any failure, returns NULL with the exception
 * set and a traceback frame added.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8677
 *             data (_numpy.ndarray): a 1D array of dtype `hwbc_entry_dtype` holding the data.
 *         """
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Allocate an empty HwbcEntry directly through tp_new (no __init__ call). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_HwbcEntry(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8677, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8678
 *         """
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* Type check: look up numpy.ndarray from module globals each call. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 8678, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":8679
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    /* Build and raise TypeError via the vectorcall helper. */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8679, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8679, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8678
 *         """
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":8680
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != hwbc_entry_dtype:
 */
  /* Dimensionality check: data.ndim must equal 1. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 8680, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":8681
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != hwbc_entry_dtype:
 *             raise ValueError("data array must be of dtype hwbc_entry_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8681, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8681, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8680
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != hwbc_entry_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":8682
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != hwbc_entry_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype hwbc_entry_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Dtype check: rich-compare data.dtype against the module-level
   * `hwbc_entry_dtype` with Py_NE. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8682, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_hwbc_entry_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8682, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8682, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 8682, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":8683
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != hwbc_entry_dtype:
 *             raise ValueError("data array must be of dtype hwbc_entry_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_hwbc};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8683, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 8683, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8682
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != hwbc_entry_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype hwbc_entry_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":8684
 *         if data.dtype != hwbc_entry_dtype:
 *             raise ValueError("data array must be of dtype hwbc_entry_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Call data.view(numpy.recarray) — shares the buffer, no copy. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8684, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Swap the new view into obj->_data, releasing the slot's previous value. */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8686
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8670
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an HwbcEntry instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release all live temporaries before reporting. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8688
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an HwbcEntry instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * Argument-parsing wrapper for the static method
 * `HwbcEntry.from_ptr(intptr_t ptr, size_t size=1, bint readonly=False)`.
 * Parses up to three positional-or-keyword arguments, converts them to their
 * C types (intptr_t, size_t, int), applies defaults for `size` and
 * `readonly`, then delegates to `__pyx_pf_..._16from_ptr`.
 *
 * FIX(review): the keyword-count error check below was
 * `if (unlikely(__pyx_kwds_len) < 0)`, which can never be true because
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)` (a 0/1 value).
 * Corrected to `unlikely(__pyx_kwds_len < 0)` so a negative (error) return
 * from __Pyx_NumKwargs_* is propagated instead of silently ignored.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_16from_ptr, "HwbcEntry.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an HwbcEntry instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic convention: derive nargs from the argument tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Propagate errors from the keyword-count query (was dead code before:
     * `unlikely(__pyx_kwds_len) < 0` is always false). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8688, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge
       * keywords and verify the one required argument (`ptr`) is present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8688, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8688, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call: 1 to 3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8688, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): `ptr` (intptr_t) is converted with PyLong_AsSsize_t,
     * which assumes Py_ssize_t and intptr_t have the same width — true on
     * mainstream platforms, but worth confirming for exotic targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8689, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 8689, __pyx_L3_error)
    } else {
      /* Default: size=1. */
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8689, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":8689
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an HwbcEntry instance wrapping the given pointer.
 * 
 */
      /* Default: readonly=False. */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8688, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":8688
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an HwbcEntry instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * C implementation of the Python static method
 * HwbcEntry.from_ptr(ptr, size=1, readonly=False).
 *
 * Zero-copy wraps `size` nvmlHwbcEntry_t structs located at address `ptr`:
 * it builds a memoryview over the raw memory, views it as a numpy recarray
 * with dtype `hwbc_entry_dtype`, and stores that on a freshly allocated
 * HwbcEntry instance (`obj->_data`).  The caller remains responsible for
 * keeping the memory at `ptr` alive for the lifetime of the returned object.
 *
 * Raises ValueError when ptr == 0.  Returns a new reference to the HwbcEntry
 * instance, or NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8697
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 */
  /* Guard: refuse to wrap a NULL pointer. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8698
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    /* Construct and raise ValueError("ptr must not be null (0)") via the
     * vectorcall fast path (one positional argument, no self slot). */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8698, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8698, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8697
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 */
  }

  /* "cuda/bindings/_nvml.pyx":8699
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate the HwbcEntry via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_HwbcEntry(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8699, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8700
 *             raise ValueError("ptr must not be null (0)")
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlHwbcEntry_t) * size, flag)
 */
  /* Select the memoryview access mode from the `readonly` argument. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8700, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8700, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8702
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlHwbcEntry_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=hwbc_entry_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Convert the Python-level flag back to a C int for the C API call. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8702, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":8701
 *         cdef HwbcEntry obj = HwbcEntry.__new__(HwbcEntry)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlHwbcEntry_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=hwbc_entry_dtype)
 */
  /* Zero-copy memoryview over `size` nvmlHwbcEntry_t structs at `ptr`. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlHwbcEntry_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8703
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlHwbcEntry_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=hwbc_entry_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 *         # <<<<<<<<<<<<<< (see original)
 */
  /* Call numpy.ndarray(size, buffer=buf, dtype=hwbc_entry_dtype) via a
   * vectorcall builder (one positional + two keyword arguments). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8703, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8703, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8703, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_hwbc_entry_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8703, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8703, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 8703, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 8703, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8703, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8704
 *             <char*>ptr, sizeof(nvmlHwbcEntry_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=hwbc_entry_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* obj._data = data.view(numpy.recarray): store the recarray view on the
   * instance, releasing whatever _data previously held. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8704, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8706
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: return a new reference to the populated HwbcEntry. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8688
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an HwbcEntry instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop any live temporaries, record the traceback, fall through
   * to the common cleanup with __pyx_r == NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8592
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Python-level wrapper (tp_getset slot) for the readonly `_data` attribute:
 * casts self to the HwbcEntry struct and delegates to the getter impl.
 * NOTE(review): __Pyx_KwValues_VARARGS is used with __pyx_args/__pyx_nargs,
 * which are not parameters of this function — this only compiles because the
 * macro presumably discards its arguments; verify against the Cython utility
 * code before touching this line. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Python wrapper for HwbcEntry.__reduce_cython__(self): rejects any
 * positional or keyword arguments (the method takes only self) and then
 * delegates to the implementation.  Compiled either as a METH_FASTCALL
 * function (args vector + nargs + kwnames) or a classic varargs function,
 * depending on CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_18__reduce_cython__, "HwbcEntry.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: recover nargs from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments beyond self: raise TypeError on
   * any positional or keyword argument. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of HwbcEntry.__reduce_cython__ (pickle protocol support).
 * Builds state = (self._data,) [+ (self.__dict__,) when the instance dict is
 * non-empty] and returns either
 *     (__pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, None), state)
 * when __setstate__ must be used, or
 *     (__pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state))
 * otherwise.  0xa75e18a is the checksum of the class layout used by the
 * unpickler to detect incompatible versions.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuit `_dict is not None and _dict` (truthiness only evaluated
   * when _dict is not None). */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    /* Append the instance dict to the state tuple. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* NOTE(review): evaluates the truthiness of a module-state constant
     * (presumably the cached tuple shown in the source comment above, which
     * is non-empty and hence always true) — verify __pyx_tuple[2] against the
     * module's constant table before relying on this. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state)
 */
    /* 3-tuple form: state passed separately so __setstate__ is invoked. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_HwbcEntry); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_HwbcEntry__set_state(self, __pyx_state)
 */
  /*else*/ {
    /* 2-tuple form: state embedded in the constructor arguments. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_HwbcEntry); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_HwbcEntry__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Python wrapper for HwbcEntry.__setstate_cython__(self, __pyx_state):
 * unpacks the single required argument `__pyx_state` (positional or keyword)
 * and forwards it to the implementation function.  Returns NULL with an
 * exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_20__setstate_cython__, "HwbcEntry.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: recover nargs from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must sit inside unlikely().  unlikely(x)
     * expands to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the
     * previous form `unlikely(__pyx_kwds_len) < 0` was always false and a -1
     * error return from __Pyx_NumKwargs_FASTCALL was silently ignored,
     * continuing with a pending exception.  This now matches the correctly
     * generated check in the sibling __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of HwbcEntry.__setstate_cython__(self, __pyx_state):
 * validates that __pyx_state is a tuple (explicitly rejecting None, since the
 * callee declares it `not None`) and delegates to
 * __pyx_unpickle_HwbcEntry__set_state to restore the instance fields.
 * Returns None on success, NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9HwbcEntry_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_HwbcEntry__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Type check: must be exactly a tuple (or None, which is rejected next). */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  /* Delegate the actual field restoration; result (None) is discarded. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_HwbcEntry__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_HwbcEntry__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.HwbcEntry.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8709
 * 
 * 
 * cdef _get_led_state_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlLedState_t pod = nvmlLedState_t()
 *     return _numpy.dtype({
*/

/* Build and return a numpy structured dtype for nvmlLedState_t.
 * Field byte offsets are computed from the addresses of the members of a
 * live C instance ('pod'), so the dtype layout matches the compiled struct
 * exactly, including any compiler-inserted padding; 'itemsize' is pinned to
 * sizeof(nvmlLedState_t) for the same reason.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_led_state_dtype_offsets(void) {
  nvmlLedState_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlLedState_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_led_state_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":8710
 * 
 * cdef _get_led_state_dtype_offsets():
 *     cdef nvmlLedState_t pod = nvmlLedState_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['cause', 'color'],
 */
  /* BUG FIX: value-initialize the temporary before copying it into 'pod'.
   * The previous code assigned __pyx_t_1 while it was still uninitialized,
   * dropping the 'nvmlLedState_t()' initialization requested in the .pyx
   * source and copying an indeterminate value. */
  __pyx_t_1 = nvmlLedState_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":8711
 * cdef _get_led_state_dtype_offsets():
 *     cdef nvmlLedState_t pod = nvmlLedState_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['cause', 'color'],
 *         'formats': [_numpy.int8, _numpy.int32],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up numpy.dtype from module globals. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":8712
 *     cdef nvmlLedState_t pod = nvmlLedState_t()
 *     return _numpy.dtype({
 *         'names': ['cause', 'color'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int8, _numpy.int32],
 *         'offsets': [
 */
  /* dtype-spec dict: {'names': ..., 'formats': ..., 'offsets': ..., 'itemsize': ...} */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8712, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8712, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cause);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cause);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_cause) != (0)) __PYX_ERR(0, 8712, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_color);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_color);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_color) != (0)) __PYX_ERR(0, 8712, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 8712, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8713
 *     return _numpy.dtype({
 *         'names': ['cause', 'color'],
 *         'formats': [_numpy.int8, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.cause)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 8713, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8713, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 8712, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8715
 *         'formats': [_numpy.int8, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.cause)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.color)) - (<intptr_t>&pod),
 *         ],
 */
  /* Offsets derive from member addresses, not values, so 'pod' content is
   * never read. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.cause)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8715, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":8716
 *         'offsets': [
 *             (<intptr_t>&(pod.cause)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.color)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlLedState_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.color)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":8714
 *         'names': ['cause', 'color'],
 *         'formats': [_numpy.int8, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.cause)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.color)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 8714, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8714, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 8712, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":8718
 *             (<intptr_t>&(pod.color)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlLedState_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlLedState_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8718, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 8712, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); unpack bound methods when possible so the call
   * can use the vectorcall fast path. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8711, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8709
 * 
 * 
 * cdef _get_led_state_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlLedState_t pod = nvmlLedState_t()
 *     return _numpy.dtype({
 */

  /* function exit code: release any live temporaries, record traceback,
   * and return NULL on error. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_led_state_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8735
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlLedState_t *>calloc(1, sizeof(nvmlLedState_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for LedState.__init__: verifies that no positional or
 * keyword arguments were passed, then forwards to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the checked PyTuple_Size path guards against
   * exotic callers when CYTHON_ASSUME_SAFE_SIZE is off. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* LedState.__init__ implementation: zero-allocates the wrapped
 * nvmlLedState_t on the heap, marks this object as the owner of that
 * allocation, and initializes ownership/readonly bookkeeping.
 * Raises MemoryError if calloc fails.  Returns 0 / -1 (with exception). */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8736
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlLedState_t *>calloc(1, sizeof(nvmlLedState_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating LedState")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlLedState_t *)calloc(1, (sizeof(nvmlLedState_t))));

  /* "cuda/bindings/_nvml.pyx":8737
 *     def __init__(self):
 *         self._ptr = <nvmlLedState_t *>calloc(1, sizeof(nvmlLedState_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating LedState")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8738
 *         self._ptr = <nvmlLedState_t *>calloc(1, sizeof(nvmlLedState_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating LedState")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating LedState").  The
     * method-unpacking dance lets the call use the vectorcall protocol. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8738, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_LedState};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8738, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8738, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8737
 *     def __init__(self):
 *         self._ptr = <nvmlLedState_t *>calloc(1, sizeof(nvmlLedState_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating LedState")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":8739
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating LedState")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No external owner: this object owns its own allocation. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":8740
 *             raise MemoryError("Error allocating LedState")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned=True makes __dealloc__ free the buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":8741
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":8735
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlLedState_t *>calloc(1, sizeof(nvmlLedState_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8743
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlLedState_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for LedState.__dealloc__; simply forwards to the
 * implementation.  NOTE(review): __pyx_args/__pyx_nargs are not declared in
 * this function — __Pyx_KwValues_VARARGS is presumably a macro that does not
 * evaluate its arguments; confirm against the macro definition earlier in
 * this generated file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_8LedState_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_8LedState_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_8LedState_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* LedState.__dealloc__ implementation: frees the wrapped nvmlLedState_t
 * only when this object owns it (_owned) and the pointer is non-NULL.
 * The pointer is nulled before free() so the object never holds a dangling
 * pointer, even transiently. */
static void __pyx_pf_4cuda_8bindings_5_nvml_8LedState_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  nvmlLedState_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlLedState_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":8745
 *     def __dealloc__(self):
 *         cdef nvmlLedState_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit 'and': only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8746
 *         cdef nvmlLedState_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":8747
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the member first, then free the saved copy. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":8748
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8745
 *     def __dealloc__(self):
 *         cdef nvmlLedState_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":8743
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlLedState_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":8750
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.LedState object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for LedState.__repr__; forwards to the implementation.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here — the
 * __Pyx_KwValues_VARARGS macro presumably ignores its arguments; verify
 * against its definition earlier in this generated file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* LedState.__repr__ implementation: builds the f-string
 * f"<{__name__}.LedState object at {hex(id(self))}>" by formatting the two
 * interpolated values and joining them with the literal fragments.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8751
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.LedState object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format {__name__} as str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal fragments and formatted values; the size/max-char hints
   * let the join preallocate the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_LedState_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 20 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8750
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.LedState object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8753
 *         return f"<{__name__}.LedState object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the 'ptr' property; forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* 'ptr' property getter: returns the address of the wrapped nvmlLedState_t
 * as a Python int.  Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8756
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int (intptr_t fits Py_ssize_t here). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8753
 *         return f"<{__name__}.LedState object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8758
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* "cuda/bindings/_nvml.pyx":8758-8759
 * C-level (cdef) counterpart of the 'ptr' property: returns the raw address
 * of the wrapped nvmlLedState_t as an integer, with no Python overhead. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_8LedState__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  /* Single expression; the generated goto/label scaffolding was dead code. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":8761
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot wrapper for LedState.__int__; forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* LedState.__int__ implementation: int(obj) yields the address of the
 * wrapped nvmlLedState_t, mirroring the 'ptr' property.
 * Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8762
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8762, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8761
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8764
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef LedState other_
 *         if not isinstance(other, LedState):
*/

/* Python wrapper */
/* __eq__ slot wrapper for LedState; forwards both operands to the
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* LedState.__eq__ implementation: two LedState objects compare equal when
 * the bytes of their wrapped nvmlLedState_t structs are identical
 * (memcmp over sizeof(nvmlLedState_t)).  Any non-LedState operand
 * (including None) compares unequal.  Returns a new bool reference or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8766
 *     def __eq__(self, other):
 *         cdef LedState other_
 *         if not isinstance(other, LedState):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8767
 *         cdef LedState other_
 *         if not isinstance(other, LedState):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlLedState_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8766
 *     def __eq__(self, other):
 *         cdef LedState other_
 *         if not isinstance(other, LedState):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":8768
 *         if not isinstance(other, LedState):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlLedState_t)) == 0)
 * 
 */
  /* Typed-assignment cast; the isinstance guard above already returned
   * False for anything that is not a LedState. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState))))) __PYX_ERR(0, 8768, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8769
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlLedState_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct comparison. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlLedState_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8769, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8764
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef LedState other_
 *         if not isinstance(other, LedState):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8771
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlLedState_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for LedState.__setitem__; forwards key and value
 * to the implementation.  Returns 0 / -1 (with exception). */
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of LedState.__setitem__(key, val).
 *
 * When key == 0 and val is a numpy.ndarray, the struct payload is copied
 * out of the array: a fresh nvmlLedState_t is malloc'd, the address taken
 * from val.ctypes.data is memcpy'd into it, and the ownership fields are
 * updated (_owner = None, _owned = True, _readonly mirrors the negation of
 * val.flags.writeable).  Any other key falls back to setattr(self, key, val).
 *
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * NOTE(review): in the ndarray branch self->_ptr is overwritten by malloc
 * without freeing a previously owned allocation; whether that can leak
 * depends on the class's dealloc/ownership contract in the .pyx source --
 * confirm there, not here.
 * NOTE(review): the memcpy assumes val.ctypes.data points at at least
 * sizeof(nvmlLedState_t) readable bytes; the isinstance check alone does
 * not guarantee the array is that large. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8772
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: test key == 0 first; only look up numpy.ndarray
   * and run isinstance() when that holds. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8772, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8772, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8772, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 8772, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8773
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating LedState")
 */
    /* Allocate a fresh struct; previous _ptr value is not freed here. */
    __pyx_v_self->_ptr = ((nvmlLedState_t *)malloc((sizeof(nvmlLedState_t))));

    /* "cuda/bindings/_nvml.pyx":8774
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlLedState_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8775
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating LedState")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlLedState_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating LedState") via the
       * generic vectorcall path (MemoryError is looked up as a global). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8775, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_LedState};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8775, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 8775, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8774
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlLedState_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":8776
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlLedState_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is a Python int holding the array's base address;
     * convert it to intptr_t and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8776, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8776, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8776, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlLedState_t))));

    /* "cuda/bindings/_nvml.pyx":8777
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlLedState_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* self._owner = None (incref new value before decref of old one). */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8778
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlLedState_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8779
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* _readonly = not val.flags.writeable */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8779, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8779, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 8779, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":8772
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":8781
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback path: delegate to normal attribute assignment. */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 8781, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":8771
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlLedState_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8783
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cause(self):
 *         """~_numpy.int8: (array of length 256)."""
*/

/* Python wrapper for the `cause` property getter: casts self and
 * forwards to the implementation; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5cause_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5cause_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5cause___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cause` property getter.
 *
 * Decodes self._ptr[0].cause (a fixed-size char field) into a new Python
 * str via PyUnicode_FromString, which stops at the first NUL byte and
 * assumes the bytes are valid UTF-8.
 *
 * NOTE(review): assumes the field is NUL-terminated; a full 256-byte
 * value with no terminator would read past the field -- the setter's
 * len < 256 check is what upholds this invariant. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_5cause___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8786
 *     def cause(self):
 *         """~_numpy.int8: (array of length 256)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].cause)             # <<<<<<<<<<<<<<
 * 
 *     @cause.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).cause); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8783
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cause(self):
 *         """~_numpy.int8: (array of length 256)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.cause.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8788
 *         return cpython.PyUnicode_FromString(self._ptr[0].cause)
 * 
 *     @cause.setter             # <<<<<<<<<<<<<<
 *     def cause(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `cause` property setter: casts self and
 * forwards to the implementation; returns 0 or -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5cause_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5cause_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5cause_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cause` property setter.
 *
 * Steps: (1) reject writes when self._readonly is set (ValueError);
 * (2) buf = val.encode() -- must produce bytes; (3) reject len(buf) >= 256
 * so the value plus its NUL terminator fits the fixed 256-byte field;
 * (4) copy the encoded bytes into self._ptr[0].cause.
 *
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * FIX(review): the generated code did memcpy(dst, ptr, 0x100), always
 * reading 256 bytes from `buf` even though the bytes object may be far
 * shorter -- a heap overread / undefined behavior (CERT ARR38-C).  Copy
 * only len(buf) bytes (still held in __pyx_t_4 from the bounds check) and
 * zero-fill the remainder.  Observable behavior is unchanged: the field is
 * NUL-terminated at the same offset and the getter stops at the first NUL;
 * the bytes after it were previously indeterminate garbage. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5cause_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8790
 *     @cause.setter
 *     def cause(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This LedState instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8791
 *     def cause(self, val):
 *         if self._readonly:
 *             raise ValueError("This LedState instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_LedState_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8791, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8791, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8790
 *     @cause.setter
 *     def cause(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This LedState instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":8792
 *         if self._readonly:
 *             raise ValueError("This LedState instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 256:
 *             raise ValueError("String too long for field cause, max length is 255")
 */
  /* buf = val.encode(); result must be exactly bytes (or None, rejected
   * by the later checks), otherwise a TypeError is raised. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8792, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 8792, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8793
 *             raise ValueError("This LedState instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field cause, max length is 255")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 8793, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8793, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 0x100);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":8794
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:
 *             raise ValueError("String too long for field cause, max length is 255")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].cause), <void *>ptr, 256)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_cause};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8794, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8794, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8793
 *             raise ValueError("This LedState instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field cause, max length is 255")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":8795
 *         if len(buf) >= 256:
 *             raise ValueError("String too long for field cause, max length is 255")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].cause), <void *>ptr, 256)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 8795, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 8795, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":8796
 *             raise ValueError("String too long for field cause, max length is 255")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].cause), <void *>ptr, 256)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* FIX: copy exactly len(buf) bytes (__pyx_t_4, validated < 0x100 above)
   * instead of a flat 0x100, then zero-fill the tail of the field.  The
   * original memcpy read past the end of the bytes object's buffer. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).cause), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4)));
  (void)(memset(((void *)((__pyx_v_self->_ptr[0]).cause + __pyx_t_4)), 0, (0x100 - ((size_t)__pyx_t_4))));

  /* "cuda/bindings/_nvml.pyx":8788
 *         return cpython.PyUnicode_FromString(self._ptr[0].cause)
 * 
 *     @cause.setter             # <<<<<<<<<<<<<<
 *     def cause(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.cause.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8798
 *         memcpy(<void *>(self._ptr[0].cause), <void *>ptr, 256)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def color(self):
 *         """int: """
*/

/* Python wrapper for the `color` property getter: casts self and
 * forwards to the implementation; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5color_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5color_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5color___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `color` property getter.
 *
 * Reads self._ptr[0].color (an nvmlLedColor_t enum value), narrows it to
 * int, and boxes it as a Python int.  Returns a new reference, or NULL
 * with an exception set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_5color___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8801
 *     def color(self):
 *         """int: """
 *         return <int>(self._ptr[0].color)             # <<<<<<<<<<<<<<
 * 
 *     @color.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).color)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8798
 *         memcpy(<void *>(self._ptr[0].cause), <void *>ptr, 256)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def color(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.color.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8803
 *         return <int>(self._ptr[0].color)
 * 
 *     @color.setter             # <<<<<<<<<<<<<<
 *     def color(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `color` property setter: casts self and
 * forwards to the implementation; returns 0 or -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5color_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5color_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5color_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `color` property setter.
 *
 * Raises ValueError when self._readonly is set; otherwise converts val
 * to a C int (__Pyx_PyLong_As_int raises TypeError/OverflowError for
 * non-ints or out-of-range values) and stores it into self._ptr[0].color
 * as an nvmlLedColor_t.  Returns 0 on success, -1 with an exception set
 * on failure.
 *
 * NOTE(review): the int value is not validated against the enum's legal
 * range; any in-range int is accepted -- confirm this matches the .pyx
 * contract. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8LedState_5color_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8805
 *     @color.setter
 *     def color(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This LedState instance is read-only")
 *         self._ptr[0].color = <nvmlLedColor_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8806
 *     def color(self, val):
 *         if self._readonly:
 *             raise ValueError("This LedState instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].color = <nvmlLedColor_t><int>val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_LedState_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8806, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8806, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8805
 *     @color.setter
 *     def color(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This LedState instance is read-only")
 *         self._ptr[0].color = <nvmlLedColor_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":8807
 *         if self._readonly:
 *             raise ValueError("This LedState instance is read-only")
 *         self._ptr[0].color = <nvmlLedColor_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8807, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).color = ((nvmlLedColor_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":8803
 *         return <int>(self._ptr[0].color)
 * 
 *     @color.setter             # <<<<<<<<<<<<<<
 *     def color(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.color.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8809
 *         self._ptr[0].color = <nvmlLedColor_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an LedState instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method LedState.from_data(data).
 *
 * METH_FASTCALL-style argument unpacking: accepts exactly one argument
 * (`data`), positionally or by keyword, then forwards to the impl.
 * On any mismatch it raises TypeError via __Pyx_RaiseArgtupleInvalid /
 * __Pyx_ParseKeywords and returns NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8LedState_12from_data, "LedState.from_data(data)\n\nCreate an LedState instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `led_state_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8LedState_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(x) < 0` has the paren in an odd place but is
     * value-equivalent to `unlikely(x < 0)` since unlikely() preserves x. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 8809, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8809, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8809, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8809, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8809, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8809, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of LedState.from_data(data).
 *
 * Delegates to the module-level helper __from_data(data,
 * "led_state_dtype", led_state_dtype, LedState): the dtype object is
 * fetched as a module global, and the LedState type object is passed so
 * the helper can construct the wrapping instance.  Returns the new
 * instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8816
 *             data (_numpy.ndarray): a single-element array of dtype `led_state_dtype` holding the data.
 *         """
 *         return __from_data(data, "led_state_dtype", led_state_dtype, LedState)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_led_state_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_led_state_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8809
 *         self._ptr[0].color = <nvmlLedColor_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an LedState instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8818
 *         return __from_data(data, "led_state_dtype", led_state_dtype, LedState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an LedState instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8LedState_14from_ptr, "LedState.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an LedState instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8LedState_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_14from_ptr};
/* Argument-unpacking wrapper for LedState.from_ptr(ptr, readonly=False, owner=None).
 * Parses positional and keyword arguments from either the METH_FASTCALL or the
 * classic tuple/dict calling convention, converts them to C types (intptr_t,
 * int, PyObject*), and forwards to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must sit inside unlikely(). With the usual
     * definition unlikely(x) == __builtin_expect(!!(x), 0), the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value against 0 and could
     * never take the error branch, silently discarding a failure reported by
     * __Pyx_NumKwargs_FASTCALL (negative return). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8818, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: capture positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8818, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8818, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8818, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8818, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":8819
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an LedState instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default, then verify the one required argument
       * (`ptr`, index 0) was supplied either positionally or by keyword. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8818, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8818, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8818, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8818, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed Python objects to the C parameter types. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8819, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8819, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8818, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: drop the references captured in values[] before propagating. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":8818
 *         return __from_data(data, "led_state_dtype", led_state_dtype, LedState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an LedState instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of LedState.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer, then either:
 *   - owner is None: malloc a private nvmlLedState_t, memcpy the pointee into
 *     it, and mark the object as owning that storage (_owned = True), or
 *   - owner given: alias the caller's memory directly (_ptr = ptr) and hold a
 *     reference to `owner` to keep the backing memory alive (_owned = False).
 * Finally records the readonly flag and returns the new LedState instance.
 * NOTE(review): in the aliasing branch the caller is responsible for ensuring
 * `ptr` points to a live nvmlLedState_t for the lifetime of `owner`. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8827
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef LedState obj = LedState.__new__(LedState)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8828
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef LedState obj = LedState.__new__(LedState)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8828, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8828, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8827
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef LedState obj = LedState.__new__(LedState)
*/
  }

  /* "cuda/bindings/_nvml.pyx":8829
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef LedState obj = LedState.__new__(LedState)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
*/
  /* Allocate the LedState instance directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_LedState(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8829, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":8830
 *             raise ValueError("ptr must not be null (0)")
 *         cdef LedState obj = LedState.__new__(LedState)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8831
 *         cdef LedState obj = LedState.__new__(LedState)
 *         if owner is None:
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating LedState")
*/
    /* Copying branch: the object gets its own heap copy of the struct. */
    __pyx_v_obj->_ptr = ((nvmlLedState_t *)malloc((sizeof(nvmlLedState_t))));

    /* "cuda/bindings/_nvml.pyx":8832
 *         if owner is None:
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlLedState_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8833
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating LedState")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlLedState_t))
 *             obj._owner = None
*/
      /* malloc failed: raise MemoryError("Error allocating LedState").
       * MemoryError is looked up as a module global (shadowable), not the
       * builtin directly — that is how Cython compiled the name here. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8833, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_LedState};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8833, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 8833, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8832
 *         if owner is None:
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlLedState_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":8834
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlLedState_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Snapshot the caller's struct into the freshly owned buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlLedState_t))));

    /* "cuda/bindings/_nvml.pyx":8835
 *                 raise MemoryError("Error allocating LedState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlLedState_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8836
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlLedState_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlLedState_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8830
 *             raise ValueError("ptr must not be null (0)")
 *         cdef LedState obj = LedState.__new__(LedState)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlLedState_t *>malloc(sizeof(nvmlLedState_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":8838
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlLedState_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Aliasing branch: share the caller's memory and pin `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlLedState_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8839
 *         else:
 *             obj._ptr = <nvmlLedState_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":8840
 *             obj._ptr = <nvmlLedState_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":8841
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":8842
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8818
 *         return __from_data(data, "led_state_dtype", led_state_dtype, LedState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an LedState instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for LedState.__reduce_cython__(): rejects any positional or keyword
 * arguments and forwards to the __pyx_pf_ implementation, which always raises
 * TypeError (this type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8LedState_16__reduce_cython__, "LedState.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8LedState_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of LedState.__reduce_cython__: unconditionally raises
 * TypeError because the wrapped C pointer (_ptr) cannot be pickled. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8LedState_18__setstate_cython__, "LedState.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8LedState_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_18__setstate_cython__};
/* Argument-unpacking wrapper for LedState.__setstate_cython__(self, __pyx_state):
 * parses the single required positional/keyword argument and forwards to the
 * __pyx_pf_ implementation, which always raises TypeError (not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8LedState_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must sit inside unlikely(). With the usual
     * definition unlikely(x) == __builtin_expect(!!(x), 0), the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value against 0 and could
     * never take the error branch, silently discarding a failure reported by
     * __Pyx_NumKwargs_FASTCALL (negative return). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: drop the references captured in values[] before propagating. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8LedState_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of LedState.__setstate_cython__: unconditionally raises
 * TypeError because the wrapped C pointer (_ptr) cannot be unpickled. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8LedState_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.LedState.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8845
 * 
 * 
 * cdef _get_unit_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitInfo_t pod = nvmlUnitInfo_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_unit_info_dtype_offsets(void) {
  nvmlUnitInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlUnitInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_unit_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":8846
 * 
 * cdef _get_unit_info_dtype_offsets():
 *     cdef nvmlUnitInfo_t pod = nvmlUnitInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['name', 'id', 'serial', 'firmware_version'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":8847
 * cdef _get_unit_info_dtype_offsets():
 *     cdef nvmlUnitInfo_t pod = nvmlUnitInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['name', 'id', 'serial', 'firmware_version'],
 *         'formats': [_numpy.int8, _numpy.int8, _numpy.int8, _numpy.int8],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":8848
 *     cdef nvmlUnitInfo_t pod = nvmlUnitInfo_t()
 *     return _numpy.dtype({
 *         'names': ['name', 'id', 'serial', 'firmware_version'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int8, _numpy.int8, _numpy.int8, _numpy.int8],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8848, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8848, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_name) != (0)) __PYX_ERR(0, 8848, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 8848, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_serial);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_serial);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_serial) != (0)) __PYX_ERR(0, 8848, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_firmware_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_firmware_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_firmware_version) != (0)) __PYX_ERR(0, 8848, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 8848, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8849
 *     return _numpy.dtype({
 *         'names': ['name', 'id', 'serial', 'firmware_version'],
 *         'formats': [_numpy.int8, _numpy.int8, _numpy.int8, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 8849, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 8849, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 8849, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 8849, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 8848, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":8851
 *         'formats': [_numpy.int8, _numpy.int8, _numpy.int8, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.serial)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.name)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 8851, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":8852
 *         'offsets': [
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.serial)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8852, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":8853
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.serial)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.serial)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 8853, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":8854
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.serial)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlUnitInfo_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.firmwareVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 8854, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":8850
 *         'names': ['name', 'id', 'serial', 'firmware_version'],
 *         'formats': [_numpy.int8, _numpy.int8, _numpy.int8, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8850, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 8850, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 8850, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 8850, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 8850, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 8848, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":8856
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlUnitInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlUnitInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 8856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 8848, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8847, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8845
 * 
 * 
 * cdef _get_unit_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitInfo_t pod = nvmlUnitInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_unit_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8873
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlUnitInfo_t *>calloc(1, sizeof(nvmlUnitInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Argument-checking wrapper for UnitInfo.__init__: verifies that no
 * positional or keyword arguments were passed, then dispatches to the
 * implementation function.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments; the unchecked PyTuple_GET_SIZE is used only
   * when the build asserts tuple sizes are trustworthy. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) takes no extra arguments: reject any positionals ... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ... and reject any keyword arguments as well. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitInfo.__init__: zero-allocates an owned
 * nvmlUnitInfo_t buffer via calloc, raising MemoryError on failure, and
 * initialises the ownership bookkeeping fields (_owner=None, _owned=True,
 * _readonly=False).  Returns 0 on success, -1 after raising. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":8874
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlUnitInfo_t *>calloc(1, sizeof(nvmlUnitInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating UnitInfo")
 */
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlUnitInfo_t *)calloc(1, (sizeof(nvmlUnitInfo_t))));

  /* "cuda/bindings/_nvml.pyx":8875
 *     def __init__(self):
 *         self._ptr = <nvmlUnitInfo_t *>calloc(1, sizeof(nvmlUnitInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating UnitInfo")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":8876
 *         self._ptr = <nvmlUnitInfo_t *>calloc(1, sizeof(nvmlUnitInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating UnitInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError and call it via the fastcall protocol; the
     * PyMethod_Check branch unpacks a bound method into (self, function)
     * so the call can go through the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8876, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_UnitInfo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8876, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 8876, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8875
 *     def __init__(self):
 *         self._ptr = <nvmlUnitInfo_t *>calloc(1, sizeof(nvmlUnitInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating UnitInfo")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":8877
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating UnitInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (incref new value
   * before decref of old, so the slot is never left dangling). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":8878
 *             raise MemoryError("Error allocating UnitInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This instance owns _ptr and will free it in __dealloc__. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":8879
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":8873
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlUnitInfo_t *>calloc(1, sizeof(nvmlUnitInfo_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8881
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlUnitInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for UnitInfo.__dealloc__: casts to the concrete
 * struct type and forwards to the implementation.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
 * this compiles only if __Pyx_KwValues_VARARGS expands without evaluating
 * its arguments in this configuration — confirm against the macro
 * definition earlier in the file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  nvmlUnitInfo_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlUnitInfo_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":8883
 *     def __dealloc__(self):
 *         cdef nvmlUnitInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8884
 *         cdef nvmlUnitInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":8885
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":8886
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":8883
 *     def __dealloc__(self):
 *         cdef nvmlUnitInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":8881
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlUnitInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":8888
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.UnitInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for UnitInfo.__repr__: no arguments to validate, just
 * casts self and forwards to the implementation.  Returns a new unicode
 * reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitInfo.__repr__: builds the f-string
 * f"<{__name__}.UnitInfo object at {hex(id(self))}>" by formatting the two
 * interpolated values and joining them with the literal fragments.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":8889
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.UnitInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* First interpolation: str(__name__), formatted with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Second interpolation: str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal fragments and the two formatted values; the length hint
   * pre-sizes the result buffer and the max-char mask picks its kind. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_UnitInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 20 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8888
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.UnitInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8891
 *         return f"<{__name__}.UnitInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for UnitInfo.ptr: casts self and forwards to the
 * implementation.  Returns a new int reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.ptr property: returns the address of the
 * wrapped nvmlUnitInfo_t as a Python int.  Returns a new reference, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8894
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t and Py_ssize_t are interchangeable widths here, so the
   * address is boxed via PyLong_FromSsize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8891
 *         return f"<{__name__}.UnitInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8896
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef-level fast accessor for UnitInfo (pyx lines 8896-8897): returns the
 * raw address of the wrapped nvmlUnitInfo_t as an intptr_t, with no Python
 * boxing and no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_8UnitInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":8899
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for UnitInfo.__int__: casts self and forwards to the
 * implementation.  Returns a new int reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitInfo.__int__: int(obj) yields the address of the
 * wrapped nvmlUnitInfo_t, mirroring the `ptr` property.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":8900
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8899
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8902
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef UnitInfo other_
 *         if not isinstance(other, UnitInfo):
*/

/* Python wrapper */
/* tp_richcompare (Py_EQ) wrapper for UnitInfo.__eq__: casts self and
 * forwards both operands to the implementation.  Returns a new bool
 * reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitInfo.__eq__: False for non-UnitInfo operands,
 * otherwise byte-wise memcmp of the two underlying nvmlUnitInfo_t structs.
 * Returns a new bool reference, or NULL with an exception set.
 * NOTE(review): if either operand's _ptr is NULL the memcmp dereferences
 * it — the pyx source assumes both instances are initialised. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":8904
 *     def __eq__(self, other):
 *         cdef UnitInfo other_
 *         if not isinstance(other, UnitInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":8905
 *         cdef UnitInfo other_
 *         if not isinstance(other, UnitInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitInfo_t)) == 0)
 */
    /* Different type: not equal (returns False rather than NotImplemented). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":8904
 *     def __eq__(self, other):
 *         cdef UnitInfo other_
 *         if not isinstance(other, UnitInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":8906
 *         if not isinstance(other, UnitInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitInfo_t)) == 0)
 * 
 */
  /* Downcast with a runtime type check (None would also pass the test,
   * but the isinstance guard above has already excluded it). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo))))) __PYX_ERR(0, 8906, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":8907
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Structural equality: compare the full struct contents byte by byte. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlUnitInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8907, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8902
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef UnitInfo other_
 *         if not isinstance(other, UnitInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8909
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for UnitInfo.__setitem__: casts self and
 * forwards key/value to the implementation.  Returns 0 on success, -1 on
 * error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":8910
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if self._ptr == NULL:
*/
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 8910, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 8910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 8910, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":8911
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitInfo")
*/
    __pyx_v_self->_ptr = ((nvmlUnitInfo_t *)malloc((sizeof(nvmlUnitInfo_t))));

    /* "cuda/bindings/_nvml.pyx":8912
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitInfo_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":8913
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitInfo_t))
 *             self._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8913, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_UnitInfo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8913, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 8913, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":8912
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitInfo_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":8914
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8914, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8914, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8914, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlUnitInfo_t))));

    /* "cuda/bindings/_nvml.pyx":8915
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":8916
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":8917
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8917, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 8917, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 8917, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":8910
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":8919
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 8919, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":8909
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8921
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.name.__get__.  Casts the generic
 * PyObject* self to the UnitInfo extension-type struct and delegates to the
 * implementation function; returns a new reference or NULL on error.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function --
 * presumably __Pyx_KwValues_VARARGS expands to an expression that ignores
 * them in this build configuration; confirm against the macro definition
 * earlier in this generated file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_4name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_4name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.name property getter (pyx line 8924):
 * builds a Python str from the char buffer self._ptr[0].name with
 * PyUnicode_FromString and returns a new reference, or NULL on failure
 * (with a traceback frame added).  Assumes the field holds a
 * NUL-terminated string, as PyUnicode_FromString requires -- the setter
 * below always leaves a NUL within the first 96 bytes. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8924
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)             # <<<<<<<<<<<<<<
 * 
 *     @name.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Decode the C char array into a new Python str (raises on invalid UTF-8). */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8924, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8921
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8926
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.name.__set__.  Casts self to the
 * UnitInfo extension-type struct and delegates to the implementation;
 * returns 0 on success, -1 on error (CPython setter convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.name property setter (pyx lines 8926-8934):
 *   1. raises ValueError if self._readonly is set;
 *   2. calls val.encode() and requires the result to be exactly bytes
 *      (or None, which then fails the len() check below);
 *   3. raises ValueError if the encoded length is >= 96;
 *   4. memcpy's 96 bytes from the bytes buffer into self._ptr[0].name.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): step 4 copies a fixed 96 bytes even when len(buf) < 95, so
 * it can read past the end of the bytes object's internal buffer; any fix
 * belongs in the generating .pyx, not in this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8928
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  /* Guard: instances wrapping read-only ndarray memory reject mutation. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8929
 *     def name(self, val):
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_UnitInfo_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8929, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8929, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8928
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":8930
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 */
  /* buf = val.encode() -- vectorcall of the bound "encode" method, then a
   * strict exact-bytes type check (bytes subclasses are rejected). */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8930, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 8930, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8931
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  /* Length check: reserve one byte for the NUL terminator in the 96-byte field. */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 8931, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8931, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":8932
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_name_m};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8932, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8932, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8931
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":8933
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 8933, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 8933, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":8934
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): fixed 96-byte copy -- see function header comment. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).name), ((void *)__pyx_v_ptr), 96));

  /* "cuda/bindings/_nvml.pyx":8926
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8936
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.id.__get__.  Casts self to the
 * UnitInfo extension-type struct and delegates to the implementation;
 * returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.id property getter (pyx line 8939):
 * builds a Python str from the char buffer self._ptr[0].id with
 * PyUnicode_FromString and returns a new reference, or NULL on failure.
 * Assumes the field holds a NUL-terminated string (guaranteed by the
 * setter's < 96 length check plus 96-byte field). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8939
 *     def id(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].id)             # <<<<<<<<<<<<<<
 * 
 *     @id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Decode the C char array into a new Python str (raises on invalid UTF-8). */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8936
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8941
 *         return cpython.PyUnicode_FromString(self._ptr[0].id)
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.id.__set__.  Casts self to the
 * UnitInfo extension-type struct and delegates to the implementation;
 * returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.id property setter (pyx lines 8941-8949).
 * Mirrors the name setter: reject read-only instances, encode val to exact
 * bytes, require encoded length < 96, then memcpy 96 bytes into
 * self._ptr[0].id.  Returns 0 on success, -1 with an exception set.
 * NOTE(review): the fixed 96-byte memcpy can read past the end of the bytes
 * buffer when len(buf) < 95; any fix belongs in the generating .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8943
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  /* Guard: instances wrapping read-only ndarray memory reject mutation. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8944
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_UnitInfo_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8944, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8944, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8943
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":8945
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field id, max length is 95")
 */
  /* buf = val.encode(), then strict exact-bytes type check. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8945, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 8945, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8946
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field id, max length is 95")
 *         cdef char *ptr = buf
 */
  /* Length check: reserve one byte for the NUL terminator in the 96-byte field. */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 8946, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8946, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":8947
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field id, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].id), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_id_max};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8947, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8947, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8946
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field id, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":8948
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field id, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].id), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 8948, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 8948, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":8949
 *             raise ValueError("String too long for field id, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].id), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): fixed 96-byte copy -- see function header comment. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).id), ((void *)__pyx_v_ptr), 96));

  /* "cuda/bindings/_nvml.pyx":8941
 *         return cpython.PyUnicode_FromString(self._ptr[0].id)
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8951
 *         memcpy(<void *>(self._ptr[0].id), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def serial(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.serial.__get__.  Casts self to the
 * UnitInfo extension-type struct and delegates to the implementation;
 * returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_6serial_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_6serial_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6serial___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.serial property getter (pyx line 8954):
 * builds a Python str from the char buffer self._ptr[0].serial with
 * PyUnicode_FromString and returns a new reference, or NULL on failure.
 * Assumes the field holds a NUL-terminated string (guaranteed by the
 * setter's < 96 length check plus 96-byte field). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6serial___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8954
 *     def serial(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].serial)             # <<<<<<<<<<<<<<
 * 
 *     @serial.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Decode the C char array into a new Python str (raises on invalid UTF-8). */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).serial); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8954, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8951
 *         memcpy(<void *>(self._ptr[0].id), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def serial(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.serial.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8956
 *         return cpython.PyUnicode_FromString(self._ptr[0].serial)
 * 
 *     @serial.setter             # <<<<<<<<<<<<<<
 *     def serial(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.serial.__set__.  Casts self to the
 * UnitInfo extension-type struct and delegates to the implementation;
 * returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_6serial_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_6serial_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6serial_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.serial property setter (pyx lines 8956-8964).
 * Mirrors the name/id setters: reject read-only instances, encode val to
 * exact bytes, require encoded length < 96, then memcpy 96 bytes into
 * self._ptr[0].serial.  Returns 0 on success, -1 with an exception set.
 * NOTE(review): the fixed 96-byte memcpy can read past the end of the bytes
 * buffer when len(buf) < 95; any fix belongs in the generating .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_6serial_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8958
 *     @serial.setter
 *     def serial(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  /* Guard: instances wrapping read-only ndarray memory reject mutation. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8959
 *     def serial(self, val):
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_UnitInfo_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8959, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8959, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8958
 *     @serial.setter
 *     def serial(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":8960
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field serial, max length is 95")
 */
  /* buf = val.encode(), then strict exact-bytes type check. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8960, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 8960, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8961
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field serial, max length is 95")
 *         cdef char *ptr = buf
 */
  /* Length check: reserve one byte for the NUL terminator in the 96-byte field. */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 8961, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8961, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":8962
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field serial, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].serial), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_serial};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8962, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8962, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8961
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field serial, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":8963
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field serial, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].serial), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 8963, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 8963, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":8964
 *             raise ValueError("String too long for field serial, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].serial), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): fixed 96-byte copy -- see function header comment. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).serial), ((void *)__pyx_v_ptr), 96));

  /* "cuda/bindings/_nvml.pyx":8956
 *         return cpython.PyUnicode_FromString(self._ptr[0].serial)
 * 
 *     @serial.setter             # <<<<<<<<<<<<<<
 *     def serial(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.serial.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8966
 *         memcpy(<void *>(self._ptr[0].serial), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_version(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* Descriptor-slot wrapper for UnitInfo.firmware_version.__get__.  Casts self
 * to the UnitInfo extension-type struct and delegates to the implementation;
 * returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.firmware_version getter.
 *
 * Decodes the NUL-terminated char array `self._ptr[0].firmwareVersion`
 * (fixed-size field of 96 bytes, per the setter below) into a new Python
 * str via PyUnicode_FromString. Returns a new reference or NULL on error.
 *
 * NOTE(review): the .pyx docstring says "~_numpy.int8: (array of length 96)"
 * but this accessor actually returns a Python str — presumably the docstring
 * describes the underlying dtype field; confirm against the .pyx template. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":8969
 *     def firmware_version(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].firmwareVersion)             # <<<<<<<<<<<<<<
 * 
 *     @firmware_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Decode assumes the field is NUL-terminated UTF-8; the setter enforces
   * len < 96 so a terminator always fits. */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).firmwareVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8969, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8966
 *         memcpy(<void *>(self._ptr[0].serial), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_version(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.firmware_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8971
 *         return cpython.PyUnicode_FromString(self._ptr[0].firmwareVersion)
 * 
 *     @firmware_version.setter             # <<<<<<<<<<<<<<
 *     def firmware_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for UnitInfo.firmware_version.
 *
 * Adapts the CPython descriptor __set__ slot signature (returns 0 on
 * success, -1 on error) and forwards self/val to the Cython-level
 * implementation after downcasting self to the UnitInfo struct. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the UnitInfo.firmware_version setter.
 *
 * Contract (from the .pyx source interleaved below):
 *   - raises ValueError if the instance is read-only;
 *   - encodes `val` (str) to bytes with .encode();
 *   - raises ValueError if the encoded length is >= 96 (field is char[96]);
 *   - copies the string into the fixed-size firmwareVersion field.
 * Returns 0 on success, -1 with an exception set on error.
 *
 * FIX(review): the generated code performed memcpy(dst, buf, 96) even though
 * len(buf) is validated to be < 96, reading up to 95 bytes past the end of
 * the bytes object's storage (heap over-read, flagged by AddressSanitizer).
 * Replaced with a bounded copy of len+1 bytes (string plus NUL terminator)
 * and zero-fill of the remainder of the field. Readers go through
 * PyUnicode_FromString, which stops at the NUL, so observable behavior is
 * unchanged (trailing bytes were previously indeterminate garbage). */
static int __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":8973
 *     @firmware_version.setter
 *     def firmware_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":8974
 *     def firmware_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_UnitInfo_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8974, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8974, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8973
 *     @firmware_version.setter
 *     def firmware_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":8975
 *         if self._readonly:
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field firmware_version, max length is 95")
 */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8975, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Result of .encode() must be exactly bytes (or None, rejected below). */
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 8975, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":8976
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field firmware_version, max length is 95")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 8976, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 8976, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":8977
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field firmware_version, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].firmwareVersion), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_firmwa};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8977, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 8977, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8976
 *             raise ValueError("This UnitInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field firmware_version, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":8978
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field firmware_version, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].firmwareVersion), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 8978, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 8978, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":8979
 *             raise ValueError("String too long for field firmware_version, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].firmwareVersion), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  {
    /* Bounded copy instead of the generated fixed-size memcpy(..., 96):
     * __pyx_t_4 still holds len(buf) (validated < 96 above and not
     * reassigned since). Copy the string plus its NUL terminator, then
     * zero-fill the rest of the char[96] field so its contents are fully
     * determined. Avoids reading past the end of the bytes object. */
    size_t __pyx_copy_len = ((size_t)__pyx_t_4) + 1;
    (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).firmwareVersion), ((void *)__pyx_v_ptr), __pyx_copy_len));
    (void)(memset(((char *)(__pyx_v_self->_ptr[0]).firmwareVersion) + __pyx_copy_len, 0, 96 - __pyx_copy_len));
  }

  /* "cuda/bindings/_nvml.pyx":8971
 *         return cpython.PyUnicode_FromString(self._ptr[0].firmwareVersion)
 * 
 *     @firmware_version.setter             # <<<<<<<<<<<<<<
 *     def firmware_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.firmware_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8981
 *         memcpy(<void *>(self._ptr[0].firmwareVersion), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method UnitInfo.from_data(data).
 *
 * Accepts exactly one argument, positionally or as the keyword `data`, then
 * forwards to the generated implementation. Returns a new reference or NULL
 * with an exception set. Owns (and releases) the references it stores in
 * `values` on every exit path.
 *
 * FIX(review): the generated check `if (unlikely(__pyx_kwds_len) < 0)` is
 * mis-parenthesized — with the GCC/Clang definition of unlikely(x) as
 * __builtin_expect(!!(x), 0), the expression collapses to `(0 or 1) < 0`,
 * which is always false, so a negative (error) kwargs length was never
 * detected. Corrected to `if (unlikely(__pyx_kwds_len < 0))`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_12from_data, "UnitInfo.from_data(data)\n\nCreate an UnitInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `unit_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() — see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8981, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8981, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 8981, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 8981, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8981, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 8981, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitInfo.from_data(data).
 *
 * Looks up the module-level `unit_info_dtype` global and delegates to the
 * shared helper __from_data(data, "unit_info_dtype", unit_info_dtype,
 * UnitInfo), which (per the .pyx docstring) wraps a single-element ndarray
 * of that dtype in a new UnitInfo. Returns a new reference or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":8988
 *             data (_numpy.ndarray): a single-element array of dtype `unit_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "unit_info_dtype", unit_info_dtype, UnitInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module globals; raises NameError-style
   * lookup failure if the module was not fully initialized. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_unit_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8988, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_unit_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8988, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8981
 *         memcpy(<void *>(self._ptr[0].firmwareVersion), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":8990
 *         return __from_data(data, "unit_info_dtype", unit_info_dtype, UnitInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * UnitInfo.from_ptr(intptr_t ptr, bint readonly=False, object owner=None).
 *
 * Parses 1-3 positional or keyword arguments, converts `ptr` to intptr_t
 * and `readonly` to a C truth value, fills in defaults (readonly=False,
 * owner=None), then forwards to the generated implementation. Returns a new
 * reference or NULL with an exception set.
 *
 * FIX(review): the generated check `if (unlikely(__pyx_kwds_len) < 0)` is
 * mis-parenthesized — with the GCC/Clang definition of unlikely(x) as
 * __builtin_expect(!!(x), 0), the expression collapses to `(0 or 1) < 0`,
 * which is always false, so a negative (error) kwargs length was never
 * detected. Corrected to `if (unlikely(__pyx_kwds_len < 0))`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_14from_ptr, "UnitInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an UnitInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() — see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 8990, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 8990, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":8991
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an UnitInfo instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 8990, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 8990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 8990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 8990, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — presumably
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 8991, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 8991, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 8990, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":8990
 *         return __from_data(data, "unit_info_dtype", unit_info_dtype, UnitInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitInfo.from_ptr(ptr, readonly=False, owner=None).
 *
 * Semantics (from the .pyx source interleaved below):
 *   - ptr == 0 raises ValueError;
 *   - owner is None: allocates a fresh nvmlUnitInfo_t with malloc, copies
 *     sizeof(nvmlUnitInfo_t) bytes from `ptr` into it, and marks the new
 *     object as owning that allocation (_owned = True) — i.e. the data is
 *     copied, and presumably freed in the type's dealloc (not visible here);
 *   - owner is not None: aliases `ptr` directly, keeps a reference to
 *     `owner` to pin the memory's lifetime, _owned = False;
 *   - _readonly is set from the `readonly` argument either way.
 * Returns a new UnitInfo reference or NULL with an exception set.
 *
 * NOTE(review): left byte-identical; the INCREF/GIVEREF/GOTREF ordering and
 * goto-based error protocol are Cython-generated and order-sensitive. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":8999
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9000
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9000, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9000, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":8999
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":9001
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 */
  /* tp_new is called directly (bypasses any __init__ side effects). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9001, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9002
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9003
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitInfo")
 */
    /* Copy path: no owner given, so take a private malloc'd copy of the
     * struct; _owned = True below records that we must free it later. */
    __pyx_v_obj->_ptr = ((nvmlUnitInfo_t *)malloc((sizeof(nvmlUnitInfo_t))));

    /* "cuda/bindings/_nvml.pyx":9004
 *         if owner is None:
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9005
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitInfo_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved through module globals (so builtins
       * shadowing works), hence the PyMethod unpack dance below. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9005, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_UnitInfo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9005, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 9005, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9004
 *         if owner is None:
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":9006
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Caller promises `ptr` addresses a readable nvmlUnitInfo_t; nothing
     * here can validate that. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlUnitInfo_t))));

    /* "cuda/bindings/_nvml.pyx":9007
 *                 raise MemoryError("Error allocating UnitInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9008
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlUnitInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9002
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitInfo obj = UnitInfo.__new__(UnitInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlUnitInfo_t *>malloc(sizeof(nvmlUnitInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":9010
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlUnitInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Aliasing path: point directly at caller memory and hold a reference
     * to `owner` so the memory outlives this object. */
    __pyx_v_obj->_ptr = ((nvmlUnitInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9011
 *         else:
 *             obj._ptr = <nvmlUnitInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":9012
 *             obj._ptr = <nvmlUnitInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":9013
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":9014
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":8990
 *         return __from_data(data, "unit_info_dtype", unit_info_dtype, UnitInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * CPython entry point for UnitInfo.__reduce_cython__().
 * Verifies that no positional or keyword arguments were passed, then
 * delegates to the C-level implementation
 * (__pyx_pf_..._16__reduce_cython__), which always raises TypeError —
 * pickling of this pointer-wrapping extension type is unsupported.
 * Registered via the PyMethodDef below using the fastcall-with-keywords
 * calling convention.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_16__reduce_cython__, "UnitInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the args-tuple calling convention, recover the positional
   * count from the tuple (PyTuple_Size can return -1 on error). */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments beyond self: reject any
   * positional arguments, then any keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * C-level implementation of UnitInfo.__reduce_cython__.
 * Unconditionally raises TypeError ("self._ptr cannot be converted to a
 * Python object for pickling") and returns NULL through the error path:
 * the wrapped raw pointer makes pickling meaningless for this type.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message string; control then
   * jumps to __pyx_L1_error, so the code below is the only exit. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * CPython entry point for UnitInfo.__setstate_cython__(self, __pyx_state).
 * Parses exactly one positional-or-keyword argument ("__pyx_state"), then
 * delegates to the C-level implementation, which always raises TypeError
 * (pickling of this pointer-wrapping type is unsupported).
 *
 * BUG FIX: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * unlikely(x) normalizes its operand to 0/1 (via __builtin_expect(!!(x),0)
 * on GCC/Clang builds), so the comparison was always false and a negative
 * (error) return from __Pyx_NumKwargs_FASTCALL could never be detected.
 * The parenthesization is corrected below to match the sibling wrappers
 * (e.g. __reduce_cython__ and PSUInfo.__init__), which already use
 * unlikely(__pyx_kwds_len < 0).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_18__setstate_cython__, "UnitInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the args-tuple calling convention, recover the positional
   * count from the tuple (PyTuple_Size can return -1 on error). */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIXED: parentheses moved so the negative-length check can fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect whatever was positional, then let
       * __Pyx_ParseKeywords fill the remaining slot(s) by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot got a value. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * C-level implementation of UnitInfo.__setstate_cython__.
 * Ignores the supplied state and unconditionally raises TypeError
 * ("self._ptr cannot be converted to a Python object for pickling"):
 * unpickling is unsupported for this pointer-wrapping type, mirroring
 * __reduce_cython__ above.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8UnitInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError and jump to the error exit; this is the only path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9017
 * 
 * 
 * cdef _get_psu_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPSUInfo_t pod = nvmlPSUInfo_t()
 *     return _numpy.dtype({
*/

/*
 * Build and return the NumPy structured dtype that mirrors the C layout
 * of nvmlPSUInfo_t: field names ['state','current','voltage','power'],
 * formats [int8, uint32, uint32, uint32], byte offsets computed from the
 * addresses of a stack instance's members, and itemsize
 * sizeof(nvmlPSUInfo_t).  Returns a new reference to the numpy.dtype, or
 * NULL with a Python exception set on failure.
 *
 * BUG FIX: __pyx_t_1 was copied into pod without ever being assigned
 * (the "__pyx_t_1 = nvmlPSUInfo_t();" statement Cython emits for
 * "cdef nvmlPSUInfo_t pod = nvmlPSUInfo_t()" was missing).  Although
 * only the *addresses* of pod's members are used below, copying an
 * indeterminate object is undefined behavior; the temporary is now
 * value-initialized before the copy.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_psu_info_dtype_offsets(void) {
  nvmlPSUInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlPSUInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_psu_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":9018
 * 
 * cdef _get_psu_info_dtype_offsets():
 *     cdef nvmlPSUInfo_t pod = nvmlPSUInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['state', 'current', 'voltage', 'power'],
*/
  /* FIXED: value-initialize the temporary before copying (was an
   * uninitialized read of __pyx_t_1). */
  __pyx_t_1 = nvmlPSUInfo_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":9019
 * cdef _get_psu_info_dtype_offsets():
 *     cdef nvmlPSUInfo_t pod = nvmlPSUInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['state', 'current', 'voltage', 'power'],
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
*/
  /* Look up numpy.dtype (module global "_numpy", attribute "dtype"). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":9020
 *     cdef nvmlPSUInfo_t pod = nvmlPSUInfo_t()
 *     return _numpy.dtype({
 *         'names': ['state', 'current', 'voltage', 'power'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  /* Build the dtype-spec dict; first the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_state);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_state);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_state) != (0)) __PYX_ERR(0, 9020, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_current);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_current);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_current) != (0)) __PYX_ERR(0, 9020, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_voltage);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_voltage);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_voltage) != (0)) __PYX_ERR(0, 9020, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_power);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_power);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_power) != (0)) __PYX_ERR(0, 9020, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 9020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9021
 *     return _numpy.dtype({
 *         'names': ['state', 'current', 'voltage', 'power'],
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
*/
  /* 'formats': one numpy scalar type per field. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 9021, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9021, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 9021, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 9021, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 9020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9023
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.current)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.voltage)) - (<intptr_t>&pod),
*/
  /* 'offsets': byte offset of each member, derived from member
   * addresses of the local pod instance. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.state)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9023, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":9024
 *         'offsets': [
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.current)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.voltage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.power)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.current)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":9025
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.current)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.voltage)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.power)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.voltage)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9025, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":9026
 *             (<intptr_t>&(pod.current)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.voltage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.power)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlPSUInfo_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.power)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9026, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":9022
 *         'names': ['state', 'current', 'voltage', 'power'],
 *         'formats': [_numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.current)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 9022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 9022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 9022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 9022, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 9020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":9028
 *             (<intptr_t>&(pod.power)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlPSUInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  /* 'itemsize': total struct size so the dtype matches the C layout
   * including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlPSUInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9028, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 9020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); the bound-method unpacking below is
   * standard Cython fastcall optimization. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9019, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9017
 * 
 * 
 * cdef _get_psu_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPSUInfo_t pod = nvmlPSUInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_psu_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9045
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPSUInfo_t *>calloc(1, sizeof(nvmlPSUInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * CPython tp_init entry point for PSUInfo.__init__.
 * Rejects any positional or keyword arguments, then delegates to the
 * C-level implementation, which allocates the underlying nvmlPSUInfo_t.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments (PyTuple_Size can return -1 on error). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: reject positionals, then keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * C-level implementation of PSUInfo.__init__.
 * Allocates one zero-filled nvmlPSUInfo_t with calloc and stores it in
 * self->_ptr; raises MemoryError if the allocation fails.  On success,
 * marks the instance as the owner of the allocation (_owner = None,
 * _owned = True, _readonly = False), so __dealloc__ will free it.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":9046
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlPSUInfo_t *>calloc(1, sizeof(nvmlPSUInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PSUInfo")
*/
  /* calloc zero-initializes the POD struct. */
  __pyx_v_self->_ptr = ((nvmlPSUInfo_t *)calloc(1, (sizeof(nvmlPSUInfo_t))));

  /* "cuda/bindings/_nvml.pyx":9047
 *     def __init__(self):
 *         self._ptr = <nvmlPSUInfo_t *>calloc(1, sizeof(nvmlPSUInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PSUInfo")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9048
 *         self._ptr = <nvmlPSUInfo_t *>calloc(1, sizeof(nvmlPSUInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PSUInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Look up MemoryError by name (module global lookup; presumably
     * falls back to builtins — standard Cython name resolution) and
     * call it with the fixed message string. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9048, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PSUInfo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9048, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9048, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9047
 *     def __init__(self):
 *         self._ptr = <nvmlPSUInfo_t *>calloc(1, sizeof(nvmlPSUInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating PSUInfo")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":9049
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating PSUInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Replace the previous _owner reference with None (incref new value
   * before decref of the old one, standard Cython assignment order). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":9050
 *             raise MemoryError("Error allocating PSUInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":9051
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":9045
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPSUInfo_t *>calloc(1, sizeof(nvmlPSUInfo_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9053
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPSUInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/*
 * tp_dealloc-level entry point for PSUInfo.__dealloc__; simply forwards
 * to the C-level implementation.  NOTE(review): __Pyx_KwValues_VARARGS
 * is referenced with __pyx_args/__pyx_nargs which are not parameters
 * here — this compiles only because the macro discards its arguments;
 * presumably an artifact of the Cython code generator.
 */
static void __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  nvmlPSUInfo_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlPSUInfo_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":9055
 *     def __dealloc__(self):
 *         cdef nvmlPSUInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9056
 *         cdef nvmlPSUInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":9057
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":9058
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9055
 *     def __dealloc__(self):
 *         cdef nvmlPSUInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":9053
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPSUInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":9060
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PSUInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/*
 * tp_repr-level entry point for PSUInfo.__repr__; forwards to the
 * C-level implementation.  NOTE(review): as in the __dealloc__ wrapper,
 * __Pyx_KwValues_VARARGS is invoked with names that are not parameters
 * of this function — it compiles only because the macro discards its
 * arguments; presumably a Cython code-generation artifact.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PSUInfo.__repr__.
 * Builds the unicode string f"<{__name__}.PSUInfo object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * five fragments (two interned literals around the two computed pieces plus
 * the trailing ">") with __Pyx_PyUnicode_Join. Returns a new reference, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":9061
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.PSUInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up the module-level __name__ and format it as str for the f-string. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9061, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9061, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* id(self) -> hex(...) -> str(...), matching the Python-level expression. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9061, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9061, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9061, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Assemble the five fragments and join; the length hint is the sum of the
   * literal lengths plus the two dynamic pieces. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_PSUInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 19 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9061, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9060
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.PSUInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9063
 *         return f"<{__name__}.PSUInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the PSUInfo.ptr property getter: casts self and
 * forwards to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the PSUInfo.ptr property getter: returns the address of
 * the wrapped nvmlPSUInfo_t struct (self._ptr) as a Python int. A NULL _ptr
 * is returned as 0 — no allocation or NULL check is performed here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9066
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9066, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9063
 *         return f"<{__name__}.PSUInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9068
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path mirroring the Python `ptr` property: returns the raw
 * address of the wrapped nvmlPSUInfo_t struct as an intptr_t (0 when the
 * instance holds no buffer). No Python objects are touched, so no refnanny
 * or error handling is needed. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_7PSUInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":9069 — return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":9071
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for PSUInfo.__int__: casts self and forwards to the
 * implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PSUInfo.__int__: identical to the `ptr` property getter —
 * returns the address of self._ptr as a Python int (0 when _ptr is NULL). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":9072
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9071
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9074
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PSUInfo other_
 *         if not isinstance(other, PSUInfo):
*/

/* Python wrapper */
/* Python-level wrapper for PSUInfo.__eq__: casts self and forwards self plus
 * the arbitrary `other` operand to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PSUInfo.__eq__: returns False when `other` is not a
 * PSUInfo instance; otherwise compares the two underlying nvmlPSUInfo_t
 * structs byte-wise with memcmp and returns the resulting bool.
 * NOTE(review): neither self._ptr nor other._ptr is checked for NULL before
 * the memcmp, so comparing an instance without an allocated buffer would
 * dereference NULL — confirm against the .pyx whether _ptr is guaranteed
 * non-NULL for live instances. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":9076
 *     def __eq__(self, other):
 *         cdef PSUInfo other_
 *         if not isinstance(other, PSUInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9077
 *         cdef PSUInfo other_
 *         if not isinstance(other, PSUInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPSUInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9076
 *     def __eq__(self, other):
 *         cdef PSUInfo other_
 *         if not isinstance(other, PSUInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":9078
 *         if not isinstance(other, PSUInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPSUInfo_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo))))) __PYX_ERR(0, 9078, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":9079
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPSUInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct equality; see NOTE above about NULL _ptr operands. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlPSUInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9079, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9074
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef PSUInfo other_
 *         if not isinstance(other, PSUInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9081
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPSUInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
*/

/* Python wrapper */
/* Python-level wrapper for PSUInfo.__setitem__: casts self and forwards
 * (self, key, val) to the implementation function. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of PSUInfo.__setitem__.
 *   obj[0] = ndarray : allocates a fresh nvmlPSUInfo_t, memcpys the array's
 *                      data (via val.ctypes.data) into it, and takes
 *                      ownership: _owner = None, _owned = True, _readonly
 *                      mirrors `not val.flags.writeable`.
 *   any other key    : falls back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure (MemoryError if
 * the malloc fails).
 * NOTE(review): in the key == 0 branch the previous self._ptr is overwritten
 * without being freed; if the instance already owned a buffer this leaks it.
 * Also, the ndarray's byte size is not validated against
 * sizeof(nvmlPSUInfo_t) before the memcpy — both would need fixing in the
 * .pyx source, not this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":9082
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if self._ptr == NULL:
 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9082, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 9082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9083
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PSUInfo")
 */
    __pyx_v_self->_ptr = ((nvmlPSUInfo_t *)malloc((sizeof(nvmlPSUInfo_t))));

    /* "cuda/bindings/_nvml.pyx":9084
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPSUInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9085
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PSUInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPSUInfo_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9085, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PSUInfo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9085, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 9085, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9084
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPSUInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":9086
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPSUInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9086, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9086, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9086, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlPSUInfo_t))));

    /* "cuda/bindings/_nvml.pyx":9087
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPSUInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9088
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPSUInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9089
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9089, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9089, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 9089, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":9082
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":9091
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 9091, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":9081
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPSUInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9093
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def state(self):
 *         """~_numpy.int8: (array of length 256)."""
*/

/* Python wrapper */
/* Python-level wrapper for the PSUInfo.state property getter: casts self and
 * forwards to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5state_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5state_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5state___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the PSUInfo.state property getter: decodes the char
 * array self._ptr[0].state into a Python str via PyUnicode_FromString.
 * NOTE(review): PyUnicode_FromString requires the field to be NUL-terminated
 * UTF-8; _ptr is also dereferenced without a NULL check — both presumably
 * guaranteed by the setters, confirm against the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9096
 *     def state(self):
 *         """~_numpy.int8: (array of length 256)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].state)             # <<<<<<<<<<<<<<
 * 
 *     @state.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9093
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def state(self):
 *         """~_numpy.int8: (array of length 256)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.state.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9098
 *         return cpython.PyUnicode_FromString(self._ptr[0].state)
 * 
 *     @state.setter             # <<<<<<<<<<<<<<
 *     def state(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the PSUInfo.state property setter: casts self and
 * forwards (self, val) to the implementation. Returns 0 on success, -1 with
 * an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5state_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9100
 *     @state.setter
 *     def state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         cdef bytes buf = val.encode()
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9101
 *     def state(self, val):
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PSUInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9101, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9101, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9100
 *     @state.setter
 *     def state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         cdef bytes buf = val.encode()
*/
  }

  /* "cuda/bindings/_nvml.pyx":9102
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 256:
 *             raise ValueError("String too long for field state, max length is 255")
*/
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9102, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 9102, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9103
 *             raise ValueError("This PSUInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field state, max length is 255")
 *         cdef char *ptr = buf
*/
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 9103, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 9103, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 0x100);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":9104
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:
 *             raise ValueError("String too long for field state, max length is 255")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].state), <void *>ptr, 256)
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_state};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9104, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9104, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9103
 *             raise ValueError("This PSUInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 256:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field state, max length is 255")
 *         cdef char *ptr = buf
*/
  }

  /* "cuda/bindings/_nvml.pyx":9105
 *         if len(buf) >= 256:
 *             raise ValueError("String too long for field state, max length is 255")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].state), <void *>ptr, 256)
 * 
*/
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 9105, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 9105, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":9106
 *             raise ValueError("String too long for field state, max length is 255")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].state), <void *>ptr, 256)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).state), ((void *)__pyx_v_ptr), 0x100));

  /* "cuda/bindings/_nvml.pyx":9098
 *         return cpython.PyUnicode_FromString(self._ptr[0].state)
 * 
 *     @state.setter             # <<<<<<<<<<<<<<
 *     def state(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.state.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9108
 *         memcpy(<void *>(self._ptr[0].state), <void *>ptr, 256)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `PSUInfo.current` property getter.
   Casts the generic PyObject* receiver to the PSUInfo extension-type struct
   and delegates to the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7current_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7current_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7current___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `PSUInfo.current` (.pyx line 9111):
   reads the `current` field of the wrapped C struct (`self._ptr[0]`) and
   boxes it as a new Python int; the field is an unsigned int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7current___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9111
 *     def current(self):
 *         """int: """
 *         return self._ptr[0].current             # <<<<<<<<<<<<<<
 * 
 *     @current.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).current); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9111, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9108
 *         memcpy(<void *>(self._ptr[0].state), <void *>ptr, 256)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.current.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9113
 *         return self._ptr[0].current
 * 
 *     @current.setter             # <<<<<<<<<<<<<<
 *     def current(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `PSUInfo.current` property setter.
   Returns 0 on success, -1 with an exception set on failure (CPython
   setter convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7current_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7current_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7current_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `PSUInfo.current` (.pyx lines 9113-9117):
   raises ValueError if the instance was created read-only, otherwise
   converts `val` to a C unsigned int and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7current_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9115
 *     @current.setter
 *     def current(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].current = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9116
 *     def current(self, val):
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].current = val
 * 
 */
    /* Instantiate ValueError via vectorcall with the interned message, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PSUInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9116, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9116, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9115
 *     @current.setter
 *     def current(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].current = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9117
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].current = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int; (unsigned int)-1 doubles as the error sentinel,
     so PyErr_Occurred() disambiguates a genuine 0xFFFFFFFF value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9117, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).current = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9113
 *         return self._ptr[0].current
 * 
 *     @current.setter             # <<<<<<<<<<<<<<
 *     def current(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.current.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9119
 *         self._ptr[0].current = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def voltage(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `PSUInfo.voltage` property getter.
   Casts the receiver to the PSUInfo struct and delegates to the
   implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7voltage___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `PSUInfo.voltage` (.pyx line 9122):
   reads the `voltage` field of the wrapped C struct and boxes it as a
   new Python int; the field is an unsigned int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7voltage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9122
 *     def voltage(self):
 *         """int: """
 *         return self._ptr[0].voltage             # <<<<<<<<<<<<<<
 * 
 *     @voltage.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).voltage); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9119
 *         self._ptr[0].current = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def voltage(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.voltage.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9124
 *         return self._ptr[0].voltage
 * 
 *     @voltage.setter             # <<<<<<<<<<<<<<
 *     def voltage(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `PSUInfo.voltage` property setter.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `PSUInfo.voltage` (.pyx lines 9124-9128):
   raises ValueError if the instance was created read-only, otherwise
   converts `val` to a C unsigned int and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9126
 *     @voltage.setter
 *     def voltage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].voltage = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9127
 *     def voltage(self, val):
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].voltage = val
 * 
 */
    /* Instantiate ValueError via vectorcall with the interned message, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PSUInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9127, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9127, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9126
 *     @voltage.setter
 *     def voltage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].voltage = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9128
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].voltage = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int; PyErr_Occurred() disambiguates the -1 sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9128, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).voltage = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9124
 *         return self._ptr[0].voltage
 * 
 *     @voltage.setter             # <<<<<<<<<<<<<<
 *     def voltage(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.voltage.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9130
 *         self._ptr[0].voltage = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def power(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `PSUInfo.power` property getter.
   Casts the receiver to the PSUInfo struct and delegates to the
   implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5power_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5power_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5power___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for `PSUInfo.power` (.pyx line 9133):
   reads the `power` field of the wrapped C struct and boxes it as a
   new Python int; the field is an unsigned int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5power___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9133
 *     def power(self):
 *         """int: """
 *         return self._ptr[0].power             # <<<<<<<<<<<<<<
 * 
 *     @power.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).power); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9130
 *         self._ptr[0].voltage = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def power(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.power.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9135
 *         return self._ptr[0].power
 * 
 *     @power.setter             # <<<<<<<<<<<<<<
 *     def power(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `PSUInfo.power` property setter.
   Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5power_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5power_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5power_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for `PSUInfo.power` (.pyx lines 9135-9139):
   raises ValueError if the instance was created read-only, otherwise
   converts `val` to a C unsigned int and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_5power_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9137
 *     @power.setter
 *     def power(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].power = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9138
 *     def power(self, val):
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].power = val
 * 
 */
    /* Instantiate ValueError via vectorcall with the interned message, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_PSUInfo_instance_is_read_on};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9138, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9138, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9137
 *     @power.setter
 *     def power(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].power = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9139
 *         if self._readonly:
 *             raise ValueError("This PSUInfo instance is read-only")
 *         self._ptr[0].power = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox to unsigned int; PyErr_Occurred() disambiguates the -1 sentinel. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9139, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).power = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9135
 *         return self._ptr[0].power
 * 
 *     @power.setter             # <<<<<<<<<<<<<<
 *     def power(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.power.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9141
 *         self._ptr[0].power = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PSUInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the static method `PSUInfo.from_data(data)`
   (.pyx line 9141).  Unpacks exactly one positional-or-keyword argument
   `data` and forwards it to the implementation function.  Supports both the
   fastcall (vectorcall) and classic tuple/dict calling conventions, selected
   at compile time by CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_12from_data, "PSUInfo.from_data(data)\n\nCreate an PSUInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `psu_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): the `< 0` comparison belongs inside unlikely().  With the
       GCC/Clang definition `#define unlikely(x) __builtin_expect(!!(x), 0)`,
       the previous form `unlikely(__pyx_kwds_len) < 0` compared the 0/1
       result of !!(x) against 0 and could never detect a negative
       (error) keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9141, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9141, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 9141, __pyx_L3_error)
      /* Verify the single required argument was supplied (positionally or by keyword). */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 9141, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9141, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 9141, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `PSUInfo.from_data(data)` (.pyx line 9148):
   looks up the module-level `psu_info_dtype` global and delegates to the
   module-internal helper `__from_data`, passing the dtype name, the dtype
   object, and the PSUInfo extension type object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":9148
 *             data (_numpy.ndarray): a single-element array of dtype `psu_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "psu_info_dtype", psu_info_dtype, PSUInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the `psu_info_dtype` module global (new reference in __pyx_t_1). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_psu_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_psu_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9141
 *         self._ptr[0].power = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PSUInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9150
 *         return __from_data(data, "psu_info_dtype", psu_info_dtype, PSUInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PSUInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the static method
   `PSUInfo.from_ptr(intptr_t ptr, bint readonly=False, object owner=None)`
   (.pyx line 9150).  Unpacks one required and two optional arguments
   (positional or keyword), converts `ptr` to intptr_t and `readonly` to a
   C int via truth-testing, then forwards to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_14from_ptr, "PSUInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an PSUInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): the `< 0` comparison belongs inside unlikely().  With the
       GCC/Clang definition `#define unlikely(x) __builtin_expect(!!(x), 0)`,
       the previous form `unlikely(__pyx_kwds_len) < 0` compared the 0/1
       result of !!(x) against 0 and could never detect a negative
       (error) keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9150, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 9150, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":9151
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an PSUInfo instance wrapping the given pointer.
 * 
 */
      /* Apply the default `owner=None`, then verify the one required argument. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 9150, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9150, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9150, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert `ptr` to intptr_t; -1 doubles as the error sentinel, so
       PyErr_Occurred() disambiguates a genuine -1 address. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9151, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9151, __pyx_L3_error)
    } else {
      /* Default `readonly=False`. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 9150, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":9150
 *         return __from_data(data, "psu_info_dtype", psu_info_dtype, PSUInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PSUInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated body of the Python static method
 * PSUInfo.from_ptr(ptr, readonly=False, owner=None) (see _nvml.pyx:9150).
 * Contract, as established by the code below:
 *   - ptr == 0 raises ValueError;
 *   - owner is None  -> malloc() a private copy of the nvmlPSUInfo_t pointed
 *     to by `ptr`, memcpy the caller's data into it, and mark the new object
 *     as owning the allocation (_owned = 1, _owner = None);
 *   - owner is not None -> alias the caller's memory directly (_owned = 0)
 *     and hold a reference to `owner` to keep that memory alive;
 *   - _readonly is set from `readonly` in both branches.
 * Generated code: do not hand-edit; regenerate from cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9159
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)
 */
  /* Guard: refuse to wrap a null pointer. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9160
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9160, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9160, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9159
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":9161
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 */
  /* Allocate the wrapper via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_PSUInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9161, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9162
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9163
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PSUInfo")
 */
    /* Owning branch: deep-copy the struct into a private heap allocation.
     * If malloc fails, the MemoryError path below releases `obj` via the
     * XDECREF in the common exit code, so nothing leaks. */
    __pyx_v_obj->_ptr = ((nvmlPSUInfo_t *)malloc((sizeof(nvmlPSUInfo_t))));

    /* "cuda/bindings/_nvml.pyx":9164
 *         if owner is None:
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPSUInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9165
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PSUInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPSUInfo_t))
 *             obj._owner = None
 */
      /* `MemoryError` is looked up as a module global here (the .pyx code
       * references the name), not via PyExc_MemoryError directly. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9165, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_PSUInfo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9165, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 9165, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9164
 *         if owner is None:
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPSUInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":9166
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPSUInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlPSUInfo_t))));

    /* "cuda/bindings/_nvml.pyx":9167
 *                 raise MemoryError("Error allocating PSUInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPSUInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9168
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPSUInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlPSUInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9162
 *             raise ValueError("ptr must not be null (0)")
 *         cdef PSUInfo obj = PSUInfo.__new__(PSUInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPSUInfo_t *>malloc(sizeof(nvmlPSUInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":9170
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlPSUInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Non-owning branch: alias the caller's memory and pin `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlPSUInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9171
 *         else:
 *             obj._ptr = <nvmlPSUInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":9172
 *             obj._ptr = <nvmlPSUInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":9173
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":9174
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9150
 *         return __from_data(data, "psu_info_dtype", psu_info_dtype, PSUInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PSUInfo instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path clears all temporaries; success and error paths share the
   * final XDECREF of obj (the returned reference was INCREF'd above). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* NOTE(review): Cython-generated METH_FASTCALL wrapper for
 * PSUInfo.__reduce_cython__(self).  Rejects any positional or keyword
 * argument, then forwards `self` to the implementation function.
 * Generated code: do not hand-edit; regenerate from _nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_16__reduce_cython__, "PSUInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the non-FASTCALL build, recover the argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Implementation of PSUInfo.__reduce_cython__.  Pickling is
 * intentionally unsupported because the object wraps a raw C pointer
 * (self._ptr); this function unconditionally raises TypeError and never
 * returns a value.  Generated code: regenerate from _nvml.pyx to change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  /* Only the error exit is reachable: the raise above always jumps here. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* NOTE(review): Cython-generated METH_FASTCALL wrapper for
 * PSUInfo.__setstate_cython__(self, __pyx_state).  Parses exactly one
 * argument ("__pyx_state", positional or keyword) and forwards it to the
 * implementation, which always raises TypeError (pickling unsupported).
 * Generated code: do not hand-edit; regenerate from _nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_18__setstate_cython__, "PSUInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` hints on the value rather
     * than on the comparison; functionally equivalent to
     * `unlikely(__pyx_kwds_len < 0)` — a quirk of the code generator. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references captured in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Implementation of PSUInfo.__setstate_cython__.  Mirrors
 * __reduce_cython__: pickling/unpickling is unsupported because the object
 * wraps a raw C pointer, so this always raises TypeError (the __pyx_state
 * argument is ignored).  Generated code: regenerate from _nvml.pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_7PSUInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  /* Only the error exit is reachable: the raise above always jumps here. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.PSUInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9177
 * 
 * 
 * cdef _get_unit_fan_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitFanInfo_t pod = nvmlUnitFanInfo_t()
 *     return _numpy.dtype({
*/

/* NOTE(review): Cython-generated body of the module-internal helper
 * _get_unit_fan_info_dtype_offsets().  Builds and returns a numpy.dtype
 * describing the C layout of nvmlUnitFanInfo_t via the dict form:
 *   {'names': ['speed', 'state'],
 *    'formats': [numpy.uint32, numpy.int32],
 *    'offsets': [offsetof(speed), offsetof(state)],
 *    'itemsize': sizeof(nvmlUnitFanInfo_t)}
 * Offsets are computed by pointer arithmetic on a stack instance `pod`.
 * Generated code: do not hand-edit; regenerate from _nvml.pyx. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_unit_fan_info_dtype_offsets(void) {
  nvmlUnitFanInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlUnitFanInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_unit_fan_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":9178
 * 
 * cdef _get_unit_fan_info_dtype_offsets():
 *     cdef nvmlUnitFanInfo_t pod = nvmlUnitFanInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['speed', 'state'],
 */
  /* NOTE(review): __pyx_t_1 is not visibly written before this copy.  `pod`
   * is only used below for address arithmetic (&pod.field - &pod), so its
   * contents are never read as data — presumably why the generator emits
   * this; confirm against the Cython output if it ever matters. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":9179
 * cdef _get_unit_fan_info_dtype_offsets():
 *     cdef nvmlUnitFanInfo_t pod = nvmlUnitFanInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['speed', 'state'],
 *         'formats': [_numpy.uint32, _numpy.int32],
 */
  /* Look up numpy.dtype (held in __pyx_t_5) before building the spec dict. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":9180
 *     cdef nvmlUnitFanInfo_t pod = nvmlUnitFanInfo_t()
 *     return _numpy.dtype({
 *         'names': ['speed', 'state'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int32],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9180, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9180, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_speed);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_speed);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_speed) != (0)) __PYX_ERR(0, 9180, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_state);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_state);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_state) != (0)) __PYX_ERR(0, 9180, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 9180, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9181
 *     return _numpy.dtype({
 *         'names': ['speed', 'state'],
 *         'formats': [_numpy.uint32, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 9181, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9181, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 9180, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9183
 *         'formats': [_numpy.uint32, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *         ],
 */
  /* offsetof-style computation: field address minus struct base address. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.speed)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9183, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":9184
 *         'offsets': [
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlUnitFanInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.state)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":9182
 *         'names': ['speed', 'state'],
 *         'formats': [_numpy.uint32, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.speed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 9182, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9182, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 9180, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":9186
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlUnitFanInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlUnitFanInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9186, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 9180, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict); bound-method unpacking is a fast path. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9179, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9177
 * 
 * 
 * cdef _get_unit_fan_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitFanInfo_t pod = nvmlUnitFanInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path releases every temporary that may still hold a reference. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_unit_fan_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9208
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=unit_fan_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* NOTE(review): Cython-generated varargs wrapper for
 * UnitFanInfo.__init__(self, size=1) (see _nvml.pyx:9208).  Parses the
 * optional `size` argument (positional or keyword), substituting the cached
 * int 1 when absent, then forwards to the implementation.  Returns 0 on
 * success, -1 on error, per the tp_init protocol.
 * Generated code: do not hand-edit; regenerate from _nvml.pyx. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` hints on the value rather
     * than on the comparison; functionally equivalent — generator quirk. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 9208, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keywords and apply
       * the default (cached int 1) if `size` was not supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9208, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 9208, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: at most one argument; default applies too. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9208, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 9208, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references captured in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__init__(self, size=1).
 *
 * Allocates `_numpy.empty(size, dtype=unit_fan_info_dtype)`, stores a
 * recarray view of it in self->_data, and (when assertions are enabled)
 * asserts that the array itemsize matches sizeof(nvmlUnitFanInfo_t),
 * raising AssertionError with an f-string message otherwise.
 *
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Generated by Cython from _nvml.pyx:9208-9212 — do not hand-edit logic.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":9209
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=unit_fan_info_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlUnitFanInfo_t), \
 */
  /* Look up `_numpy.empty` and the module-global `unit_fan_info_dtype`. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_unit_fan_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  /* Unpack bound methods so the underlying function can be vectorcalled
   * with `self` prepended as the first positional argument. */
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  /* Vectorcall: empty(size, dtype=unit_fan_info_dtype). */
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9209, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 9209, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9209, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9210
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=unit_fan_info_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlUnitFanInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlUnitFanInfo_t) }"
 */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9210, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9210, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9210, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Swap the new recarray view into self->_data, releasing the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9211
 *         arr = _numpy.empty(size, dtype=unit_fan_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlUnitFanInfo_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlUnitFanInfo_t) }"
 * 
 */
  /* Assertion is compiled out entirely when CYTHON_WITHOUT_ASSERTIONS is
   * defined, and skipped at runtime under `python -O` (assertions disabled). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9211, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlUnitFanInfo_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9211, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9211, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 9211, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":9212
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlUnitFanInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlUnitFanInfo_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string message from 4 fragments and raise AssertionError. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9212, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9212, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlUnitFanInfo_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9212, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9212, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 9211, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 9211, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":9208
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=unit_fan_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release all live temporaries, record traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9214
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlUnitFanInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python-level entry point (tp_repr slot) for UnitFanInfo.__repr__.
 * Thin wrapper: casts self to the extension type and delegates to the
 * implementation function. NOTE(review): `__pyx_args`/`__pyx_nargs` are not
 * parameters here — presumably Cython macros/globals in scope; confirm in
 * the generated preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__repr__.
 *
 * If self._data.size > 1, returns
 *   f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"
 * otherwise returns
 *   f"<{__name__}.UnitFanInfo object at {hex(id(self))}>".
 * Returns a new unicode reference, or NULL with an exception set.
 * Generated by Cython from _nvml.pyx:9214-9218 — do not hand-edit logic.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":9215
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9215, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 9215, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":9216
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.UnitFanInfo object at {hex(id(self))}>"
 */
    /* Array case: format __name__, _data.size and hex(id(self)), then join
     * the 7 literal/value fragments into the final repr string. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_UnitFanInfo_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 19 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9216, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9215
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":9218
 *             return f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.UnitFanInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar case: same construction with 5 fragments (no size component). */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_UnitFanInfo_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 23 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9218, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":9214
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlUnitFanInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.UnitFanInfo_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9220
 *             return f"<{__name__}.UnitFanInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level getter wrapper for the `ptr` property (UnitFanInfo.ptr).
 * Casts self to the extension type and delegates to the __get__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter:
 * returns `self._data.ctypes.data` (the numpy array's data pointer as a
 * Python int). Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9223
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Chained attribute lookup: self._data.ctypes, then .data */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9223, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9223, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9220
 *             return f"<{__name__}.UnitFanInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9225
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (cdef) helper: returns self._data.ctypes.data converted to
 * intptr_t via PyLong_AsSsize_t. On error it records a traceback and
 * returns 0 — NOTE(review): with the exception left pending for the caller
 * to detect, per Cython's convention for cdef functions returning a C
 * integer; confirm against the .pyx declaration (no explicit `except`). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11UnitFanInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9226
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9226, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9226, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Convert the Python int address to a C intptr_t (-1 + pending error
   * distinguishes failure from a legitimate -1 value). */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9226, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9225
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9228
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* Python-level entry point (nb_int slot) for UnitFanInfo.__int__.
 * Thin wrapper: casts self to the extension type and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__int__.
 *
 * Raises TypeError when self._data.size > 1 (int() is only meaningful for a
 * single element); otherwise returns self._data.ctypes.data (the data
 * pointer as a Python int). Returns a new reference, or NULL with an
 * exception set. Generated by Cython from _nvml.pyx:9228-9232. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":9229
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9229, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 9229, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":9230
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Instantiate TypeError with the interned message, then raise it. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9230, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9230, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9229
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":9232
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9232, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9232, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9228
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9234
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* Python-level entry point (sq_length/mp_length slot) for
 * UnitFanInfo.__len__. Thin wrapper: casts self and delegates. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__len__: returns self._data.size coerced
 * to Py_ssize_t, or -1 with an exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":9235
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Convert via __index__ protocol; -1 + pending error indicates failure. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9234
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9237
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Python-level entry point for UnitFanInfo.__eq__(self, other).
 * Thin wrapper: casts self to the extension type and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__eq__ (generated from _nvml.pyx:9237-9241).
   Returns False when `other` is not a UnitFanInfo, or when the wrapped
   NumPy arrays differ in `size` or `dtype`; otherwise returns
   bool((self._data == other._data).all()).  Error paths jump to
   __pyx_L1_error, which drops all temporaries and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":9238
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9239
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuiting `or` chain: each true sub-condition jumps straight
     to __pyx_L4_bool_binop_done with __pyx_t_2 set. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9240
 *         cdef object self_data = self._data
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9239
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":9241
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9241, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  /* Vectorcall of the elementwise-comparison result's .all() method. */
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9241, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* bool(...) is realized as double negation of the truth value. */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9241, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9237
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, UnitFanInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9243
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def speed(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper for the UnitFanInfo.speed property getter: casts
   `self` to the extension-type struct and forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for UnitFanInfo.speed (generated from _nvml.pyx:9243-9248).
   When the wrapped array has size == 1, returns int(self._data.speed[0])
   (a plain Python int); otherwise returns the `speed` field array
   unchanged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9246
 *     def speed(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.speed[0])
 *         return self._data.speed
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9246, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9247
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.speed[0])             # <<<<<<<<<<<<<<
 *         return self._data.speed
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_speed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9247, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9247, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9247, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9246
 *     def speed(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.speed[0])
 *         return self._data.speed
 */
  }

  /* "cuda/bindings/_nvml.pyx":9248
 *         if self._data.size == 1:
 *             return int(self._data.speed[0])
 *         return self._data.speed             # <<<<<<<<<<<<<<
 * 
 *     @speed.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_speed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9243
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def speed(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.speed.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9250
 *         return self._data.speed
 * 
 *     @speed.setter             # <<<<<<<<<<<<<<
 *     def speed(self, val):
 *         self._data.speed = val
*/

/* Python wrapper for the UnitFanInfo.speed property setter: casts
   `self` to the extension-type struct and forwards to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for UnitFanInfo.speed (generated from _nvml.pyx:9250-9252):
   performs `self._data.speed = val` on the wrapped array; returns 0 on
   success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9252
 *     @speed.setter
 *     def speed(self, val):
 *         self._data.speed = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_speed, __pyx_v_val) < (0)) __PYX_ERR(0, 9252, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9250
 *         return self._data.speed
 * 
 *     @speed.setter             # <<<<<<<<<<<<<<
 *     def speed(self, val):
 *         self._data.speed = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.speed.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9254
 *         self._data.speed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def state(self):
 *         """Union[~_numpy.int32, int]: """
*/

/* Python wrapper for the UnitFanInfo.state property getter: casts
   `self` to the extension-type struct and forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5state___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for UnitFanInfo.state (generated from _nvml.pyx:9254-9259).
   When the wrapped array has size == 1, returns int(self._data.state[0])
   (a plain Python int); otherwise returns the `state` field array
   unchanged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9257
 *     def state(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.state[0])
 *         return self._data.state
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9258
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.state[0])             # <<<<<<<<<<<<<<
 *         return self._data.state
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9258, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9258, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9258, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9257
 *     def state(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.state[0])
 *         return self._data.state
 */
  }

  /* "cuda/bindings/_nvml.pyx":9259
 *         if self._data.size == 1:
 *             return int(self._data.state[0])
 *         return self._data.state             # <<<<<<<<<<<<<<
 * 
 *     @state.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9254
 *         self._data.speed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def state(self):
 *         """Union[~_numpy.int32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.state.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9261
 *         return self._data.state
 * 
 *     @state.setter             # <<<<<<<<<<<<<<
 *     def state(self, val):
 *         self._data.state = val
*/

/* Python wrapper for the UnitFanInfo.state property setter: casts
   `self` to the extension-type struct and forwards to the impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for UnitFanInfo.state (generated from _nvml.pyx:9261-9263):
   performs `self._data.state = val` on the wrapped array; returns 0 on
   success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9263
 *     @state.setter
 *     def state(self, val):
 *         self._data.state = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_state, __pyx_v_val) < (0)) __PYX_ERR(0, 9263, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9261
 *         return self._data.state
 * 
 *     @state.setter             # <<<<<<<<<<<<<<
 *     def state(self, val):
 *         self._data.state = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.state.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9265
 *         self._data.state = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper for UnitFanInfo.__getitem__: casts `self` to the
   extension-type struct and forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__getitem__ (generated from
   _nvml.pyx:9265-9279).  Integer keys are bounds-checked against
   self._data.size (raising IndexError when out of range), normalized if
   negative, and answered with a length-1 slice re-wrapped via
   UnitFanInfo.from_data.  Any other key is forwarded to the underlying
   NumPy array; the result is re-wrapped as UnitFanInfo only when it is
   still a recarray of unit_fan_info_dtype, otherwise returned as-is. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":9268
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9269
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9269, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":9270
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9270, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9270, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":9271
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Out-of-bounds when key_ >= size or key_ < -size (short-circuit or). */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9272
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9272, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 9272, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9271
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":9273
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])
 */
    /* Normalize negative indices into [0, size). */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":9274
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":9273
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":9275
 *             if key_ < 0:
 *                 key_ += size
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == unit_fan_info_dtype:
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9275, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9275, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9268
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":9276
 *                 key_ += size
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == unit_fan_info_dtype:
 *             return UnitFanInfo.from_data(out)
 */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9276, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":9277
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == unit_fan_info_dtype:             # <<<<<<<<<<<<<<
 *             return UnitFanInfo.from_data(out)
 *         return out
 */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_unit_fan_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9277, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9278
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == unit_fan_info_dtype:
 *             return UnitFanInfo.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9278, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9277
 *             return UnitFanInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == unit_fan_info_dtype:             # <<<<<<<<<<<<<<
 *             return UnitFanInfo.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":9279
 *         if isinstance(out, _numpy.recarray) and out.dtype == unit_fan_info_dtype:
 *             return UnitFanInfo.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9265
 *         self._data.state = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9281
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper for UnitFanInfo.__setitem__: casts `self` to the
   extension-type struct and forwards to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__setitem__ (pyx line 9281):
 * performs `self._data[key] = val` by delegating to PyObject_SetItem on
 * the wrapped NumPy recarray held in self->_data.
 * Returns 0 on success; on failure records a traceback and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9282
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Any exception raised by the underlying recarray assignment jumps to
   * __pyx_L1_error, which attaches the traceback location. */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 9282, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9281
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9284
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitFanInfo instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method UnitFanInfo.from_data(data).
 *
 * Parses exactly one argument ("data", positional or keyword) under
 * either the METH_FASTCALL or tuple/dict calling convention (selected by
 * CYTHON_METH_FASTCALL at compile time), then forwards the borrowed
 * value to the implementation function.
 *
 * FIX over the generated code: the keyword-count error check was written
 * `if (unlikely(__pyx_kwds_len) < 0)`.  Cython's preamble defines
 * `unlikely(x)` as `__builtin_expect(!!(x), 0)`, whose value is 0 or 1,
 * so that comparison was always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored.  The parenthesization
 * is corrected to `unlikely(__pyx_kwds_len < 0)` below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_14from_data, "UnitFanInfo.from_data(data)\n\nCreate an UnitFanInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `unit_fan_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected check: detect error (negative count) from
     * __Pyx_NumKwargs_FASTCALL; previously always-false, see header. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9284, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9284, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 9284, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 9284, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9284, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 9284, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any argument references captured
   * in values[] before reporting the traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.from_data(data) (pyx line 9284).
 *
 * Allocates a fresh UnitFanInfo via its tp_new, validates `data`
 * (must be a numpy.ndarray, 1-dimensional, of dtype unit_fan_info_dtype),
 * stores `data.view(numpy.recarray)` into obj->_data, and returns the
 * new instance.  Raises TypeError / ValueError on validation failure.
 *
 * __pyx_t_1..__pyx_t_9 are scratch temporaries; on the error path every
 * still-held temporary is XDECREF'd at __pyx_L1_error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":9291
 *             data (_numpy.ndarray): a 1D array of dtype `unit_fan_info_dtype` holding the data.
 *         """
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Direct tp_new call: creates the instance without running __init__. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9291, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9292
 *         """
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* Look up numpy.ndarray dynamically from module globals each call. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 9292, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":9293
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    /* Construct and raise TypeError via the vectorcall fast path. */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9293, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9293, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9292
 *         """
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":9294
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != unit_fan_info_dtype:
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9294, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":9295
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != unit_fan_info_dtype:
 *             raise ValueError("data array must be of dtype unit_fan_info_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9295, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9295, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9294
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != unit_fan_info_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":9296
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != unit_fan_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype unit_fan_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* dtype comparison goes through rich compare (Py_NE) since numpy
   * dtype equality is object-defined. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_unit_fan_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":9297
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != unit_fan_info_dtype:
 *             raise ValueError("data array must be of dtype unit_fan_info_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_unit};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9297, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 9297, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9296
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != unit_fan_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype unit_fan_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":9298
 *         if data.dtype != unit_fan_info_dtype:
 *             raise ValueError("data array must be of dtype unit_fan_info_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Call data.view(numpy.recarray) — a zero-copy reinterpretation —
   * and store the result in obj->_data, releasing any previous value. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9298, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9300
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9284
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitFanInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9302
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an UnitFanInfo instance wrapping the given pointer.
*/

/* Python wrapper for the static method
 * UnitFanInfo.from_ptr(ptr, size=1, readonly=False).
 *
 * Parses 1..3 arguments (positional or keyword), converting `ptr` to
 * intptr_t, `size` to size_t (default 1) and `readonly` to a C int via
 * truthiness (default 0), then forwards to the implementation.
 *
 * FIX over the generated code: the keyword-count error check was written
 * `if (unlikely(__pyx_kwds_len) < 0)`.  Cython's preamble defines
 * `unlikely(x)` as `__builtin_expect(!!(x), 0)`, whose value is 0 or 1,
 * so that comparison was always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored.  Corrected to
 * `unlikely(__pyx_kwds_len < 0)` below.
 *
 * NOTE(review): `ptr` is converted with PyLong_AsSsize_t into an
 * intptr_t — this presumes Py_ssize_t and intptr_t have the same width
 * on all supported platforms; confirm against the build matrix. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_16from_ptr, "UnitFanInfo.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an UnitFanInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected check: detect error (negative count) from
     * __Pyx_NumKwargs_FASTCALL; previously always-false, see header. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9302, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9302, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9302, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9302, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 9302, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 9302, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9302, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9302, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9302, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9303, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9303, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9303, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":9303
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an UnitFanInfo instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 9302, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any argument references captured
   * in values[] before reporting the traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":9302
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an UnitFanInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.from_ptr(ptr, size, readonly)
 * (pyx line 9302).
 *
 * Wraps a raw pointer without copying: rejects a null pointer, builds a
 * memoryview over `size * sizeof(nvmlUnitFanInfo_t)` bytes at `ptr`
 * (readable-only when `readonly`, else writable), constructs a
 * numpy.ndarray over that buffer with dtype unit_fan_info_dtype, and
 * stores its recarray view in the new instance's _data.
 *
 * The memory is NOT owned or copied (PyMemoryView_FromMemory wraps it in
 * place per the CPython docs); the caller must keep the pointed-to
 * memory alive for the lifetime of the returned object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9311
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9312
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9312, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9312, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9311
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":9313
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Direct tp_new call: creates the instance without running __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9313, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9314
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlUnitFanInfo_t) * size, flag)
 */
  /* `flag` is kept boxed as a Python int here (untyped cdef), then
   * unboxed again below for the C-level PyMemoryView_FromMemory call. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9314, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9314, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9316
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlUnitFanInfo_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=unit_fan_info_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9316, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9315
 *         cdef UnitFanInfo obj = UnitFanInfo.__new__(UnitFanInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlUnitFanInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=unit_fan_info_dtype)
 */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlUnitFanInfo_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9315, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9317
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlUnitFanInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=unit_fan_info_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* Vectorcall of numpy.ndarray(size, buffer=buf, dtype=...) with the
   * two keyword arguments assembled via the kwnames builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_unit_fan_info_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9317, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 9317, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 9317, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9317, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9318
 *             <char*>ptr, sizeof(nvmlUnitFanInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=unit_fan_info_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Store data.view(numpy.recarray) into obj->_data, releasing any
   * previous value. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9318, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9318, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9318, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9320
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9302
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an UnitFanInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9204
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper for the getter of the readonly attribute
 * UnitFanInfo._data: casts `self` and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * function; this line presumably relies on __Pyx_KwValues_VARARGS
   * being a macro that ignores its arguments in this configuration —
   * confirm against the Cython utility-code preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo._data.__get__: returns a new reference to the
 * object stored in the `_data` slot of the instance.  Cannot fail: `_data` is
 * an `object` slot, so it is at least Py_None after construction. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Take a new reference before handing the slot's object to the caller. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Fastcall wrapper for UnitFanInfo.__reduce_cython__ (pickle support).
 * The method takes no arguments beyond `self`: any positional argument or
 * keyword is rejected with a TypeError before the implementation is called.
 * Built both for METH_FASTCALL and classic tuple/dict calling conventions,
 * selected at compile time via CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_18__reduce_cython__, "UnitFanInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: recover the positional count from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positionals or keywords outright. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__reduce_cython__:
 * builds the pickle reduce value for the extension type.
 *   state = (self._data,)           -- plus self.__dict__ when non-empty
 *   returns (__pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, None), state)
 *     when __setstate__ should be used, otherwise
 *   returns (__pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)).
 * 0xa75e18a (== 175497610) is Cython's checksum of the type layout, verified
 * at unpickle time to detect binary-incompatible class versions. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuiting `and`: truth-test __dict__ only when it is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* Truth-test a pre-built constant tuple (per the annotated source,
     * ('self._data is not None',)).  A non-empty tuple is always truthy, so
     * use_setstate presumably ends up true on this branch as well. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)
 */
    /* 3-tuple form: state is passed separately so __setstate__ applies it. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_UnitFanInfo); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_UnitFanInfo__set_state(self, __pyx_state)
 */
  /*else*/ {
    /* 2-tuple form: state is embedded in the constructor arguments. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_UnitFanInfo); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_UnitFanInfo__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Fastcall wrapper for UnitFanInfo.__setstate_cython__(self, __pyx_state).
 * Parses exactly one argument, accepted positionally or by the keyword
 * '__pyx_state', then forwards to the implementation.  All error paths
 * release any argument references collected in `values`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_20__setstate_cython__, "UnitFanInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps __pyx_kwds_len itself, not the whole
     * comparison; if unlikely() normalizes its operand to 0/1 this < 0 check
     * can never fire -- confirm against the Cython macro definition before
     * relying on it. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords and
       * verify every required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanInfo.__setstate_cython__: validates that
 * __pyx_state is a tuple (raising TypeError for any other type, including
 * None, since the C-level helper takes a 'not None' tuple), then applies the
 * state via the generated __pyx_unpickle_UnitFanInfo__set_state helper.
 * Returns None on success. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11UnitFanInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_UnitFanInfo__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Accept exactly a tuple; None is then rejected separately because the
   * helper's parameter is declared 'not None'. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_UnitFanInfo__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_UnitFanInfo__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9323
 * 
 * 
 * cdef _get_event_data_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEventData_t pod = nvmlEventData_t()
 *     return _numpy.dtype({
*/

/* Module-internal (cdef) helper: builds and returns a numpy structured dtype
 * that mirrors the C layout of nvmlEventData_t, i.e.
 *   numpy.dtype({'names': [...], 'formats': [...], 'offsets': [...],
 *                'itemsize': sizeof(nvmlEventData_t)})
 * with field offsets computed from the addresses of a stack instance's
 * members.  This lets numpy views reinterpret raw nvmlEventData_t buffers. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_event_data_dtype_offsets(void) {
  nvmlEventData_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlEventData_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_event_data_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":9324
 * 
 * cdef _get_event_data_dtype_offsets():
 *     cdef nvmlEventData_t pod = nvmlEventData_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['device_', 'event_type', 'event_data', 'gpu_instance_id', 'compute_instance_id'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy; `pod` is only
   * used below for address arithmetic (&pod.field - &pod), so its byte values
   * are irrelevant.  Presumably relies on C++ default-initialization of the
   * temporary -- reads as an uninitialized copy in plain C terms. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":9325
 * cdef _get_event_data_dtype_offsets():
 *     cdef nvmlEventData_t pod = nvmlEventData_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['device_', 'event_type', 'event_data', 'gpu_instance_id', 'compute_instance_id'],
 *         'formats': [_numpy.intp, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":9326
 *     cdef nvmlEventData_t pod = nvmlEventData_t()
 *     return _numpy.dtype({
 *         'names': ['device_', 'event_type', 'event_data', 'gpu_instance_id', 'compute_instance_id'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.intp, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* 'names' entry: field names exposed on the Python side. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_device);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_device);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_device) != (0)) __PYX_ERR(0, 9326, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_event_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_event_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_event_type) != (0)) __PYX_ERR(0, 9326, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_event_data);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_event_data);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_event_data) != (0)) __PYX_ERR(0, 9326, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu_instance_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_gpu_instance_id) != (0)) __PYX_ERR(0, 9326, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_compute_instance_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_compute_instance_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_compute_instance_id) != (0)) __PYX_ERR(0, 9326, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 9326, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9327
 *     return _numpy.dtype({
 *         'names': ['device_', 'event_type', 'event_data', 'gpu_instance_id', 'compute_instance_id'],
 *         'formats': [_numpy.intp, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 */
  /* 'formats' entry: one numpy scalar type per field, looked up fresh from
   * the numpy module each time. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 9327, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9327, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 9327, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 9327, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 9327, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 9326, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9329
 *         'formats': [_numpy.intp, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.eventType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.eventData)) - (<intptr_t>&pod),
 */
  /* 'offsets' entries: byte offset of each member within nvmlEventData_t,
   * computed via pointer arithmetic on the stack instance. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.device)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":9330
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.eventType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.eventData)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.eventType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 9330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":9331
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.eventType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.eventData)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.eventData)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9331, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":9332
 *             (<intptr_t>&(pod.eventType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.eventData)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpuInstanceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":9333
 *             (<intptr_t>&(pod.eventData)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstanceId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlEventData_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.computeInstanceId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":9328
 *         'names': ['device_', 'event_type', 'event_data', 'gpu_instance_id', 'compute_instance_id'],
 *         'formats': [_numpy.intp, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.eventType)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 9328, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 9328, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 9328, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 9328, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 9328, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 9326, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":9335
 *             (<intptr_t>&(pod.computeInstanceId)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlEventData_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' entry: full struct size, so trailing padding is preserved. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEventData_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 9326, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(dict): if it is a bound method, unpack it so the call
   * can use the vectorcall fast path with `self` as the first argument. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9325, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9323
 * 
 * 
 * cdef _get_event_data_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEventData_t pod = nvmlEventData_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_event_data_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9352
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlEventData_t *>calloc(1, sizeof(nvmlEventData_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for EventData.__init__.
 * EventData.__init__ takes no arguments: this wrapper only validates that
 * neither positional nor keyword arguments were supplied, then forwards to
 * the C implementation __pyx_pf_..._EventData___init__. Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-ASSUME_SAFE_SIZE path can fail (returns -1). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Reject any positional arguments (signature is __init__(self)). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* Reject any keyword arguments as well. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C implementation of EventData.__init__ (pyx line 9352):
 * zero-allocates one nvmlEventData_t with calloc, raises MemoryError if the
 * allocation fails, then marks the buffer as owned (so __dealloc__ frees it)
 * and writable. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":9353
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlEventData_t *>calloc(1, sizeof(nvmlEventData_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EventData")
 */
  /* calloc guarantees the struct starts out zero-filled. */
  __pyx_v_self->_ptr = ((nvmlEventData_t *)calloc(1, (sizeof(nvmlEventData_t))));

  /* "cuda/bindings/_nvml.pyx":9354
 *     def __init__(self):
 *         self._ptr = <nvmlEventData_t *>calloc(1, sizeof(nvmlEventData_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating EventData")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9355
 *         self._ptr = <nvmlEventData_t *>calloc(1, sizeof(nvmlEventData_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EventData")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in the module globals and call it via the
     * vectorcall fast path; __pyx_t_5 selects between a bound-method call
     * (self occupies slot 0) and a plain call. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9355, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EventData};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9355, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9355, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9354
 *     def __init__(self):
 *         self._ptr = <nvmlEventData_t *>calloc(1, sizeof(nvmlEventData_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating EventData")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":9356
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EventData")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the _owner slot with None (incref new value before dropping old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":9357
 *             raise MemoryError("Error allocating EventData")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":9358
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":9352
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlEventData_t *>calloc(1, sizeof(nvmlEventData_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9360
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlEventData_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for EventData.__dealloc__; takes no arguments and
 * simply forwards to the C implementation.
 * NOTE(review): __pyx_args / __pyx_nargs are not declared in this function,
 * so this only compiles because __Pyx_KwValues_VARARGS must be a macro that
 * discards its arguments — generated code relies on that; do not convert it
 * to a real function. */
static void __pyx_pw_4cuda_8bindings_5_nvml_9EventData_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_9EventData_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_9EventData_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* C implementation of EventData.__dealloc__ (pyx lines 9360-9365):
 * releases the wrapped nvmlEventData_t buffer, but only when this object
 * owns it (_owned) and the pointer is non-NULL. The pointer is snapshotted
 * and cleared BEFORE free() so the object never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_9EventData_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  nvmlEventData_t *__pyx_v_ptr;

  /* if self._owned and self._ptr != NULL: */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* ptr = self._ptr; self._ptr = NULL; free(ptr) */
    __pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":9367
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.EventData object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for EventData.__repr__; no-argument slot call that
 * forwards straight to the C implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C implementation of EventData.__repr__ (pyx line 9367):
 * builds the f-string "<{__name__}.EventData object at {hex(id(self))}>" by
 * formatting __name__, calling hex(id(self)), then joining five unicode
 * fragments with a precomputed minimum length and max-char-value.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":9368
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.EventData object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__) */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal and computed fragments; the "1 * 2" and "21" terms are the
   * lengths of the constant pieces (presumably "<", ".EventData object at ",
   * ">" — derived from the interned constants, not visible here). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_EventData_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 21 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9367
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.EventData object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9370
 *         return f"<{__name__}.EventData object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the EventData.ptr property getter; forwards to
 * the C implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C implementation of the EventData.ptr property getter (pyx line 9373):
 * returns the raw nvmlEventData_t* address as a Python int.
 * NOTE(review): the generated code funnels intptr_t through
 * PyLong_FromSsize_t, i.e. it assumes intptr_t fits in Py_ssize_t — true on
 * the common flat-address-space platforms this module targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9373
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9370
 *         return f"<{__name__}.EventData object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9375
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor EventData._get_ptr (pyx line 9375): exposes the wrapped
 * nvmlEventData_t* as an integer address for other cdef code in this module.
 * Pure cast, no Python API, cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_9EventData__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  /* return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":9378
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for EventData.__int__ (nb_int slot); forwards to the
 * C implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C implementation of EventData.__int__ (pyx line 9378): same contract as
 * the .ptr property getter — the raw pointer address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":9379
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9378
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9381
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef EventData other_
 *         if not isinstance(other, EventData):
*/

/* Python wrapper */
/* Python-level wrapper for EventData.__eq__; forwards self and other to the
 * C implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C implementation of EventData.__eq__ (pyx line 9381):
 * returns False for non-EventData operands, otherwise compares the two
 * underlying nvmlEventData_t structs byte-for-byte with memcmp.
 * NOTE(review): the memcmp dereferences both _ptr values without a NULL
 * check — presumably both are always allocated by __init__, but a freed or
 * NULL pointer would be undefined behavior here; confirm against the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":9383
 *     def __eq__(self, other):
 *         cdef EventData other_
 *         if not isinstance(other, EventData):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9384
 *         cdef EventData other_
 *         if not isinstance(other, EventData):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEventData_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9383
 *     def __eq__(self, other):
 *         cdef EventData other_
 *         if not isinstance(other, EventData):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":9385
 *         if not isinstance(other, EventData):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEventData_t)) == 0)
 * 
 */
  /* Type-checked downcast of other to the EventData extension struct. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData))))) __PYX_ERR(0, 9385, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":9386
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEventData_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlEventData_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9386, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9381
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef EventData other_
 *         if not isinstance(other, EventData):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9388
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEventData_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
*/

/* Python wrapper */
/* Python-level wrapper for EventData.__setitem__ (mp_ass_subscript slot);
 * forwards key and value to the C implementation. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C implementation of EventData.__setitem__ (pyx line 9388):
 * obj[0] = ndarray copies the array's bytes into a freshly malloc'd
 * nvmlEventData_t and records ownership/readonly-ness from the array flags;
 * any other key falls through to setattr(self, key, val).
 *
 * FIX (memory leak): the original unconditionally overwrote self->_ptr with
 * the new malloc result, leaking the buffer calloc'd in __init__ (or by a
 * previous assignment) on every ndarray store. We now free the previously
 * owned buffer first. Observable Python behavior is unchanged; the proper
 * long-term fix belongs in the generating .pyx source. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":9389
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray): */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9389, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9389, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9389, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 9389, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9390
 *             self._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t)) */
    /* FIX: release a previously owned buffer before replacing it, otherwise
     * the allocation made in __init__ is leaked on every ndarray store. */
    if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlEventData_t *)malloc((sizeof(nvmlEventData_t))));

    /* "cuda/bindings/_nvml.pyx":9391
 *             if self._ptr == NULL: */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9392
 *                 raise MemoryError("Error allocating EventData") */
      /* Vectorcall MemoryError("Error allocating EventData") and raise it. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9392, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EventData};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9392, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 9392, __pyx_L1_error)
    }

    /* "cuda/bindings/_nvml.pyx":9393
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEventData_t)) */
    /* Pull the source address out of val.ctypes.data and copy one struct. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9393, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9393, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9393, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlEventData_t))));

    /* "cuda/bindings/_nvml.pyx":9394
 *             self._owner = None */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9395
 *             self._owned = True */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9396
 *             self._readonly = not val.flags.writeable */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9396, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9396, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 9396, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":9398
 *         else:
 *             setattr(self, key, val) */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 9398, __pyx_L1_error)
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9400
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ slot wrapper for the `device_` property of EventData:
   casts the generic PyObject* instance to the EventData struct and delegates
   to the impl function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_7device__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_7device__1__get__(PyObject *__pyx_v_self) {
  /* NOTE(review): __pyx_kwvalues is only touched via the __Pyx_KwValues_VARARGS
     macro; __pyx_args/__pyx_nargs are presumably consumed (or ignored) inside
     that macro expansion — generated code, confirm against Cython utility code. */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_7device____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.device_` getter: returns self._ptr[0].device (an
   nvmlDevice_t handle) as a Python int, converting the pointer value through
   intptr_t. Returns NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9403
 *     def device_(self):
 *         """int: """
 *         return <intptr_t>(self._ptr[0].device)             # <<<<<<<<<<<<<<
 * 
 *     @device_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Handle-to-int conversion; PyLong_FromSsize_t is used since intptr_t and
     Py_ssize_t have the same width on supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).device)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9400
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.device_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9405
 *         return <intptr_t>(self._ptr[0].device)
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ slot wrapper for `EventData.device_`: casts `self` and
   forwards the new value to the impl function below. Returns 0 on success,
   -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_7device__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.device_` setter: raises ValueError when the instance is
   flagged read-only (self._readonly), otherwise converts `val` to an integer
   and stores it into self._ptr[0].device as an nvmlDevice_t handle. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9407
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9408
 *     def device_(self, val):
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 */
    /* Build and raise ValueError("This EventData instance is read-only")
       via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EventData_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9408, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9408, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9407
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9409
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Python int -> intptr_t -> nvmlDevice_t handle; -1 plus a pending
     exception signals conversion failure. */
  __pyx_t_4 = PyLong_AsSsize_t(__pyx_v_val); if (unlikely((__pyx_t_4 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9409, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).device = ((nvmlDevice_t)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":9405
 *         return <intptr_t>(self._ptr[0].device)
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.device_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9411
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def event_type(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ slot wrapper for `EventData.event_type`: casts the
   instance and delegates to the impl function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.event_type` getter: returns self._ptr[0].eventType
   (converted from unsigned long long) as a Python int, or NULL with an
   exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9414
 *     def event_type(self):
 *         """int: """
 *         return self._ptr[0].eventType             # <<<<<<<<<<<<<<
 * 
 *     @event_type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).eventType); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9411
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def event_type(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.event_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9416
 *         return self._ptr[0].eventType
 * 
 *     @event_type.setter             # <<<<<<<<<<<<<<
 *     def event_type(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ slot wrapper for `EventData.event_type`: casts `self`
   and forwards the new value to the impl function below. Returns 0 on
   success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.event_type` setter: raises ValueError when the instance
   is read-only, otherwise converts `val` to unsigned long long and stores it
   into self._ptr[0].eventType. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9418
 *     @event_type.setter
 *     def event_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].eventType = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9419
 *     def event_type(self, val):
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].eventType = val
 * 
 */
    /* Build and raise ValueError("This EventData instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EventData_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9419, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9419, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9418
 *     @event_type.setter
 *     def event_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].eventType = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9420
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].eventType = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 9420, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).eventType = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9416
 *         return self._ptr[0].eventType
 * 
 *     @event_type.setter             # <<<<<<<<<<<<<<
 *     def event_type(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.event_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9422
 *         self._ptr[0].eventType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def event_data(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ slot wrapper for `EventData.event_data`: casts the
   instance and delegates to the impl function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.event_data` getter: returns self._ptr[0].eventData
   (converted from unsigned long long) as a Python int, or NULL with an
   exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9425
 *     def event_data(self):
 *         """int: """
 *         return self._ptr[0].eventData             # <<<<<<<<<<<<<<
 * 
 *     @event_data.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).eventData); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9422
 *         self._ptr[0].eventType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def event_data(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.event_data.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9427
 *         return self._ptr[0].eventData
 * 
 *     @event_data.setter             # <<<<<<<<<<<<<<
 *     def event_data(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ slot wrapper for `EventData.event_data`: casts `self`
   and forwards the new value to the impl function below. Returns 0 on
   success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_data_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.event_data` setter: raises ValueError when the instance
   is read-only, otherwise converts `val` to unsigned long long and stores it
   into self._ptr[0].eventData. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_10event_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9429
 *     @event_data.setter
 *     def event_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].eventData = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9430
 *     def event_data(self, val):
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].eventData = val
 * 
 */
    /* Build and raise ValueError("This EventData instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EventData_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9430, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9430, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9429
 *     @event_data.setter
 *     def event_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].eventData = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9431
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].eventData = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 9431, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).eventData = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9427
 *         return self._ptr[0].eventData
 * 
 *     @event_data.setter             # <<<<<<<<<<<<<<
 *     def event_data(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.event_data.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9433
 *         self._ptr[0].eventData = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ slot wrapper for `EventData.gpu_instance_id`: casts the
   instance and delegates to the impl function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.gpu_instance_id` getter: returns
   self._ptr[0].gpuInstanceId (converted from unsigned int) as a Python int,
   or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9436
 *     def gpu_instance_id(self):
 *         """int: """
 *         return self._ptr[0].gpuInstanceId             # <<<<<<<<<<<<<<
 * 
 *     @gpu_instance_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).gpuInstanceId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9433
 *         self._ptr[0].eventData = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.gpu_instance_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9438
 *         return self._ptr[0].gpuInstanceId
 * 
 *     @gpu_instance_id.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ slot wrapper for `EventData.gpu_instance_id`: casts
   `self` and forwards the new value to the impl function below. Returns 0 on
   success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.gpu_instance_id` setter: raises ValueError when the
   instance is read-only, otherwise converts `val` to unsigned int and stores
   it into self._ptr[0].gpuInstanceId. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9440
 *     @gpu_instance_id.setter
 *     def gpu_instance_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].gpuInstanceId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9441
 *     def gpu_instance_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].gpuInstanceId = val
 * 
 */
    /* Build and raise ValueError("This EventData instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EventData_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9441, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9441, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9440
 *     @gpu_instance_id.setter
 *     def gpu_instance_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].gpuInstanceId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9442
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].gpuInstanceId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9442, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).gpuInstanceId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9438
 *         return self._ptr[0].gpuInstanceId
 * 
 *     @gpu_instance_id.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.gpu_instance_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9444
 *         self._ptr[0].gpuInstanceId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ slot wrapper for `EventData.compute_instance_id`: casts
   the instance and delegates to the impl function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.compute_instance_id` getter: returns
   self._ptr[0].computeInstanceId (converted from unsigned int) as a Python
   int, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9447
 *     def compute_instance_id(self):
 *         """int: """
 *         return self._ptr[0].computeInstanceId             # <<<<<<<<<<<<<<
 * 
 *     @compute_instance_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).computeInstanceId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9444
 *         self._ptr[0].gpuInstanceId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.compute_instance_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9449
 *         return self._ptr[0].computeInstanceId
 * 
 *     @compute_instance_id.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ slot wrapper for `EventData.compute_instance_id`: casts
   `self` and forwards the new value to the impl function below. Returns 0 on
   success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `EventData.compute_instance_id` setter: raises ValueError when the
   instance is read-only, otherwise converts `val` to unsigned int and stores
   it into self._ptr[0].computeInstanceId. */
static int __pyx_pf_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9451
 *     @compute_instance_id.setter
 *     def compute_instance_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].computeInstanceId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9452
 *     def compute_instance_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].computeInstanceId = val
 * 
 */
    /* Build and raise ValueError("This EventData instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EventData_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9452, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9452, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9451
 *     @compute_instance_id.setter
 *     def compute_instance_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].computeInstanceId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9453
 *         if self._readonly:
 *             raise ValueError("This EventData instance is read-only")
 *         self._ptr[0].computeInstanceId = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9453, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).computeInstanceId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9449
 *         return self._ptr[0].computeInstanceId
 * 
 *     @compute_instance_id.setter             # <<<<<<<<<<<<<<
 *     def compute_instance_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.compute_instance_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9455
 *         self._ptr[0].computeInstanceId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EventData instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* CPython entry point for the static method `EventData.from_data(data)`.
   Accepts exactly one argument (`data`), positionally or by keyword, under
   both the METH_FASTCALL (vectorcall) and the legacy tuple/dict calling
   conventions, then delegates to the implementation function
   __pyx_pf_4cuda_8bindings_5_nvml_9EventData_12from_data. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9EventData_12from_data, "EventData.from_data(data)\n\nCreate an EventData instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `event_data_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9EventData_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the `< 0` comparison must sit inside unlikely().  With the
       GCC/Clang definition `unlikely(x) == __builtin_expect(!!(x), 0)` the
       original `unlikely(__pyx_kwds_len) < 0` reduces to `(0 or 1) < 0`,
       which is always false, so a negative (error) result from
       __Pyx_NumKwargs_FASTCALL could never be caught on those compilers.
       Other wrappers in this file (e.g. __reduce_cython__) already use the
       correct `unlikely(__pyx_kwds_len < 0)` form. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9455, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9455, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 9455, __pyx_L3_error)
      /* Verify every required argument was supplied either positionally or by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 9455, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9455, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 9455, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop the references held in values[] before propagating. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `EventData.from_data(data)`.
   Looks up the module-level global `event_data_dtype` and forwards
   (data, "event_data_dtype", event_data_dtype, EventData) to the shared
   helper __from_data, which builds the wrapping EventData instance.
   Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":9462
 *             data (_numpy.ndarray): a single-element array of dtype `event_data_dtype` holding the data.
 *         """
 *         return __from_data(data, "event_data_dtype", event_data_dtype, EventData)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the `event_data_dtype` global; jumps to __pyx_L1_error if the
     name is missing or the lookup raises. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_event_data_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_event_data_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9455
 *         self._ptr[0].computeInstanceId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EventData instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* Error path falls through to __pyx_L0 after releasing temporaries. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9464
 *         return __from_data(data, "event_data_dtype", event_data_dtype, EventData)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EventData instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython entry point for the static method
   `EventData.from_ptr(intptr_t ptr, bint readonly=False, object owner=None)`.
   Unpacks up to three arguments (one required) under both the METH_FASTCALL
   (vectorcall) and the legacy tuple/dict calling conventions, applies the
   defaults readonly=False / owner=None, converts `ptr` and `readonly` to C
   types, and delegates to __pyx_pf_4cuda_8bindings_5_nvml_9EventData_14from_ptr. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9EventData_14from_ptr, "EventData.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an EventData instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9EventData_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the `< 0` comparison must sit inside unlikely().  With the
       GCC/Clang definition `unlikely(x) == __builtin_expect(!!(x), 0)` the
       original `unlikely(__pyx_kwds_len) < 0` reduces to `(0 or 1) < 0`,
       which is always false, leaving the negative-length error check dead
       on those compilers. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9464, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9464, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9464, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9464, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 9464, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":9465
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an EventData instance wrapping the given pointer.
 * 
 */
      /* Apply the `owner=None` default when not supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 9464, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9464, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9464, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9464, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert arguments to C types; `readonly` defaults to 0 (False). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9465, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9465, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 9464, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":9464
 *         return __from_data(data, "event_data_dtype", event_data_dtype, EventData)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EventData instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `EventData.from_ptr(ptr, readonly=False, owner=None)`.
   Rejects a null pointer, allocates a fresh EventData, then either
   (owner is None) malloc's its own nvmlEventData_t and copies the pointee
   (so the new object owns its storage), or (owner given) aliases the caller's
   pointer and keeps a reference to `owner` to pin the storage alive.
   NOTE(review): assumes `ptr` addresses a valid, readable nvmlEventData_t —
   this is the documented caller contract, not checked here.
   Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9473
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EventData obj = EventData.__new__(EventData)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9474
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef EventData obj = EventData.__new__(EventData)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9474, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9474, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9473
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EventData obj = EventData.__new__(EventData)
 */
  }

  /* "cuda/bindings/_nvml.pyx":9475
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EventData obj = EventData.__new__(EventData)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
 */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EventData(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9475, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9476
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EventData obj = EventData.__new__(EventData)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9477
 *         cdef EventData obj = EventData.__new__(EventData)
 *         if owner is None:
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EventData")
 */
    /* No owner: take a private copy so the object's lifetime is self-contained. */
    __pyx_v_obj->_ptr = ((nvmlEventData_t *)malloc((sizeof(nvmlEventData_t))));

    /* "cuda/bindings/_nvml.pyx":9478
 *         if owner is None:
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EventData")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEventData_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9479
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EventData")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEventData_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved by name (module global lookup) rather than
         PyExc_MemoryError, matching the .pyx semantics. On the raise,
         __pyx_L1_error falls through to __pyx_L0, which XDECREFs obj. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9479, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EventData};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9479, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 9479, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9478
 *         if owner is None:
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EventData")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEventData_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":9480
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EventData")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEventData_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the caller's struct into the freshly malloc'd storage. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlEventData_t))));

    /* "cuda/bindings/_nvml.pyx":9481
 *                 raise MemoryError("Error allocating EventData")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEventData_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9482
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEventData_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlEventData_t *>ptr
 */
    /* _owned = True: dealloc is responsible for free()ing _ptr. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9476
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EventData obj = EventData.__new__(EventData)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlEventData_t *>malloc(sizeof(nvmlEventData_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":9484
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlEventData_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the caller's memory and hold a reference to
       `owner` so the aliased storage outlives this object. */
    __pyx_v_obj->_ptr = ((nvmlEventData_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9485
 *         else:
 *             obj._ptr = <nvmlEventData_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":9486
 *             obj._ptr = <nvmlEventData_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":9487
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":9488
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9464
 *         return __from_data(data, "event_data_dtype", event_data_dtype, EventData)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EventData instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  /* Shared exit: on error, obj (if created) is released here. */
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython entry point for `EventData.__reduce_cython__(self)`.
   Takes no arguments beyond self: rejects any positional or keyword
   arguments, then delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9EventData_16__reduce_cython__, "EventData.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9EventData_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `EventData.__reduce_cython__`: pickling is unsupported
   for this extension type (its _ptr member is a raw C pointer), so this
   unconditionally raises TypeError. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for `EventData.__setstate_cython__(self, __pyx_state)`.
   Unpacks exactly one argument (`__pyx_state`), positionally or by keyword,
   under both the METH_FASTCALL (vectorcall) and the legacy tuple/dict calling
   conventions, then delegates to the implementation function (which always
   raises TypeError — pickling is unsupported for this type). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_9EventData_18__setstate_cython__, "EventData.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9EventData_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9EventData_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the `< 0` comparison must sit inside unlikely().  With the
       GCC/Clang definition `unlikely(x) == __builtin_expect(!!(x), 0)` the
       original `unlikely(__pyx_kwds_len) < 0` reduces to `(0 or 1) < 0`,
       which is always false, leaving the negative-length error check dead
       on those compilers (the __reduce_cython__ wrapper above already uses
       the correct form). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_9EventData_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `EventData.__setstate_cython__`: unpickling is
   unsupported for this extension type (its _ptr member is a raw C pointer),
   so this unconditionally raises TypeError, ignoring __pyx_state.
   Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_9EventData_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EventData.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9491
 * 
 * 
 * cdef _get_accounting_stats_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlAccountingStats_t pod = nvmlAccountingStats_t()
 *     return _numpy.dtype({
 */

/*
 * Builds and returns the numpy structured dtype describing the in-memory
 * layout of nvmlAccountingStats_t.  The dict handed to numpy.dtype() carries
 * 'names', 'formats', 'offsets' (derived from the field addresses of a
 * stack-local instance, i.e. an offsetof() computed at runtime) and
 * 'itemsize' (sizeof the C struct), so numpy views over raw NVML buffers
 * match the C ABI layout exactly.
 * Returns a new reference, or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_accounting_stats_dtype_offsets(void) {
  nvmlAccountingStats_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlAccountingStats_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_accounting_stats_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":9492
 * 
 * cdef _get_accounting_stats_dtype_offsets():
 *     cdef nvmlAccountingStats_t pod = nvmlAccountingStats_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['gpu_utilization', 'memory_utilization', 'max_memory_usage', 'time', 'start_time', 'is_running', 'reserved'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so pod's
   * contents are indeterminate.  That is harmless here because only the
   * *addresses* of pod's fields are used below, never their values —
   * but confirm the Cython codegen intent if regenerating. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":9493
 * cdef _get_accounting_stats_dtype_offsets():
 *     cdef nvmlAccountingStats_t pod = nvmlAccountingStats_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['gpu_utilization', 'memory_utilization', 'max_memory_usage', 'time', 'start_time', 'is_running', 'reserved'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 */
  /* Look up the bound callable `_numpy.dtype` (t_5); the module global is
   * fetched fresh rather than cached. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9493, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9493, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":9494
 *     cdef nvmlAccountingStats_t pod = nvmlAccountingStats_t()
 *     return _numpy.dtype({
 *         'names': ['gpu_utilization', 'memory_utilization', 'max_memory_usage', 'time', 'start_time', 'is_running', 'reserved'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* 'names': the seven Python-visible field names (interned unicode),
   * in the struct's declaration order. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu_utilization);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu_utilization);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_gpu_utilization) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_memory_utilization);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_memory_utilization);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_memory_utilization) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_memory_usage);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_memory_usage);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_max_memory_usage) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_time) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_start_time);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_start_time);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_start_time) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_running);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_running);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_is_running) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_reserved);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_reserved);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_reserved) != (0)) __PYX_ERR(0, 9494, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 9494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9495
 *     return _numpy.dtype({
 *         'names': ['gpu_utilization', 'memory_utilization', 'max_memory_usage', 'time', 'start_time', 'is_running', 'reserved'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.gpuUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxMemoryUsage)) - (<intptr_t>&pod),
 */
  /* 'formats': one numpy scalar type per field, looked up live from the
   * numpy module each time (uint32/uint32/uint64/uint64/uint64/uint32/uint32),
   * matching the widths declared for nvmlAccountingStats_t. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9495, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 9495, __pyx_L1_error);
  /* Ownership of t_7..t_13 has been given to the list; clear the temps. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 9494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9497
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.gpuUtilization)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memoryUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxMemoryUsage)) - (<intptr_t>&pod),
 */
  /* 'offsets': each entry is (address of field) - (address of struct),
   * i.e. a runtime offsetof() for the corresponding field. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpuUtilization)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":9498
 *         'offsets': [
 *             (<intptr_t>&(pod.gpuUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memoryUtilization)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.maxMemoryUsage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.time)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memoryUtilization)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 9498, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":9499
 *             (<intptr_t>&(pod.gpuUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memoryUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxMemoryUsage)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.time)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.startTime)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxMemoryUsage)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 9499, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":9500
 *             (<intptr_t>&(pod.memoryUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxMemoryUsage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.time)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.startTime)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isRunning)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.time)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 9500, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":9501
 *             (<intptr_t>&(pod.maxMemoryUsage)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.time)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.startTime)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isRunning)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.startTime)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":9502
 *             (<intptr_t>&(pod.time)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.startTime)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isRunning)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isRunning)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9502, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":9503
 *             (<intptr_t>&(pod.startTime)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isRunning)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlAccountingStats_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.reserved)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9503, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":9496
 *         'names': ['gpu_utilization', 'memory_utilization', 'max_memory_usage', 'time', 'start_time', 'is_running', 'reserved'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.uint64, _numpy.uint64, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpuUtilization)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memoryUtilization)) - (<intptr_t>&pod),
 */
  /* Pack the seven offset ints (t_6, t_13..t_8, in field order) into a list. */
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 9496, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 9494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":9505
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlAccountingStats_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': total struct size so the dtype covers padding as well. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlAccountingStats_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9505, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 9494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall numpy.dtype(spec_dict).  If dtype happens to be a bound
   * method, unpack it so self is passed as the first positional arg. */
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9493, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9491
 * 
 * 
 * cdef _get_accounting_stats_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlAccountingStats_t pod = nvmlAccountingStats_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_accounting_stats_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9522
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlAccountingStats_t *>calloc(1, sizeof(nvmlAccountingStats_t))
 *         if self._ptr == NULL:
 */

/* Python wrapper */
/*
 * CPython-level wrapper for AccountingStats.__init__.
 * __init__ takes no arguments besides self: any positional argument or any
 * keyword is rejected with a TypeError before delegating to the
 * implementation function.  Returns 0 on success, -1 on error.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unchecked fast path is only safe when
   * CYTHON_ASSUME_SAFE_SIZE is on. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Signature is exactly __init__(self): reject extra positionals and any kwargs. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of AccountingStats.__init__.
 * Allocates a zero-initialized nvmlAccountingStats_t on the C heap and
 * takes ownership of it: _ptr -> the allocation, _owner -> None,
 * _owned -> True (so __dealloc__ frees it), _readonly -> False.
 * Raises MemoryError if calloc fails.  Returns 0 on success, -1 on error.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":9523
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlAccountingStats_t *>calloc(1, sizeof(nvmlAccountingStats_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating AccountingStats")
 */
  /* calloc zero-fills the struct, so all stats fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlAccountingStats_t *)calloc(1, (sizeof(nvmlAccountingStats_t))));

  /* "cuda/bindings/_nvml.pyx":9524
 *     def __init__(self):
 *         self._ptr = <nvmlAccountingStats_t *>calloc(1, sizeof(nvmlAccountingStats_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating AccountingStats")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9525
 *         self._ptr = <nvmlAccountingStats_t *>calloc(1, sizeof(nvmlAccountingStats_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating AccountingStats")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build MemoryError("Error allocating AccountingStats") via the
     * vectorcall path, then raise it. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9525, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_AccountingStats};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9525, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9525, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9524
 *     def __init__(self):
 *         self._ptr = <nvmlAccountingStats_t *>calloc(1, sizeof(nvmlAccountingStats_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating AccountingStats")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":9526
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating AccountingStats")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner (if any) for None, with proper refcounting. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":9527
 *             raise MemoryError("Error allocating AccountingStats")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned=True: this instance is responsible for freeing _ptr in __dealloc__. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":9528
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":9522
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlAccountingStats_t *>calloc(1, sizeof(nvmlAccountingStats_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9530
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlAccountingStats_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

/* Python wrapper */
/*
 * tp_dealloc-time wrapper for AccountingStats.__dealloc__; simply forwards
 * to the implementation.  No arguments, no return value, cannot raise.
 * (__pyx_args/__pyx_nargs here are consumed only by the
 * __Pyx_KwValues_VARARGS macro, which the generator emits unconditionally.)
 */
static void __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/*
 * Implementation of AccountingStats.__dealloc__.
 * Frees the heap-allocated nvmlAccountingStats_t only when this instance
 * owns it (_owned) and the pointer is non-NULL.  _ptr is cleared before
 * free() so a second entry cannot double-free the same block.
 */
static void __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  nvmlAccountingStats_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlAccountingStats_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":9532
 *     def __dealloc__(self):
 *         cdef nvmlAccountingStats_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: the _ptr != NULL test only runs when _owned is set. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9533
 *         cdef nvmlAccountingStats_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":9534
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Null out the member before freeing: guards against double-free. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":9535
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9532
 *     def __dealloc__(self):
 *         cdef nvmlAccountingStats_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":9530
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlAccountingStats_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":9537
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.AccountingStats object at {hex(id(self))}>"
 * 
 */

/* Python wrapper */
/*
 * tp_repr slot wrapper for AccountingStats.__repr__; forwards to the
 * implementation.  Returns a new unicode reference, or NULL on error.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of AccountingStats.__repr__.
 * Builds the f-string "<{__name__}.AccountingStats object at {hex(id(self))}>"
 * by joining five pieces: "<", str(__name__), ".AccountingStats object at ",
 * str(hex(id(self))), ">".  Returns a new unicode reference or NULL on error.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":9538
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.AccountingStats object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ global as str (t_2). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to unicode (t_1). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal and computed pieces; the length/max-char hints let the
   * join allocate the result buffer in one shot. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_AccountingStats_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9537
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.AccountingStats object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9540
 *         return f"<{__name__}.AccountingStats object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

/* Python wrapper */
/*
 * Getter wrapper for the AccountingStats.ptr property; forwards to the
 * implementation.  Returns a new int reference, or NULL on error.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the AccountingStats.ptr property getter.
 * Returns the raw address held in self._ptr as a Python int
 * (cast through intptr_t).  NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9543
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9543, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9540
 *         return f"<{__name__}.AccountingStats object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9545
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level `cdef intptr_t _get_ptr(self)` (_nvml.pyx:9545-9546): returns the raw
 * `self._ptr` pointer value as an intptr_t with no Python object boxing — the
 * fast path for intra-module callers. Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15AccountingStats__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":9546
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9545
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9548
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Slot-level wrapper for `AccountingStats.__int__`: casts `self` to the extension
 * type struct and delegates to the impl. The `__Pyx_KwValues_VARARGS(__pyx_args,
 * __pyx_nargs)` line is generated boilerplate; presumably the macro ignores both
 * names in this configuration since this unary slot has no such parameters —
 * confirm against the macro definition earlier in the file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `AccountingStats.__int__` (_nvml.pyx:9548-9549): same behavior as the
 * `ptr` property getter — boxes the `self._ptr` address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":9549
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9548
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9551
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef AccountingStats other_
 *         if not isinstance(other, AccountingStats):
*/

/* Python wrapper */
/* Slot-level wrapper for `AccountingStats.__eq__`: casts `self` and forwards
 * `other` unchanged to the impl. Generated boilerplate, see wrapper pattern above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `AccountingStats.__eq__` (_nvml.pyx:9551-9556).
 * Returns Py_False if `other` is not an AccountingStats; otherwise compares the
 * two wrapped structs byte-wise with memcmp over sizeof(nvmlAccountingStats_t)
 * and returns the resulting bool. NOTE(review): memcmp dereferences both `_ptr`
 * values; the .pyx source does not guard against NULL pointers here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":9553
 *     def __eq__(self, other):
 *         cdef AccountingStats other_
 *         if not isinstance(other, AccountingStats):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9554
 *         cdef AccountingStats other_
 *         if not isinstance(other, AccountingStats):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlAccountingStats_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9553
 *     def __eq__(self, other):
 *         cdef AccountingStats other_
 *         if not isinstance(other, AccountingStats):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":9555
 *         if not isinstance(other, AccountingStats):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlAccountingStats_t)) == 0)
 * 
 */
  /* Re-checks the type via __Pyx_TypeTest for the typed `other_` assignment
   * (accepting None here, per standard Cython cast semantics). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats))))) __PYX_ERR(0, 9555, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":9556
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlAccountingStats_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlAccountingStats_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9551
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef AccountingStats other_
 *         if not isinstance(other, AccountingStats):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9558
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlAccountingStats_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
*/

/* Python wrapper */
/* Slot-level wrapper for `AccountingStats.__setitem__`: casts `self` and forwards
 * `key`/`val` to the impl; returns 0 on success, -1 on error (mp_ass_subscript
 * convention). Generated boilerplate, see wrapper pattern above. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of `AccountingStats.__setitem__` (_nvml.pyx:9558-9568).
 * If `key == 0` and `val` is a numpy.ndarray: mallocs a fresh
 * nvmlAccountingStats_t, raises MemoryError on allocation failure, memcpys the
 * struct bytes from `val.ctypes.data`, sets `_owner = None`, `_owned = True`,
 * and `_readonly = not val.flags.writeable`. Otherwise falls back to
 * `setattr(self, key, val)` so named-field assignment routes to the properties.
 * NOTE(review): the .pyx source does not free a previously owned `_ptr` before
 * overwriting it — presumably callers only use key 0 on fresh instances; confirm
 * against the class's __cinit__/__dealloc__ (outside this view). */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":9559
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the isinstance check only runs when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9559, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 9559, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9560
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating AccountingStats")
 */
    __pyx_v_self->_ptr = ((nvmlAccountingStats_t *)malloc((sizeof(nvmlAccountingStats_t))));

    /* "cuda/bindings/_nvml.pyx":9561
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlAccountingStats_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9562
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating AccountingStats")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlAccountingStats_t))
 *             self._owner = None
 */
      /* Looks up `MemoryError` as a module global (standard Cython codegen for a
       * bare name), calls it with the message, and raises the result. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9562, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_AccountingStats};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9562, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 9562, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9561
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlAccountingStats_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":9563
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlAccountingStats_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Extracts the ndarray's buffer address (val.ctypes.data, a Python int) and
     * copies sizeof(nvmlAccountingStats_t) bytes; assumes the array holds at
     * least that many contiguous bytes — enforced by the caller, not here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9563, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9563, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9563, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlAccountingStats_t))));

    /* "cuda/bindings/_nvml.pyx":9564
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlAccountingStats_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9565
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlAccountingStats_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9566
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9566, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9566, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 9566, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":9559
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":9568
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 9568, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":9558
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlAccountingStats_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9570
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_utilization(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-level wrapper for the `gpu_utilization` property getter: casts
 * `self` and delegates to the impl. Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for `AccountingStats.gpu_utilization` (_nvml.pyx:9570-9573): reads the
 * struct field `self._ptr[0].gpuUtilization` (unsigned int, per the boxing call
 * below) and returns it as a Python int. Dereferences `_ptr` unchecked. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9573
 *     def gpu_utilization(self):
 *         """int: """
 *         return self._ptr[0].gpuUtilization             # <<<<<<<<<<<<<<
 * 
 *     @gpu_utilization.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).gpuUtilization); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9570
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_utilization(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.gpu_utilization.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9575
 *         return self._ptr[0].gpuUtilization
 * 
 *     @gpu_utilization.setter             # <<<<<<<<<<<<<<
 *     def gpu_utilization(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-level wrapper for the `gpu_utilization` property setter: casts
 * `self`, forwards `val`; returns 0/-1 per the descr `__set__` convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for `AccountingStats.gpu_utilization` (_nvml.pyx:9575-9579): raises
 * ValueError if the instance is flagged read-only, otherwise converts `val` to
 * unsigned int (OverflowError/TypeError propagate from the conversion helper)
 * and stores it into the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9577
 *     @gpu_utilization.setter
 *     def gpu_utilization(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].gpuUtilization = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9578
 *     def gpu_utilization(self, val):
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].gpuUtilization = val
 * 
 */
    /* Builds the ValueError via vectorcall on the builtin type, then raises. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_AccountingStats_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9578, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9578, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9577
 *     @gpu_utilization.setter
 *     def gpu_utilization(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].gpuUtilization = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9579
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].gpuUtilization = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9579, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).gpuUtilization = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9575
 *         return self._ptr[0].gpuUtilization
 * 
 *     @gpu_utilization.setter             # <<<<<<<<<<<<<<
 *     def gpu_utilization(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.gpu_utilization.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9581
 *         self._ptr[0].gpuUtilization = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_utilization(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-level wrapper for the `memory_utilization` property getter:
 * casts `self` and delegates to the impl. Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for `AccountingStats.memory_utilization` (_nvml.pyx:9581-9584): boxes
 * the unsigned-int struct field `memoryUtilization` as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9584
 *     def memory_utilization(self):
 *         """int: """
 *         return self._ptr[0].memoryUtilization             # <<<<<<<<<<<<<<
 * 
 *     @memory_utilization.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).memoryUtilization); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9584, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9581
 *         self._ptr[0].gpuUtilization = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_utilization(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.memory_utilization.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9586
 *         return self._ptr[0].memoryUtilization
 * 
 *     @memory_utilization.setter             # <<<<<<<<<<<<<<
 *     def memory_utilization(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-level wrapper for the `memory_utilization` property setter:
 * casts `self`, forwards `val`; returns 0/-1. Generated boilerplate. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for `AccountingStats.memory_utilization` (_nvml.pyx:9586-9590): raises
 * ValueError when `_readonly` is set, otherwise converts `val` to unsigned int
 * and stores it into the wrapped struct. Mirrors the gpu_utilization setter. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9588
 *     @memory_utilization.setter
 *     def memory_utilization(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].memoryUtilization = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9589
 *     def memory_utilization(self, val):
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].memoryUtilization = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_AccountingStats_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9589, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9589, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9588
 *     @memory_utilization.setter
 *     def memory_utilization(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].memoryUtilization = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9590
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].memoryUtilization = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9590, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).memoryUtilization = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9586
 *         return self._ptr[0].memoryUtilization
 * 
 *     @memory_utilization.setter             # <<<<<<<<<<<<<<
 *     def memory_utilization(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.memory_utilization.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9592
 *         self._ptr[0].memoryUtilization = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_memory_usage(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-level wrapper for the `max_memory_usage` property getter:
 * casts `self` and delegates to the impl. Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for AccountingStats.max_memory_usage: converts the
 * unsigned 64-bit maxMemoryUsage field of the wrapped NVML struct to a Python
 * int. Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9595
 *     def max_memory_usage(self):
 *         """int: """
 *         return self._ptr[0].maxMemoryUsage             # <<<<<<<<<<<<<<
 * 
 *     @max_memory_usage.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned long long field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).maxMemoryUsage); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9595, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9592
 *         self._ptr[0].memoryUtilization = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_memory_usage(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.max_memory_usage.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9597
 *         return self._ptr[0].maxMemoryUsage
 * 
 *     @max_memory_usage.setter             # <<<<<<<<<<<<<<
 *     def max_memory_usage(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.max_memory_usage.__set__:
 * forwards (self, val) to the typed implementation. Returns 0 on success, -1 on
 * error (standard setter protocol). Generated code — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for AccountingStats.max_memory_usage: raises
 * ValueError when the instance was created read-only, otherwise converts the
 * Python value to unsigned long long and stores it in the wrapped struct.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9599
 *     @max_memory_usage.setter
 *     def max_memory_usage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].maxMemoryUsage = val
 */
  /* Guard: writes are rejected on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9600
 *     def max_memory_usage(self, val):
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxMemoryUsage = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_AccountingStats_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9600, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9600, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9599
 *     @max_memory_usage.setter
 *     def max_memory_usage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].maxMemoryUsage = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9601
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].maxMemoryUsage = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned long long; OverflowError/TypeError propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 9601, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxMemoryUsage = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9597
 *         return self._ptr[0].maxMemoryUsage
 * 
 *     @max_memory_usage.setter             # <<<<<<<<<<<<<<
 *     def max_memory_usage(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.max_memory_usage.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9603
 *         self._ptr[0].maxMemoryUsage = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.time.__get__:
 * casts self to the extension-object struct and delegates to the typed
 * implementation. Generated code — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_4time_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_4time_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4time___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for AccountingStats.time: boxes the unsigned 64-bit
 * `time` field of the wrapped NVML struct as a Python int. Returns a new
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4time___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9606
 *     def time(self):
 *         """int: """
 *         return self._ptr[0].time             # <<<<<<<<<<<<<<
 * 
 *     @time.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).time); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9606, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9603
 *         self._ptr[0].maxMemoryUsage = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.time.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9608
 *         return self._ptr[0].time
 * 
 *     @time.setter             # <<<<<<<<<<<<<<
 *     def time(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.time.__set__:
 * forwards (self, val) to the typed implementation. Returns 0 on success,
 * -1 on error (standard setter protocol). Generated code — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_4time_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_4time_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4time_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for AccountingStats.time: raises ValueError on a
 * read-only instance, otherwise converts the Python value to unsigned long
 * long and stores it. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_4time_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9610
 *     @time.setter
 *     def time(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].time = val
 */
  /* Guard: writes are rejected on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9611
 *     def time(self, val):
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].time = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_AccountingStats_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9611, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9611, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9610
 *     @time.setter
 *     def time(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].time = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9612
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].time = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned long long; OverflowError/TypeError propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 9612, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).time = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9608
 *         return self._ptr[0].time
 * 
 *     @time.setter             # <<<<<<<<<<<<<<
 *     def time(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.time.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9614
 *         self._ptr[0].time = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def start_time(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.start_time.__get__:
 * casts self to the extension-object struct and delegates to the typed
 * implementation. Generated code — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10start_time___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for AccountingStats.start_time: boxes the unsigned
 * 64-bit startTime field of the wrapped NVML struct as a Python int. Returns
 * a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10start_time___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9617
 *     def start_time(self):
 *         """int: """
 *         return self._ptr[0].startTime             # <<<<<<<<<<<<<<
 * 
 *     @start_time.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).startTime); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9617, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9614
 *         self._ptr[0].time = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def start_time(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.start_time.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9619
 *         return self._ptr[0].startTime
 * 
 *     @start_time.setter             # <<<<<<<<<<<<<<
 *     def start_time(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.start_time.__set__:
 * forwards (self, val) to the typed implementation. Returns 0 on success,
 * -1 on error (standard setter protocol). Generated code — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for AccountingStats.start_time: raises ValueError on
 * a read-only instance, otherwise converts the Python value to unsigned long
 * long and stores it. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9621
 *     @start_time.setter
 *     def start_time(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].startTime = val
 */
  /* Guard: writes are rejected on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9622
 *     def start_time(self, val):
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].startTime = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_AccountingStats_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9622, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9622, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9621
 *     @start_time.setter
 *     def start_time(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].startTime = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9623
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].startTime = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned long long; OverflowError/TypeError propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 9623, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).startTime = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9619
 *         return self._ptr[0].startTime
 * 
 *     @start_time.setter             # <<<<<<<<<<<<<<
 *     def start_time(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.start_time.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9625
 *         self._ptr[0].startTime = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_running(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.is_running.__get__:
 * casts self to the extension-object struct and delegates to the typed
 * implementation. Generated code — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10is_running___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for AccountingStats.is_running: boxes the unsigned
 * int isRunning field of the wrapped NVML struct as a Python int (note: an
 * int, not a bool — mirrors the underlying C field). Returns a new reference,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10is_running___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9628
 *     def is_running(self):
 *         """int: """
 *         return self._ptr[0].isRunning             # <<<<<<<<<<<<<<
 * 
 *     @is_running.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isRunning); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9625
 *         self._ptr[0].startTime = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_running(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.is_running.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9630
 *         return self._ptr[0].isRunning
 * 
 *     @is_running.setter             # <<<<<<<<<<<<<<
 *     def is_running(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for AccountingStats.is_running.__set__:
 * forwards (self, val) to the typed implementation. Returns 0 on success,
 * -1 on error (standard setter protocol). Generated code — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for AccountingStats.is_running: raises ValueError on
 * a read-only instance, otherwise converts the Python value to unsigned int
 * and stores it. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9632
 *     @is_running.setter
 *     def is_running(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].isRunning = val
 */
  /* Guard: writes are rejected on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9633
 *     def is_running(self, val):
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isRunning = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_AccountingStats_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9633, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9633, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9632
 *     @is_running.setter
 *     def is_running(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].isRunning = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9634
 *         if self._readonly:
 *             raise ValueError("This AccountingStats instance is read-only")
 *         self._ptr[0].isRunning = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert to C unsigned int; OverflowError/TypeError propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9634, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isRunning = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9630
 *         return self._ptr[0].isRunning
 * 
 *     @is_running.setter             # <<<<<<<<<<<<<<
 *     def is_running(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.is_running.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9636
 *         self._ptr[0].isRunning = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an AccountingStats instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the AccountingStats.from_data(data) staticmethod.
 * Accepts exactly one argument, "data", positionally or by keyword, then
 * delegates to the typed implementation. Returns a new reference or NULL with
 * an exception set.
 *
 * FIX(review): the generated check `if (unlikely(__pyx_kwds_len) < 0)` was
 * misparenthesized. With unlikely(x) defined as __builtin_expect(!!(x), 0)
 * the expression evaluates to 0 or 1 and is never negative, so a failure of
 * __Pyx_NumKwargs_FASTCALL (e.g. PyTuple_Size returning -1 under the Limited
 * API) was silently ignored. Corrected to `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_12from_data, "AccountingStats.from_data(data)\n\nCreate an AccountingStats instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `accounting_stats_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Propagate a failed keyword count (negative => error already set). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9636, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: stage positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9636, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 9636, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 9636, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9636, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 9636, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of AccountingStats.from_data(data): looks up the module
 * global `accounting_stats_dtype` and delegates to the shared __from_data
 * helper, which wraps the given single-element NumPy array in an
 * AccountingStats instance. Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":9643
 *             data (_numpy.ndarray): a single-element array of dtype `accounting_stats_dtype` holding the data.
 *         """
 *         return __from_data(data, "accounting_stats_dtype", accounting_stats_dtype, AccountingStats)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module-level dtype object (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_accounting_stats_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_accounting_stats_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9636
 *         self._ptr[0].isRunning = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an AccountingStats instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9645
 *         return __from_data(data, "accounting_stats_dtype", accounting_stats_dtype, AccountingStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an AccountingStats instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for AccountingStats.from_ptr(ptr, readonly=False, owner=None).
 * Parses positional/keyword arguments from either the METH_FASTCALL argument
 * vector or a classic args tuple, converts them to C values (intptr_t, int,
 * PyObject*), and dispatches to the implementation function
 * __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr, "AccountingStats.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an AccountingStats instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0]=ptr, values[1]=readonly, values[2]=owner (owned references). */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be INSIDE unlikely(). `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), so `unlikely(len) < 0` evaluates (!!len) < 0
     * (always false) and a negative error return from __Pyx_NumKwargs_FASTCALL
     * was silently ignored. Compare the correctly-formed check used by
     * __reduce_cython__ below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9645, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first, then
       * merge keyword values via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9645, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9645, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9645, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 9645, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":9646
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an AccountingStats instance wrapping the given pointer.
 * 
*/
      /* Apply the default owner=None, then verify the one required
       * positional argument (ptr) was supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 9645, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9645, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9645, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9645, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python-level values to C: ptr -> intptr_t, readonly -> bint
     * (default 0/False when omitted), owner stays a borrowed-from-values
     * PyObject*. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9646, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9646, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 9645, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":9645
 *         return __from_data(data, "accounting_stats_dtype", accounting_stats_dtype, AccountingStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an AccountingStats instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of AccountingStats.from_ptr.
 * Raises ValueError if ptr is 0. If `owner` is None, the nvmlAccountingStats_t
 * at `ptr` is copied into freshly malloc'ed storage that the new object owns
 * (_owned = 1); otherwise the object wraps `ptr` directly and holds a
 * reference to `owner` to keep the backing memory alive (_owned = 0).
 * `readonly` is stored on the object as _readonly. Returns a new reference,
 * or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9654
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)
*/
  /* Reject a null pointer up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9655
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9655, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9655, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9654
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)
*/
  }

  /* "cuda/bindings/_nvml.pyx":9656
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
*/
  /* Allocate the instance directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_AccountingStats(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9656, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9657
 *             raise ValueError("ptr must not be null (0)")
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if obj._ptr == NULL:
*/
  /* No owner: copy the struct so the new object owns its storage. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9658
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)
 *         if owner is None:
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating AccountingStats")
*/
    __pyx_v_obj->_ptr = ((nvmlAccountingStats_t *)malloc((sizeof(nvmlAccountingStats_t))));

    /* "cuda/bindings/_nvml.pyx":9659
 *         if owner is None:
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlAccountingStats_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9660
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating AccountingStats")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlAccountingStats_t))
 *             obj._owner = None
*/
      /* Resolve the name "MemoryError" (module global, falling back to
       * builtins) and raise it with the allocation-failure message. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9660, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_AccountingStats};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9660, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 9660, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9659
 *         if owner is None:
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlAccountingStats_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":9661
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlAccountingStats_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct into the owned storage. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlAccountingStats_t))));

    /* "cuda/bindings/_nvml.pyx":9662
 *                 raise MemoryError("Error allocating AccountingStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlAccountingStats_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9663
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlAccountingStats_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlAccountingStats_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9657
 *             raise ValueError("ptr must not be null (0)")
 *         cdef AccountingStats obj = AccountingStats.__new__(AccountingStats)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlAccountingStats_t *>malloc(sizeof(nvmlAccountingStats_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":9665
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlAccountingStats_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Owner provided: wrap the pointer without copying; _owner keeps the
   * backing memory alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlAccountingStats_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9666
 *         else:
 *             obj._ptr = <nvmlAccountingStats_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":9667
 *             obj._ptr = <nvmlAccountingStats_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":9668
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":9669
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return a new reference to the constructed object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9645
 *         return __from_data(data, "accounting_stats_dtype", accounting_stats_dtype, AccountingStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an AccountingStats instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for AccountingStats.__reduce_cython__(self).
 * Takes no arguments: rejects any positionals and any keywords, then
 * dispatches to the implementation (which always raises TypeError). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_16__reduce_cython__, "AccountingStats.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of AccountingStats.__reduce_cython__.
 * Unconditionally raises TypeError: the wrapped C pointer (_ptr) cannot be
 * converted to a Python object, so instances are not picklable.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for AccountingStats.__setstate_cython__(self, __pyx_state).
 * Parses exactly one argument (__pyx_state) from either the METH_FASTCALL
 * vector or a classic args tuple, then dispatches to the implementation
 * (which always raises TypeError — instances are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_18__setstate_cython__, "AccountingStats.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] = __pyx_state (owned reference). */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be INSIDE unlikely(). `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), so `unlikely(len) < 0` evaluates (!!len) < 0
     * (always false) and a negative error return from __Pyx_NumKwargs_FASTCALL
     * was silently ignored. Compare the correctly-formed check in
     * __reduce_cython__ above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only call path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of AccountingStats.__setstate_cython__.
 * Unconditionally raises TypeError: the wrapped C pointer (_ptr) cannot be
 * restored from a Python pickle state, so instances are not unpicklable.
 * The __pyx_state argument is intentionally unused. Always returns NULL
 * with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15AccountingStats_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.AccountingStats.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9672
 * 
 * 
 * cdef _get_encoder_session_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEncoderSessionInfo_t pod = nvmlEncoderSessionInfo_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_encoder_session_info_dtype_offsets(void) {
  nvmlEncoderSessionInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlEncoderSessionInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  size_t __pyx_t_15;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_encoder_session_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":9673
 * 
 * cdef _get_encoder_session_info_dtype_offsets():
 *     cdef nvmlEncoderSessionInfo_t pod = nvmlEncoderSessionInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'codec_type', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":9674
 * cdef _get_encoder_session_info_dtype_offsets():
 *     cdef nvmlEncoderSessionInfo_t pod = nvmlEncoderSessionInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'codec_type', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9674, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9674, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":9675
 *     cdef nvmlEncoderSessionInfo_t pod = nvmlEncoderSessionInfo_t()
 *     return _numpy.dtype({
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'codec_type', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9675, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9675, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_session_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_session_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_session_id) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_vgpu_instance) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_codec_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_codec_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_codec_type) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_h_resolution);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_h_resolution);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_h_resolution) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_v_resolution);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_v_resolution);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_v_resolution) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_average_fps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_average_fps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_average_fps) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_average_latency);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_average_latency);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_average_latency) != (0)) __PYX_ERR(0, 9675, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 9675, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9676
 *     return _numpy.dtype({
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'codec_type', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 9676, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 9675, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9678
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sessionId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":9679
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.codecType)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 9679, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":9680
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.codecType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuInstance)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 9680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":9681
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.codecType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.codecType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 9681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":9682
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.codecType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFps)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hResolution)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 9682, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":9683
 *             (<intptr_t>&(pod.codecType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.averageFps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vResolution)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9683, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":9684
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFps)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.averageFps)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":9685
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlEncoderSessionInfo_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.averageLatency)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":9677
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'codec_type', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9677, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_14) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_13) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_12) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_10) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_9) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_8) != (0)) __PYX_ERR(0, 9677, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 9675, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":9687
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlEncoderSessionInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEncoderSessionInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 9675, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_15 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_15 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_15, (2-__pyx_t_15) | (__pyx_t_15*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9674, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9672
 * 
 * 
 * cdef _get_encoder_session_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEncoderSessionInfo_t pod = nvmlEncoderSessionInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_encoder_session_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9709
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=encoder_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Python wrapper for EncoderSessionInfo.__init__(self, size=1).
 * Parses positional and keyword arguments (one optional argument, `size`,
 * defaulting to the interned int 1) and delegates to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must sit inside unlikely().  unlikely(x)
     * normalizes its operand to 0/1, so the original
     * `if (unlikely(__pyx_kwds_len) < 0)` was always false and a negative
     * (error) result from __Pyx_NumKwargs_VARARGS was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9709, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9709, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 9709, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only fast path; apply the default when size is omitted. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9709, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 9709, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__init__(self, size=1):
 * allocates `arr = numpy.empty(size, dtype=encoder_session_info_dtype)`,
 * stores `arr.view(numpy.recarray)` into self._data, and asserts the
 * resulting itemsize matches sizeof(nvmlEncoderSessionInfo_t).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":9710
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=encoder_session_info_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlEncoderSessionInfo_t), \
 */
  /* Look up numpy.empty and the module-level encoder_session_info_dtype,
   * then call empty(size, dtype=...) via the vectorcall builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_encoder_session_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `empty` resolved to a bound method, unpack it so self goes in slot 0. */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9710, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 9710, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9710, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9711
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=encoder_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlEncoderSessionInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEncoderSessionInfo_t) }"
 */
  /* self._data = arr.view(numpy.recarray); the old _data reference is
   * released only after the new one is assigned. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9711, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9712
 *         arr = _numpy.empty(size, dtype=encoder_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlEncoderSessionInfo_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEncoderSessionInfo_t) }"
 * 
 */
  /* Sanity check: the dtype's itemsize must equal the C struct size;
   * skipped entirely when assertions are compiled out. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9712, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEncoderSessionInfo_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9712, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9712, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 9712, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":9713
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlEncoderSessionInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEncoderSessionInfo_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string assertion message and raise AssertionError. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9713, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9713, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlEncoderSessionInfo_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9713, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9713, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 9712, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 9712, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":9709
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=encoder_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9715
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEncoderSessionInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.EncoderSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Python wrapper for EncoderSessionInfo.__repr__: no arguments to parse,
 * so it simply forwards `self` to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not parameters of this
   * function; this line can only compile if __Pyx_KwValues_VARARGS is a
   * macro that does not evaluate its arguments (e.g. expands to NULL) —
   * confirm against the Cython utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__repr__.
 * If the wrapped numpy recarray (self._data) holds more than one element
 * it formats "<{__name__}.EncoderSessionInfo_Array_{size} object at {hex(id(self))}>",
 * otherwise "<{__name__}.EncoderSessionInfo object at {hex(id(self))}>".
 * The f-strings are built by formatting each interpolated value to a
 * unicode fragment and joining fragments with __Pyx_PyUnicode_Join. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];  /* fragments for the 7-part array-form f-string */
  PyObject *__pyx_t_7[5];  /* fragments for the 5-part scalar-form f-string */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":9716
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.EncoderSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate `self._data.size > 1` via a dynamic attribute lookup and rich compare. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9716, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 9716, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":9717
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.EncoderSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.EncoderSessionInfo object at {hex(id(self))}>"
 */
    __Pyx_XDECREF(__pyx_r);
    /* Fragment 1: format the module __name__ global as str. */
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* Fragment 2: format self._data.size as str. */
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* Fragment 3: hex(id(self)) coerced to unicode. */
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    /* Interleave literal pieces with the formatted values and join; the
     * integer constants in the join call are the known literal lengths. */
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_EncoderSessionInfo_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 26 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9716
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.EncoderSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":9719
 *             return f"<{__name__}.EncoderSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.EncoderSessionInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar case: same structure, without the size interpolation. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_EncoderSessionInfo_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9719, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":9715
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEncoderSessionInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.EncoderSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9721
 *             return f"<{__name__}.EncoderSessionInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

/* Python wrapper */
/* Cython-generated getter-descriptor wrapper for the `ptr` property:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns
 * `self._data.ctypes.data` — the numpy array's base address as a Python
 * int — via two chained dynamic attribute lookups. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9724
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9721
 *             return f"<{__name__}.EncoderSessionInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9726
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

/* C-level (cdef) fast path for the `ptr` property: fetches
 * self._data.ctypes.data and converts it to a raw intptr_t.
 * On a Python exception it records a traceback and returns 0;
 * callers distinguish failure via the pending exception, not the
 * return value (0 alone is ambiguous). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18EncoderSessionInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9727
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* PyLong -> native integer; -1 with a pending exception signals failure. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9727, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9726
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9729
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

/* Python wrapper */
/* Cython-generated nb_int slot wrapper for EncoderSessionInfo.__int__:
 * casts self to the extension-type struct and delegates to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__int__.
 * Raises TypeError when the wrapped array has size > 1 (converting an
 * array to a single int is ambiguous; the message directs callers to
 * .ptr instead). Otherwise returns self._data.ctypes.data — the data
 * pointer as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":9730
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9730, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 9730, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":9731
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Instantiate TypeError via a vectorcall with the interned message string,
     * then raise it; __PYX_ERR jumps to the cleanup/error path. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9731, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9731, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9730
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":9733
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9729
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9735
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

/* Python wrapper */
/* Cython-generated sq_length/mp_length slot wrapper for __len__:
 * casts self to the extension-type struct and delegates to the impl.
 * Returns -1 (propagated from the impl) on error. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__len__: returns self._data.size
 * (the element count of the wrapped numpy recarray) converted to
 * Py_ssize_t, or -1 with an exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":9736
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* __index__-based conversion; -1 plus a pending exception signals failure. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9736, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9735
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9738
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

/* Python wrapper */
/* Cython-generated wrapper for EncoderSessionInfo.__eq__: casts self to
 * the extension-type struct, passes `other` through untyped, and
 * delegates to the impl below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__eq__.
 * Returns False when `other` is not an EncoderSessionInfo, or the two
 * wrapped arrays differ in size or dtype; otherwise returns
 * bool((self._data == other._data).all()), i.e. element-wise equality
 * of the backing recarrays. The three guard clauses short-circuit via
 * goto (Cython's compilation of Python `or`). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":9739
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Cache self._data in a local so the chained comparisons below reuse it. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9740
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Clause 1: type check (isinstance against the extension type). */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Clause 2: size mismatch (self_data.size != other._data.size). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Clause 3: dtype mismatch (self_data.dtype != other._data.dtype). */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9740, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9741
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9740
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":9742
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Element-wise `==` on the arrays, then call .all() on the result and
   * collapse it to a Python bool via truth-testing. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9742, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9742, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9742, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9742, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Double negation normalizes the truth value to 0/1 for bool(). */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9742, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9738
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EncoderSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9744
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

/* Python wrapper */
/* Cython-generated getter-descriptor wrapper for the `session_id`
 * property: casts self to the extension-type struct and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `session_id` property getter.
 * For a single-element array it returns int(self._data.session_id[0])
 * (a plain Python int); otherwise it returns the session_id field array
 * itself (a numpy array view). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9747
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_id[0])
 *         return self._data.session_id
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9747, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Optimized `== 1` comparison against the interned int constant. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9747, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9748
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.session_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.session_id
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9748, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* [0] indexing, then int() to strip the numpy scalar type. */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9748, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9748, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9747
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_id[0])
 *         return self._data.session_id
 */
  }

  /* "cuda/bindings/_nvml.pyx":9749
 *         if self._data.size == 1:
 *             return int(self._data.session_id[0])
 *         return self._data.session_id             # <<<<<<<<<<<<<<
 * 
 *     @session_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9744
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.session_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9751
 *         return self._data.session_id
 * 
 *     @session_id.setter             # <<<<<<<<<<<<<<
 *     def session_id(self, val):
 *         self._data.session_id = val
 */

/* Python wrapper */
/* Cython-generated setter-descriptor wrapper for the `session_id`
 * property: casts self to the extension-type struct and delegates.
 * Returns 0 on success, -1 (propagated) on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9753
 *     @session_id.setter
 *     def session_id(self, val):
 *         self._data.session_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_id, __pyx_v_val) < (0)) __PYX_ERR(0, 9753, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9751
 *         return self._data.session_id
 * 
 *     @session_id.setter             # <<<<<<<<<<<<<<
 *     def session_id(self, val):
 *         self._data.session_id = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.session_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9755
 *         self._data.session_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property getter
 * `EncoderSessionInfo.pid`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of (pyx lines 9758-9760):
 *     if self._data.size == 1: return int(self._data.pid[0])
 *     return self._data.pid
 * Returns a Python int for single-element data, otherwise the `pid`
 * attribute object itself.  NOTE(review): _data.size/_data.pid are accessed
 * purely via the Python attribute protocol; presumably _data is a numpy
 * record array -- confirm against the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9758
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  /* Evaluate `self._data.size == 1`. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9758, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9759
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
*/
    /* Single-element case: `int(self._data.pid[0])`. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9758
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
*/
  }

  /* "cuda/bindings/_nvml.pyx":9760
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
*/
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9755
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries, add traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9762
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property setter
 * `EncoderSessionInfo.pid`. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.pid = val` (pyx line 9764): a plain
 * Python-level attribute store on self->_data.  Returns 0 on success, -1
 * with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9764
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 9764, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9762
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: record a traceback frame for the Python-level property. */
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9766
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property getter
 * `EncoderSessionInfo.vgpu_instance`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of (pyx lines 9769-9771):
 *     if self._data.size == 1: return int(self._data.vgpu_instance[0])
 *     return self._data.vgpu_instance
 * Returns a Python int for single-element data, otherwise the
 * `vgpu_instance` attribute object itself. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9769
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
*/
  /* Evaluate `self._data.size == 1`. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9769, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9769, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9770
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])             # <<<<<<<<<<<<<<
 *         return self._data.vgpu_instance
 * 
*/
    /* Single-element case: `int(self._data.vgpu_instance[0])`. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9770, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9770, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9770, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9769
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
*/
  }

  /* "cuda/bindings/_nvml.pyx":9771
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_instance.setter
*/
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9766
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries, add traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.vgpu_instance.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9773
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property setter
 * `EncoderSessionInfo.vgpu_instance`. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.vgpu_instance = val` (pyx line 9775): a
 * plain Python-level attribute store on self->_data.  Returns 0 on success,
 * -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9775
 *     @vgpu_instance.setter
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance, __pyx_v_val) < (0)) __PYX_ERR(0, 9775, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9773
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: record a traceback frame for the Python-level property. */
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.vgpu_instance.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9777
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def codec_type(self):
 *         """Union[~_numpy.int32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property getter
 * `EncoderSessionInfo.codec_type`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of (pyx lines 9780-9782):
 *     if self._data.size == 1: return int(self._data.codec_type[0])
 *     return self._data.codec_type
 * Returns a Python int for single-element data, otherwise the `codec_type`
 * attribute object itself. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9780
 *     def codec_type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.codec_type[0])
 *         return self._data.codec_type
*/
  /* Evaluate `self._data.size == 1`. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9780, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9780, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9781
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.codec_type[0])             # <<<<<<<<<<<<<<
 *         return self._data.codec_type
 * 
*/
    /* Single-element case: `int(self._data.codec_type[0])`. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_codec_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9781, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9781, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9781, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9780
 *     def codec_type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.codec_type[0])
 *         return self._data.codec_type
*/
  }

  /* "cuda/bindings/_nvml.pyx":9782
 *         if self._data.size == 1:
 *             return int(self._data.codec_type[0])
 *         return self._data.codec_type             # <<<<<<<<<<<<<<
 * 
 *     @codec_type.setter
*/
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_codec_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9782, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9777
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def codec_type(self):
 *         """Union[~_numpy.int32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries, add traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.codec_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9784
 *         return self._data.codec_type
 * 
 *     @codec_type.setter             # <<<<<<<<<<<<<<
 *     def codec_type(self, val):
 *         self._data.codec_type = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property setter
 * `EncoderSessionInfo.codec_type`. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.codec_type = val` (pyx line 9786): a plain
 * Python-level attribute store on self->_data.  Returns 0 on success, -1
 * with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9786
 *     @codec_type.setter
 *     def codec_type(self, val):
 *         self._data.codec_type = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_codec_type, __pyx_v_val) < (0)) __PYX_ERR(0, 9786, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9784
 *         return self._data.codec_type
 * 
 *     @codec_type.setter             # <<<<<<<<<<<<<<
 *     def codec_type(self, val):
 *         self._data.codec_type = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: record a traceback frame for the Python-level property. */
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.codec_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9788
 *         self._data.codec_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property getter
 * `EncoderSessionInfo.h_resolution`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of (pyx lines 9791-9793):
 *     if self._data.size == 1: return int(self._data.h_resolution[0])
 *     return self._data.h_resolution
 * Returns a Python int for single-element data, otherwise the
 * `h_resolution` attribute object itself. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9791
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.h_resolution[0])
 *         return self._data.h_resolution
*/
  /* Evaluate `self._data.size == 1`. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9791, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9791, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9792
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.h_resolution[0])             # <<<<<<<<<<<<<<
 *         return self._data.h_resolution
 * 
*/
    /* Single-element case: `int(self._data.h_resolution[0])`. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9792, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9792, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9792, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9791
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.h_resolution[0])
 *         return self._data.h_resolution
*/
  }

  /* "cuda/bindings/_nvml.pyx":9793
 *         if self._data.size == 1:
 *             return int(self._data.h_resolution[0])
 *         return self._data.h_resolution             # <<<<<<<<<<<<<<
 * 
 *     @h_resolution.setter
*/
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9793, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9788
 *         self._data.codec_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries, add traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.h_resolution.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9795
 *         return self._data.h_resolution
 * 
 *     @h_resolution.setter             # <<<<<<<<<<<<<<
 *     def h_resolution(self, val):
 *         self._data.h_resolution = val
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property setter
 * `EncoderSessionInfo.h_resolution`. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `self._data.h_resolution = val` (pyx line 9797): a
 * plain Python-level attribute store on self->_data.  Returns 0 on success,
 * -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9797
 *     @h_resolution.setter
 *     def h_resolution(self, val):
 *         self._data.h_resolution = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_resolution, __pyx_v_val) < (0)) __PYX_ERR(0, 9797, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9795
 *         return self._data.h_resolution
 * 
 *     @h_resolution.setter             # <<<<<<<<<<<<<<
 *     def h_resolution(self, val):
 *         self._data.h_resolution = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: record a traceback frame for the Python-level property. */
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.h_resolution.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9799
 *         self._data.h_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx.  C entry point for the Python property getter
 * `EncoderSessionInfo.v_resolution`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast `self` to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of (pyx lines 9802-9804):
 *     if self._data.size == 1: return int(self._data.v_resolution[0])
 *     return self._data.v_resolution
 * Returns a Python int for single-element data, otherwise the
 * `v_resolution` attribute object itself. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9802
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.v_resolution[0])
 *         return self._data.v_resolution
*/
  /* Evaluate `self._data.size == 1`. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9802, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9802, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9803
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.v_resolution[0])             # <<<<<<<<<<<<<<
 *         return self._data.v_resolution
 * 
*/
    /* Single-element case: `int(self._data.v_resolution[0])`. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9803, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9803, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9803, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9802
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.v_resolution[0])
 *         return self._data.v_resolution
*/
  }

  /* "cuda/bindings/_nvml.pyx":9804
 *         if self._data.size == 1:
 *             return int(self._data.v_resolution[0])
 *         return self._data.v_resolution             # <<<<<<<<<<<<<<
 * 
 *     @v_resolution.setter
*/
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9799
 *         self._data.h_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries, add traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.v_resolution.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9806
 *         return self._data.v_resolution
 * 
 *     @v_resolution.setter             # <<<<<<<<<<<<<<
 *     def v_resolution(self, val):
 *         self._data.v_resolution = val
*/

/* Python wrapper */
/* Python-level wrapper for the `EncoderSessionInfo.v_resolution` setter:
 * casts `self` to the extension-type struct and forwards to the
 * implementation function. Returns 0 on success, -1 on error (CPython
 * `__set__` slot convention).
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here;
 * `__Pyx_KwValues_VARARGS` presumably ignores its arguments (likely
 * expands to a constant) — confirm against the macro definition. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Cython property setter
 *     `self._data.v_resolution = val`
 * (see _nvml.pyx:9808): a single attribute store on the wrapped `_data`
 * object. Returns 0 on success, -1 after recording a traceback on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9808
 *     @v_resolution.setter
 *     def v_resolution(self, val):
 *         self._data.v_resolution = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* __PYX_ERR jumps to __pyx_L1_error, recording the .pyx line number. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_resolution, __pyx_v_val) < (0)) __PYX_ERR(0, 9808, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9806
 *         return self._data.v_resolution
 * 
 *     @v_resolution.setter             # <<<<<<<<<<<<<<
 *     def v_resolution(self, val):
 *         self._data.v_resolution = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.v_resolution.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9810
 *         self._data.v_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python-level wrapper for the `EncoderSessionInfo.average_fps` getter:
 * casts `self` to the extension-type struct and forwards to the
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Cython property getter
 *     if self._data.size == 1: return int(self._data.average_fps[0])
 *     return self._data.average_fps
 * (see _nvml.pyx:9813-9815). For a single-element record it unwraps the
 * scalar to a plain Python int; otherwise it returns the raw attribute.
 * Returns a new reference, or NULL after recording a traceback. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9813
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_fps[0])
 *         return self._data.average_fps
 */
  /* Compare `self._data.size == 1`; BoolEqObjC returns <0 on error. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9813, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9814
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.average_fps[0])             # <<<<<<<<<<<<<<
 *         return self._data.average_fps
 * 
 */
    /* Single element: fetch attr, index [0], coerce to Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_fps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9813
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_fps[0])
 *         return self._data.average_fps
 */
  }

  /* "cuda/bindings/_nvml.pyx":9815
 *         if self._data.size == 1:
 *             return int(self._data.average_fps[0])
 *         return self._data.average_fps             # <<<<<<<<<<<<<<
 * 
 *     @average_fps.setter
 */
  /* Multi-element: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_fps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9815, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9810
 *         self._data.v_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.average_fps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9817
 *         return self._data.average_fps
 * 
 *     @average_fps.setter             # <<<<<<<<<<<<<<
 *     def average_fps(self, val):
 *         self._data.average_fps = val
*/

/* Python wrapper */
/* Python-level wrapper for the `EncoderSessionInfo.average_fps` setter:
 * casts `self` to the extension-type struct and forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Cython property setter
 *     `self._data.average_fps = val`
 * (see _nvml.pyx:9819): a single attribute store on the wrapped `_data`
 * object. Returns 0 on success, -1 after recording a traceback on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9819
 *     @average_fps.setter
 *     def average_fps(self, val):
 *         self._data.average_fps = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_fps, __pyx_v_val) < (0)) __PYX_ERR(0, 9819, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9817
 *         return self._data.average_fps
 * 
 *     @average_fps.setter             # <<<<<<<<<<<<<<
 *     def average_fps(self, val):
 *         self._data.average_fps = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.average_fps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9821
 *         self._data.average_fps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python-level wrapper for the `EncoderSessionInfo.average_latency` getter:
 * casts `self` to the extension-type struct and forwards to the
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Cython property getter
 *     if self._data.size == 1: return int(self._data.average_latency[0])
 *     return self._data.average_latency
 * (see _nvml.pyx:9824-9826). For a single-element record it unwraps the
 * scalar to a plain Python int; otherwise it returns the raw attribute.
 * Returns a new reference, or NULL after recording a traceback. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9824
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_latency[0])
 *         return self._data.average_latency
 */
  /* Compare `self._data.size == 1`; BoolEqObjC returns <0 on error. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9824, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9824, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9825
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.average_latency[0])             # <<<<<<<<<<<<<<
 *         return self._data.average_latency
 * 
 */
    /* Single element: fetch attr, index [0], coerce to Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_latency); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9825, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9825, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9825, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9824
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_latency[0])
 *         return self._data.average_latency
 */
  }

  /* "cuda/bindings/_nvml.pyx":9826
 *         if self._data.size == 1:
 *             return int(self._data.average_latency[0])
 *         return self._data.average_latency             # <<<<<<<<<<<<<<
 * 
 *     @average_latency.setter
 */
  /* Multi-element: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_latency); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9826, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9821
 *         self._data.average_fps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.average_latency.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9828
 *         return self._data.average_latency
 * 
 *     @average_latency.setter             # <<<<<<<<<<<<<<
 *     def average_latency(self, val):
 *         self._data.average_latency = val
*/

/* Python wrapper */
/* Python-level wrapper for the `EncoderSessionInfo.average_latency` setter:
 * casts `self` to the extension-type struct and forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Cython property setter
 *     `self._data.average_latency = val`
 * (see _nvml.pyx:9830): a single attribute store on the wrapped `_data`
 * object. Returns 0 on success, -1 after recording a traceback on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9830
 *     @average_latency.setter
 *     def average_latency(self, val):
 *         self._data.average_latency = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_latency, __pyx_v_val) < (0)) __PYX_ERR(0, 9830, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9828
 *         return self._data.average_latency
 * 
 *     @average_latency.setter             # <<<<<<<<<<<<<<
 *     def average_latency(self, val):
 *         self._data.average_latency = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.average_latency.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9832
 *         self._data.average_latency = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python-level wrapper for `EncoderSessionInfo.__getitem__`: casts `self`
 * to the extension-type struct and forwards `key` to the implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `EncoderSessionInfo.__getitem__` (_nvml.pyx:9832-9846).
 * Semantics of the original Cython:
 *   - integer key: bounds-check against `self._data.size` (raising
 *     IndexError when out of range), normalize a negative index, then
 *     return `EncoderSessionInfo.from_data(self._data[key:key+1])` so a
 *     scalar lookup yields a length-1 wrapped view;
 *   - any other key (e.g. a slice): index `self._data` directly and, if
 *     the result is a numpy recarray with `encoder_session_info_dtype`,
 *     re-wrap it via `from_data`; otherwise return it as-is.
 * Returns a new reference, or NULL after recording a traceback. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":9835
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* `isinstance(key, int)` compiles to a PyLong type check. */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9836
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9836, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":9837
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9837, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9837, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":9838
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Short-circuit `or`: valid indices are -size..size-1 inclusive. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9839
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      /* Construct and raise IndexError("index is out of bounds"). */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9839, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 9839, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9838
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":9840
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":9841
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* Normalize negative index to its non-negative equivalent. */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":9840
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":9842
 *             if key_ < 0:
 *                 key_ += size
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == encoder_session_info_dtype:
 */
    /* Slice [key_:key_+1] then call the static method via vectorcall. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9842, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9842, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9835
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":9843
 *                 key_ += size
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == encoder_session_info_dtype:
 *             return EncoderSessionInfo.from_data(out)
 */
  /* Non-integer key path: generic indexing on the wrapped data. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":9844
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == encoder_session_info_dtype:             # <<<<<<<<<<<<<<
 *             return EncoderSessionInfo.from_data(out)
 *         return out
 */
  /* Short-circuit `and`: type check first, dtype equality second. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_encoder_session_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9844, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9845
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == encoder_session_info_dtype:
 *             return EncoderSessionInfo.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    /* Matching recarray result: re-wrap it in an EncoderSessionInfo. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9845, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9844
 *             return EncoderSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == encoder_session_info_dtype:             # <<<<<<<<<<<<<<
 *             return EncoderSessionInfo.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":9846
 *         if isinstance(out, _numpy.recarray) and out.dtype == encoder_session_info_dtype:
 *             return EncoderSessionInfo.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9832
 *         self._data.average_latency = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9848
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python-level wrapper for `EncoderSessionInfo.__setitem__`: casts `self`
 * to the extension-type struct and forwards `key`/`val` to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `EncoderSessionInfo.__setitem__` (_nvml.pyx:9849):
 * delegates `self._data[key] = val` straight to the wrapped object.
 * Returns 0 on success, -1 after recording a traceback on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":9849
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 9849, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9848
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9851
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EncoderSessionInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * `EncoderSessionInfo.from_data(data)` (_nvml.pyx:9851): unpacks exactly
 * one positional-or-keyword argument `data` (FASTCALL or tuple calling
 * convention, selected by CYTHON_METH_FASTCALL) and forwards it to the
 * implementation. Returns a new reference, or NULL on error.
 *
 * Fix (review): the negative-keyword-count check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the GCC/Clang definition of `unlikely(x)` as
 * `__builtin_expect(!!(x), 0)`, `unlikely(__pyx_kwds_len)` evaluates to
 * 0 or 1, so comparing THAT result `< 0` is always false and the error
 * path for a failing __Pyx_NumKwargs_FASTCALL was unreachable. The
 * comparison now sits inside the hint: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_14from_data, "EncoderSessionInfo.from_data(data)\n\nCreate an EncoderSessionInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `encoder_session_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* The `< 0` must be inside unlikely(): see the header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9851, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9851, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 9851, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 9851, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9851, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 9851, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python-level static method
 * EncoderSessionInfo.from_data(data).
 *
 * Validates that `data` is a 1-D NumPy ndarray whose dtype equals the
 * module-level `encoder_session_info_dtype`, then stores
 * `data.view(numpy.recarray)` on a freshly allocated EncoderSessionInfo
 * instance and returns that instance (new reference).
 *
 * Raises (as Python exceptions, returning NULL):
 *   TypeError  - `data` is not a numpy.ndarray
 *   ValueError - `data` is not 1-D, or has the wrong dtype
 *
 * NOTE(review): auto-generated by Cython 3.2.2 from
 * cuda/bindings/_nvml.pyx:9851; do not hand-edit logic here — change the
 * .pyx source and regenerate instead. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":9858
 *             data (_numpy.ndarray): a 1D array of dtype `encoder_session_info_dtype` holding the data.
 *         """
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
*/
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EncoderSessionInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9858, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":9859
 *         """
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  /* Look up numpy.ndarray at call time and check isinstance(data, ...). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9859, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9859, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 9859, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":9860
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
*/
    /* Build and raise TypeError via the vectorcall fast path. */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9860, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9860, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9859
 *         """
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
*/
  }

  /* "cuda/bindings/_nvml.pyx":9861
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != encoder_session_info_dtype:
*/
  /* Reject anything that is not exactly one-dimensional. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9861, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9861, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":9862
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != encoder_session_info_dtype:
 *             raise ValueError("data array must be of dtype encoder_session_info_dtype")
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9862, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9862, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9861
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != encoder_session_info_dtype:
*/
  }

  /* "cuda/bindings/_nvml.pyx":9863
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != encoder_session_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype encoder_session_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  /* Dtype equality is delegated to PyObject_RichCompare (numpy dtype __ne__). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_encoder_session_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9863, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 9863, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":9864
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != encoder_session_info_dtype:
 *             raise ValueError("data array must be of dtype encoder_session_info_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_enco};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9864, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 9864, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9863
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != encoder_session_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype encoder_session_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
*/
  }

  /* "cuda/bindings/_nvml.pyx":9865
 *         if data.dtype != encoder_session_info_dtype:
 *             raise ValueError("data array must be of dtype encoder_session_info_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray); the view shares memory with `data`. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9865, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Swap the new view into obj->_data, releasing whatever was there before. */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9867
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9851
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EncoderSessionInfo instance wrapping the given NumPy array.
*/

  /* function exit code */
  /* Error path: release all live temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9869
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EncoderSessionInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython entry point (wrapper) for the static method
 * EncoderSessionInfo.from_ptr(ptr, size=1, readonly=False).
 *
 * Parses positional/keyword arguments via the METH_FASTCALL protocol
 * (or a plain tuple when CYTHON_METH_FASTCALL is off), converts them to
 * C types (intptr_t / size_t / bint), applies the defaults size=1 and
 * readonly=False, then delegates to the _16from_ptr implementation.
 * All borrowed/owned argument references collected in `values[]` are
 * released on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_16from_ptr, "EncoderSessionInfo.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an EncoderSessionInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] hold owned refs to ptr/size/readonly argument objects. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 9869, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals (fallthrough
       * switch), then merge keywords, then verify required args present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9869, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9869, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9869, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 9869, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; size/readonly have defaults. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 9869, __pyx_L3_error) }
      }
    } else {
      /* Purely positional call: 1 to 3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 9869, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 9869, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9869, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert Python argument objects to their C representations.
     * NOTE(review): ptr is converted with PyLong_AsSsize_t — assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on all supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9870, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 9870, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9870, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":9870
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an EncoderSessionInfo instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 9869, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop collected refs, add traceback, bail. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":9869
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EncoderSessionInfo instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python-level static method
 * EncoderSessionInfo.from_ptr(ptr, size=1, readonly=False).
 *
 * Wraps `size` nvmlEncoderSessionInfo_t structs at raw address `ptr` in a
 * zero-copy NumPy recarray:
 *   1. rejects a null pointer (ValueError),
 *   2. allocates a new EncoderSessionInfo via tp_new,
 *   3. builds a memoryview over ptr with PyBUF_READ or PyBUF_WRITE
 *      depending on `readonly`,
 *   4. constructs numpy.ndarray(size, buffer=buf, dtype=encoder_session_info_dtype),
 *   5. stores data.view(numpy.recarray) on the instance and returns it.
 *
 * The caller is responsible for keeping the memory at `ptr` alive for the
 * lifetime of the returned object — the memoryview does not own it.
 *
 * NOTE(review): auto-generated by Cython 3.2.2 from
 * cuda/bindings/_nvml.pyx:9869; regenerate from the .pyx rather than
 * editing logic here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":9878
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
*/
  /* Guard against wrapping a NULL address. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9879
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9879, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9879, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9878
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
*/
  }

  /* "cuda/bindings/_nvml.pyx":9880
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EncoderSessionInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9880, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9881
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlEncoderSessionInfo_t) * size, flag)
*/
  /* Pick the memoryview access flag as a Python int (untyped cdef `flag`). */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9881, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9881, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9883
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlEncoderSessionInfo_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=encoder_session_info_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  /* Convert the flag object back to a C int for PyMemoryView_FromMemory. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9883, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":9882
 *         cdef EncoderSessionInfo obj = EncoderSessionInfo.__new__(EncoderSessionInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlEncoderSessionInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=encoder_session_info_dtype)
*/
  /* Zero-copy memoryview over the caller-owned memory at ptr. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlEncoderSessionInfo_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9882, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9884
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlEncoderSessionInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=encoder_session_info_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* numpy.ndarray(size, buffer=buf, dtype=encoder_session_info_dtype),
   * assembled through the vectorcall keyword-argument builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_encoder_session_info_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9884, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 9884, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 9884, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9884, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9885
 *             <char*>ptr, sizeof(nvmlEncoderSessionInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=encoder_session_info_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray); recarray view shares the buffer. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9885, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Swap the view into obj->_data, releasing whatever was there before. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":9887
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9869
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EncoderSessionInfo instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release all live temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9705
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* CPython getter wrapper for the read-only `_data` attribute: casts `self`
 * to the extension-type struct and delegates to the __get__ implementation.
 *
 * NOTE(review): the __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs) line
 * references identifiers that are not parameters of this function; it only
 * compiles because the macro discards its arguments (expanding to NULL).
 * Looks like a generator artifact — confirm against the Cython 3.2.2
 * templates before touching. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for the read-only `_data` attribute:
 * returns a new strong reference to the NumPy recarray stored on the
 * EncoderSessionInfo instance. Cannot fail. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* Hand out a new reference to the stored array object. */
  __pyx_result = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_result);
  __Pyx_XGIVEREF(__pyx_result);
  __Pyx_RefNannyFinishContext();
  return __pyx_result;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* CPython entry point (wrapper) for EncoderSessionInfo.__reduce_cython__(self),
 * the pickle-support hook Cython synthesizes for the extension type.
 * Accepts no positional or keyword arguments beyond `self`; rejects any
 * extras before delegating to the _18__reduce_cython__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_18__reduce_cython__, "EncoderSessionInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments: any positional or keyword
   * argument is a TypeError raised here before dispatch. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__reduce_cython__ (pickle support).
 *
 * Builds the __reduce__ result:
 *   state = (self._data,)  [+ (self.__dict__,) when the instance dict exists
 *                            and is non-empty]
 * If a __setstate__ round-trip is needed (instance dict present, or _data is
 * not None) it returns
 *   (__pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, None), state)
 * otherwise the state rides along in the constructor arguments:
 *   (__pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state))
 * 0xa75e18a is the layout checksum verified by the unpickle helper.
 *
 * Fix: the else-branch previously computed `use_setstate` by testing the
 * truth of a constant non-empty tuple (always true), making the no-setstate
 * path unreachable; restored the intended `self._data is not None` test.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self._data is not None
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self._data is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* `self._data is not None` is a pointer comparison; it cannot fail. */
    __pyx_t_2 = (__pyx_v_self->_data != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self._data is not None
 *     if use_setstate:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EncoderSessionInf); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_EncoderSessionInfo__set_state(self, __pyx_state)
*/
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EncoderSessionInf); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_EncoderSessionInfo__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Fastcall wrapper for EncoderSessionInfo.__setstate_cython__(self, __pyx_state).
 *
 * Unpacks exactly one argument (positional or keyword "__pyx_state"), then
 * dispatches to the implementation function. On any unpacking error, all
 * collected argument references are released before returning NULL.
 *
 * Fix: the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely(x) expands to !!(x) (0 or 1)
 * that comparison was always false, silently skipping the error path.
 * Restored the canonical `unlikely(__pyx_kwds_len < 0)` form. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_20__setstate_cython__, "EncoderSessionInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: take up to one positional, then fill
       * the rest (and detect duplicates/unknown keywords) from kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error while unpacking: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EncoderSessionInfo.__setstate_cython__: type-checks the
 * incoming pickle state (must be a tuple, not None) and forwards it to the
 * module-level helper __pyx_unpickle_EncoderSessionInfo__set_state, which
 * restores the instance fields. Returns None on success. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18EncoderSessionInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_EncoderSessionInfo__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
*/
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* The helper's parameter is declared `tuple ... not None`: raise TypeError
   * for anything that is not exactly a tuple, and reject None explicitly. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EncoderSessionInfo__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_EncoderSessionInfo__set_state(self, __pyx_state)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EncoderSessionInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9890
 * 
 * 
 * cdef _get_fbc_stats_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCStats_t pod = nvmlFBCStats_t()
 *     return _numpy.dtype({
*/

/* Builds the structured numpy dtype mirroring nvmlFBCStats_t: field names
 * sessions_count / average_fps / average_latency (all uint32), with byte
 * offsets computed from a local pod instance's member addresses and itemsize
 * = sizeof(nvmlFBCStats_t), so the dtype matches the C layout exactly.
 *
 * Fix: the temporary __pyx_t_1 was copied into `pod` without ever being
 * initialized (the generated `__pyx_t_1 = nvmlFBCStats_t();` statement was
 * missing), i.e. a read of an indeterminate object. Harmless in practice
 * (only member addresses are taken afterwards) but undefined behavior;
 * restored the value-initialization. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_fbc_stats_dtype_offsets(void) {
  nvmlFBCStats_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlFBCStats_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_fbc_stats_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":9891
 * 
 * cdef _get_fbc_stats_dtype_offsets():
 *     cdef nvmlFBCStats_t pod = nvmlFBCStats_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['sessions_count', 'average_fps', 'average_latency'],
*/
  __pyx_t_1 = nvmlFBCStats_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":9892
 * cdef _get_fbc_stats_dtype_offsets():
 *     cdef nvmlFBCStats_t pod = nvmlFBCStats_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['sessions_count', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":9893
 *     cdef nvmlFBCStats_t pod = nvmlFBCStats_t()
 *     return _numpy.dtype({
 *         'names': ['sessions_count', 'average_fps', 'average_latency'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sessions_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sessions_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_sessions_count) != (0)) __PYX_ERR(0, 9893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_average_fps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_average_fps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_average_fps) != (0)) __PYX_ERR(0, 9893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_average_latency);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_average_latency);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_average_latency) != (0)) __PYX_ERR(0, 9893, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 9893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":9894
 *     return _numpy.dtype({
 *         'names': ['sessions_count', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionsCount)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 9894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 9894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 9894, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 9893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Offsets are derived from pod's member addresses, equivalent to
   * offsetof(nvmlFBCStats_t, member). */
  /* "cuda/bindings/_nvml.pyx":9896
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionsCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sessionsCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 9896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":9897
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionsCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.averageFPS)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 9897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":9898
 *             (<intptr_t>&(pod.sessionsCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlFBCStats_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.averageLatency)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 9898, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":9895
 *         'names': ['sessions_count', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sessionsCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9895, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 9895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 9895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 9895, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 9893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":9900
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlFBCStats_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlFBCStats_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 9893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall of numpy.dtype(spec_dict); unpack a bound method if present. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9892, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9890
 * 
 * 
 * cdef _get_fbc_stats_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCStats_t pod = nvmlFBCStats_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_fbc_stats_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9917
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlFBCStats_t *>calloc(1, sizeof(nvmlFBCStats_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Varargs wrapper for FBCStats.__init__: rejects any positional or keyword
 * arguments, then dispatches to the implementation. Returns 0 on success,
 * -1 on error (tp_init convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* FBCStats() takes no arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCStats.__init__: calloc-allocates one zeroed
 * nvmlFBCStats_t, raises MemoryError on allocation failure, and marks the
 * instance as the owner (_owner=None, _owned=True, _readonly=False) so
 * __dealloc__ later frees the buffer. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":9918
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlFBCStats_t *>calloc(1, sizeof(nvmlFBCStats_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating FBCStats")
*/
  __pyx_v_self->_ptr = ((nvmlFBCStats_t *)calloc(1, (sizeof(nvmlFBCStats_t))));

  /* "cuda/bindings/_nvml.pyx":9919
 *     def __init__(self):
 *         self._ptr = <nvmlFBCStats_t *>calloc(1, sizeof(nvmlFBCStats_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating FBCStats")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":9920
 *         self._ptr = <nvmlFBCStats_t *>calloc(1, sizeof(nvmlFBCStats_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating FBCStats")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating FBCStats") via the
     * vectorcall helper (MemoryError is looked up in module globals, so a
     * shadowed builtin would be honored). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9920, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_FBCStats};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9920, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 9920, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9919
 *     def __init__(self):
 *         self._ptr = <nvmlFBCStats_t *>calloc(1, sizeof(nvmlFBCStats_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating FBCStats")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":9921
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating FBCStats")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Swap the previous _owner reference for None with full refcount hygiene. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":9922
 *             raise MemoryError("Error allocating FBCStats")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":9923
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":9917
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlFBCStats_t *>calloc(1, sizeof(nvmlFBCStats_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9925
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlFBCStats_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for FBCStats.__dealloc__: casts the generic
 * PyObject* to the concrete FBCStats object struct and delegates to the
 * implementation function.  Returns nothing and cannot raise. */
static void __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  /* Implementation of FBCStats.__dealloc__: if this instance owns its
   * nvmlFBCStats_t buffer (_owned) and the pointer is non-NULL, free it.
   * The pointer is copied to a local and _ptr is cleared BEFORE free(),
   * so a re-entrant deallocation cannot double-free. */
  nvmlFBCStats_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlFBCStats_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":9927
 *     def __dealloc__(self):
 *         cdef nvmlFBCStats_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit evaluation of `self._owned and self._ptr != NULL`:
   * when _owned is false the whole condition is its (false) value. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9928
 *         cdef nvmlFBCStats_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":9929
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":9930
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    /* NOTE(review): assumes _ptr was allocated with calloc/malloc (as in
     * __init__ / __setitem__ above) — TODO confirm no other assignment path
     * hands in a non-heap pointer with _owned set. */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":9927
 *     def __dealloc__(self):
 *         cdef nvmlFBCStats_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":9925
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlFBCStats_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":9932
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.FBCStats object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr-level wrapper for FBCStats.__repr__: casts self to the concrete
 * FBCStats struct and delegates; returns a new unicode reference or NULL
 * on error (propagated from the implementation). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  /* Implementation of FBCStats.__repr__: builds the f-string
   * f"<{__name__}.FBCStats object at {hex(id(self))}>" by formatting the
   * module __name__, hex(id(self)), and joining them with the literal
   * pieces via __Pyx_PyUnicode_Join.  Returns a new unicode reference,
   * or NULL with an exception set. */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];  /* the 5 join pieces: "<", __name__, literal, hex(id(self)), ">" */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":9933
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.FBCStats object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and formatted pieces; the length/max-char hints are
   * precomputed by Cython for a single-pass allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_FBCStats_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 20 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9933, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9932
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.FBCStats object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9935
 *         return f"<{__name__}.FBCStats object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for FBCStats.ptr: casts self and delegates.
 * Returns a new reference to a Python int, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  /* Implementation of the FBCStats.ptr property getter: returns the raw
   * nvmlFBCStats_t pointer as a Python int (the address value; may be 0
   * if the pointer is NULL).  Returns NULL with an exception set only if
   * int creation fails. */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9938
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t is converted via PyLong_FromSsize_t (same width as Py_ssize_t
   * on the supported platforms). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9938, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9935
 *         return f"<{__name__}.FBCStats object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9940
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_8FBCStats__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":9941
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9940
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9943
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int-level wrapper for FBCStats.__int__: casts self and delegates.
 * Returns a new reference to a Python int, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  /* Implementation of FBCStats.__int__: same contract as the `ptr`
   * property — returns the raw struct pointer as a Python int address. */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":9944
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9944, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9943
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9946
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef FBCStats other_
 *         if not isinstance(other, FBCStats):
*/

/* Python wrapper */
/* Rich-comparison wrapper for FBCStats.__eq__: casts self and forwards
 * `other` untouched.  Returns a new reference (a bool), or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_other) {
  /* Implementation of FBCStats.__eq__: False if `other` is not an
   * FBCStats instance; otherwise value equality via memcmp of the two
   * underlying nvmlFBCStats_t structs. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":9948
 *     def __eq__(self, other):
 *         cdef FBCStats other_
 *         if not isinstance(other, FBCStats):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":9949
 *         cdef FBCStats other_
 *         if not isinstance(other, FBCStats):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFBCStats_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":9948
 *     def __eq__(self, other):
 *         cdef FBCStats other_
 *         if not isinstance(other, FBCStats):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":9950
 *         if not isinstance(other, FBCStats):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFBCStats_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats))))) __PYX_ERR(0, 9950, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":9951
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFBCStats_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): memcmp assumes both _ptr values are non-NULL; comparing
   * an instance whose buffer was never allocated would be undefined
   * behavior — TODO confirm callers never construct such instances. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlFBCStats_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9946
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef FBCStats other_
 *         if not isinstance(other, FBCStats):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9953
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFBCStats_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
*/

/* Python wrapper */
/* mp_ass_subscript-level wrapper for FBCStats.__setitem__: casts self and
 * forwards key/val.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  /* Implementation of FBCStats.__setitem__.  Two paths:
   *   - key == 0 and val is a numpy.ndarray: allocate a fresh
   *     nvmlFBCStats_t, memcpy the array's data buffer into it, and take
   *     ownership (read-only mirrors the array's writeable flag);
   *   - otherwise: fall back to setattr(self, key, val).
   * Returns 0 on success, -1 with an exception set. */
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":9954
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only look up numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 9954, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9954, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9954, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 9954, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":9955
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating FBCStats")
 */
    /* NOTE(review): the previous _ptr is overwritten without being freed,
     * even when _owned is true — this leaks the old buffer.  The fix
     * belongs in the .pyx source, not this generated file. */
    __pyx_v_self->_ptr = ((nvmlFBCStats_t *)malloc((sizeof(nvmlFBCStats_t))));

    /* "cuda/bindings/_nvml.pyx":9956
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFBCStats_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":9957
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating FBCStats")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFBCStats_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating FBCStats") via the
       * vectorcall helper (handles a possibly-bound MemoryError lookup). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9957, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_FBCStats};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9957, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 9957, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":9956
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFBCStats_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":9958
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFBCStats_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(nvmlFBCStats_t) bytes from the ndarray's data pointer.
     * NOTE(review): assumes the array buffer holds at least that many
     * bytes and is contiguous — TODO confirm the .pyx callers guarantee
     * this. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9958, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 9958, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlFBCStats_t))));

    /* "cuda/bindings/_nvml.pyx":9959
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFBCStats_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":9960
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlFBCStats_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":9961
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9961, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9961, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 9961, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":9954
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":9963
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 9963, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":9953
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlFBCStats_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9965
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sessions_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-getter wrapper for FBCStats.sessions_count: casts self and
 * delegates.  Returns a new reference to a Python int, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  /* Getter for FBCStats.sessions_count: reads the unsigned-int
   * sessionsCount field of the wrapped struct and boxes it as a Python
   * int.  NOTE(review): dereferences _ptr unconditionally — assumes the
   * buffer was allocated (as __init__ guarantees). */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9968
 *     def sessions_count(self):
 *         """int: """
 *         return self._ptr[0].sessionsCount             # <<<<<<<<<<<<<<
 * 
 *     @sessions_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sessionsCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9965
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sessions_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.sessions_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9970
 *         return self._ptr[0].sessionsCount
 * 
 *     @sessions_count.setter             # <<<<<<<<<<<<<<
 *     def sessions_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter wrapper for FBCStats.sessions_count: casts self and
 * forwards the value.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_val) {
  /* Setter for FBCStats.sessions_count: raises ValueError on read-only
   * instances, otherwise converts val to unsigned int (OverflowError /
   * TypeError propagate from the conversion helper) and stores it into
   * the wrapped struct.  Returns 0 on success, -1 on error. */
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9972
 *     @sessions_count.setter
 *     def sessions_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].sessionsCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9973
 *     def sessions_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This FBCStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sessionsCount = val
 * 
 */
    /* Build and raise ValueError with the fixed message via vectorcall. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_FBCStats_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9973, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9973, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9972
 *     @sessions_count.setter
 *     def sessions_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].sessionsCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9974
 *         if self._readonly:
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].sessionsCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9974, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sessionsCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9970
 *         return self._ptr[0].sessionsCount
 * 
 *     @sessions_count.setter             # <<<<<<<<<<<<<<
 *     def sessions_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.sessions_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9976
 *         self._ptr[0].sessionsCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_fps(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter entry point for the FBCStats.average_fps property: casts `self` to
 * the extension-type struct and delegates to the implementation function.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_11average_fps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the FBCStats.average_fps property getter
 * (cuda/bindings/_nvml.pyx:9977-9979): boxes the wrapped struct's
 * `averageFPS` field (unsigned int) as a Python int. Returns a new
 * reference, or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_11average_fps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9979
 *     def average_fps(self):
 *         """int: """
 *         return self._ptr[0].averageFPS             # <<<<<<<<<<<<<<
 * 
 *     @average_fps.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).averageFPS); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9976
 *         self._ptr[0].sessionsCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_fps(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.average_fps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9981
 *         return self._ptr[0].averageFPS
 * 
 *     @average_fps.setter             # <<<<<<<<<<<<<<
 *     def average_fps(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Setter entry point for the FBCStats.average_fps property: casts `self` to
 * the extension-type struct and delegates to the implementation function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the FBCStats.average_fps property setter
 * (cuda/bindings/_nvml.pyx:9982-9985): raises ValueError when the
 * instance is read-only, otherwise converts `val` to unsigned int and
 * stores it into the wrapped struct's `averageFPS` field. Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9983
 *     @average_fps.setter
 *     def average_fps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].averageFPS = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9984
 *     def average_fps(self, val):
 *         if self._readonly:
 *             raise ValueError("This FBCStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].averageFPS = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_FBCStats_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9984, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9984, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9983
 *     @average_fps.setter
 *     def average_fps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].averageFPS = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9985
 *         if self._readonly:
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].averageFPS = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert `val` to unsigned int; (unsigned int)-1 with a live exception
   * signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9985, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).averageFPS = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9981
 *         return self._ptr[0].averageFPS
 * 
 *     @average_fps.setter             # <<<<<<<<<<<<<<
 *     def average_fps(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.average_fps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9987
 *         self._ptr[0].averageFPS = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_latency(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter entry point for the FBCStats.average_latency property: casts `self`
 * to the extension-type struct and delegates to the implementation function.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_15average_latency___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the FBCStats.average_latency property getter
 * (cuda/bindings/_nvml.pyx:9988-9990): boxes the wrapped struct's
 * `averageLatency` field (unsigned int) as a Python int. Returns a new
 * reference, or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_15average_latency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":9990
 *     def average_latency(self):
 *         """int: """
 *         return self._ptr[0].averageLatency             # <<<<<<<<<<<<<<
 * 
 *     @average_latency.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).averageLatency); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9987
 *         self._ptr[0].averageFPS = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_latency(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.average_latency.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9992
 *         return self._ptr[0].averageLatency
 * 
 *     @average_latency.setter             # <<<<<<<<<<<<<<
 *     def average_latency(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Setter entry point for the FBCStats.average_latency property: casts `self`
 * to the extension-type struct and delegates to the implementation function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the FBCStats.average_latency property setter
 * (cuda/bindings/_nvml.pyx:9993-9996): raises ValueError when the
 * instance is read-only, otherwise converts `val` to unsigned int and
 * stores it into the wrapped struct's `averageLatency` field. Returns 0
 * on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":9994
 *     @average_latency.setter
 *     def average_latency(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].averageLatency = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":9995
 *     def average_latency(self, val):
 *         if self._readonly:
 *             raise ValueError("This FBCStats instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].averageLatency = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_FBCStats_instance_is_read_o};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9995, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 9995, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":9994
 *     @average_latency.setter
 *     def average_latency(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].averageLatency = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":9996
 *         if self._readonly:
 *             raise ValueError("This FBCStats instance is read-only")
 *         self._ptr[0].averageLatency = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert `val` to unsigned int; (unsigned int)-1 with a live exception
   * signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 9996, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).averageLatency = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":9992
 *         return self._ptr[0].averageLatency
 * 
 *     @average_latency.setter             # <<<<<<<<<<<<<<
 *     def average_latency(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.average_latency.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":9998
 *         self._ptr[0].averageLatency = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FBCStats instance wrapping the given NumPy array.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_12from_data, "FBCStats.from_data(data)\n\nCreate an FBCStats instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `fbc_stats_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_12from_data};
/* Argument-unpacking wrapper for the static method FBCStats.from_data(data):
 * accepts exactly one argument, positionally or via the keyword "data", then
 * forwards it to the implementation function. Returns a new reference, or
 * NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must be inside unlikely(). With the
     * common definition `unlikely(x) == __builtin_expect(!!(x), 0)`, the
     * previous form `unlikely(__pyx_kwds_len) < 0` normalized the length
     * to 0/1 before comparing, so a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was never detected and a pending exception
     * would have been ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 9998, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9998, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 9998, __pyx_L3_error)
      /* Ensure every required argument was supplied (by position or keyword). */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 9998, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 9998, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 9998, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method FBCStats.from_data(data)
 * (cuda/bindings/_nvml.pyx:10005): looks up the module-level
 * `fbc_stats_dtype` global and delegates to the __from_data helper with
 * the FBCStats extension type. Returns a new reference, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":10005
 *             data (_numpy.ndarray): a single-element array of dtype `fbc_stats_dtype` holding the data.
 *         """
 *         return __from_data(data, "fbc_stats_dtype", fbc_stats_dtype, FBCStats)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the `fbc_stats_dtype` module global (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_fbc_stats_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10005, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_fbc_stats_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10005, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":9998
 *         self._ptr[0].averageLatency = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FBCStats instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10007
 *         return __from_data(data, "fbc_stats_dtype", fbc_stats_dtype, FBCStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FBCStats instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_14from_ptr, "FBCStats.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an FBCStats instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 10007, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10007, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10007, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10007, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 10007, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":10008
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an FBCStats instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 10007, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10007, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10007, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10007, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10008, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10008, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 10007, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":10007
 *         return __from_data(data, "fbc_stats_dtype", fbc_stats_dtype, FBCStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FBCStats instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method FBCStats.from_ptr(ptr, readonly, owner)
 * (cuda/bindings/_nvml.pyx:10008-10031): rejects a null pointer with
 * ValueError; when `owner` is None, mallocs a private nvmlFBCStats_t and
 * memcpys the pointed-to data into it (instance owns and must free the
 * copy); otherwise aliases the caller's pointer and holds a reference to
 * `owner` to keep it alive. Sets the `_readonly` flag and returns the new
 * FBCStats instance (new reference), or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10016
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10017
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10017, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10017, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10016
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)
 */
  }

  /* "cuda/bindings/_nvml.pyx":10018
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 */
  /* Allocate the FBCStats instance directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_FBCStats(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10018, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10019
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10020
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)
 *         if owner is None:
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating FBCStats")
 */
    /* No owner: take a private copy of the struct so the instance's data
     * outlives the caller's pointer. */
    __pyx_v_obj->_ptr = ((nvmlFBCStats_t *)malloc((sizeof(nvmlFBCStats_t))));

    /* "cuda/bindings/_nvml.pyx":10021
 *         if owner is None:
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFBCStats_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10022
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating FBCStats")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFBCStats_t))
 *             obj._owner = None
 */
      /* Look up MemoryError as a module global (matches the pyx-level name
       * resolution) and raise it. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10022, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_FBCStats};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10022, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 10022, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10021
 *         if owner is None:
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFBCStats_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10023
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFBCStats_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlFBCStats_t))));

    /* "cuda/bindings/_nvml.pyx":10024
 *                 raise MemoryError("Error allocating FBCStats")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFBCStats_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10025
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlFBCStats_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlFBCStats_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10019
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCStats obj = FBCStats.__new__(FBCStats)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlFBCStats_t *>malloc(sizeof(nvmlFBCStats_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":10027
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlFBCStats_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the caller's pointer and hold a reference to
     * `owner` so the pointed-to memory stays alive. */
    __pyx_v_obj->_ptr = ((nvmlFBCStats_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10028
 *         else:
 *             obj._ptr = <nvmlFBCStats_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":10029
 *             obj._ptr = <nvmlFBCStats_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":10030
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":10031
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10007
 *         return __from_data(data, "fbc_stats_dtype", fbc_stats_dtype, FBCStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FBCStats instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper */
/* FBCStats.__reduce_cython__ entry point (METH_FASTCALL signature when
 * available, tuple/dict fallback otherwise).  It only enforces the call
 * arity -- zero positional arguments and zero keywords -- and then forwards
 * to the C-level implementation, which unconditionally raises TypeError
 * because FBCStats wraps a raw C pointer and cannot be pickled. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_16__reduce_cython__, "FBCStats.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive a tuple; recover the positional count from it. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any positionals
   * and any keywords before dispatching. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCStats.__reduce_cython__.  FBCStats holds a raw
 * nvmlFBCStats_t pointer (self._ptr), which has no meaningful pickled form,
 * so this always raises TypeError and never returns a value (__pyx_r is
 * always NULL on exit; the self parameter is unused). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message, then jump to the error path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

/* Python wrapper */
/* FBCStats.__setstate_cython__ entry point.  Unpacks the single required
 * positional/keyword argument (__pyx_state) and forwards to the C-level
 * implementation, which unconditionally raises TypeError (unpickling a
 * pointer-wrapping type is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_18__setstate_cython__, "FBCStats.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive a tuple; recover the positional count. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside the unlikely() macro.  The previous
     * form `unlikely(__pyx_kwds_len) < 0` compared the 0/1 result of
     * __builtin_expect(!!(x), 0) against 0, which is never true, making the
     * error path for a failed __Pyx_NumKwargs_FASTCALL unreachable.  This now
     * matches the correctly-parenthesized check used by the other wrappers
     * in this file (e.g. __reduce_cython__). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: accept __pyx_state either positionally or by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Common fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any argument refs we acquired. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCStats.__setstate_cython__.  Unpickling is not
 * supported for a type that wraps a raw C pointer, so this unconditionally
 * raises TypeError; both parameters (self and the pickled state) are unused
 * and __pyx_r is always NULL on exit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8FBCStats_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the interned message, then jump to the error path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCStats.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10034
 * 
 * 
 * cdef _get_fbc_session_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCSessionInfo_t pod = nvmlFBCSessionInfo_t()
 *     return _numpy.dtype({
 */

/* Builds the numpy structured dtype describing nvmlFBCSessionInfo_t: field
 * names, numpy formats, byte offsets (computed from the addresses of a local
 * `pod` instance's members) and the total itemsize.  Returns the dtype
 * object, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_fbc_session_info_dtype_offsets(void) {
  nvmlFBCSessionInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlFBCSessionInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  PyObject *__pyx_t_18 = NULL;
  size_t __pyx_t_19;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_fbc_session_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":10035
 * 
 * cdef _get_fbc_session_info_dtype_offsets():
 *     cdef nvmlFBCSessionInfo_t pod = nvmlFBCSessionInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'display_ordinal', 'session_type', 'session_flags', 'h_max_resolution', 'v_max_resolution', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 */
  /* FIX: the temporary was previously copied into `pod` without ever being
   * assigned, so `__pyx_v_pod = __pyx_t_1` read an uninitialized object
   * (undefined behavior).  Value-initialize it as the pyx source
   * (`nvmlFBCSessionInfo_t()`) requires before copying. */
  __pyx_t_1 = nvmlFBCSessionInfo_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":10036
 * cdef _get_fbc_session_info_dtype_offsets():
 *     cdef nvmlFBCSessionInfo_t pod = nvmlFBCSessionInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'display_ordinal', 'session_type', 'session_flags', 'h_max_resolution', 'v_max_resolution', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype once; the dict literal below is its argument. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10036, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10036, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":10037
 *     cdef nvmlFBCSessionInfo_t pod = nvmlFBCSessionInfo_t()
 *     return _numpy.dtype({
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'display_ordinal', 'session_type', 'session_flags', 'h_max_resolution', 'v_max_resolution', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* 'names': twelve interned field-name strings, in declaration order. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_session_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_session_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_session_id) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_pid) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_vgpu_instance) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_display_ordinal);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_display_ordinal);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_display_ordinal) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_session_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_session_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_session_type) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_session_flags);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_session_flags);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_session_flags) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_h_max_resolution);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_h_max_resolution);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_h_max_resolution) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_v_max_resolution);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_v_max_resolution);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_v_max_resolution) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_h_resolution);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_h_resolution);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_h_resolution) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_v_resolution);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_v_resolution);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_v_resolution) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_average_fps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_average_fps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_mstate_global->__pyx_n_u_average_fps) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_average_latency);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_average_latency);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_mstate_global->__pyx_n_u_average_latency) != (0)) __PYX_ERR(0, 10037, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 10037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10038
 *     return _numpy.dtype({
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'display_ordinal', 'session_type', 'session_flags', 'h_max_resolution', 'v_max_resolution', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 */
  /* 'formats': numpy scalar types, one per field (index 4, session_type,
   * is int32; all others are uint32). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_t_17) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_t_18) != (0)) __PYX_ERR(0, 10038, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  __pyx_t_18 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 10037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* 'offsets': each entry is the byte offset of a struct member, computed
   * as (address of member) - (address of pod). */

  /* "cuda/bindings/_nvml.pyx":10040
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sessionId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10040, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":10041
 *         'offsets': [
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.displayOrdinal)) - (<intptr_t>&pod),
 */
  __pyx_t_18 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 10041, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);

  /* "cuda/bindings/_nvml.pyx":10042
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.displayOrdinal)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sessionType)) - (<intptr_t>&pod),
 */
  __pyx_t_17 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuInstance)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 10042, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);

  /* "cuda/bindings/_nvml.pyx":10043
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.displayOrdinal)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sessionType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sessionFlags)) - (<intptr_t>&pod),
 */
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.displayOrdinal)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10043, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":10044
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.displayOrdinal)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sessionType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sessionFlags)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hMaxResolution)) - (<intptr_t>&pod),
 */
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sessionType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 10044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":10045
 *             (<intptr_t>&(pod.displayOrdinal)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sessionType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sessionFlags)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hMaxResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vMaxResolution)) - (<intptr_t>&pod),
 */
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sessionFlags)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 10045, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":10046
 *             (<intptr_t>&(pod.sessionType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sessionFlags)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hMaxResolution)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vMaxResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hMaxResolution)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 10046, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":10047
 *             (<intptr_t>&(pod.sessionFlags)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hMaxResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vMaxResolution)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vMaxResolution)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 10047, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":10048
 *             (<intptr_t>&(pod.hMaxResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vMaxResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hResolution)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 10048, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":10049
 *             (<intptr_t>&(pod.vMaxResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vResolution)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10049, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":10050
 *             (<intptr_t>&(pod.hResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.averageFPS)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10050, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":10051
 *             (<intptr_t>&(pod.vResolution)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageFPS)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlFBCSessionInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.averageLatency)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10051, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":10039
 *         'names': ['session_id', 'pid', 'vgpu_instance', 'display_ordinal', 'session_type', 'session_flags', 'h_max_resolution', 'v_max_resolution', 'h_resolution', 'v_resolution', 'average_fps', 'average_latency'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sessionId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pid)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(12); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_18) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_17) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_16) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_15) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_14) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_12) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_11) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_10) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 10, __pyx_t_9) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 11, __pyx_t_8) != (0)) __PYX_ERR(0, 10039, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_18 = 0;
  __pyx_t_17 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 10037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":10053
 *             (<intptr_t>&(pod.averageLatency)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlFBCSessionInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlFBCSessionInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10053, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 10037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec_dict); unpack bound methods when possible so the
   * self argument can be passed positionally via the vectorcall protocol. */
  __pyx_t_19 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_19 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_19, (2-__pyx_t_19) | (__pyx_t_19*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10036, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10034
 * 
 * 
 * cdef _get_fbc_session_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCSessionInfo_t pod = nvmlFBCSessionInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_XDECREF(__pyx_t_18);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_fbc_session_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10075
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=fbc_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Python-level wrapper for FBCSessionInfo.__init__(self, size=1).
 * Unpacks the positional/keyword arguments into values[], defaulting the
 * single `size` argument to the cached int 1, then dispatches to the C
 * implementation __pyx_pf_..._FBCSessionInfo___init__.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside the unlikely() argument.  With the
     * GCC/Clang definition `#define unlikely(x) __builtin_expect(!!(x), 0)`,
     * the previous form `if (unlikely(__pyx_kwds_len) < 0)` normalized the
     * value to 0/1 before comparing, so a -1 error return from
     * __Pyx_NumKwargs_VARARGS was never detected and parsing continued with
     * a pending exception. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10075, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10075, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 10075, __pyx_L3_error)
      /* `size` not supplied: fall back to the default value 1. */
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10075, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 10075, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error while unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__init__(self, size=1):
 *   arr = numpy.empty(size, dtype=fbc_session_info_dtype)
 *   self._data = arr.view(numpy.recarray)
 *   assert self._data.itemsize == sizeof(nvmlFBCSessionInfo_t), ...
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":10076
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=fbc_session_info_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlFBCSessionInfo_t), \
 */
  /* Look up numpy.empty and the module-level dtype object. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10076, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10076, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_fbc_session_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10076, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If the attribute came back as a bound method, split it into
   * (self, function) so `self` can be passed as the first positional
   * argument of the vectorcall below (__pyx_t_5 becomes the arg offset). */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    /* Call numpy.empty(size, dtype=fbc_session_info_dtype) via the
     * vectorcall keyword-argument builder. */
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10076, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 10076, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10076, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10077
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=fbc_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlFBCSessionInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFBCSessionInfo_t) }"
 */
  /* self._data = arr.view(numpy.recarray) — fast method call on `arr`. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10077, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Store the new view into self._data, dropping the old reference. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10078
 *         arr = _numpy.empty(size, dtype=fbc_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlFBCSessionInfo_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFBCSessionInfo_t) }"
 * 
 */
  /* The assert is compiled out when CYTHON_WITHOUT_ASSERTIONS is defined,
   * and skipped at runtime when Python assertions are disabled (-O). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10078, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlFBCSessionInfo_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10078, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10078, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 10078, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":10079
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlFBCSessionInfo_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFBCSessionInfo_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string AssertionError message.  The constants 9 and 24
       * are the byte lengths of the literal segments "itemsize " and
       * " mismatches struct size " precomputed by Cython. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10079, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10079, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlFBCSessionInfo_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10079, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10079, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 10078, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 10078, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":10075
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=fbc_session_info_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10081
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFBCSessionInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.FBCSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3__repr__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for FBCSessionInfo.__repr__: casts self and forwards
 * to the implementation function.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this unary
   * slot; this compiles only because __Pyx_KwValues_VARARGS presumably
   * expands without evaluating its arguments — confirm against the Cython
   * utility-code header.  The result is unused here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__repr__:
 *   size > 1:  "<{__name__}.FBCSessionInfo_Array_{size} object at {hex(id(self))}>"
 *   otherwise: "<{__name__}.FBCSessionInfo object at {hex(id(self))}>"
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":10082
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.FBCSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Test self._data.size > 1 via rich comparison against the cached int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 10082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":10083
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.FBCSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.FBCSessionInfo object at {hex(id(self))}>"
 */
    /* Array repr: format __name__, self._data.size, and hex(id(self)),
     * then join with the literal pieces.  Constants in the length hint:
     * 1*2 for "<" and ">", 22 for ".FBCSessionInfo_Array_", 11 for
     * " object at ". */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* Coerce hex() result to an exact unicode object for the join. */
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_FBCSessionInfo_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 22 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10083, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10082
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.FBCSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":10085
 *             return f"<{__name__}.FBCSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.FBCSessionInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar repr (the if-branch above returned, so this is the else path);
   * 26 is the length of the literal ".FBCSessionInfo object at ". */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_FBCSessionInfo_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 26 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":10081
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFBCSessionInfo_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.FBCSessionInfo_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10087
 *             return f"<{__name__}.FBCSessionInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter wrapper for the FBCSessionInfo.ptr property: forwards to the
 * implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Unused; presumably the macro does not evaluate __pyx_args/__pyx_nargs
   * (they are not parameters here) — TODO confirm in the Cython headers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the FBCSessionInfo.ptr property getter:
 * returns self._data.ctypes.data — the underlying buffer address as a
 * Python int.  New reference; NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10090
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Chained attribute access: self._data.ctypes, then .data. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10087
 *             return f"<{__name__}.FBCSessionInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10092
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (cdef) helper `_get_ptr`: returns self._data.ctypes.data
 * converted to intptr_t.  On error the traceback is recorded and 0 is
 * returned; presumably callers check PyErr_Occurred() since 0 is in-band —
 * TODO confirm against the call sites. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_14FBCSessionInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10093
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Converts via PyLong_AsSsize_t — assumes Py_ssize_t and intptr_t have
   * the same width on supported platforms (TODO confirm). */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10093, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10092
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10095
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5__int__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for FBCSessionInfo.__int__: forwards to the
 * implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* Unused; presumably the macro does not evaluate __pyx_args/__pyx_nargs
   * (they are not parameters here) — TODO confirm in the Cython headers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__int__: raises TypeError when the
 * wrapped recarray holds more than one element, otherwise returns
 * self._data.ctypes.data (the buffer address) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":10096
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Test self._data.size > 1 via rich comparison against the cached int 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10096, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 10096, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":10097
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Build and raise TypeError(message) via a vectorcall on the type. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10097, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10097, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10096
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":10099
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single-element case: return the buffer address as a Python int. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10099, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10099, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10095
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10101
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_7__len__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for FBCSessionInfo.__len__: forwards to the
 * implementation.  Returns the length, or -1 with an exception set. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  /* Unused; presumably the macro does not evaluate __pyx_args/__pyx_nargs
   * (they are not parameters here) — TODO confirm in the Cython headers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__len__: returns self._data.size
 * coerced to Py_ssize_t; -1 with an exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":10102
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Coerce the size object through __index__ to a C Py_ssize_t. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 10102, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10101
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10104
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* Python-level wrapper for FBCSessionInfo.__eq__(self, other): forwards
 * both operands to the implementation.  New reference, NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* Unused; presumably the macro does not evaluate __pyx_args/__pyx_nargs
   * (they are not parameters here) — TODO confirm in the Cython headers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__eq__ (_nvml.pyx:10104).
 * Returns Py_False when `other` is not an FBCSessionInfo, or when the two
 * `_data` objects differ in `.size` or `.dtype`; otherwise returns
 * bool((self._data == other._data).all()).  Returns NULL with a Python
 * exception set on failure (jump to __pyx_L1_error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":10105
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Take an owned local reference to self._data so later attribute
   * lookups cannot be invalidated mid-comparison. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10106
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuit `or` chain: type check, then size mismatch, then dtype
   * mismatch.  Any true term jumps to __pyx_L4_bool_binop_done. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 10106, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10107
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10106
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":10108
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Element-wise == of the two _data objects, then .all(), then coerce the
   * result to an exact Python bool via double negation. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10108, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10108, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10108, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10108, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10108, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10104
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FBCSessionInfo)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10110
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.session_id property
 * getter: casts `self` and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.session_id property getter (_nvml.pyx:10110).
 * If self._data.size == 1 returns int(self._data.session_id[0]); otherwise
 * returns the self._data.session_id field object as-is.  Returns NULL with
 * a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10113
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_id[0])
 *         return self._data.session_id
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10113, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10114
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.session_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.session_id
 * 
 */
    /* Scalar case: index element 0 and convert to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10114, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10114, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10114, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10113
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_id[0])
 *         return self._data.session_id
 */
  }

  /* "cuda/bindings/_nvml.pyx":10115
 *         if self._data.size == 1:
 *             return int(self._data.session_id[0])
 *         return self._data.session_id             # <<<<<<<<<<<<<<
 * 
 *     @session_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10110
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.session_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10117
 *         return self._data.session_id
 * 
 *     @session_id.setter             # <<<<<<<<<<<<<<
 *     def session_id(self, val):
 *         self._data.session_id = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.session_id property
 * setter: casts `self` and forwards; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.session_id property setter (_nvml.pyx:10117).
 * Performs `self._data.session_id = val`; returns 0 on success, -1 with a
 * Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10119
 *     @session_id.setter
 *     def session_id(self, val):
 *         self._data.session_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_id, __pyx_v_val) < (0)) __PYX_ERR(0, 10119, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10117
 *         return self._data.session_id
 * 
 *     @session_id.setter             # <<<<<<<<<<<<<<
 *     def session_id(self, val):
 *         self._data.session_id = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.session_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10121
 *         self._data.session_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.pid property getter:
 * casts `self` and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.pid property getter (_nvml.pyx:10121).
 * If self._data.size == 1 returns int(self._data.pid[0]); otherwise
 * returns the self._data.pid field object as-is.  Returns NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10124
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10125
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])             # <<<<<<<<<<<<<<
 *         return self._data.pid
 * 
 */
    /* Scalar case: index element 0 and convert to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10125, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10125, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10125, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10124
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.pid[0])
 *         return self._data.pid
 */
  }

  /* "cuda/bindings/_nvml.pyx":10126
 *         if self._data.size == 1:
 *             return int(self._data.pid[0])
 *         return self._data.pid             # <<<<<<<<<<<<<<
 * 
 *     @pid.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10121
 *         self._data.session_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pid(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.pid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10128
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.pid property setter:
 * casts `self` and forwards; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.pid property setter (_nvml.pyx:10128).
 * Performs `self._data.pid = val`; returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10130
 *     @pid.setter
 *     def pid(self, val):
 *         self._data.pid = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_pid, __pyx_v_val) < (0)) __PYX_ERR(0, 10130, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10128
 *         return self._data.pid
 * 
 *     @pid.setter             # <<<<<<<<<<<<<<
 *     def pid(self, val):
 *         self._data.pid = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.pid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10132
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.vgpu_instance property
 * getter: casts `self` and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.vgpu_instance property getter (_nvml.pyx:10132).
 * If self._data.size == 1 returns int(self._data.vgpu_instance[0]);
 * otherwise returns the self._data.vgpu_instance field object as-is.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10135
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10136
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])             # <<<<<<<<<<<<<<
 *         return self._data.vgpu_instance
 * 
 */
    /* Scalar case: index element 0 and convert to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10136, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10136, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10136, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10135
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
 */
  }

  /* "cuda/bindings/_nvml.pyx":10137
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_instance.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10132
 *         self._data.pid = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.vgpu_instance.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10139
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.vgpu_instance property
 * setter: casts `self` and forwards; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.vgpu_instance property setter (_nvml.pyx:10139).
 * Performs `self._data.vgpu_instance = val`; returns 0 on success, -1 with
 * a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10141
 *     @vgpu_instance.setter
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance, __pyx_v_val) < (0)) __PYX_ERR(0, 10141, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10139
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.vgpu_instance.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10143
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def display_ordinal(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.display_ordinal property
 * getter: casts `self` and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* FBCSessionInfo.display_ordinal property getter (_nvml.pyx:10143).
 * If self._data.size == 1 returns int(self._data.display_ordinal[0]);
 * otherwise returns the self._data.display_ordinal field object as-is.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10146
 *     def display_ordinal(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.display_ordinal[0])
 *         return self._data.display_ordinal
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10147
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.display_ordinal[0])             # <<<<<<<<<<<<<<
 *         return self._data.display_ordinal
 * 
 */
    /* Scalar case: index element 0 and convert to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_display_ordinal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10147, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10147, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10147, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10146
 *     def display_ordinal(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.display_ordinal[0])
 *         return self._data.display_ordinal
 */
  }

  /* "cuda/bindings/_nvml.pyx":10148
 *         if self._data.size == 1:
 *             return int(self._data.display_ordinal[0])
 *         return self._data.display_ordinal             # <<<<<<<<<<<<<<
 * 
 *     @display_ordinal.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_display_ordinal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10143
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def display_ordinal(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.display_ordinal.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10150
 *         return self._data.display_ordinal
 * 
 *     @display_ordinal.setter             # <<<<<<<<<<<<<<
 *     def display_ordinal(self, val):
 *         self._data.display_ordinal = val
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* Descriptor-level wrapper for the FBCSessionInfo.display_ordinal property
 * setter: casts `self` and forwards; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10152
 *     @display_ordinal.setter
 *     def display_ordinal(self, val):
 *         self._data.display_ordinal = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_display_ordinal, __pyx_v_val) < (0)) __PYX_ERR(0, 10152, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10150
 *         return self._data.display_ordinal
 * 
 *     @display_ordinal.setter             # <<<<<<<<<<<<<<
 *     def display_ordinal(self, val):
 *         self._data.display_ordinal = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.display_ordinal.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10154
 *         self._data.session_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_type(self):
 *         """Union[~_numpy.int32, int]: """
 */

/* Python wrapper */
/* Descriptor __get__ entry for FBCSessionInfo.session_type: casts the generic
 * PyObject* self to the extension-type struct and delegates to the
 * implementation below. NOTE: Cython-generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, mirroring the Python source: if self._data.size == 1
 * it returns int(self._data.session_type[0]) as a plain Python int; otherwise
 * it returns self._data.session_type unchanged. self._data is presumably a
 * NumPy record array (per the Union[~_numpy.int32, int] docstring) — confirm
 * in the .pyx. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10157
 *     def session_type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_type[0])
 *         return self._data.session_type
 */
  /* Evaluate self._data.size == 1 (scalar fast-path test). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10157, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10158
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.session_type[0])             # <<<<<<<<<<<<<<
 *         return self._data.session_type
 * 
 */
    /* Scalar path: fetch element 0 and coerce it to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10158, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10158, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10158, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10157
 *     def session_type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_type[0])
 *         return self._data.session_type
 */
  }

  /* "cuda/bindings/_nvml.pyx":10159
 *         if self._data.size == 1:
 *             return int(self._data.session_type[0])
 *         return self._data.session_type             # <<<<<<<<<<<<<<
 * 
 *     @session_type.setter
 */
  /* Non-scalar path: return the attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10154
 *         self._data.session_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_type(self):
 *         """Union[~_numpy.int32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.session_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10161
 *         return self._data.session_type
 * 
 *     @session_type.setter             # <<<<<<<<<<<<<<
 *     def session_type(self, val):
 *         self._data.session_type = val
 */

/* Python wrapper */
/* Descriptor __set__ entry for FBCSessionInfo.session_type.
 * Casts self to the extension-type struct and delegates to the implementation.
 * Returns 0 on success, -1 on error (standard setattr protocol).
 * NOTE: Cython-generated code — edit the .pyx, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: forwards the assignment to the attribute of the
 * wrapped self._data object (equivalent to `self._data.session_type = val`). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10163
 *     @session_type.setter
 *     def session_type(self, val):
 *         self._data.session_type = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Delegate the attribute store; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_type, __pyx_v_val) < (0)) __PYX_ERR(0, 10163, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10161
 *         return self._data.session_type
 * 
 *     @session_type.setter             # <<<<<<<<<<<<<<
 *     def session_type(self, val):
 *         self._data.session_type = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.session_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10165
 *         self._data.session_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_flags(self):
 *         """Union[~_numpy.uint32, int]: """
 */

/* Python wrapper */
/* Descriptor __get__ entry for FBCSessionInfo.session_flags: casts the generic
 * PyObject* self to the extension-type struct and delegates to the
 * implementation below. NOTE: Cython-generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, mirroring the Python source: if self._data.size == 1
 * it returns int(self._data.session_flags[0]) as a plain Python int; otherwise
 * it returns self._data.session_flags unchanged. self._data is presumably a
 * NumPy record array — confirm in the .pyx. Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10168
 *     def session_flags(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_flags[0])
 *         return self._data.session_flags
 */
  /* Evaluate self._data.size == 1 (scalar fast-path test). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10168, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10169
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.session_flags[0])             # <<<<<<<<<<<<<<
 *         return self._data.session_flags
 * 
 */
    /* Scalar path: fetch element 0 and coerce it to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10169, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10169, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10169, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10168
 *     def session_flags(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.session_flags[0])
 *         return self._data.session_flags
 */
  }

  /* "cuda/bindings/_nvml.pyx":10170
 *         if self._data.size == 1:
 *             return int(self._data.session_flags[0])
 *         return self._data.session_flags             # <<<<<<<<<<<<<<
 * 
 *     @session_flags.setter
 */
  /* Non-scalar path: return the attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10165
 *         self._data.session_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def session_flags(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.session_flags.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10172
 *         return self._data.session_flags
 * 
 *     @session_flags.setter             # <<<<<<<<<<<<<<
 *     def session_flags(self, val):
 *         self._data.session_flags = val
 */

/* Python wrapper */
/* Descriptor __set__ entry for FBCSessionInfo.session_flags.
 * Casts self to the extension-type struct and delegates to the implementation.
 * Returns 0 on success, -1 on error (standard setattr protocol).
 * NOTE: Cython-generated code — edit the .pyx, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: forwards the assignment to the attribute of the
 * wrapped self._data object (equivalent to `self._data.session_flags = val`). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10174
 *     @session_flags.setter
 *     def session_flags(self, val):
 *         self._data.session_flags = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Delegate the attribute store; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_session_flags, __pyx_v_val) < (0)) __PYX_ERR(0, 10174, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10172
 *         return self._data.session_flags
 * 
 *     @session_flags.setter             # <<<<<<<<<<<<<<
 *     def session_flags(self, val):
 *         self._data.session_flags = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.session_flags.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10176
 *         self._data.session_flags = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def h_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

/* Python wrapper */
/* Descriptor __get__ entry for FBCSessionInfo.h_max_resolution: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * implementation below. NOTE: Cython-generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, mirroring the Python source: if self._data.size == 1
 * it returns int(self._data.h_max_resolution[0]) as a plain Python int;
 * otherwise it returns self._data.h_max_resolution unchanged. self._data is
 * presumably a NumPy record array — confirm in the .pyx. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10179
 *     def h_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.h_max_resolution[0])
 *         return self._data.h_max_resolution
 */
  /* Evaluate self._data.size == 1 (scalar fast-path test). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10179, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10180
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.h_max_resolution[0])             # <<<<<<<<<<<<<<
 *         return self._data.h_max_resolution
 * 
 */
    /* Scalar path: fetch element 0 and coerce it to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_max_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10180, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10180, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10180, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10179
 *     def h_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.h_max_resolution[0])
 *         return self._data.h_max_resolution
 */
  }

  /* "cuda/bindings/_nvml.pyx":10181
 *         if self._data.size == 1:
 *             return int(self._data.h_max_resolution[0])
 *         return self._data.h_max_resolution             # <<<<<<<<<<<<<<
 * 
 *     @h_max_resolution.setter
 */
  /* Non-scalar path: return the attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_max_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10176
 *         self._data.session_flags = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def h_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.h_max_resolution.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10183
 *         return self._data.h_max_resolution
 * 
 *     @h_max_resolution.setter             # <<<<<<<<<<<<<<
 *     def h_max_resolution(self, val):
 *         self._data.h_max_resolution = val
 */

/* Python wrapper */
/* Descriptor __set__ entry for FBCSessionInfo.h_max_resolution.
 * Casts self to the extension-type struct and delegates to the implementation.
 * Returns 0 on success, -1 on error (standard setattr protocol).
 * NOTE: Cython-generated code — edit the .pyx, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: forwards the assignment to the attribute of the
 * wrapped self._data object (equivalent to `self._data.h_max_resolution = val`). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10185
 *     @h_max_resolution.setter
 *     def h_max_resolution(self, val):
 *         self._data.h_max_resolution = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Delegate the attribute store; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_max_resolution, __pyx_v_val) < (0)) __PYX_ERR(0, 10185, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10183
 *         return self._data.h_max_resolution
 * 
 *     @h_max_resolution.setter             # <<<<<<<<<<<<<<
 *     def h_max_resolution(self, val):
 *         self._data.h_max_resolution = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.h_max_resolution.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10187
 *         self._data.h_max_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def v_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

/* Python wrapper */
/* Descriptor __get__ entry for FBCSessionInfo.v_max_resolution: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * implementation below. NOTE: Cython-generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, mirroring the Python source: if self._data.size == 1
 * it returns int(self._data.v_max_resolution[0]) as a plain Python int;
 * otherwise it returns self._data.v_max_resolution unchanged. self._data is
 * presumably a NumPy record array — confirm in the .pyx. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10190
 *     def v_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.v_max_resolution[0])
 *         return self._data.v_max_resolution
 */
  /* Evaluate self._data.size == 1 (scalar fast-path test). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10190, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10191
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.v_max_resolution[0])             # <<<<<<<<<<<<<<
 *         return self._data.v_max_resolution
 * 
 */
    /* Scalar path: fetch element 0 and coerce it to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_max_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10191, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10191, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10191, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10190
 *     def v_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.v_max_resolution[0])
 *         return self._data.v_max_resolution
 */
  }

  /* "cuda/bindings/_nvml.pyx":10192
 *         if self._data.size == 1:
 *             return int(self._data.v_max_resolution[0])
 *         return self._data.v_max_resolution             # <<<<<<<<<<<<<<
 * 
 *     @v_max_resolution.setter
 */
  /* Non-scalar path: return the attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_max_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10187
 *         self._data.h_max_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def v_max_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.v_max_resolution.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10194
 *         return self._data.v_max_resolution
 * 
 *     @v_max_resolution.setter             # <<<<<<<<<<<<<<
 *     def v_max_resolution(self, val):
 *         self._data.v_max_resolution = val
 */

/* Python wrapper */
/* Descriptor __set__ entry for FBCSessionInfo.v_max_resolution.
 * Casts self to the extension-type struct and delegates to the implementation.
 * Returns 0 on success, -1 on error (standard setattr protocol).
 * NOTE: Cython-generated code — edit the .pyx, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: forwards the assignment to the attribute of the
 * wrapped self._data object (equivalent to `self._data.v_max_resolution = val`). */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10196
 *     @v_max_resolution.setter
 *     def v_max_resolution(self, val):
 *         self._data.v_max_resolution = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Delegate the attribute store; jumps to the error label on failure. */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_max_resolution, __pyx_v_val) < (0)) __PYX_ERR(0, 10196, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10194
 *         return self._data.v_max_resolution
 * 
 *     @v_max_resolution.setter             # <<<<<<<<<<<<<<
 *     def v_max_resolution(self, val):
 *         self._data.v_max_resolution = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.v_max_resolution.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10198
 *         self._data.v_max_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

/* Python wrapper */
/* Descriptor __get__ entry for FBCSessionInfo.h_resolution: casts the generic
 * PyObject* self to the extension-type struct and delegates to the
 * implementation below. NOTE: Cython-generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12h_resolution___get__ != 0 ? 0 : 0, __pyx_r; /* (unreachable placeholder — see below) */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation, mirroring the Python source: if self._data.size == 1
 * it returns int(self._data.h_resolution[0]) as a plain Python int; otherwise
 * it returns self._data.h_resolution unchanged. self._data is presumably a
 * NumPy record array — confirm in the .pyx. Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10201
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.h_resolution[0])
 *         return self._data.h_resolution
 */
  /* Evaluate self._data.size == 1 (scalar fast-path test). */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10201, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10202
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.h_resolution[0])             # <<<<<<<<<<<<<<
 *         return self._data.h_resolution
 * 
 */
    /* Scalar path: fetch element 0 and coerce it to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10202, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10202, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10202, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10201
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.h_resolution[0])
 *         return self._data.h_resolution
 */
  }

  /* "cuda/bindings/_nvml.pyx":10203
 *         if self._data.size == 1:
 *             return int(self._data.h_resolution[0])
 *         return self._data.h_resolution             # <<<<<<<<<<<<<<
 * 
 *     @h_resolution.setter
 */
  /* Non-scalar path: return the attribute object itself. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10198
 *         self._data.v_max_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def h_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.h_resolution.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10205
 *         return self._data.h_resolution
 * 
 *     @h_resolution.setter             # <<<<<<<<<<<<<<
 *     def h_resolution(self, val):
 *         self._data.h_resolution = val
*/

/* Python wrapper */
/* Generated __set__ descriptor-slot wrapper for FBCSessionInfo.h_resolution:
 * casts the generic PyObject* receiver to the concrete extension-type struct
 * and delegates to the __pyx_pf_* implementation. Returns 0/-1 per the
 * setattrofunc protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.h_resolution.__set__` (quoted .pyx below):
 * performs `self._data.h_resolution = val` via a setattr on the wrapped data
 * object. Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10207
 *     @h_resolution.setter
 *     def h_resolution(self, val):
 *         self._data.h_resolution = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_h_resolution, __pyx_v_val) < (0)) __PYX_ERR(0, 10207, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10205
 *         return self._data.h_resolution
 * 
 *     @h_resolution.setter             # <<<<<<<<<<<<<<
 *     def h_resolution(self, val):
 *         self._data.h_resolution = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.h_resolution.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10209
 *         self._data.h_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Generated __get__ descriptor-slot wrapper for FBCSessionInfo.v_resolution:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.v_resolution.__get__` (quoted .pyx below):
 * if self._data.size == 1, return int(self._data.v_resolution[0]); otherwise
 * return the self._data.v_resolution attribute object unchanged.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10212
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.v_resolution[0])
 *         return self._data.v_resolution
 */
  /* Fetch self._data.size and compare it with the Python int 1 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10212, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10213
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.v_resolution[0])             # <<<<<<<<<<<<<<
 *         return self._data.v_resolution
 * 
 */
    /* Single-element case: index [0] and coerce to a plain Python int */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10213, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10213, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10213, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10212
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.v_resolution[0])
 *         return self._data.v_resolution
 */
  }

  /* "cuda/bindings/_nvml.pyx":10214
 *         if self._data.size == 1:
 *             return int(self._data.v_resolution[0])
 *         return self._data.v_resolution             # <<<<<<<<<<<<<<
 * 
 *     @v_resolution.setter
 */
  /* General case: return the attribute object itself */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_resolution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10209
 *         self._data.h_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def v_resolution(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.v_resolution.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10216
 *         return self._data.v_resolution
 * 
 *     @v_resolution.setter             # <<<<<<<<<<<<<<
 *     def v_resolution(self, val):
 *         self._data.v_resolution = val
*/

/* Python wrapper */
/* Generated __set__ descriptor-slot wrapper for FBCSessionInfo.v_resolution:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns 0/-1 per the setattrofunc protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.v_resolution.__set__` (quoted .pyx below):
 * performs `self._data.v_resolution = val` via a setattr on the wrapped data
 * object. Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10218
 *     @v_resolution.setter
 *     def v_resolution(self, val):
 *         self._data.v_resolution = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_v_resolution, __pyx_v_val) < (0)) __PYX_ERR(0, 10218, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10216
 *         return self._data.v_resolution
 * 
 *     @v_resolution.setter             # <<<<<<<<<<<<<<
 *     def v_resolution(self, val):
 *         self._data.v_resolution = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.v_resolution.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10220
 *         self._data.v_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Generated __get__ descriptor-slot wrapper for FBCSessionInfo.average_fps:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.average_fps.__get__` (quoted .pyx below):
 * if self._data.size == 1, return int(self._data.average_fps[0]); otherwise
 * return the self._data.average_fps attribute object unchanged.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10223
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_fps[0])
 *         return self._data.average_fps
 */
  /* Fetch self._data.size and compare it with the Python int 1 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10223, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10223, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10224
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.average_fps[0])             # <<<<<<<<<<<<<<
 *         return self._data.average_fps
 * 
 */
    /* Single-element case: index [0] and coerce to a plain Python int */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_fps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10224, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10224, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10224, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10223
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_fps[0])
 *         return self._data.average_fps
 */
  }

  /* "cuda/bindings/_nvml.pyx":10225
 *         if self._data.size == 1:
 *             return int(self._data.average_fps[0])
 *         return self._data.average_fps             # <<<<<<<<<<<<<<
 * 
 *     @average_fps.setter
 */
  /* General case: return the attribute object itself */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_fps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10225, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10220
 *         self._data.v_resolution = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_fps(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.average_fps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10227
 *         return self._data.average_fps
 * 
 *     @average_fps.setter             # <<<<<<<<<<<<<<
 *     def average_fps(self, val):
 *         self._data.average_fps = val
*/

/* Python wrapper */
/* Generated __set__ descriptor-slot wrapper for FBCSessionInfo.average_fps:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns 0/-1 per the setattrofunc protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.average_fps.__set__` (quoted .pyx below):
 * performs `self._data.average_fps = val` via a setattr on the wrapped data
 * object. Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10229
 *     @average_fps.setter
 *     def average_fps(self, val):
 *         self._data.average_fps = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_fps, __pyx_v_val) < (0)) __PYX_ERR(0, 10229, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10227
 *         return self._data.average_fps
 * 
 *     @average_fps.setter             # <<<<<<<<<<<<<<
 *     def average_fps(self, val):
 *         self._data.average_fps = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.average_fps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10231
 *         self._data.average_fps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Generated __get__ descriptor-slot wrapper for FBCSessionInfo.average_latency:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.average_latency.__get__` (quoted .pyx
 * below): if self._data.size == 1, return int(self._data.average_latency[0]);
 * otherwise return the self._data.average_latency attribute object unchanged.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10234
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_latency[0])
 *         return self._data.average_latency
 */
  /* Fetch self._data.size and compare it with the Python int 1 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10234, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10235
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.average_latency[0])             # <<<<<<<<<<<<<<
 *         return self._data.average_latency
 * 
 */
    /* Single-element case: index [0] and coerce to a plain Python int */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_latency); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10235, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10235, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10235, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10234
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.average_latency[0])
 *         return self._data.average_latency
 */
  }

  /* "cuda/bindings/_nvml.pyx":10236
 *         if self._data.size == 1:
 *             return int(self._data.average_latency[0])
 *         return self._data.average_latency             # <<<<<<<<<<<<<<
 * 
 *     @average_latency.setter
 */
  /* General case: return the attribute object itself */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_latency); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10231
 *         self._data.average_fps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def average_latency(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.average_latency.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10238
 *         return self._data.average_latency
 * 
 *     @average_latency.setter             # <<<<<<<<<<<<<<
 *     def average_latency(self, val):
 *         self._data.average_latency = val
*/

/* Python wrapper */
/* Generated __set__ descriptor-slot wrapper for FBCSessionInfo.average_latency:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns 0/-1 per the setattrofunc protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.average_latency.__set__` (quoted .pyx
 * below): performs `self._data.average_latency = val` via a setattr on the
 * wrapped data object. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10240
 *     @average_latency.setter
 *     def average_latency(self, val):
 *         self._data.average_latency = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_average_latency, __pyx_v_val) < (0)) __PYX_ERR(0, 10240, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10238
 *         return self._data.average_latency
 * 
 *     @average_latency.setter             # <<<<<<<<<<<<<<
 *     def average_latency(self, val):
 *         self._data.average_latency = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.average_latency.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10242
 *         self._data.average_latency = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Generated mp_subscript slot wrapper for FBCSessionInfo.__getitem__:
 * casts the receiver to the concrete extension-type struct and delegates to
 * the __pyx_pf_* implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs resolve through Cython-internal macros, not locals */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__getitem__ (pyx lines 10242-10256).
 * Integer keys: bounds-check against self._data.size (accepting negative
 * indices, which are normalized by adding size), then return
 * FBCSessionInfo.from_data(self._data[key_:key_+1]) — a one-element slice
 * re-wrapped in the extension type.
 * Any other key: evaluate out = self._data[key]; if out is a numpy.recarray
 * whose dtype equals the module-level fbc_session_info_dtype, re-wrap it via
 * FBCSessionInfo.from_data, otherwise return out unchanged.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":10245
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10246
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 10246, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":10247
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10247, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 10247, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":10248
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Short-circuit `or`: second operand only evaluated when the first is false */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10249
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      /* Construct and raise IndexError("index is out of bounds") */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10249, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 10249, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10248
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":10250
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":10251
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* Normalize negative indices to the equivalent non-negative index */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":10250
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":10252
 *             if key_ < 0:
 *                 key_ += size
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == fbc_session_info_dtype:
 */
    /* Slice a one-element view and re-wrap it via FBCSessionInfo.from_data */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10252, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10252, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10245
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":10253
 *                 key_ += size
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == fbc_session_info_dtype:
 *             return FBCSessionInfo.from_data(out)
 */
  /* Non-integer key (e.g. slice or field name): delegate to self._data[key] */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":10254
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == fbc_session_info_dtype:             # <<<<<<<<<<<<<<
 *             return FBCSessionInfo.from_data(out)
 *         return out
 */
  /* Short-circuit `and`: dtype comparison only runs for recarray results */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_fbc_session_info_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 10254, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10255
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == fbc_session_info_dtype:
 *             return FBCSessionInfo.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    /* Matching-dtype recarray: re-wrap in FBCSessionInfo */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10255, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10254
 *             return FBCSessionInfo.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == fbc_session_info_dtype:             # <<<<<<<<<<<<<<
 *             return FBCSessionInfo.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":10256
 *         if isinstance(out, _numpy.recarray) and out.dtype == fbc_session_info_dtype:
 *             return FBCSessionInfo.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Anything else is returned unchanged */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10242
 *         self._data.average_latency = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10258
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python wrapper for FBCSessionInfo.__setitem__ (the mp_ass_subscript slot).
 * Casts `self` to the extension-type struct and delegates to the impl
 * function; returns 0 on success, -1 with an exception set, per the
 * CPython slot ABI. */
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): this slot signature has no __pyx_args/__pyx_nargs
   * parameters, so __Pyx_KwValues_VARARGS presumably expands without
   * evaluating its arguments — confirm against the utility-code macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.__setitem__: forwards item assignment
 * to the wrapped NumPy recarray (`self._data[key] = val`).  Any exception
 * raised by NumPy (e.g. IndexError, ValueError) propagates unchanged.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":10259
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Delegate directly to PyObject_SetItem on the underlying recarray. */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 10259, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10258
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10261
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FBCSessionInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method FBCSessionInfo.from_data(data).
 *
 * Unpacks exactly one argument ("data", positional or keyword) from the
 * METH_FASTCALL argument vector and forwards it to the implementation
 * function.  Returns a new reference, or NULL with an exception set.
 *
 * Review fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * but unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is
 * always 0 or 1 and can never be negative — the check could never fire,
 * so an error return from __Pyx_NumKwargs_FASTCALL() was silently
 * dropped.  The comparison is now performed inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_14from_data, "FBCSessionInfo.from_data(data)\n\nCreate an FBCSessionInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `fbc_session_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  /* Under the non-FASTCALL fallback, recover the positional count from the
   * args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* __Pyx_NumKwargs_FASTCALL() signals failure with a negative count; the
     * comparison must live inside unlikely() for the check to be meaningful. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10261, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10261, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 10261, __pyx_L3_error)
      /* Verify every required argument was supplied either positionally or
       * by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 10261, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10261, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 10261, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any argument references taken above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.from_data(data).
 *
 * Validates that `data` is a 1D numpy.ndarray with dtype
 * `fbc_session_info_dtype`, then stores a recarray view of it on a freshly
 * allocated FBCSessionInfo instance (no copy: the wrapper aliases the
 * caller's buffer).  Raises TypeError for non-ndarray input and ValueError
 * for wrong ndim/dtype.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":10268
 *             data (_numpy.ndarray): a 1D array of dtype `fbc_session_info_dtype` holding the data.
 *         """
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Allocate via tp_new directly (bypasses __init__). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_FBCSessionInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10268, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10269
 *         """
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* Type check: data must be an instance of numpy.ndarray (looked up from
   * the imported module at call time). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10269, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10269, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 10269, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":10270
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10270, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10270, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10269
 *         """
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":10271
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != fbc_session_info_dtype:
 */
  /* Dimensionality check: compare data.ndim against the int constant 1. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10271, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 10271, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":10272
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != fbc_session_info_dtype:
 *             raise ValueError("data array must be of dtype fbc_session_info_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10272, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10272, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10271
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != fbc_session_info_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":10273
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != fbc_session_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype fbc_session_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Dtype check: rich-compare data.dtype != fbc_session_info_dtype (a
   * module-level global). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10273, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_fbc_session_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10273, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10273, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 10273, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":10274
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != fbc_session_info_dtype:
 *             raise ValueError("data array must be of dtype fbc_session_info_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_fbc};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10274, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 10274, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10273
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != fbc_session_info_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype fbc_session_info_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":10275
 *         if data.dtype != fbc_session_info_dtype:
 *             raise ValueError("data array must be of dtype fbc_session_info_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* obj._data = data.view(numpy.recarray): a zero-copy view that aliases
   * the caller's buffer.  The old _data reference is dropped after the new
   * one is installed (reference swap below). */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10275, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10275, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10275, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10277
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10261
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FBCSessionInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10279
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FBCSessionInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for the static method
 * FBCSessionInfo.from_ptr(ptr, size=1, readonly=False).
 *
 * Unpacks 1-3 arguments (positional or keyword) from the METH_FASTCALL
 * argument vector, converts them to C types (intptr_t, size_t, bint) and
 * forwards them to the implementation.  Returns a new reference, or NULL
 * with an exception set.
 *
 * Review fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * but unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is
 * always 0 or 1 and can never be negative — the check could never fire,
 * so an error return from __Pyx_NumKwargs_FASTCALL() was silently
 * dropped.  The comparison is now performed inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_16from_ptr, "FBCSessionInfo.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an FBCSessionInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  /* Under the non-FASTCALL fallback, recover the positional count from the
   * args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* __Pyx_NumKwargs_FASTCALL() signals failure with a negative count; the
     * comparison must live inside unlikely() for the check to be meaningful. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10279, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge
       * keywords via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10279, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10279, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10279, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 10279, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; `size` and `readonly` have
       * C-level defaults applied below. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 10279, __pyx_L3_error) }
      }
    } else {
      /* Purely positional call: 1-3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10279, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10279, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10279, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): converts the pointer via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width, which holds on the
     * supported flat-address-space platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10280, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 10280, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10280, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":10280
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an FBCSessionInfo instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 10279, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any argument references taken above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":10279
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FBCSessionInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FBCSessionInfo.from_ptr(ptr, size, readonly).
 *
 * Wraps `size` nvmlFBCSessionInfo_t structs at raw address `ptr` in a
 * NumPy recarray without copying: a memoryview is created over the memory
 * (writable unless `readonly`), an ndarray of dtype fbc_session_info_dtype
 * is built on that buffer, and its recarray view is stored on a fresh
 * FBCSessionInfo instance.  Raises ValueError when ptr == 0.
 * NOTE(review): the caller must keep the memory at `ptr` alive for the
 * lifetime of the returned object — nothing here takes ownership; confirm
 * against the .pyx docs.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10288
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 */
  /* Reject null pointers before wrapping memory. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10289
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10289, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10289, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10288
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":10290
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_FBCSessionInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10290, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10291
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlFBCSessionInfo_t) * size, flag)
 */
  /* Pick the memoryview access mode: PyBUF_READ when readonly, else
   * PyBUF_WRITE (boxed as a Python int because `flag` is an object var). */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10291, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10291, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10293
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlFBCSessionInfo_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=fbc_session_info_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Unbox the flag back to a C int for the C-API call. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10293, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":10292
 *         cdef FBCSessionInfo obj = FBCSessionInfo.__new__(FBCSessionInfo)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlFBCSessionInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=fbc_session_info_dtype)
 */
  /* Zero-copy memoryview over the raw memory; spans size structs. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlFBCSessionInfo_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10294
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlFBCSessionInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=fbc_session_info_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* data = numpy.ndarray(size, buffer=buf, dtype=fbc_session_info_dtype):
   * vectorcall with one positional and two keyword arguments. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_fbc_session_info_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10294, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 10294, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 10294, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10294, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10295
 *             <char*>ptr, sizeof(nvmlFBCSessionInfo_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=fbc_session_info_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* obj._data = data.view(numpy.recarray): install the recarray view,
   * dropping the previous _data reference (reference swap below). */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10295, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10297
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10279
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FBCSessionInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10071
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Python-level wrapper for the read-only `FBCSessionInfo._data` property
 * getter.  Receives the generic PyObject* `self`, casts it to the concrete
 * extension-object struct and delegates to the typed implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is presumably a macro that does not
 * evaluate its arguments in this build configuration (there are no
 * __pyx_args/__pyx_nargs locals declared here) -- confirm against the
 * generated Cython preamble if this fails to compile. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the typed getter implementation below. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `FBCSessionInfo._data` property getter: returns a
 * new reference to the stored `_data` attribute (the numpy recarray view
 * backing this struct wrapper).  Cannot fail -- it only increfs and returns
 * the existing object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Return a new (owned) reference to the attribute. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Python-level wrapper for `FBCSessionInfo.__reduce_cython__` (pickle
 * support).  Validates that the call carries no positional or keyword
 * arguments, then delegates to the typed implementation.  The METH_FASTCALL
 * and tuple-based calling conventions are both supported via #if branches. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_18__reduce_cython__, "FBCSessionInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple calling convention: recover nargs from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.__reduce_cython__` (pickle protocol).
 * Builds `state = (self._data,)`, appends the instance `__dict__` when it is
 * non-empty, and returns either
 *   (__pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, None), state)
 * when __setstate__ must be invoked, or
 *   (__pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state))
 * otherwise.  0xa75e18a is the Cython-generated checksum of the type layout
 * used to validate the pickle on load. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  /* state = (self._data,) -- the single C-level attribute of this type. */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuit `and`: only test truthiness when _dict is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* NOTE(review): the expression was constant-folded by the code
     * generator into a cached non-empty tuple (__pyx_tuple[2]), whose truth
     * test always yields 1, so use_setstate ends up True on this path too.
     * Looks like a Cython codegen quirk -- harmless, but confirm against
     * the generating Cython version before relying on it. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state)
 */
    /* 3-tuple form: reconstructor, args with None placeholder, and state
     * to be applied via __setstate__ on unpickle. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FBCSessionInfo); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_FBCSessionInfo__set_state(self, __pyx_state)
 */
  /*else*/ {
    /* 2-tuple form: state is passed directly in the reconstructor args. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FBCSessionInfo); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release any live temporaries, record traceback, fall
   * through to common cleanup with a NULL result. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_FBCSessionInfo__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Python-level wrapper for `FBCSessionInfo.__setstate_cython__(self,
 * __pyx_state)` (pickle support).  Parses exactly one positional or
 * keyword argument named `__pyx_state`, then delegates to the typed
 * implementation.  Both METH_FASTCALL and tuple calling conventions are
 * supported via #if branches. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_20__setstate_cython__, "FBCSessionInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then match
       * keywords against __pyx_pyargnames and fill remaining slots. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FBCSessionInfo.__setstate_cython__`: validates that
 * __pyx_state is a tuple (rejecting None explicitly, since the C helper
 * declares its parameter `not None`) and forwards it to
 * __pyx_unpickle_FBCSessionInfo__set_state, which restores the instance
 * attributes.  Returns None on success. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14FBCSessionInfo_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_FBCSessionInfo__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Type check: must be an exact tuple or None; then None is rejected
   * separately because the callee requires a non-None tuple. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FBCSessionInfo__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_FBCSessionInfo__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FBCSessionInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10300
 * 
 * 
 * cdef _get_conf_compute_system_caps_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeSystemCaps_t pod = nvmlConfComputeSystemCaps_t()
 *     return _numpy.dtype({
*/

/* Builds a numpy structured dtype mirroring the C layout of
 * nvmlConfComputeSystemCaps_t: field names ['cpu_caps', 'gpus_caps'],
 * formats [uint32, uint32], byte offsets computed from the addresses of a
 * stack-local struct instance, and itemsize = sizeof(the struct).  This
 * lets Python-side recarrays alias the NVML POD exactly, including any
 * padding. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_system_caps_dtype_offsets(void) {
  nvmlConfComputeSystemCaps_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlConfComputeSystemCaps_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_conf_compute_system_caps_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":10301
 * 
 * cdef _get_conf_compute_system_caps_dtype_offsets():
 *     cdef nvmlConfComputeSystemCaps_t pod = nvmlConfComputeSystemCaps_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['cpu_caps', 'gpus_caps'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy -- the
   * generated code for `pod = nvmlConfComputeSystemCaps_t()` copies an
   * uninitialized temporary.  Only the ADDRESSES of pod's fields are used
   * below (for offset arithmetic), never its values, so this appears
   * harmless; any fix belongs in the Cython code generator, not here. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":10302
 * cdef _get_conf_compute_system_caps_dtype_offsets():
 *     cdef nvmlConfComputeSystemCaps_t pod = nvmlConfComputeSystemCaps_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['cpu_caps', 'gpus_caps'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up _numpy.dtype once; called at the end with the spec dict. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":10303
 *     cdef nvmlConfComputeSystemCaps_t pod = nvmlConfComputeSystemCaps_t()
 *     return _numpy.dtype({
 *         'names': ['cpu_caps', 'gpus_caps'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Spec dict, key 'names'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cpu_caps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cpu_caps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_cpu_caps) != (0)) __PYX_ERR(0, 10303, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpus_caps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpus_caps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_gpus_caps) != (0)) __PYX_ERR(0, 10303, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 10303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10304
 *     return _numpy.dtype({
 *         'names': ['cpu_caps', 'gpus_caps'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.cpuCaps)) - (<intptr_t>&pod),
 */
  /* Key 'formats': both fields are numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10304, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 10304, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10304, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 10303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10306
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.cpuCaps)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpusCaps)) - (<intptr_t>&pod),
 *         ],
 */
  /* Key 'offsets': byte offset of each field = field address - struct
   * address (effectively offsetof, computed via pointer arithmetic). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.cpuCaps)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10306, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":10307
 *         'offsets': [
 *             (<intptr_t>&(pod.cpuCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpusCaps)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeSystemCaps_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpusCaps)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":10305
 *         'names': ['cpu_caps', 'gpus_caps'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.cpuCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpusCaps)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 10305, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10305, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 10303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":10309
 *             (<intptr_t>&(pod.gpusCaps)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeSystemCaps_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* Key 'itemsize': total struct size so the dtype covers trailing pad. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlConfComputeSystemCaps_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 10303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall `_numpy.dtype(spec)`, unpacking a bound method if needed. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10300
 * 
 * 
 * cdef _get_conf_compute_system_caps_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeSystemCaps_t pod = nvmlConfComputeSystemCaps_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_conf_compute_system_caps_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10326
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeSystemCaps_t *>calloc(1, sizeof(nvmlConfComputeSystemCaps_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for `ConfComputeSystemCaps.__init__(self)`.
 * Enforces the zero-argument signature (no positionals, no keywords),
 * then delegates to the typed implementation, which allocates the
 * underlying nvmlConfComputeSystemCaps_t.  Returns 0 on success, -1 on
 * error, per the tp_init convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemCaps.__init__:
 * zero-allocates one nvmlConfComputeSystemCaps_t with calloc, raises
 * MemoryError on allocation failure, and initialises the ownership flags
 * (_owner=None, _owned=True, _readonly=False) so __dealloc__ will free
 * the buffer. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":10327
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeSystemCaps_t *>calloc(1, sizeof(nvmlConfComputeSystemCaps_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeSystemCaps")
 */
  /* calloc zero-initialises the struct contents. */
  __pyx_v_self->_ptr = ((nvmlConfComputeSystemCaps_t *)calloc(1, (sizeof(nvmlConfComputeSystemCaps_t))));

  /* "cuda/bindings/_nvml.pyx":10328
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeSystemCaps_t *>calloc(1, sizeof(nvmlConfComputeSystemCaps_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeSystemCaps")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10329
 *         self._ptr = <nvmlConfComputeSystemCaps_t *>calloc(1, sizeof(nvmlConfComputeSystemCaps_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeSystemCaps")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in module globals and call it with the message
       string via the vectorcall fast path; __pyx_t_5 is the args offset
       (1 = no bound self, 0 = unpacked bound method). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10329, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeSyst};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10329, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10329, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10328
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeSystemCaps_t *>calloc(1, sizeof(nvmlConfComputeSystemCaps_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeSystemCaps")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":10330
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeSystemCaps")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: swap the old reference for None with refnanny
     bookkeeping (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":10331
 *             raise MemoryError("Error allocating ConfComputeSystemCaps")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":10332
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":10326
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeSystemCaps_t *>calloc(1, sizeof(nvmlConfComputeSystemCaps_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10334
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeSystemCaps_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeSystemCaps.__dealloc__ (tp_dealloc
 * helper): no arguments, no return value; simply forwards to the
 * implementation. NOTE(review): __pyx_args/__pyx_nargs are not declared in
 * this function — this compiles only because __Pyx_KwValues_VARARGS
 * presumably expands without evaluating its arguments; confirm against the
 * macro definition earlier in this generated file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of ConfComputeSystemCaps.__dealloc__.
 * Frees the nvmlConfComputeSystemCaps_t buffer only when this object owns
 * it (_owned) and the pointer is non-NULL. The field is cleared before
 * free() so self->_ptr never dangles. Non-owned pointers (borrowed from an
 * _owner) are deliberately left alone. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":10336-10339:
   *   if self._owned and self._ptr != NULL:
   *       ptr = self._ptr
   *       self._ptr = NULL
   *       free(ptr)
   */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlConfComputeSystemCaps_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;   /* clear first: no live reference to freed memory */
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":10341
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeSystemCaps object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeSystemCaps.__repr__: casts self to
 * the extension-type struct and forwards to the implementation. Returns a
 * new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemCaps.__repr__:
 * evaluates f"<{__name__}.ConfComputeSystemCaps object at {hex(id(self))}>"
 * by formatting the module's __name__ and hex(id(self)) as str, then
 * joining five unicode fragments in a single preallocated pass. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":10342
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ConfComputeSystemCaps object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__) via the empty-format-spec fast path. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Five fragments: "<", __name__, ".ConfComputeSystemCaps object at ",
     hex-id, ">". The 1 * 2 term covers the two one-character literal
     fragments and 33 == len(".ConfComputeSystemCaps object at "); the
     max-char mask picks a wide-enough unicode representation up front. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ConfComputeSystemCaps_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10341
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeSystemCaps object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10344
 *         return f"<{__name__}.ConfComputeSystemCaps object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the ConfComputeSystemCaps.ptr property getter:
 * forwards to the implementation. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10347
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10344
 *         return f"<{__name__}.ConfComputeSystemCaps object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10349
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level helper for "cdef intptr_t _get_ptr(self)":
 * returns the address of the wrapped nvmlConfComputeSystemCaps_t struct
 * as an integer, with no Python-object overhead. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":10352
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeSystemCaps.__int__: forwards to the
 * implementation. Returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":10353
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10352
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10355
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeSystemCaps other_
 *         if not isinstance(other, ConfComputeSystemCaps):
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeSystemCaps.__eq__(self, other):
 * forwards both operands to the implementation. Returns a new reference
 * (Py_True/Py_False) or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemCaps.__eq__:
 * returns False if `other` is not a ConfComputeSystemCaps; otherwise
 * compares the two wrapped structs byte-wise with memcmp.
 * NOTE(review): byte-wise equality assumes any struct padding is
 * consistently initialised (the calloc/memcpy allocation paths in this
 * class suggest it is) — confirm for structs filled by NVML itself. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":10357
 *     def __eq__(self):
 *         cdef ConfComputeSystemCaps other_
 *         if not isinstance(other, ConfComputeSystemCaps):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10358
 *         cdef ConfComputeSystemCaps other_
 *         if not isinstance(other, ConfComputeSystemCaps):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemCaps_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10357
 *     def __eq__(self, other):
 *         cdef ConfComputeSystemCaps other_
 *         if not isinstance(other, ConfComputeSystemCaps):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":10359
 *         if not isinstance(other, ConfComputeSystemCaps):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemCaps_t)) == 0)
 * 
 */
  /* Typed assignment: the TypeTest cannot fail here because the isinstance
     guard above already returned False for non-instances. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps))))) __PYX_ERR(0, 10359, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":10360
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemCaps_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlConfComputeSystemCaps_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10360, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10355
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeSystemCaps other_
 *         if not isinstance(other, ConfComputeSystemCaps):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10362
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemCaps_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeSystemCaps.__setitem__(self, key, val):
 * forwards to the implementation. Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemCaps.__setitem__:
 * obj[0] = <numpy.ndarray> replaces the whole struct — a fresh buffer is
 * malloc'd and the ndarray's bytes are memcpy'd in; _readonly mirrors the
 * array's writeable flag. Any other key falls back to setattr(self, key, val).
 * NOTE(review): the obj[0] path overwrites self._ptr with a new malloc
 * without freeing a previously owned buffer — looks like a leak when
 * _owned was already True. Generated code: the fix belongs in the
 * __setitem__ of cuda/bindings/_nvml.pyx, not here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":10363
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: test key == 0 first, then isinstance(val, numpy.ndarray). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10363, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10363, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10363, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 10363, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10364
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 */
    __pyx_v_self->_ptr = ((nvmlConfComputeSystemCaps_t *)malloc((sizeof(nvmlConfComputeSystemCaps_t))));

    /* "cuda/bindings/_nvml.pyx":10365
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemCaps_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10366
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemCaps_t))
 *             self._owner = None
 */
      /* Same MemoryError construction pattern as __init__: global lookup,
         optional bound-method unpack, vectorcall with the message string. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10366, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeSyst};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10366, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 10366, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10365
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemCaps_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10367
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemCaps_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (the ndarray's buffer address as an int) and
       copy one struct's worth of bytes into the new buffer. Assumes the
       array holds at least sizeof(nvmlConfComputeSystemCaps_t) bytes —
       enforced by the .pyx-level dtype, not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10367, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10367, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10367, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlConfComputeSystemCaps_t))));

    /* "cuda/bindings/_nvml.pyx":10368
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemCaps_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10369
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemCaps_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10370
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's writeability into the wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10370, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10370, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 10370, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":10363
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":10372
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 10372, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":10362
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemCaps_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10374
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cpu_caps(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10377
 *     def cpu_caps(self):
 *         """int: """
 *         return self._ptr[0].cpuCaps             # <<<<<<<<<<<<<<
 * 
 *     @cpu_caps.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).cpuCaps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10377, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10374
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cpu_caps(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.cpu_caps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10379
 *         return self._ptr[0].cpuCaps
 * 
 *     @cpu_caps.setter             # <<<<<<<<<<<<<<
 *     def cpu_caps(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10381
 *     @cpu_caps.setter
 *     def cpu_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")
 *         self._ptr[0].cpuCaps = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10382
 *     def cpu_caps(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].cpuCaps = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeSystemCaps_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10382, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10382, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10381
 *     @cpu_caps.setter
 *     def cpu_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")
 *         self._ptr[0].cpuCaps = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":10383
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")
 *         self._ptr[0].cpuCaps = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10383, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).cpuCaps = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10379
 *         return self._ptr[0].cpuCaps
 * 
 *     @cpu_caps.setter             # <<<<<<<<<<<<<<
 *     def cpu_caps(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.cpu_caps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10385
 *         self._ptr[0].cpuCaps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpus_caps(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10388
 *     def gpus_caps(self):
 *         """int: """
 *         return self._ptr[0].gpusCaps             # <<<<<<<<<<<<<<
 * 
 *     @gpus_caps.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).gpusCaps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10385
 *         self._ptr[0].cpuCaps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpus_caps(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.gpus_caps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10390
 *         return self._ptr[0].gpusCaps
 * 
 *     @gpus_caps.setter             # <<<<<<<<<<<<<<
 *     def gpus_caps(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10392
 *     @gpus_caps.setter
 *     def gpus_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")
 *         self._ptr[0].gpusCaps = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10393
 *     def gpus_caps(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].gpusCaps = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeSystemCaps_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10393, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10393, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10392
 *     @gpus_caps.setter
 *     def gpus_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")
 *         self._ptr[0].gpusCaps = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":10394
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemCaps instance is read-only")
 *         self._ptr[0].gpusCaps = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10394, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).gpusCaps = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10390
 *         return self._ptr[0].gpusCaps
 * 
 *     @gpus_caps.setter             # <<<<<<<<<<<<<<
 *     def gpus_caps(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.gpus_caps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10396
 *         self._ptr[0].gpusCaps = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeSystemCaps instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method `ConfComputeSystemCaps.from_data(data)`:
 * parses the single positional-or-keyword argument `data` (METH_FASTCALL
 * with keywords) and delegates to the implementation function.
 * Returns a new reference, or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`. Since `unlikely(x)` wraps its
 * argument in `__builtin_expect(!!(x), 0)` (yielding only 0 or 1), the
 * `< 0` comparison was always false and a negative error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. The comparison now sits
 * inside the macro argument: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_12from_data, "ConfComputeSystemCaps.from_data(data)\n\nCreate an ConfComputeSystemCaps instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `conf_compute_system_caps_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative length means keyword-count retrieval failed. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10396, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10396, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 10396, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 10396, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10396, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 10396, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":10403
 *             data (_numpy.ndarray): a single-element array of dtype `conf_compute_system_caps_dtype` holding the data.
 *         """
 *         return __from_data(data, "conf_compute_system_caps_dtype", conf_compute_system_caps_dtype, ConfComputeSystemCaps)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_conf_compute_system_caps_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_conf_compute_system_caps_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10396
 *         self._ptr[0].gpusCaps = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeSystemCaps instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10405
 *         return __from_data(data, "conf_compute_system_caps_dtype", conf_compute_system_caps_dtype, ConfComputeSystemCaps)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemCaps instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level wrapper for the static method
 * `ConfComputeSystemCaps.from_ptr(ptr, readonly=False, owner=None)`:
 * parses up to three positional-or-keyword arguments, converts `ptr` to
 * intptr_t and `readonly` to a C truth value (defaulting to 0), defaults
 * `owner` to None, then delegates to the implementation function.
 * Returns a new reference, or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`. Since `unlikely(x)` wraps its
 * argument in `__builtin_expect(!!(x), 0)` (yielding only 0 or 1), the
 * `< 0` comparison was always false and a negative error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. The comparison now sits
 * inside the macro argument: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_14from_ptr, "ConfComputeSystemCaps.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ConfComputeSystemCaps instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative length means keyword-count retrieval failed. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10405, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10405, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10405, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10405, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 10405, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":10406
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ConfComputeSystemCaps instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 10405, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10405, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10405, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10405, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10406, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10406, __pyx_L3_error)
    } else {
      /* `readonly` defaults to False when not supplied. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 10405, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":10405
 *         return __from_data(data, "conf_compute_system_caps_dtype", conf_compute_system_caps_dtype, ConfComputeSystemCaps)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemCaps instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10414
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10415
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10415, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10415, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10414
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)
*/
  }

  /* "cuda/bindings/_nvml.pyx":10416
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10416, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10417
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10418
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
*/
    __pyx_v_obj->_ptr = ((nvmlConfComputeSystemCaps_t *)malloc((sizeof(nvmlConfComputeSystemCaps_t))));

    /* "cuda/bindings/_nvml.pyx":10419
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemCaps_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10420
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemCaps_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10420, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeSyst};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10420, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 10420, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10419
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemCaps_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":10421
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemCaps_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlConfComputeSystemCaps_t))));

    /* "cuda/bindings/_nvml.pyx":10422
 *                 raise MemoryError("Error allocating ConfComputeSystemCaps")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemCaps_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10423
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemCaps_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10417
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemCaps obj = ConfComputeSystemCaps.__new__(ConfComputeSystemCaps)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>malloc(sizeof(nvmlConfComputeSystemCaps_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":10425
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlConfComputeSystemCaps_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10426
 *         else:
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":10427
 *             obj._ptr = <nvmlConfComputeSystemCaps_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":10428
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":10429
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10405
 *         return __from_data(data, "conf_compute_system_caps_dtype", conf_compute_system_caps_dtype, ConfComputeSystemCaps)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemCaps instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python-callable wrapper for ConfComputeSystemCaps.__reduce_cython__.
 * Validates that the call carries no positional and no keyword arguments,
 * then delegates to the implementation function (…_16__reduce_cython__),
 * which unconditionally raises TypeError (the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_16__reduce_cython__, "ConfComputeSystemCaps.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  /* Non-FASTCALL builds receive args as a tuple; the count is derived below. */
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size can fail (returns -1) when size macros are not assumed safe. */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any positional
   * arguments and any keyword arguments with the standard Cython errors. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemCaps.__reduce_cython__.
 * Generated from the Cython tree fragment:
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * Always raises TypeError; ConfComputeSystemCaps wraps a raw C pointer and
 * therefore cannot be pickled. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Raise TypeError with the interned message string, then jump to the error
   * exit; this function never returns normally. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python-callable wrapper for ConfComputeSystemCaps.__setstate_cython__.
 * Unpacks exactly one argument (__pyx_state, positional or keyword), then
 * delegates to the implementation (…_18__setstate_cython__), which always
 * raises TypeError (the type is not picklable).
 *
 * Fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which evaluates to
 * 0 or 1, so that comparison was always false and a negative (error) return
 * from __Pyx_NumKwargs_FASTCALL was silently ignored.  Corrected to
 * `unlikely(__pyx_kwds_len < 0)`, matching the sibling wrappers. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_18__setstate_cython__, "ConfComputeSystemCaps.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected parenthesization: test the comparison inside unlikely(), not
     * the 0/1 result of unlikely() against 0 (which can never be negative). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect any positional args first,
       * then fill the rest from keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Any slot still empty after keyword parsing means a required argument
       * is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemCaps.__setstate_cython__.
 * Like __reduce_cython__, this always raises TypeError: the wrapped raw
 * C pointer (self._ptr) cannot be restored from pickled state.  The
 * __pyx_state argument is accepted but never read. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message string; this function never
   * returns normally. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemCaps.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10432
 * 
 * 
 * cdef _get_conf_compute_system_state_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeSystemState_t pod = nvmlConfComputeSystemState_t()
 *     return _numpy.dtype({
*/

/* cdef helper _get_conf_compute_system_state_dtype_offsets().
 * Builds and returns a NumPy structured dtype describing the in-memory layout
 * of nvmlConfComputeSystemState_t: field names ('environment', 'cc_feature',
 * 'dev_tools_mode'), formats (uint32 each), byte offsets computed via pointer
 * arithmetic on a stack-local instance, and itemsize = sizeof(struct).
 *
 * Fix: the temporary __pyx_t_1 was copied into pod without ever being
 * initialized (the codegen line for `pod = nvmlConfComputeSystemState_t()`
 * was missing), so an indeterminate object was read.  Only pod's field
 * *addresses* are used below, so the result was unaffected, but copying
 * uninitialized storage is undefined behavior; restore the
 * value-initialization. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_system_state_dtype_offsets(void) {
  nvmlConfComputeSystemState_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlConfComputeSystemState_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_conf_compute_system_state_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":10433
 * 
 * cdef _get_conf_compute_system_state_dtype_offsets():
 *     cdef nvmlConfComputeSystemState_t pod = nvmlConfComputeSystemState_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['environment', 'cc_feature', 'dev_tools_mode'],
 */
  /* Value-initialize the temporary before copying it into pod (previously the
   * copy read an indeterminate object). */
  __pyx_t_1 = nvmlConfComputeSystemState_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":10434
 * cdef _get_conf_compute_system_state_dtype_offsets():
 *     cdef nvmlConfComputeSystemState_t pod = nvmlConfComputeSystemState_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['environment', 'cc_feature', 'dev_tools_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up numpy.dtype; __pyx_t_5 holds the callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":10435
 *     cdef nvmlConfComputeSystemState_t pod = nvmlConfComputeSystemState_t()
 *     return _numpy.dtype({
 *         'names': ['environment', 'cc_feature', 'dev_tools_mode'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the 4-entry dtype spec dict; __pyx_t_4 is the dict. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_environment);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_environment);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_environment) != (0)) __PYX_ERR(0, 10435, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cc_feature);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cc_feature);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_cc_feature) != (0)) __PYX_ERR(0, 10435, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dev_tools_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dev_tools_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_dev_tools_mode) != (0)) __PYX_ERR(0, 10435, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 10435, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10436
 *     return _numpy.dtype({
 *         'names': ['environment', 'cc_feature', 'dev_tools_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 */
  /* 'formats': three numpy.uint32 scalar types, one per field. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 10436, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10436, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 10436, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 10435, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10438
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),
 */
  /* 'offsets': byte offset of each field, computed as the difference between
   * the field's address and the struct's base address. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.environment)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":10439
 *         'offsets': [
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ccFeature)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10439, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":10440
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeSystemState_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.devToolsMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10440, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":10437
 *         'names': ['environment', 'cc_feature', 'dev_tools_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10437, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 10437, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 10437, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 10437, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 10435, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":10442
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeSystemState_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': total struct size so the dtype matches the C layout exactly,
   * including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlConfComputeSystemState_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10442, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 10435, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec); the bound-method unpacking below avoids creating
   * a temporary bound-method object when possible. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10434, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10432
 * 
 * 
 * cdef _get_conf_compute_system_state_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeSystemState_t pod = nvmlConfComputeSystemState_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_conf_compute_system_state_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10459
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeSystemState_t *>calloc(1, sizeof(nvmlConfComputeSystemState_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-callable wrapper for ConfComputeSystemState.__init__.
 * tp_init-style signature (args tuple + kwds dict); rejects any positional
 * or keyword arguments, then delegates to the implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size can fail (returns -1) when size macros are not assumed safe. */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.__init__.
 * Allocates a zero-initialized nvmlConfComputeSystemState_t on the heap and
 * stores it in self._ptr, marking the instance as the owner (so __dealloc__
 * frees it), with no external owner object and read-write access.
 * Raises MemoryError if calloc fails.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":10460
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeSystemState_t *>calloc(1, sizeof(nvmlConfComputeSystemState_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeSystemState")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlConfComputeSystemState_t *)calloc(1, (sizeof(nvmlConfComputeSystemState_t))));

  /* "cuda/bindings/_nvml.pyx":10461
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeSystemState_t *>calloc(1, sizeof(nvmlConfComputeSystemState_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeSystemState")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10462
 *         self._ptr = <nvmlConfComputeSystemState_t *>calloc(1, sizeof(nvmlConfComputeSystemState_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeSystemState")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up the (possibly shadowed) MemoryError global and call it with the
     * message string; the bound-method unpacking avoids a temporary when the
     * callable is a method object. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10462, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeSyst_2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10462, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10462, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10461
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeSystemState_t *>calloc(1, sizeof(nvmlConfComputeSystemState_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeSystemState")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":10463
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeSystemState")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (incref new before
   * decref old, per standard refcounting discipline). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":10464
 *             raise MemoryError("Error allocating ConfComputeSystemState")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This instance owns the allocation; __dealloc__ will free it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":10465
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":10459
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeSystemState_t *>calloc(1, sizeof(nvmlConfComputeSystemState_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10467
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeSystemState_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-style wrapper for ConfComputeSystemState.__dealloc__: no
 * arguments, no return value; delegates to the implementation below.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
 * this only compiles because __Pyx_KwValues_VARARGS is presumably a macro
 * that discards its arguments — confirm against the Cython utility headers. */
static void __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* __dealloc__ implementation: if this wrapper owns its heap-allocated
 * nvmlConfComputeSystemState_t (allocated in __init__ / __setitem__),
 * release it.  self._ptr is cleared BEFORE free() so no stale pointer
 * remains reachable through the object afterwards.  Non-owned pointers
 * (borrowed views) are deliberately left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  nvmlConfComputeSystemState_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlConfComputeSystemState_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":10469
 *     def __dealloc__(self):
 *         cdef nvmlConfComputeSystemState_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit evaluation of `self._owned and self._ptr != NULL`. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10470
 *         cdef nvmlConfComputeSystemState_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":10471
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;  /* clear first, then free the saved copy */

    /* "cuda/bindings/_nvml.pyx":10472
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10469
 *     def __dealloc__(self):
 *         cdef nvmlConfComputeSystemState_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":10467
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeSystemState_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":10474
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeSystemState object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __repr__ implementation: builds the f-string
 * f"<{__name__}.ConfComputeSystemState object at {hex(id(self))}>"
 * by formatting the module __name__ global, hex(id(self)), and joining
 * them with the constant literal pieces via __Pyx_PyUnicode_Join.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":10475
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ConfComputeSystemState object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__); t_1 = hex(id(self)) as unicode */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* join: "<" + __name__ + ".ConfComputeSystemState object at " + hex + ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ConfComputeSystemState_object_a;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 34 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10474
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeSystemState object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10477
 *         return f"<{__name__}.ConfComputeSystemState object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property getter: returns the raw address of the wrapped
 * nvmlConfComputeSystemState_t as a Python int (may be 0 if _ptr is
 * NULL).  New reference on success, NULL + exception on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10480
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t is converted via Py_ssize_t (same width on this target). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10477
 *         return f"<{__name__}.ConfComputeSystemState object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10482
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22ConfComputeSystemState__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":10483
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10482
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10485
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __int__ implementation: identical to the `ptr` getter — returns the
 * raw pointer address as a Python int.  New reference on success, NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":10486
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10486, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10485
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10488
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeSystemState other_
 *         if not isinstance(other, ConfComputeSystemState):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __eq__ implementation: a non-ConfComputeSystemState operand compares
 * unequal (returns False rather than NotImplemented); otherwise equality
 * is a byte-wise memcmp of the two underlying C structs.
 * NOTE(review): a NULL _ptr on either side would make the memcmp
 * undefined behavior — presumably both are always allocated by
 * __init__/__setitem__; confirm in the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":10490
 *     def __eq__(self, other):
 *         cdef ConfComputeSystemState other_
 *         if not isinstance(other, ConfComputeSystemState):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10491
 *         cdef ConfComputeSystemState other_
 *         if not isinstance(other, ConfComputeSystemState):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemState_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10490
 *     def __eq__(self, other):
 *         cdef ConfComputeSystemState other_
 *         if not isinstance(other, ConfComputeSystemState):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":10492
 *         if not isinstance(other, ConfComputeSystemState):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemState_t)) == 0)
 * 
 */
  /* Typed assignment: re-checks the type (None would also pass here). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState))))) __PYX_ERR(0, 10492, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":10493
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemState_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlConfComputeSystemState_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10493, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10488
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeSystemState other_
 *         if not isinstance(other, ConfComputeSystemState):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10495
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemState_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setitem__ implementation.
 * obj[0] = <numpy.ndarray>  : allocates a fresh nvmlConfComputeSystemState_t,
 *   copies sizeof(struct) bytes from the array's data pointer into it, and
 *   marks the copy owned; _readonly mirrors `not val.flags.writeable`.
 * any other key            : falls back to setattr(self, key, val).
 * Returns 0 on success, -1 with a Python exception on failure.
 *
 * FIX(review): the generated original assigned the fresh malloc() into
 * self._ptr without releasing a previously owned allocation (made by
 * __init__'s calloc), leaking one struct per ndarray assignment.  The
 * owned pointer is now freed first.  The root fix belongs in
 * cuda/bindings/_nvml.pyx so regeneration preserves it. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":10496
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if self._ptr == NULL:
 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10496, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 10496, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10497
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 */
    /* FIX: release a previously owned allocation before replacing it. */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlConfComputeSystemState_t *)malloc((sizeof(nvmlConfComputeSystemState_t))));

    /* "cuda/bindings/_nvml.pyx":10498
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemState_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10499
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemState")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemState_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10499, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeSyst_2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10499, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 10499, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10498
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemState_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10500
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemState_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int;
       copies exactly sizeof(struct) bytes — caller must ensure the
       array is at least that large and laid out compatibly. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10500, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10500, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10500, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlConfComputeSystemState_t))));

    /* "cuda/bindings/_nvml.pyx":10501
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemState_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10502
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeSystemState_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10503
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10503, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10503, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 10503, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":10496
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":10505
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 10505, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":10495
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeSystemState_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10507
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def environment(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `environment` property getter: returns the struct's unsigned-int
 * `environment` field as a Python int.  Dereferences self._ptr without a
 * NULL check — presumably always allocated by __init__; confirm in the
 * .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10510
 *     def environment(self):
 *         """int: """
 *         return self._ptr[0].environment             # <<<<<<<<<<<<<<
 * 
 *     @environment.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).environment); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10507
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def environment(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.environment.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10512
 *         return self._ptr[0].environment
 * 
 *     @environment.setter             # <<<<<<<<<<<<<<
 *     def environment(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `environment` property setter: raises ValueError when the instance is
 * read-only, otherwise converts val to unsigned int (OverflowError /
 * TypeError propagate from the conversion) and stores it in the struct.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10514
 *     @environment.setter
 *     def environment(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].environment = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10515
 *     def environment(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemState instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].environment = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeSystemState_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10515, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10515, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10514
 *     @environment.setter
 *     def environment(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].environment = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10516
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].environment = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10516, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).environment = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10512
 *         return self._ptr[0].environment
 * 
 *     @environment.setter             # <<<<<<<<<<<<<<
 *     def environment(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.environment.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10518
 *         self._ptr[0].environment = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cc_feature(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated getter entry point for ConfComputeSystemState.cc_feature:
 * casts the instance pointer to the concrete struct type and delegates to
 * the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this compiles only because __Pyx_KwValues_VARARGS discards its arguments
   * textually at macro expansion — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.cc_feature.__get__:
 * reads self._ptr[0].ccFeature (an unsigned int field of the wrapped struct)
 * and returns it as a new Python int, or NULL with an exception set on
 * conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10521
 *     def cc_feature(self):
 *         """int: """
 *         return self._ptr[0].ccFeature             # <<<<<<<<<<<<<<
 * 
 *     @cc_feature.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; owns the new reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).ccFeature); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10518
 *         self._ptr[0].environment = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cc_feature(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.cc_feature.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10523
 *         return self._ptr[0].ccFeature
 * 
 *     @cc_feature.setter             # <<<<<<<<<<<<<<
 *     def cc_feature(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated setter entry point for ConfComputeSystemState.cc_feature:
 * casts self to the concrete struct type and delegates to the implementation.
 * Returns 0 on success, -1 on error (standard descriptor __set__ protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope here; compiles
   * because the macro discards its arguments — confirm against macro def. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.cc_feature.__set__:
 * raises ValueError when the instance is flagged read-only, otherwise
 * converts `val` to a C unsigned int and stores it in _ptr[0].ccFeature.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10525
 *     @cc_feature.setter
 *     def cc_feature(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].ccFeature = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10526
 *     def cc_feature(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemState instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ccFeature = val
 * 
 */
    /* Build and raise ValueError("This ConfComputeSystemState instance is
     * read-only") via the vectorcall fast path; __pyx_t_3 == 1 offsets past
     * the (unused) bound-self slot in __pyx_callargs. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeSystemState_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10526, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10526, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10525
 *     @cc_feature.setter
 *     def cc_feature(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].ccFeature = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10527
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].ccFeature = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python value to unsigned int; (unsigned int)-1 doubles as the
   * error sentinel, so PyErr_Occurred() disambiguates a genuine UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10527, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ccFeature = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10523
 *         return self._ptr[0].ccFeature
 * 
 *     @cc_feature.setter             # <<<<<<<<<<<<<<
 *     def cc_feature(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.cc_feature.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10529
 *         self._ptr[0].ccFeature = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated getter entry point for ConfComputeSystemState.dev_tools_mode:
 * casts the instance pointer and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope here; compiles
   * because the macro discards its arguments — confirm against macro def. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.dev_tools_mode.__get__:
 * reads self._ptr[0].devToolsMode (unsigned int) and returns it as a new
 * Python int, or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10532
 *     def dev_tools_mode(self):
 *         """int: """
 *         return self._ptr[0].devToolsMode             # <<<<<<<<<<<<<<
 * 
 *     @dev_tools_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; owns the new reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).devToolsMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10529
 *         self._ptr[0].ccFeature = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.dev_tools_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10534
 *         return self._ptr[0].devToolsMode
 * 
 *     @dev_tools_mode.setter             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated setter entry point for ConfComputeSystemState.dev_tools_mode:
 * casts self and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not in scope here; compiles
   * because the macro discards its arguments — confirm against macro def. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.dev_tools_mode.__set__:
 * raises ValueError when the instance is flagged read-only, otherwise
 * converts `val` to a C unsigned int and stores it in _ptr[0].devToolsMode.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10536
 *     @dev_tools_mode.setter
 *     def dev_tools_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].devToolsMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10537
 *     def dev_tools_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemState instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].devToolsMode = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path; __pyx_t_3 == 1
     * offsets past the (unused) bound-self slot in __pyx_callargs. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeSystemState_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10537, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10537, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10536
 *     @dev_tools_mode.setter
 *     def dev_tools_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].devToolsMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10538
 *         if self._readonly:
 *             raise ValueError("This ConfComputeSystemState instance is read-only")
 *         self._ptr[0].devToolsMode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python value to unsigned int; (unsigned int)-1 doubles as the
   * error sentinel, so PyErr_Occurred() disambiguates a genuine UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10538, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).devToolsMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10534
 *         return self._ptr[0].devToolsMode
 * 
 *     @dev_tools_mode.setter             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.dev_tools_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10540
 *         self._ptr[0].devToolsMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeSystemState instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper */
/* Static-method entry point for ConfComputeSystemState.from_data(data):
 * unpacks the single required argument "data" from the fastcall/vectorcall
 * (or legacy tuple) calling convention and delegates to the implementation.
 *
 * BUGFIX: the keyword-count error check previously read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * which, after unlikely(x) expands to __builtin_expect(!!(x), 0), compares a
 * 0/1 value with < 0 and is therefore always false — a negative (error)
 * return from __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis
 * is now placed so the whole comparison sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_12from_data, "ConfComputeSystemState.from_data(data)\n\nCreate an ConfComputeSystemState instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `conf_compute_system_state_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); previously `unlikely(len) < 0`
     * was always false and a failed keyword-count query went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10540, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10540, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 10540, __pyx_L3_error)
      /* Verify the required positional argument was supplied by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 10540, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10540, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 10540, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.from_data(data):
 * looks up the module-global `conf_compute_system_state_dtype` and forwards
 * to the shared __from_data helper together with the dtype name and the
 * ConfComputeSystemState type object. Returns the new instance, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":10547
 *             data (_numpy.ndarray): a single-element array of dtype `conf_compute_system_state_dtype` holding the data.
 *         """
 *         return __from_data(data, "conf_compute_system_state_dtype", conf_compute_system_state_dtype, ConfComputeSystemState)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from module globals (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_conf_compute_system_state_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_conf_compute_system_state_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10540
 *         self._ptr[0].devToolsMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeSystemState instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10549
 *         return __from_data(data, "conf_compute_system_state_dtype", conf_compute_system_state_dtype, ConfComputeSystemState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemState instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper */
/* Static-method entry point for
 * ConfComputeSystemState.from_ptr(intptr_t ptr, bint readonly=False, owner=None):
 * unpacks up to three arguments (1 required) from the fastcall/vectorcall
 * (or legacy tuple) calling convention, applies defaults, converts `ptr` to
 * intptr_t and `readonly` to a C int, then delegates to the implementation.
 *
 * BUGFIX: the keyword-count error check previously read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * which, after unlikely(x) expands to __builtin_expect(!!(x), 0), compares a
 * 0/1 value with < 0 and is therefore always false — a negative (error)
 * return from __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis
 * is now placed so the whole comparison sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14from_ptr, "ConfComputeSystemState.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ConfComputeSystemState instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); previously `unlikely(len) < 0`
     * was always false and a failed keyword-count query went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10549, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 10549, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":10550
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ConfComputeSystemState instance wrapping the given pointer.
 * 
 */
      /* Apply the default for `owner` and verify the required `ptr` arg. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 10549, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10549, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts via PyLong_AsSsize_t into an intptr_t — assumes
     * Py_ssize_t and intptr_t agree in size/representation on supported
     * platforms; confirm against the generator's converter choice. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10550, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10550, __pyx_L3_error)
    } else {
      /* Default: readonly=False. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 10549, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":10549
 *         return __from_data(data, "conf_compute_system_state_dtype", conf_compute_system_state_dtype, ConfComputeSystemState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemState instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.from_ptr(ptr, readonly=False, owner=None).
 *
 * Two ownership modes, selected by `owner`:
 *   - owner is None: malloc a private nvmlConfComputeSystemState_t, memcpy the
 *     caller's data into it, and mark the object as owning (`_owned = 1`) so
 *     dealloc (elsewhere in this file) can free it.
 *   - owner is not None: alias the caller's pointer directly (`_owned = 0`) and
 *     hold a reference to `owner`, which is expected to keep the memory alive.
 * A ptr of 0 raises ValueError; a failed malloc raises MemoryError.
 *
 * NOTE(review): machine-generated by Cython 3.2.2 from _nvml.pyx:10549 --
 * do not hand-edit; regenerate from the .pyx instead. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10558
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)
 */
  /* Guard: a null pointer cannot be wrapped (and would make the memcpy below UB). */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10559
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10559, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10559, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10558
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)
 */
  }

  /* "cuda/bindings/_nvml.pyx":10560
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 */
  /* Allocate the wrapper object via tp_new directly, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemState(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10560, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10561
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10562
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 */
    /* Owning path: deep-copy the caller's struct into freshly malloc'ed storage. */
    __pyx_v_obj->_ptr = ((nvmlConfComputeSystemState_t *)malloc((sizeof(nvmlConfComputeSystemState_t))));

    /* "cuda/bindings/_nvml.pyx":10563
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemState_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10564
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemState")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemState_t))
 *             obj._owner = None
 */
      /* Look up MemoryError as a module global (so it is overridable),
       * then raise it with the allocation-failure message. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10564, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeSyst_2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10564, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 10564, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10563
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemState_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10565
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemState_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlConfComputeSystemState_t))));

    /* "cuda/bindings/_nvml.pyx":10566
 *                 raise MemoryError("Error allocating ConfComputeSystemState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemState_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    /* obj._owner = None (swap old reference for Py_None with refnanny bookkeeping). */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10567
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeSystemState_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10561
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeSystemState obj = ConfComputeSystemState.__new__(ConfComputeSystemState)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeSystemState_t *>malloc(sizeof(nvmlConfComputeSystemState_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":10569
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Non-owning path: alias the caller's pointer; the reference to `owner`
   * is what keeps the underlying memory alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlConfComputeSystemState_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10570
 *         else:
 *             obj._ptr = <nvmlConfComputeSystemState_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":10571
 *             obj._ptr = <nvmlConfComputeSystemState_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":10572
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":10573
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10549
 *         return __from_data(data, "conf_compute_system_state_dtype", conf_compute_system_state_dtype, ConfComputeSystemState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemState instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release temporaries, record a traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_16__reduce_cython__, "ConfComputeSystemState.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.__reduce_cython__.
 * Unconditionally raises TypeError: instances hold a raw C pointer (_ptr)
 * and therefore cannot be pickled. Generated by Cython for cdef classes
 * without explicit pickle support. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  /* Always reached: record a traceback frame for the TypeError and return NULL. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_18__setstate_cython__, "ConfComputeSystemState.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeSystemState.__setstate_cython__.
 * Unconditionally raises TypeError: the raw C pointer held in _ptr cannot be
 * restored from a pickled state. Counterpart to __reduce_cython__ above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeSystemState_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  /* Always reached: record a traceback frame for the TypeError and return NULL. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeSystemState.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10576
 * 
 * 
 * cdef _get_system_conf_compute_settings_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlSystemConfComputeSettings_v1_t pod = nvmlSystemConfComputeSettings_v1_t()
 *     return _numpy.dtype({
*/

/* cdef helper _get_system_conf_compute_settings_v1_dtype_offsets().
 * Builds and returns a numpy structured dtype describing the in-memory layout
 * of nvmlSystemConfComputeSettings_v1_t: field names, uint32 formats, byte
 * offsets computed from a stack `pod` instance via address arithmetic, and
 * the struct's total size as 'itemsize'.
 * NOTE(review): machine-generated by Cython 3.2.2 from _nvml.pyx:10576. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_system_conf_compute_settings_v1_dtype_offsets(void) {
  nvmlSystemConfComputeSettings_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlSystemConfComputeSettings_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_system_conf_compute_settings_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":10577
 * 
 * cdef _get_system_conf_compute_settings_v1_dtype_offsets():
 *     cdef nvmlSystemConfComputeSettings_v1_t pod = nvmlSystemConfComputeSettings_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'environment', 'cc_feature', 'dev_tools_mode', 'multi_gpu_mode'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy -- this appears
   * to be Cython's emission for a default-constructed trivial C++ struct.
   * `pod` is only used for address/offset arithmetic below, so its (indeterminate)
   * contents are never read; confirm against the Cython codegen if this is
   * ever flagged by a sanitizer. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":10578
 * cdef _get_system_conf_compute_settings_v1_dtype_offsets():
 *     cdef nvmlSystemConfComputeSettings_v1_t pod = nvmlSystemConfComputeSettings_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'environment', 'cc_feature', 'dev_tools_mode', 'multi_gpu_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Look up numpy.dtype (held in __pyx_t_5) to call once the spec dict is built. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":10579
 *     cdef nvmlSystemConfComputeSettings_v1_t pod = nvmlSystemConfComputeSettings_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'environment', 'cc_feature', 'dev_tools_mode', 'multi_gpu_mode'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Spec dict (__pyx_t_4) with keys: names, formats, offsets, itemsize.
   * First entry: the five field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 10579, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_environment);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_environment);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_environment) != (0)) __PYX_ERR(0, 10579, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cc_feature);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cc_feature);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_cc_feature) != (0)) __PYX_ERR(0, 10579, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dev_tools_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dev_tools_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_dev_tools_mode) != (0)) __PYX_ERR(0, 10579, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_multi_gpu_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_multi_gpu_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_multi_gpu_mode) != (0)) __PYX_ERR(0, 10579, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 10579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10580
 *     return _numpy.dtype({
 *         'names': ['version', 'environment', 'cc_feature', 'dev_tools_mode', 'multi_gpu_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': numpy.uint32 looked up five times, one per field. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 10580, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10580, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 10580, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 10580, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 10580, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 10579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10582
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 */
  /* 'offsets': each field's byte offset, computed as the difference between
   * the field address and the struct base address (an offsetof equivalent). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":10583
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.environment)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 10583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":10584
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiGpuMode)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ccFeature)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10584, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":10585
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.multiGpuMode)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.devToolsMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":10586
 *             (<intptr_t>&(pod.ccFeature)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.devToolsMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiGpuMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlSystemConfComputeSettings_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.multiGpuMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10586, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":10581
 *         'names': ['version', 'environment', 'cc_feature', 'dev_tools_mode', 'multi_gpu_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.environment)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 10581, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 10581, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 10581, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 10581, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 10581, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 10579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":10588
 *             (<intptr_t>&(pod.multiGpuMode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlSystemConfComputeSettings_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': full struct size, so the dtype includes any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlSystemConfComputeSettings_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10588, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 10579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(spec_dict) via vectorcall and return the result. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10576
 * 
 * 
 * cdef _get_system_conf_compute_settings_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlSystemConfComputeSettings_v1_t pod = nvmlSystemConfComputeSettings_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release all temporaries, record traceback, return 0 (NULL). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_system_conf_compute_settings_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10605
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlSystemConfComputeSettings_v1_t *>calloc(1, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for SystemConfComputeSettings_v1.__init__ (tp_init slot).
 * NOTE(review): Cython-generated code — fix cuda/bindings/_nvml.pyx and
 * regenerate; do not hand-edit this file.
 * Rejects all positional and keyword arguments, then forwards to the
 * argument-free implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the fallback branch checks the -1 error return. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) takes no arguments: raise TypeError for any positional
   * or keyword argument instead of silently ignoring them. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.__init__:
 * zero-allocates the owned nvmlSystemConfComputeSettings_v1_t POD and
 * initializes the ownership bookkeeping (_owner=None, _owned=True,
 * _readonly=False). Raises MemoryError if calloc fails.
 * NOTE(review): Cython-generated — regenerate from _nvml.pyx rather than
 * editing here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":10606
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlSystemConfComputeSettings_v1_t *>calloc(1, sizeof(nvmlSystemConfComputeSettings_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 */
  /* calloc zero-fills the struct, so all NVML fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlSystemConfComputeSettings_v1_t *)calloc(1, (sizeof(nvmlSystemConfComputeSettings_v1_t))));

  /* "cuda/bindings/_nvml.pyx":10607
 *     def __init__(self):
 *         self._ptr = <nvmlSystemConfComputeSettings_v1_t *>calloc(1, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10608
 *         self._ptr = <nvmlSystemConfComputeSettings_v1_t *>calloc(1, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating SystemConfComputeSettings_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError as a module global and call it via the
     * vectorcall helper (unpacking a bound method if needed), then raise. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10608, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_SystemConfCompu};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10608, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10608, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10607
 *     def __init__(self):
 *         self._ptr = <nvmlSystemConfComputeSettings_v1_t *>calloc(1, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":10609
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (refcount-correct swap). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":10610
 *             raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True means __dealloc__ will free() the buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":10611
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":10605
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlSystemConfComputeSettings_v1_t *>calloc(1, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10613
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlSystemConfComputeSettings_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for SystemConfComputeSettings_v1.__dealloc__ (tp_dealloc
 * helper). NOTE(review): Cython-generated — do not hand-edit.
 * The __Pyx_KwValues_VARARGS use below names __pyx_args/__pyx_nargs, which
 * are not parameters of this wrapper; presumably the macro expands without
 * evaluating its arguments (e.g. to NULL) — confirm against the Cython
 * utility code if this file is ever compiled with a different macro set. */
static void __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__: frees the struct buffer only when this
 * object owns it (_owned) and the pointer is non-NULL. The pointer is
 * nulled before free() so the field never dangles.
 * NOTE(review): Cython-generated — regenerate from _nvml.pyx. */
static void __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  nvmlSystemConfComputeSettings_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlSystemConfComputeSettings_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":10615
 *     def __dealloc__(self):
 *         cdef nvmlSystemConfComputeSettings_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit "and": only check _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10616
 *         cdef nvmlSystemConfComputeSettings_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":10617
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the field before freeing to guard against double-free. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":10618
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10615
 *     def __dealloc__(self):
 *         cdef nvmlSystemConfComputeSettings_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":10613
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlSystemConfComputeSettings_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":10620
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.SystemConfComputeSettings_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for SystemConfComputeSettings_v1.__repr__ (tp_repr slot).
 * NOTE(review): Cython-generated — do not hand-edit. Forwards directly to
 * the implementation; __pyx_args/__pyx_nargs in the macro call are
 * presumably never evaluated (macro expands them away) — confirm against
 * the Cython utility definitions. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__: builds the f-string
 * "<{__name__}.SystemConfComputeSettings_v1 object at {hex(id(self))}>"
 * by formatting the module __name__, calling hex(id(self)), and joining
 * five unicode pieces with a pre-computed length/max-char hint.
 * NOTE(review): Cython-generated — regenerate from _nvml.pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":10621
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.SystemConfComputeSettings_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ global as str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) as a unicode string. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal and interpolated segments in source order. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_SystemConfComputeSettings_v1_ob;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 40 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10620
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.SystemConfComputeSettings_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10623
 *         return f"<{__name__}.SystemConfComputeSettings_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper for the `ptr` property getter.
 * NOTE(review): Cython-generated — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property getter: returns the raw struct pointer address as a
 * Python int (via intptr_t). NOTE(review): Cython-generated; uses
 * PyLong_FromSsize_t, which assumes intptr_t fits in Py_ssize_t — true on
 * the flat-address-space platforms this module targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10626
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10626, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10623
 *         return f"<{__name__}.SystemConfComputeSettings_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10628
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef-level fast path: returns the struct pointer as intptr_t with no
 * Python object creation. C-only counterpart of the `ptr` property.
 * NOTE(review): Cython-generated — do not hand-edit. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":10629
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10628
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10631
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper for __int__ (nb_int slot).
 * NOTE(review): Cython-generated — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__: int(obj) yields the struct pointer address,
 * identical in value to the `ptr` property.
 * NOTE(review): Cython-generated — do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":10632
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10631
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10634
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef SystemConfComputeSettings_v1 other_
 *         if not isinstance(other, SystemConfComputeSettings_v1):
*/

/* Python wrapper */
/* Python wrapper for __eq__ (rich-compare entry).
 * NOTE(review): Cython-generated — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__: returns False for non-SystemConfComputeSettings_v1
 * operands, otherwise compares the two underlying structs byte-for-byte
 * with memcmp. NOTE(review): Cython-generated — do not hand-edit.
 * NOTE(review): memcmp dereferences both _ptr fields without a NULL check;
 * presumably construction guarantees non-NULL — confirm in _nvml.pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":10636
 *     def __eq__(self, other):
 *         cdef SystemConfComputeSettings_v1 other_
 *         if not isinstance(other, SystemConfComputeSettings_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10637
 *         cdef SystemConfComputeSettings_v1 other_
 *         if not isinstance(other, SystemConfComputeSettings_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlSystemConfComputeSettings_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10636
 *     def __eq__(self, other):
 *         cdef SystemConfComputeSettings_v1 other_
 *         if not isinstance(other, SystemConfComputeSettings_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":10638
 *         if not isinstance(other, SystemConfComputeSettings_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlSystemConfComputeSettings_v1_t)) == 0)
 * 
 */
  /* Typed downcast of `other`; the isinstance check above makes the
   * TypeTest redundant here but the generated assignment re-verifies. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1))))) __PYX_ERR(0, 10638, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":10639
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlSystemConfComputeSettings_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlSystemConfComputeSettings_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10639, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10634
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef SystemConfComputeSettings_v1 other_
 *         if not isinstance(other, SystemConfComputeSettings_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10641
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlSystemConfComputeSettings_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
*/

/* Python wrapper */
/* Python wrapper for __setitem__ (mp_ass_subscript entry, set path).
 * NOTE(review): Cython-generated — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__:
 *   obj[0] = ndarray  -> mallocs a fresh struct, memcpys the array's data
 *                        into it, and resets ownership (_owner=None,
 *                        _owned=True, _readonly=not writeable);
 *   anything else     -> delegates to setattr(self, key, val).
 * NOTE(review): Cython-generated — fix _nvml.pyx and regenerate.
 * NOTE(review): on the key==0 path the previous self._ptr is overwritten
 * without being freed — if an owned buffer was already allocated this
 * looks like a leak in the generator template; confirm against _nvml.pyx.
 * The memcpy also assumes val holds at least
 * sizeof(nvmlSystemConfComputeSettings_v1_t) contiguous bytes — not
 * validated here; confirm callers guarantee it. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":10642
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit "and": the isinstance(val, numpy.ndarray) check only
   * runs when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10642, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 10642, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10643
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 */
    __pyx_v_self->_ptr = ((nvmlSystemConfComputeSettings_v1_t *)malloc((sizeof(nvmlSystemConfComputeSettings_v1_t))));

    /* "cuda/bindings/_nvml.pyx":10644
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlSystemConfComputeSettings_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10645
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             self._owner = None
 */
      /* Same MemoryError construction/raise sequence as __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10645, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_SystemConfCompu};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10645, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 10645, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10644
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlSystemConfComputeSettings_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10646
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlSystemConfComputeSettings_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int;
     * convert it back to a pointer and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10646, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10646, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10646, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlSystemConfComputeSettings_v1_t))));

    /* "cuda/bindings/_nvml.pyx":10647
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10648
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10649
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability on the copied struct. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10649, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10649, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 10649, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":10642
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":10651
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 10651, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":10641
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlSystemConfComputeSettings_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10653
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `version` property getter: performs
 * refnanny bookkeeping and delegates to the typed implementation after
 * casting `self` to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * wrapper; __Pyx_KwValues_VARARGS is presumably a macro that does not
   * evaluate its arguments -- confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.version.__get__:
 * converts the C `unsigned int` field self._ptr[0].version into a new
 * Python int reference. Returns NULL (with a traceback entry appended)
 * if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10656
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the struct field as a Python int; jump to the error path on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10656, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10653
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10658
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `version` property setter: performs
 * refnanny bookkeeping and delegates to the typed implementation after
 * casting `self` to the extension-type struct. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.version.__set__:
 * raises ValueError when the instance is read-only (self._readonly),
 * otherwise converts `val` to `unsigned int` and stores it into
 * self._ptr[0].version. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10660
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10661
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError("This SystemConfComputeSettings_v1 instance
     * is read-only") via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_SystemConfComputeSettings_v};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10661, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10661, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10660
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10662
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 doubles as the error
   * sentinel, so PyErr_Occurred() disambiguates a genuine -1 value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10662, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10658
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10664
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def environment(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `environment` property getter: refnanny
 * bookkeeping plus delegation to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.environment.__get__:
 * boxes the C `unsigned int` field self._ptr[0].environment as a Python
 * int. Returns NULL with a traceback entry on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10667
 *     def environment(self):
 *         """int: """
 *         return self._ptr[0].environment             # <<<<<<<<<<<<<<
 * 
 *     @environment.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the struct field as a Python int; jump to the error path on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).environment); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10664
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def environment(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.environment.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10669
 *         return self._ptr[0].environment
 * 
 *     @environment.setter             # <<<<<<<<<<<<<<
 *     def environment(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `environment` property setter: refnanny
 * bookkeeping plus delegation to the typed implementation. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.environment.__set__:
 * raises ValueError when self._readonly is set, otherwise converts `val`
 * to `unsigned int` and stores it into self._ptr[0].environment.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10671
 *     @environment.setter
 *     def environment(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].environment = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10672
 *     def environment(self, val):
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].environment = val
 * 
 */
    /* Build and raise the read-only ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_SystemConfComputeSettings_v};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10672, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10672, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10671
 *     @environment.setter
 *     def environment(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].environment = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10673
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].environment = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val`; (unsigned int)-1 is the error sentinel, disambiguated by
   * PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10673, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).environment = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10669
 *         return self._ptr[0].environment
 * 
 *     @environment.setter             # <<<<<<<<<<<<<<
 *     def environment(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.environment.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10675
 *         self._ptr[0].environment = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cc_feature(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `cc_feature` property getter: refnanny
 * bookkeeping plus delegation to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.cc_feature.__get__:
 * boxes the C `unsigned int` field self._ptr[0].ccFeature as a Python
 * int. Returns NULL with a traceback entry on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10678
 *     def cc_feature(self):
 *         """int: """
 *         return self._ptr[0].ccFeature             # <<<<<<<<<<<<<<
 * 
 *     @cc_feature.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the struct field as a Python int; jump to the error path on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).ccFeature); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10675
 *         self._ptr[0].environment = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cc_feature(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.cc_feature.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10680
 *         return self._ptr[0].ccFeature
 * 
 *     @cc_feature.setter             # <<<<<<<<<<<<<<
 *     def cc_feature(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `cc_feature` property setter: refnanny
 * bookkeeping plus delegation to the typed implementation. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.cc_feature.__set__:
 * raises ValueError when self._readonly is set, otherwise converts `val`
 * to `unsigned int` and stores it into self._ptr[0].ccFeature.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10682
 *     @cc_feature.setter
 *     def cc_feature(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].ccFeature = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10683
 *     def cc_feature(self, val):
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ccFeature = val
 * 
 */
    /* Build and raise the read-only ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_SystemConfComputeSettings_v};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10683, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10683, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10682
 *     @cc_feature.setter
 *     def cc_feature(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].ccFeature = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10684
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].ccFeature = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val`; (unsigned int)-1 is the error sentinel, disambiguated by
   * PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10684, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ccFeature = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10680
 *         return self._ptr[0].ccFeature
 * 
 *     @cc_feature.setter             # <<<<<<<<<<<<<<
 *     def cc_feature(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.cc_feature.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10686
 *         self._ptr[0].ccFeature = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `dev_tools_mode` property getter: refnanny
 * bookkeeping plus delegation to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.dev_tools_mode.__get__:
 * boxes the C `unsigned int` field self._ptr[0].devToolsMode as a Python
 * int. Returns NULL with a traceback entry on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10689
 *     def dev_tools_mode(self):
 *         """int: """
 *         return self._ptr[0].devToolsMode             # <<<<<<<<<<<<<<
 * 
 *     @dev_tools_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the struct field as a Python int; jump to the error path on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).devToolsMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10686
 *         self._ptr[0].ccFeature = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.dev_tools_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10691
 *         return self._ptr[0].devToolsMode
 * 
 *     @dev_tools_mode.setter             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `dev_tools_mode` property setter: refnanny
 * bookkeeping plus delegation to the typed implementation. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.dev_tools_mode.__set__:
 * raises ValueError when self._readonly is set, otherwise converts `val`
 * to `unsigned int` and stores it into self._ptr[0].devToolsMode.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10693
 *     @dev_tools_mode.setter
 *     def dev_tools_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].devToolsMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10694
 *     def dev_tools_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].devToolsMode = val
 * 
 */
    /* Build and raise the read-only ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_SystemConfComputeSettings_v};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10694, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10694, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10693
 *     @dev_tools_mode.setter
 *     def dev_tools_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].devToolsMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10695
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].devToolsMode = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val`; (unsigned int)-1 is the error sentinel, disambiguated by
   * PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10695, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).devToolsMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10691
 *         return self._ptr[0].devToolsMode
 * 
 *     @dev_tools_mode.setter             # <<<<<<<<<<<<<<
 *     def dev_tools_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.dev_tools_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10697
 *         self._ptr[0].devToolsMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multi_gpu_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `multi_gpu_mode` property getter: refnanny
 * bookkeeping plus delegation to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.multi_gpu_mode.__get__:
 * boxes the C `unsigned int` field self._ptr[0].multiGpuMode as a Python
 * int. Returns NULL with a traceback entry on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10700
 *     def multi_gpu_mode(self):
 *         """int: """
 *         return self._ptr[0].multiGpuMode             # <<<<<<<<<<<<<<
 * 
 *     @multi_gpu_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the struct field as a Python int; jump to the error path on failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).multiGpuMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10697
 *         self._ptr[0].devToolsMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multi_gpu_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.multi_gpu_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10702
 *         return self._ptr[0].multiGpuMode
 * 
 *     @multi_gpu_mode.setter             # <<<<<<<<<<<<<<
 *     def multi_gpu_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `multi_gpu_mode` property setter: refnanny
 * bookkeeping plus delegation to the typed implementation. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here;
   * __Pyx_KwValues_VARARGS presumably discards its arguments -- confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.multi_gpu_mode.__set__:
 * raises ValueError when self._readonly is set, otherwise converts `val`
 * to `unsigned int` and stores it into self._ptr[0].multiGpuMode.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10704
 *     @multi_gpu_mode.setter
 *     def multi_gpu_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].multiGpuMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10705
 *     def multi_gpu_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].multiGpuMode = val
 * 
 */
    /* Build and raise the read-only ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_SystemConfComputeSettings_v};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10705, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10705, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10704
 *     @multi_gpu_mode.setter
 *     def multi_gpu_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].multiGpuMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10706
 *         if self._readonly:
 *             raise ValueError("This SystemConfComputeSettings_v1 instance is read-only")
 *         self._ptr[0].multiGpuMode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox `val`; (unsigned int)-1 is the error sentinel, disambiguated by
   * PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10706, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).multiGpuMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10702
 *         return self._ptr[0].multiGpuMode
 * 
 *     @multi_gpu_mode.setter             # <<<<<<<<<<<<<<
 *     def multi_gpu_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.multi_gpu_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10708
 *         self._ptr[0].multiGpuMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for SystemConfComputeSettings_v1.from_data(data).
 * Unpacks exactly one positional-or-keyword argument ("data") from the
 * fastcall arg vector / kwnames, then delegates to the implementation
 * function. Owns temporary references in values[] and releases them on
 * every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_12from_data, "SystemConfComputeSettings_v1.from_data(data)\n\nCreate an SystemConfComputeSettings_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `system_conf_compute_settings_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must live inside the macro argument.
     * `unlikely(x) < 0` expands to `__builtin_expect(!!(x), 0) < 0`, which
     * yields 0 or 1 and is therefore never negative — the error return of
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10708, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10708, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 10708, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 10708, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10708, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 10708, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop any argument references collected so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.from_data(data).
 * Looks up the module-global dtype object and delegates to the shared
 * __from_data helper together with the wrapping extension type; returns
 * a new reference to the resulting instance, or NULL with an exception
 * set (traceback annotated with the qualified name). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":10715
 *             data (_numpy.ndarray): a single-element array of dtype `system_conf_compute_settings_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "system_conf_compute_settings_v1_dtype", system_conf_compute_settings_v1_dtype, SystemConfComputeSettings_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* fetch the dtype object from module globals (may raise NameError) */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_system_conf_compute_settings_v1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10715, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_system_conf_compute_settings_v1, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10715, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10708
 *         self._ptr[0].multiGpuMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10717
 *         return __from_data(data, "system_conf_compute_settings_v1_dtype", system_conf_compute_settings_v1_dtype, SystemConfComputeSettings_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for
 * SystemConfComputeSettings_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks 1..3 positional-or-keyword arguments, converts `ptr` to
 * intptr_t and `readonly` to a C truth value, defaults `owner` to None,
 * then delegates to the implementation. Temporary argument references in
 * values[] are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14from_ptr, "SystemConfComputeSettings_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an SystemConfComputeSettings_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must live inside the macro argument.
     * `unlikely(x) < 0` expands to `__builtin_expect(!!(x), 0) < 0`, which
     * yields 0 or 1 and is therefore never negative — the error return of
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 10717, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 10717, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":10718
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 10717, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10717, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10718, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10718, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 10717, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop any argument references collected so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":10717
 *         return __from_data(data, "system_conf_compute_settings_v1_dtype", system_conf_compute_settings_v1_dtype, SystemConfComputeSettings_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer, allocates a fresh instance, then either:
 *   - owner is None: malloc a private nvmlSystemConfComputeSettings_v1_t,
 *     memcpy the pointee into it and mark the instance as owning it
 *     (_owned = True, freed presumably in the type's dealloc — defined
 *     outside this chunk), or
 *   - owner given: wrap the caller's pointer directly and hold a reference
 *     to `owner` to keep the backing memory alive (_owned = False).
 * Finally records the readonly flag and returns the new instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10726
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10727
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10727, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10727, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10726
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":10728
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10728, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10729
 *             raise ValueError("ptr must not be null (0)")
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10730
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlSystemConfComputeSettings_v1_t *)malloc((sizeof(nvmlSystemConfComputeSettings_v1_t))));

    /* "cuda/bindings/_nvml.pyx":10731
 *         if owner is None:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlSystemConfComputeSettings_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10732
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      /* MemoryError is resolved via module globals here (it may be shadowed),
       * hence the generic fastcall sequence instead of PyExc_MemoryError. */
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10732, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_SystemConfCompu};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10732, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 10732, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10731
 *         if owner is None:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlSystemConfComputeSettings_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":10733
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlSystemConfComputeSettings_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlSystemConfComputeSettings_v1_t))));

    /* "cuda/bindings/_nvml.pyx":10734
 *                 raise MemoryError("Error allocating SystemConfComputeSettings_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10735
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10729
 *             raise ValueError("ptr must not be null (0)")
 *         cdef SystemConfComputeSettings_v1 obj = SystemConfComputeSettings_v1.__new__(SystemConfComputeSettings_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>malloc(sizeof(nvmlSystemConfComputeSettings_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":10737
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlSystemConfComputeSettings_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10738
 *         else:
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":10739
 *             obj._ptr = <nvmlSystemConfComputeSettings_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":10740
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":10741
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10717
 *         return __from_data(data, "system_conf_compute_settings_v1_dtype", system_conf_compute_settings_v1_dtype, SystemConfComputeSettings_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for SystemConfComputeSettings_v1.__reduce_cython__().
 * Takes no arguments: rejects any positional or keyword argument, then
 * delegates to the implementation (which unconditionally raises TypeError
 * to block pickling of the pointer-wrapping instance). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_16__reduce_cython__, "SystemConfComputeSettings_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because
 * the wrapped C pointer (_ptr) cannot be meaningfully pickled. Returns
 * NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for
 * SystemConfComputeSettings_v1.__setstate_cython__(__pyx_state).
 * Unpacks the single positional-or-keyword argument and delegates to the
 * implementation (which unconditionally raises TypeError — unpickling is
 * blocked because the instance wraps a raw C pointer). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_18__setstate_cython__, "SystemConfComputeSettings_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must live inside the macro argument.
     * `unlikely(x) < 0` expands to `__builtin_expect(!!(x), 0) < 0`, which
     * yields 0 or 1 and is therefore never negative — the error return of
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop any argument references collected so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of SystemConfComputeSettings_v1.__setstate_cython__().
 * Pickling is unsupported for this extension type (it wraps a raw C
 * pointer), so this body unconditionally raises TypeError and always
 * returns NULL; the __pyx_state argument is intentionally unused. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the fixed message; __PYX_ERR records the source
   * location and jumps straight to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.SystemConfComputeSettings_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10744
 * 
 * 
 * cdef _get_conf_compute_mem_size_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeMemSizeInfo_t pod = nvmlConfComputeMemSizeInfo_t()
 *     return _numpy.dtype({
*/

/* cdef helper: build the numpy dtype describing nvmlConfComputeMemSizeInfo_t
 * memory layout. Returns numpy.dtype({'names': [...], 'formats': [...],
 * 'offsets': [...], 'itemsize': sizeof(struct)}) so Python code can view
 * raw struct memory with correct field offsets. Returns NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_mem_size_info_dtype_offsets(void) {
  nvmlConfComputeMemSizeInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlConfComputeMemSizeInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_conf_compute_mem_size_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":10745
 * 
 * cdef _get_conf_compute_mem_size_info_dtype_offsets():
 *     cdef nvmlConfComputeMemSizeInfo_t pod = nvmlConfComputeMemSizeInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['protected_mem_size_kib', 'unprotected_mem_size_kib'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy, so pod's
   * contents are indeterminate. Harmless here because only the ADDRESSES of
   * pod's members are taken below (to compute offsets); the values are never
   * read — but confirm this matches the Cython codegen intent. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":10746
 * cdef _get_conf_compute_mem_size_info_dtype_offsets():
 *     cdef nvmlConfComputeMemSizeInfo_t pod = nvmlConfComputeMemSizeInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['protected_mem_size_kib', 'unprotected_mem_size_kib'],
 *         'formats': [_numpy.uint64, _numpy.uint64],
 */
  /* Look up numpy.dtype: fetch the module global `_numpy`, then its
   * `dtype` attribute. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10746, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10746, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":10747
 *     cdef nvmlConfComputeMemSizeInfo_t pod = nvmlConfComputeMemSizeInfo_t()
 *     return _numpy.dtype({
 *         'names': ['protected_mem_size_kib', 'unprotected_mem_size_kib'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, _numpy.uint64],
 *         'offsets': [
 */
  /* Build the dtype-spec dict: 'names' is a 2-element list of interned
   * field-name strings. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10747, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10747, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_protected_mem_size_kib);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_protected_mem_size_kib);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_protected_mem_size_kib) != (0)) __PYX_ERR(0, 10747, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_unprotected_mem_size_kib);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_unprotected_mem_size_kib);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_unprotected_mem_size_kib) != (0)) __PYX_ERR(0, 10747, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 10747, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10748
 *     return _numpy.dtype({
 *         'names': ['protected_mem_size_kib', 'unprotected_mem_size_kib'],
 *         'formats': [_numpy.uint64, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.protectedMemSizeKib)) - (<intptr_t>&pod),
 */
  /* 'formats': both fields are numpy.uint64; the attribute is looked up
   * twice because the Python source names it twice. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 10748, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10748, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 10747, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10750
 *         'formats': [_numpy.uint64, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.protectedMemSizeKib)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.unprotectedMemSizeKib)) - (<intptr_t>&pod),
 *         ],
 */
  /* Field offsets computed as member-address minus struct-address
   * (equivalent to offsetof, expressed via the stack `pod` instance). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.protectedMemSizeKib)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10750, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":10751
 *         'offsets': [
 *             (<intptr_t>&(pod.protectedMemSizeKib)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.unprotectedMemSizeKib)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeMemSizeInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.unprotectedMemSizeKib)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":10749
 *         'names': ['protected_mem_size_kib', 'unprotected_mem_size_kib'],
 *         'formats': [_numpy.uint64, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.protectedMemSizeKib)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.unprotectedMemSizeKib)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 10749, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10749, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 10747, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":10753
 *             (<intptr_t>&(pod.unprotectedMemSizeKib)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeMemSizeInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlConfComputeMemSizeInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 10747, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec). If dtype turned out to be a bound method,
   * unpack it so the self argument can be passed positionally via the
   * vectorcall fast path. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10746, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10744
 * 
 * 
 * cdef _get_conf_compute_mem_size_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeMemSizeInfo_t pod = nvmlConfComputeMemSizeInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_conf_compute_mem_size_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10770
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeMemSizeInfo_t *>calloc(1, sizeof(nvmlConfComputeMemSizeInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper */
/* tp_init wrapper for ConfComputeMemSizeInfo.__init__: rejects any
 * positional or keyword arguments (the Python signature is `def
 * __init__(self)`), then delegates to the impl function below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe-size branch avoids PyTuple_GET_SIZE
   * when tuple internals cannot be assumed (e.g. Limited API builds). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments at all: raise on any positional or
   * keyword argument. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeMemSizeInfo.__init__: calloc a zeroed
 * nvmlConfComputeMemSizeInfo_t, raise MemoryError if allocation fails,
 * and initialize ownership flags (_owner=None, _owned=True,
 * _readonly=False) so __dealloc__ knows to free the buffer. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":10771
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeMemSizeInfo_t *>calloc(1, sizeof(nvmlConfComputeMemSizeInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 */
  /* calloc (not malloc) so the struct starts zero-filled. */
  __pyx_v_self->_ptr = ((nvmlConfComputeMemSizeInfo_t *)calloc(1, (sizeof(nvmlConfComputeMemSizeInfo_t))));

  /* "cuda/bindings/_nvml.pyx":10772
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeMemSizeInfo_t *>calloc(1, sizeof(nvmlConfComputeMemSizeInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10773
 *         self._ptr = <nvmlConfComputeMemSizeInfo_t *>calloc(1, sizeof(nvmlConfComputeMemSizeInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeMemSizeInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError (module global; may be shadowed), call it with
     * the message via vectorcall, then raise the resulting instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10773, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeMemS};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10773, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10773, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10772
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeMemSizeInfo_t *>calloc(1, sizeof(nvmlConfComputeMemSizeInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":10774
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute swap: take a new ref to None, drop the ref
   * held by the old _owner value, then store. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":10775
 *             raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":10776
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":10770
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeMemSizeInfo_t *>calloc(1, sizeof(nvmlConfComputeMemSizeInfo_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10778
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeMemSizeInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for ConfComputeMemSizeInfo.__dealloc__;
 * delegates straight to the impl function (cannot fail, returns void). */
static void __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not parameters of this
   * function; this line compiles only if __Pyx_KwValues_VARARGS is a macro
   * that does not expand its arguments — confirm against the macro
   * definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  nvmlConfComputeMemSizeInfo_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlConfComputeMemSizeInfo_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":10780
 *     def __dealloc__(self):
 *         cdef nvmlConfComputeMemSizeInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10781
 *         cdef nvmlConfComputeMemSizeInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":10782
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":10783
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10780
 *     def __dealloc__(self):
 *         cdef nvmlConfComputeMemSizeInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":10778
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeMemSizeInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":10785
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeMemSizeInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper */
/* tp_repr wrapper for ConfComputeMemSizeInfo.__repr__: no arguments to
 * validate, simply casts self and delegates to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not in scope; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeMemSizeInfo.__repr__: builds the f-string
 * f"<{__name__}.ConfComputeMemSizeInfo object at {hex(id(self))}>" by
 * joining five unicode pieces (two literals around each interpolation). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":10786
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ConfComputeMemSizeInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: fetch the module's __name__ and format it as str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id(self) -> hex(...) -> str(...). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<" + name + ".ConfComputeMemSizeInfo object at " + hexaddr + ">".
   * The precomputed length/max-char arguments let the join allocate the
   * result buffer once; the literal lengths (1*2 + 34) are baked in by
   * the Cython compiler. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ConfComputeMemSizeInfo_object_a;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 34 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10785
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeMemSizeInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10788
 *         return f"<{__name__}.ConfComputeMemSizeInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper */
/* Property getter wrapper for ConfComputeMemSizeInfo.ptr: casts self and
 * delegates to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not in scope; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the address of the
 * wrapped nvmlConfComputeMemSizeInfo_t struct as a Python int, or NULL on
 * failure (PyLong allocation). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10791
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer-to-int conversion; PyLong_FromSsize_t can only fail on OOM. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10791, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10788
 *         return f"<{__name__}.ConfComputeMemSizeInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10793
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef-level helper `_get_ptr` (pyx line 10793): expose the raw struct
 * pointer as an integer address. Same value as the Python `ptr` property,
 * but callable from other cdef code without any Python-object overhead.
 * Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  /* pyx: `return <intptr_t>(self._ptr)` */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":10796
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper */
/* nb_int wrapper for ConfComputeMemSizeInfo.__int__: casts self and
 * delegates to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not in scope; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeMemSizeInfo.__int__ (pyx line 10796):
 * int(obj) yields the address of the wrapped struct — identical to the
 * `ptr` property. Returns a new PyLong, or NULL on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_addr_obj = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* pyx: `return <intptr_t>(self._ptr)` — convert the pointer to a
   * Python int; PyLong_FromSsize_t can only fail on OOM. */
  __pyx_addr_obj = PyLong_FromSsize_t((intptr_t)__pyx_v_self->_ptr);
  if (unlikely(!__pyx_addr_obj)) __PYX_ERR(0, 10797, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_addr_obj);
  __pyx_r = __pyx_addr_obj;
  __pyx_addr_obj = NULL;
  goto __pyx_L0;

  /* error exit: record traceback, return NULL with exception set. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_addr_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10799
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeMemSizeInfo other_
 *         if not isinstance(other, ConfComputeMemSizeInfo):
*/

/* Python wrapper */
/* Python wrapper */
/* __eq__ wrapper for ConfComputeMemSizeInfo: casts self, forwards `other`
 * unchanged, and delegates to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not in scope; relies on
   * __Pyx_KwValues_VARARGS being a non-expanding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeMemSizeInfo.__eq__ (generated from
 * cuda/bindings/_nvml.pyx:10799-10804).
 *
 * Semantics (mirrors the Cython source shown in the inline banners):
 *   - If `other` is not a ConfComputeMemSizeInfo instance, return False.
 *   - Otherwise compare the two wrapped nvmlConfComputeMemSizeInfo_t structs
 *     byte-for-byte with memcmp and return the resulting bool.
 * Returns a new reference, or NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":10801
 *     def __eq__(self, other):
 *         cdef ConfComputeMemSizeInfo other_
 *         if not isinstance(other, ConfComputeMemSizeInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance() check compiled to a direct subtype test against the
   * module-state type object. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10802
 *         cdef ConfComputeMemSizeInfo other_
 *         if not isinstance(other, ConfComputeMemSizeInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeMemSizeInfo_t)) == 0)
 */
    /* Type mismatch: return False (not NotImplemented) per the pyx source. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10801
 *     def __eq__(self, other):
 *         cdef ConfComputeMemSizeInfo other_
 *         if not isinstance(other, ConfComputeMemSizeInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":10803
 *         if not isinstance(other, ConfComputeMemSizeInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeMemSizeInfo_t)) == 0)
 * 
 */
  /* Typed assignment `other_ = other`: takes a reference and re-verifies the
   * type (None is tolerated by the test but was excluded above). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo))))) __PYX_ERR(0, 10803, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":10804
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeMemSizeInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Structural equality: raw memcmp over sizeof(nvmlConfComputeMemSizeInfo_t)
   * bytes of the two wrapped structs. NOTE(review): assumes both _ptr values
   * are non-NULL and point to valid structs — not checked here. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlConfComputeMemSizeInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10799
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeMemSizeInfo other_
 *         if not isinstance(other, ConfComputeMemSizeInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10806
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeMemSizeInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeMemSizeInfo.__setitem__ (mp_ass_subscript
 * slot). Casts self to the extension-type struct and forwards key/val to the
 * implementation function. Returns 0 on success, -1 on error (slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  /* kwvalues is generated boilerplate; unused (slot takes fixed arguments). */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeMemSizeInfo.__setitem__ (generated from
 * cuda/bindings/_nvml.pyx:10806-10816).
 *
 * Semantics (per the pyx source in the banners):
 *   - obj[0] = ndarray: allocate a fresh nvmlConfComputeMemSizeInfo_t with
 *     malloc, copy the ndarray's buffer into it (via val.ctypes.data), and
 *     mark this object as owning the copy; read-only follows the array's
 *     writeable flag. Raises MemoryError if malloc fails.
 *   - any other key: fall back to setattr(self, key, val), so property
 *     setters (e.g. protected_mem_size_kib) are reachable through item
 *     assignment.
 * NOTE(review): the `key == 0` path overwrites self._ptr without freeing a
 * previously owned pointer — presumably ownership bookkeeping elsewhere
 * prevents a leak; confirm against the pyx source.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":10807
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the isinstance half only runs when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10807, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Look up numpy.ndarray dynamically from module globals (numpy imported
   * as _numpy elsewhere in the module). */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 10807, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10808
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 */
    /* Allocate owned storage for a copy of the struct. */
    __pyx_v_self->_ptr = ((nvmlConfComputeMemSizeInfo_t *)malloc((sizeof(nvmlConfComputeMemSizeInfo_t))));

    /* "cuda/bindings/_nvml.pyx":10809
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeMemSizeInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10810
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeMemSizeInfo_t))
 *             self._owner = None
 */
      /* raise MemoryError(...): MemoryError is fetched from module globals
       * (it may be shadowed), then invoked via the vectorcall fast path;
       * the PyMethod_Check branch handles a bound-method callable. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10810, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeMemS};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10810, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 10810, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10809
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeMemSizeInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10811
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeMemSizeInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data yields the ndarray's buffer address as a Python int;
     * convert to intptr_t and memcpy one struct's worth of bytes.
     * NOTE(review): assumes the array holds at least
     * sizeof(nvmlConfComputeMemSizeInfo_t) contiguous bytes — not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10811, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10811, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10811, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlConfComputeMemSizeInfo_t))));

    /* "cuda/bindings/_nvml.pyx":10812
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeMemSizeInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The data was copied, so no external owner keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10813
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeMemSizeInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    /* This object now owns the malloc'd copy (freed on dealloc, presumably). */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10814
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 10814, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":10807
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":10816
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat the key as an attribute name (setattr). */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 10816, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":10806
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeMemSizeInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10818
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def protected_mem_size_kib(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for ConfComputeMemSizeInfo.protected_mem_size_kib.
 * Casts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_1__get__(PyObject *__pyx_v_self) {
  /* kwvalues is generated boilerplate; a getter takes no arguments. */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for protected_mem_size_kib: returns self._ptr[0].protectedMemSizeKib
 * as a Python int (field is unsigned long long in the generated conversion).
 * Returns a new reference or NULL on error.
 * NOTE(review): dereferences self._ptr without a NULL check. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10821
 *     def protected_mem_size_kib(self):
 *         """int: """
 *         return self._ptr[0].protectedMemSizeKib             # <<<<<<<<<<<<<<
 * 
 *     @protected_mem_size_kib.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).protectedMemSizeKib); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10818
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def protected_mem_size_kib(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.protected_mem_size_kib.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10823
 *         return self._ptr[0].protectedMemSizeKib
 * 
 *     @protected_mem_size_kib.setter             # <<<<<<<<<<<<<<
 *     def protected_mem_size_kib(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for ConfComputeMemSizeInfo.protected_mem_size_kib.
 * Casts self/val and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  /* kwvalues is generated boilerplate; unused in a setter slot. */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for protected_mem_size_kib: rejects writes with ValueError when the
 * instance is read-only, otherwise converts val to unsigned long long and
 * stores it into self._ptr[0].protectedMemSizeKib.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10825
 *     @protected_mem_size_kib.setter
 *     def protected_mem_size_kib(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")
 *         self._ptr[0].protectedMemSizeKib = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10826
 *     def protected_mem_size_kib(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].protectedMemSizeKib = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path on the
     * builtin exception type directly. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeMemSizeInfo_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10826, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10826, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10825
 *     @protected_mem_size_kib.setter
 *     def protected_mem_size_kib(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")
 *         self._ptr[0].protectedMemSizeKib = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10827
 *         if self._readonly:
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")
 *         self._ptr[0].protectedMemSizeKib = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion raises OverflowError/TypeError for out-of-range or non-int val. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 10827, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).protectedMemSizeKib = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10823
 *         return self._ptr[0].protectedMemSizeKib
 * 
 *     @protected_mem_size_kib.setter             # <<<<<<<<<<<<<<
 *     def protected_mem_size_kib(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.protected_mem_size_kib.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10829
 *         self._ptr[0].protectedMemSizeKib = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def unprotected_mem_size_kib(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for ConfComputeMemSizeInfo.unprotected_mem_size_kib.
 * Casts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_1__get__(PyObject *__pyx_v_self) {
  /* kwvalues is generated boilerplate; a getter takes no arguments. */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for unprotected_mem_size_kib: returns
 * self._ptr[0].unprotectedMemSizeKib as a Python int.
 * Returns a new reference or NULL on error.
 * NOTE(review): dereferences self._ptr without a NULL check. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10832
 *     def unprotected_mem_size_kib(self):
 *         """int: """
 *         return self._ptr[0].unprotectedMemSizeKib             # <<<<<<<<<<<<<<
 * 
 *     @unprotected_mem_size_kib.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).unprotectedMemSizeKib); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10832, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10829
 *         self._ptr[0].protectedMemSizeKib = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def unprotected_mem_size_kib(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.unprotected_mem_size_kib.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10834
 *         return self._ptr[0].unprotectedMemSizeKib
 * 
 *     @unprotected_mem_size_kib.setter             # <<<<<<<<<<<<<<
 *     def unprotected_mem_size_kib(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for ConfComputeMemSizeInfo.unprotected_mem_size_kib.
 * Casts self/val and delegates; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  /* kwvalues is generated boilerplate; unused in a setter slot. */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for unprotected_mem_size_kib: rejects writes with ValueError when the
 * instance is read-only, otherwise converts val to unsigned long long and
 * stores it into self._ptr[0].unprotectedMemSizeKib.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10836
 *     @unprotected_mem_size_kib.setter
 *     def unprotected_mem_size_kib(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")
 *         self._ptr[0].unprotectedMemSizeKib = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10837
 *     def unprotected_mem_size_kib(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].unprotectedMemSizeKib = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path on the
     * builtin exception type directly. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeMemSizeInfo_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10837, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10837, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10836
 *     @unprotected_mem_size_kib.setter
 *     def unprotected_mem_size_kib(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")
 *         self._ptr[0].unprotectedMemSizeKib = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":10838
 *         if self._readonly:
 *             raise ValueError("This ConfComputeMemSizeInfo instance is read-only")
 *         self._ptr[0].unprotectedMemSizeKib = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Conversion raises OverflowError/TypeError for out-of-range or non-int val. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 10838, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).unprotectedMemSizeKib = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10834
 *         return self._ptr[0].unprotectedMemSizeKib
 * 
 *     @unprotected_mem_size_kib.setter             # <<<<<<<<<<<<<<
 *     def unprotected_mem_size_kib(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.unprotected_mem_size_kib.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10840
 *         self._ptr[0].unprotectedMemSizeKib = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_12from_data, "ConfComputeMemSizeInfo.from_data(data)\n\nCreate an ConfComputeMemSizeInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `conf_compute_mem_size_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 10840, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10840, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 10840, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 10840, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10840, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 10840, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the static method ConfComputeMemSizeInfo.from_data(data).
 * Looks up the module-global `conf_compute_mem_size_info_dtype` and delegates
 * to the shared helper __from_data(), which (per the quoted .pyx source)
 * wraps the given single-element NumPy array in a ConfComputeMemSizeInfo.
 * Returns a new reference, or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":10847
 *             data (_numpy.ndarray): a single-element array of dtype `conf_compute_mem_size_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "conf_compute_mem_size_info_dtype", conf_compute_mem_size_info_dtype, ConfComputeMemSizeInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object stored as a module-level global; raises if missing. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_conf_compute_mem_size_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Delegate to the C-level helper; note the dtype *name* is passed as a
   * unicode constant alongside the dtype object and the target extension type. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_conf_compute_mem_size_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10840
 *         self._ptr[0].unprotectedMemSizeKib = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10849
 *         return __from_data(data, "conf_compute_mem_size_info_dtype", conf_compute_mem_size_info_dtype, ConfComputeMemSizeInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given pointer.
 */

/* Python wrapper */
/*
 * CPython entry point for ConfComputeMemSizeInfo.from_ptr(ptr, readonly=False, owner=None).
 * Supports both the METH_FASTCALL vectorcall convention and the classic
 * tuple-args convention (selected by CYTHON_METH_FASTCALL). Unpacks up to
 * three positional arguments, merges keyword arguments, applies the defaults
 * (readonly=False, owner=None), converts `ptr` and `readonly` to C values,
 * and delegates to the implementation function ..._14from_ptr.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_14from_ptr, "ConfComputeMemSizeInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ConfComputeMemSizeInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] hold owned references to ptr / readonly / owner. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): generated parenthesization puts `< 0` outside unlikely();
     * harmless if unlikely() expands to __builtin_expect (which returns its
     * argument) — only the branch hint is misplaced. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 10849, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals (fallthrough
       * switch), then merge keywords via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 10849, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":10850
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given pointer.
 * 
 */
      /* Default for `owner` is None; `readonly`'s default is applied later
       * during conversion (values[1] left NULL means False). */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Verify the one required argument (ptr) was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 10849, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 10849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 10849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 10849, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): ptr is converted with PyLong_AsSsize_t into an intptr_t;
     * this assumes Py_ssize_t and intptr_t have the same width on supported
     * platforms — generated-code assumption, confirm for exotic targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10850, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10850, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);  /* default: readonly=False */
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 10849, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":10849
 *         return __from_data(data, "conf_compute_mem_size_info_dtype", conf_compute_mem_size_info_dtype, ConfComputeMemSizeInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of ConfComputeMemSizeInfo.from_ptr.
 * Behavior (mirrors the quoted .pyx source):
 *   - raises ValueError if ptr == 0;
 *   - allocates a fresh ConfComputeMemSizeInfo via its tp_new slot;
 *   - owner is None  -> malloc() a private nvmlConfComputeMemSizeInfo_t,
 *     memcpy the struct from `ptr`, set _owned = True (the instance frees
 *     the copy later); malloc failure raises MemoryError;
 *   - owner provided -> alias the caller's memory directly (_ptr = ptr),
 *     hold a reference to `owner` to keep the memory alive, _owned = False;
 *   - records the readonly flag and returns the new instance.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":10858
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)
 */
  /* Reject a null pointer up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10859
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via fastcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10859, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10859, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10858
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":10860
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 */
  /* ConfComputeMemSizeInfo.__new__: direct tp_new call, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10860, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":10861
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10862
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 */
    /* No owner: take a private copy so the instance's lifetime is independent. */
    __pyx_v_obj->_ptr = ((nvmlConfComputeMemSizeInfo_t *)malloc((sizeof(nvmlConfComputeMemSizeInfo_t))));

    /* "cuda/bindings/_nvml.pyx":10863
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeMemSizeInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10864
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeMemSizeInfo_t))
 *             obj._owner = None
 */
      /* NOTE(review): MemoryError is resolved through module globals here
       * (late binding of the builtin name) rather than PyExc_MemoryError
       * directly — Cython's name-lookup semantics for `raise MemoryError(...)`. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10864, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeMemS};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10864, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 10864, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10863
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeMemSizeInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10865
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeMemSizeInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the caller's struct into the freshly malloc'd private buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlConfComputeMemSizeInfo_t))));

    /* "cuda/bindings/_nvml.pyx":10866
 *                 raise MemoryError("Error allocating ConfComputeMemSizeInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeMemSizeInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10867
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeMemSizeInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10861
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeMemSizeInfo obj = ConfComputeMemSizeInfo.__new__(ConfComputeMemSizeInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>malloc(sizeof(nvmlConfComputeMemSizeInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":10869
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the caller's memory; the held reference to
     * `owner` is what keeps that memory valid. */
    __pyx_v_obj->_ptr = ((nvmlConfComputeMemSizeInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":10870
 *         else:
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":10871
 *             obj._ptr = <nvmlConfComputeMemSizeInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":10872
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":10873
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10849
 *         return __from_data(data, "conf_compute_mem_size_info_dtype", conf_compute_mem_size_info_dtype, ConfComputeMemSizeInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* obj holds the only owned ref on the error path; on success __pyx_r was
   * INCREF'd above, so this XDECREF leaves exactly one reference to return. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper */
/*
 * CPython entry point for ConfComputeMemSizeInfo.__reduce_cython__().
 * Takes no arguments: any positional or keyword argument is rejected before
 * delegating to the implementation (which unconditionally raises TypeError
 * to block pickling of this pointer-backed type).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_16__reduce_cython__, "ConfComputeMemSizeInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positional or keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of ConfComputeMemSizeInfo.__reduce_cython__.
 * Unconditionally raises TypeError: the instance wraps a raw C pointer
 * (self._ptr), which cannot be serialized, so pickling is disabled.
 * Always returns NULL with the exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

/* Python wrapper */
/*
 * CPython entry point for ConfComputeMemSizeInfo.__setstate_cython__(__pyx_state).
 * Parses exactly one required argument (positional or keyword `__pyx_state`),
 * then delegates to the implementation, which unconditionally raises
 * TypeError to block unpickling of this pointer-backed type.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_18__setstate_cython__, "ConfComputeMemSizeInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] holds an owned reference to the single __pyx_state argument. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): generated parenthesization puts `< 0` outside unlikely();
     * harmless if unlikely() expands to __builtin_expect (which returns its
     * argument) — only the branch hint is misplaced. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call path: collect positionals, merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify the one required argument was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of ConfComputeMemSizeInfo.__setstate_cython__.
 * Unconditionally raises TypeError: the instance wraps a raw C pointer
 * (self._ptr) that cannot be restored from a pickle, so unpickling is
 * disabled. The __pyx_state argument is accepted but unused.
 * Always returns NULL with the exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeMemSizeInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10876
 * 
 * 
 * cdef _get_conf_compute_gpu_certificate_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGpuCertificate_t pod = nvmlConfComputeGpuCertificate_t()
 *     return _numpy.dtype({
*/

/* Build and return a numpy structured dtype describing the C layout of
 * nvmlConfComputeGpuCertificate_t.
 *
 * Equivalent Cython source (_nvml.pyx:10876-10888): it constructs
 *   numpy.dtype({'names': [...], 'formats': [...],
 *                'offsets': [...], 'itemsize': sizeof(struct)})
 * where the offsets are computed from the addresses of a stack-allocated
 * struct instance ("pod").  Returns a new reference, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_gpu_certificate_dtype_offsets(void) {
  nvmlConfComputeGpuCertificate_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlConfComputeGpuCertificate_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_conf_compute_gpu_certificate_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":10877
 * 
 * cdef _get_conf_compute_gpu_certificate_dtype_offsets():
 *     cdef nvmlConfComputeGpuCertificate_t pod = nvmlConfComputeGpuCertificate_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['cert_chain_size', 'attestation_cert_chain_size', 'cert_chain', 'attestation_cert_chain'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy, so pod's
   * *value* is indeterminate (default-constructed POD under C++).  This is
   * harmless here because only the ADDRESSES of pod's members are taken
   * below to compute field offsets -- the contents are never read.
   * Presumably intentional Cython codegen for `T pod = T()`; confirm if
   * this file is ever regenerated with different flags. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":10878
 * cdef _get_conf_compute_gpu_certificate_dtype_offsets():
 *     cdef nvmlConfComputeGpuCertificate_t pod = nvmlConfComputeGpuCertificate_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['cert_chain_size', 'attestation_cert_chain_size', 'cert_chain', 'attestation_cert_chain'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8],
 */
  /* Look up the bound callable `_numpy.dtype` (t5). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10878, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10878, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":10879
 *     cdef nvmlConfComputeGpuCertificate_t pod = nvmlConfComputeGpuCertificate_t()
 *     return _numpy.dtype({
 *         'names': ['cert_chain_size', 'attestation_cert_chain_size', 'cert_chain', 'attestation_cert_chain'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 */
  /* t4 = the spec dict (4 keys); t6 = the 'names' list of 4 field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10879, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10879, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cert_chain_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cert_chain_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_cert_chain_size) != (0)) __PYX_ERR(0, 10879, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_attestation_cert_chain_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_attestation_cert_chain_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_attestation_cert_chain_size) != (0)) __PYX_ERR(0, 10879, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cert_chain);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cert_chain);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_cert_chain) != (0)) __PYX_ERR(0, 10879, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_attestation_cert_chain);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_attestation_cert_chain);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_attestation_cert_chain) != (0)) __PYX_ERR(0, 10879, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 10879, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10880
 *     return _numpy.dtype({
 *         'names': ['cert_chain_size', 'attestation_cert_chain_size', 'cert_chain', 'attestation_cert_chain'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.certChainSize)) - (<intptr_t>&pod),
 */
  /* Fetch the 4 numpy scalar types (uint32, uint32, uint8, uint8) and
   * pack them into the 'formats' list. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10880, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 10880, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 10880, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 10880, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 10880, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 10879, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":10882
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.certChainSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.attestationCertChainSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.certChain)) - (<intptr_t>&pod),
 */
  /* Field offsets: member address minus base address (offsetof-style). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.certChainSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10882, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":10883
 *         'offsets': [
 *             (<intptr_t>&(pod.certChainSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationCertChainSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.certChain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationCertChain)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.attestationCertChainSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":10884
 *             (<intptr_t>&(pod.certChainSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationCertChainSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.certChain)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.attestationCertChain)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.certChain)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 10884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":10885
 *             (<intptr_t>&(pod.attestationCertChainSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.certChain)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationCertChain)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeGpuCertificate_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.attestationCertChain)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 10885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":10881
 *         'names': ['cert_chain_size', 'attestation_cert_chain_size', 'cert_chain', 'attestation_cert_chain'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.certChainSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationCertChainSize)) - (<intptr_t>&pod),
 */
  /* Pack the 4 offset ints into the 'offsets' list (steals refs). */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10881, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 10881, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 10881, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 10881, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 10881, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 10879, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":10887
 *             (<intptr_t>&(pod.attestationCertChain)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeGpuCertificate_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlConfComputeGpuCertificate_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 10887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 10879, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec) via the vectorcall fast path; t11 selects
   * whether a bound-method self slot is prepended. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10878, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10876
 * 
 * 
 * cdef _get_conf_compute_gpu_certificate_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGpuCertificate_t pod = nvmlConfComputeGpuCertificate_t()
 *     return _numpy.dtype({
 */

  /* function exit code: on error, drop every live temporary and return 0. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_conf_compute_gpu_certificate_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10904
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeGpuCertificate_t *>calloc(1, sizeof(nvmlConfComputeGpuCertificate_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ConfComputeGpuCertificate.__init__.
 * Enforces the zero-argument signature: any positional or keyword
 * argument raises TypeError (return -1) before delegating to the
 * implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Reject positional arguments, then keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.__init__.
 *
 * Allocates a zeroed nvmlConfComputeGpuCertificate_t on the C heap
 * (calloc), raises MemoryError on allocation failure, and marks the
 * instance as the owner (_owned=True) with no external owner object
 * (_owner=None) and writable (_readonly=False).  Returns 0 on success,
 * -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":10905
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGpuCertificate_t *>calloc(1, sizeof(nvmlConfComputeGpuCertificate_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGpuCertificate")
 */
  /* calloc zero-initializes the struct, matching NVML's expectation of
   * cleared input/output structs. */
  __pyx_v_self->_ptr = ((nvmlConfComputeGpuCertificate_t *)calloc(1, (sizeof(nvmlConfComputeGpuCertificate_t))));

  /* "cuda/bindings/_nvml.pyx":10906
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGpuCertificate_t *>calloc(1, sizeof(nvmlConfComputeGpuCertificate_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":10907
 *         self._ptr = <nvmlConfComputeGpuCertificate_t *>calloc(1, sizeof(nvmlConfComputeGpuCertificate_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGpuCertificate")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(msg) via the vectorcall fast path.
     * MemoryError is looked up as a module global (it may be shadowed),
     * not used via PyErr_NoMemory. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10907, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGpuC};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10907, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 10907, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10906
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGpuCertificate_t *>calloc(1, sizeof(nvmlConfComputeGpuCertificate_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":10908
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the _owner slot to None with correct refcounting (the slot holds
   * a previous value set by tp_new). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":10909
 *             raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":10910
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":10904
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeGpuCertificate_t *>calloc(1, sizeof(nvmlConfComputeGpuCertificate_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10912
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeGpuCertificate_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for ConfComputeGpuCertificate.__dealloc__: casts self to
 * the extension struct and delegates.  tp_dealloc cannot fail, so there
 * is no error handling.
 * NOTE(review): `__pyx_args` / `__pyx_nargs` below are not declared in
 * this function; presumably __Pyx_KwValues_VARARGS is a macro that
 * textually discards its arguments (otherwise this would not compile) --
 * confirm against the macro definition earlier in the file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* "cuda/bindings/_nvml.pyx":10912-10917 (ConfComputeGpuCertificate.__dealloc__)
 *
 * Release the wrapped nvmlConfComputeGpuCertificate_t when this instance
 * owns it.  The pointer is detached from the object *before* free() so the
 * struct field never dangles, even transiently.  No-op for borrowed
 * (_owned == false) or already-cleared (_ptr == NULL) wrappers. */
static void __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  nvmlConfComputeGpuCertificate_t *__pyx_v_ptr;

  /* Free only when we both own the allocation and actually hold one
   * (same short-circuit as `if self._owned and self._ptr != NULL`). */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    __pyx_v_ptr = __pyx_v_self->_ptr;   /* stash the pointer...          */
    __pyx_v_self->_ptr = NULL;          /* ...clear the field first...   */
    free(__pyx_v_ptr);                  /* ...then release the memory.   */
  }
}

/* "cuda/bindings/_nvml.pyx":10919
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeGpuCertificate object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_5__repr__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ConfComputeGpuCertificate.__repr__: casts self and
 * delegates to the implementation; propagates its return value (NULL on
 * error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.__repr__.
 *
 * Builds the f-string
 *   f"<{__name__}.ConfComputeGpuCertificate object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and
 * joining five unicode fragments with a precomputed total length and
 * max-char value.  Returns a new unicode object, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":10920
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ConfComputeGpuCertificate object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t2 = str(__name__) (format with empty format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".ConfComputeGpuCertificate object at " + hex + ">"
   * with length/max-char hints precomputed for a single allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ConfComputeGpuCertificate_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10920, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10919
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeGpuCertificate object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10922
 *         return f"<{__name__}.ConfComputeGpuCertificate object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for the ConfComputeGpuCertificate.ptr property getter:
 * casts self and delegates; propagates NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: return the wrapped struct
 * pointer as a Python int (the numeric address).  Returns a new
 * reference, or NULL if int creation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10925
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer to a signed integer address (intptr_t -> int). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10925, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10922
 *         return f"<{__name__}.ConfComputeGpuCertificate object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10927
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* "cuda/bindings/_nvml.pyx":10927-10928 (ConfComputeGpuCertificate._get_ptr)
 *
 * C-level accessor mirroring the `ptr` property: return the wrapped
 * struct pointer as an integer address (intptr_t).  Pure C -- touches no
 * Python objects and cannot raise. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":10930
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_7__int__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ConfComputeGpuCertificate.__int__: casts self and
 * delegates; propagates NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.__int__: int(obj) yields
 * the wrapped pointer's numeric address, identical to the `ptr`
 * property.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":10931
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer to a signed integer address (intptr_t -> int). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10931, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10930
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10933
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGpuCertificate other_
 *         if not isinstance(other, ConfComputeGpuCertificate):
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeGpuCertificate.__eq__: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * _8__eq__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.__eq__ (Cython-generated).
 * Returns Py_False when `other` is not a ConfComputeGpuCertificate;
 * otherwise compares the underlying C structs byte-for-byte with memcmp
 * over sizeof(nvmlConfComputeGpuCertificate_t). Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":10935
 *     def __eq__(self, other):
 *         cdef ConfComputeGpuCertificate other_
 *         if not isinstance(other, ConfComputeGpuCertificate):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  /* isinstance() check: anything that is not a ConfComputeGpuCertificate
   * compares unequal (no NotImplemented fallback in the .pyx source). */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":10936
 *         cdef ConfComputeGpuCertificate other_
 *         if not isinstance(other, ConfComputeGpuCertificate):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuCertificate_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":10935
 *     def __eq__(self, other):
 *         cdef ConfComputeGpuCertificate other_
 *         if not isinstance(other, ConfComputeGpuCertificate):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":10937
 *         if not isinstance(other, ConfComputeGpuCertificate):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuCertificate_t)) == 0)
 * 
*/
  /* Typed assignment `other_ = other`; TypeTest allows None per cdef semantics. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate))))) __PYX_ERR(0, 10937, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":10938
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuCertificate_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct comparison; result is converted to a Python bool. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlConfComputeGpuCertificate_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10938, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10933
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGpuCertificate other_
 *         if not isinstance(other, ConfComputeGpuCertificate):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10940
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuCertificate_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
*/

/* Python wrapper */
/* Python-level wrapper for ConfComputeGpuCertificate.__setitem__: casts
 * self to the extension-type struct and delegates to the _10__setitem__
 * implementation. Returns 0 on success, -1 on error (mp_ass_subscript
 * convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.__setitem__ (Cython-generated).
 * Two paths, mirroring the .pyx source:
 *   - key == 0 and val is a numpy.ndarray: malloc a fresh
 *     nvmlConfComputeGpuCertificate_t, memcpy the array's buffer
 *     (val.ctypes.data) into it, take ownership (_owned = True,
 *     _owner = None) and mirror the array's writeability into _readonly.
 *     NOTE(review): a previously owned self._ptr is not freed on this path;
 *     presumably callers only use it on fresh instances — confirm in the
 *     .pyx generator template.
 *   - otherwise: fall back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":10941
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up _numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 10941, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10941, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10941, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 10941, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":10942
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 */
    __pyx_v_self->_ptr = ((nvmlConfComputeGpuCertificate_t *)malloc((sizeof(nvmlConfComputeGpuCertificate_t))));

    /* "cuda/bindings/_nvml.pyx":10943
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuCertificate_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":10944
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuCertificate_t))
 *             self._owner = None
 */
      /* raise MemoryError("...") — MemoryError is looked up as a module
       * global, then called via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10944, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGpuC};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10944, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 10944, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":10943
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuCertificate_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":10945
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuCertificate_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; copy one
     * full struct from it. Assumes the array holds at least
     * sizeof(nvmlConfComputeGpuCertificate_t) contiguous bytes — enforced
     * by the .pyx caller, not here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10945, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10945, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 10945, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlConfComputeGpuCertificate_t))));

    /* "cuda/bindings/_nvml.pyx":10946
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuCertificate_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":10947
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuCertificate_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    /* The instance now owns the malloc'd struct (freed elsewhere when
     * _owned is set — deallocator not visible in this chunk). */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":10948
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10948, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10948, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 10948, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":10941
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":10950
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat the key as an attribute name (routes to the
     * generated property setters below). */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 10950, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":10940
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuCertificate_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10952
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cert_chain_size(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for ConfComputeGpuCertificate.cert_chain_size:
 * casts self and delegates to the __get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for ConfComputeGpuCertificate.cert_chain_size: reads the
 * unsigned-int certChainSize field from the wrapped struct and returns it
 * as a Python int. Dereferences self._ptr without a NULL check — assumes
 * the instance was initialized (pointer set) before property access. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10955
 *     def cert_chain_size(self):
 *         """int: """
 *         return self._ptr[0].certChainSize             # <<<<<<<<<<<<<<
 * 
 *     @cert_chain_size.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).certChainSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10952
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cert_chain_size(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.cert_chain_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10957
 *         return self._ptr[0].certChainSize
 * 
 *     @cert_chain_size.setter             # <<<<<<<<<<<<<<
 *     def cert_chain_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for ConfComputeGpuCertificate.cert_chain_size:
 * casts self and delegates to the __set__ implementation below.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for ConfComputeGpuCertificate.cert_chain_size: raises ValueError
 * when the instance is marked read-only, otherwise converts `val` to
 * unsigned int (OverflowError/TypeError propagate from the conversion)
 * and stores it into the wrapped struct's certChainSize field.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10959
 *     @cert_chain_size.setter
 *     def cert_chain_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         self._ptr[0].certChainSize = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10960
 *     def cert_chain_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].certChainSize = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuCertificate_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10960, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10960, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10959
 *     @cert_chain_size.setter
 *     def cert_chain_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         self._ptr[0].certChainSize = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":10961
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         self._ptr[0].certChainSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10961, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).certChainSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10957
 *         return self._ptr[0].certChainSize
 * 
 *     @cert_chain_size.setter             # <<<<<<<<<<<<<<
 *     def cert_chain_size(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.cert_chain_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10963
 *         self._ptr[0].certChainSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain_size(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for
 * ConfComputeGpuCertificate.attestation_cert_chain_size: casts self and
 * delegates to the __get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for ConfComputeGpuCertificate.attestation_cert_chain_size: reads
 * the unsigned-int attestationCertChainSize field from the wrapped struct
 * and returns it as a Python int. Same NULL-pointer assumption as the
 * cert_chain_size getter. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10966
 *     def attestation_cert_chain_size(self):
 *         """int: """
 *         return self._ptr[0].attestationCertChainSize             # <<<<<<<<<<<<<<
 * 
 *     @attestation_cert_chain_size.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).attestationCertChainSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10963
 *         self._ptr[0].certChainSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain_size(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.attestation_cert_chain_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10968
 *         return self._ptr[0].attestationCertChainSize
 * 
 *     @attestation_cert_chain_size.setter             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for
 * ConfComputeGpuCertificate.attestation_cert_chain_size: casts self and
 * delegates to the __set__ implementation below. Returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for ConfComputeGpuCertificate.attestation_cert_chain_size:
 * raises ValueError when the instance is read-only, otherwise converts
 * `val` to unsigned int and stores it into the wrapped struct's
 * attestationCertChainSize field. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10970
 *     @attestation_cert_chain_size.setter
 *     def attestation_cert_chain_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         self._ptr[0].attestationCertChainSize = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10971
 *     def attestation_cert_chain_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].attestationCertChainSize = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuCertificate_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10971, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10971, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10970
 *     @attestation_cert_chain_size.setter
 *     def attestation_cert_chain_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         self._ptr[0].attestationCertChainSize = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":10972
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         self._ptr[0].attestationCertChainSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 10972, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).attestationCertChainSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":10968
 *         return self._ptr[0].attestationCertChainSize
 * 
 *     @attestation_cert_chain_size.setter             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain_size(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.attestation_cert_chain_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10974
 *         self._ptr[0].attestationCertChainSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cert_chain(self):
 *         """~_numpy.uint8: (array of length 4096)."""
*/

/* Python wrapper */
/* Property getter wrapper for ConfComputeGpuCertificate.cert_chain: casts
 * self and delegates to the __get__ implementation (which exposes the
 * fixed-size certChain byte array as a numpy view). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cert_chain` property getter: exposes the
   fixed 4096-byte `certChain` field of the wrapped NVML struct as a
   NumPy uint8 array.  A Cython `view.array` is created with
   allocate_buffer=False and its data pointer aimed directly at the
   struct field, so `numpy.asarray` on it is zero-copy.
   NOTE(review): the returned ndarray aliases memory reached through
   self._ptr; nothing here ties its validity to the owner's lifetime --
   confirm ownership rules at the .pyx level. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10977
 *     def cert_chain(self):
 *         """~_numpy.uint8: (array of length 4096)."""
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].certChain))
 *         return _numpy.asarray(arr)
 */
  /* Build view.array(shape=(4096,), itemsize=1, format="B", mode="c",
     allocate_buffer=False) via the vectorcall keyword builder;
     __pyx_tuple[5] is the interned (4096,) shape tuple. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10977, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[5], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 10977, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 10977, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 10977, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 10977, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 10977, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10977, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10978
 *         """~_numpy.uint8: (array of length 4096)."""
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].certChain))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the (unallocated) view directly at the struct field: zero-copy. */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).certChain));

  /* "cuda/bindings/_nvml.pyx":10979
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].certChain))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @cert_chain.setter
 */
  /* return numpy.asarray(arr) -- wraps the view's buffer in an ndarray. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10979, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10974
 *         self._ptr[0].attestationCertChainSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cert_chain(self):
 *         """~_numpy.uint8: (array of length 4096)."""
 */

  /* function exit code: error path releases all temporaries and records a
     traceback frame; L0 drops the local `arr` (the ndarray keeps the
     buffer alive via the buffer protocol). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.cert_chain.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10981
 *         return _numpy.asarray(arr)
 * 
 *     @cert_chain.setter             # <<<<<<<<<<<<<<
 *     def cert_chain(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython descriptor entry point for the `cert_chain` property setter
   (`ConfComputeGpuCertificate.cert_chain.__set__`).  Casts `self` and
   delegates to the implementation function; returns 0 on success,
   -1 on error per the descriptor protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; the
     macro presumably compiles them away in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cert_chain` property setter.  Rejects writes
   when the instance is read-only, coerces `val` to uint8 through
   numpy.asarray into a temporary 4096-byte view.array, then memcpy's
   the staged bytes into the struct's `certChain` field.
   NOTE(review): the memcpy length is sizeof(unsigned char)*len(val);
   the preceding full-slice assignment (`arr[:] = ...`) should force
   len(val) == 4096 and thus bound the copy to the field size --
   confirm against the memoryview assignment semantics in the .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10983
 *     @cert_chain.setter
 *     def cert_chain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* Guard: mutating a read-only wrapper raises ValueError. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10984
 *     def cert_chain(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuCertificate_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10984, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10984, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10983
 *     @cert_chain.setter
 *     def cert_chain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":10985
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].certChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Staging buffer: an OWNED 4096-byte view.array (allocate_buffer
     defaults on here, unlike the getter's zero-copy view). */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10985, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10985, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[5], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 10985, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 10985, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 10985, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 10985, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10985, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10986
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].certChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* arr[:] = numpy.asarray(val, dtype=numpy.uint8): coerces `val` and
     copies it into the staging buffer (shape mismatches raise here). */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 10986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 10986, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 10986, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10986, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 10986, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10987
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].certChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Copy len(val) staged bytes into the struct field (see NOTE above). */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 10987, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).certChain)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":10981
 *         return _numpy.asarray(arr)
 * 
 *     @cert_chain.setter             # <<<<<<<<<<<<<<
 *     def cert_chain(self, val):
 *         if self._readonly:
 */

  /* function exit code: 0 on success; error path releases temporaries,
     records a traceback frame, and returns -1. */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.cert_chain.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10989
 *         memcpy(<void *>(&(self._ptr[0].certChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain(self):
 *         """~_numpy.uint8: (array of length 5120)."""
*/

/* Python wrapper */
/* CPython descriptor entry point for the `attestation_cert_chain`
   property getter.  Casts `self` to the extension-type struct and
   delegates to the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; the
     macro presumably compiles them away in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `attestation_cert_chain` property getter:
   exposes the fixed 5120-byte `attestationCertChain` field of the
   wrapped NVML struct as a NumPy uint8 array, zero-copy (view.array
   with allocate_buffer=False pointed at the struct field).  Mirrors
   the `cert_chain` getter but for the 5120-byte field
   (__pyx_tuple[6] is the interned (5120,) shape tuple).
   NOTE(review): the returned ndarray aliases memory reached through
   self._ptr; lifetime beyond the owner is not enforced here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":10992
 *     def attestation_cert_chain(self):
 *         """~_numpy.uint8: (array of length 5120)."""
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].attestationCertChain))
 *         return _numpy.asarray(arr)
 */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10992, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[6], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 10992, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 10992, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 10992, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 10992, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 10992, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10992, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":10993
 *         """~_numpy.uint8: (array of length 5120)."""
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].attestationCertChain))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the (unallocated) view directly at the struct field: zero-copy. */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).attestationCertChain));

  /* "cuda/bindings/_nvml.pyx":10994
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].attestationCertChain))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @attestation_cert_chain.setter
 */
  /* return numpy.asarray(arr) -- wraps the view's buffer in an ndarray. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 10994, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10994, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10994, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":10989
 *         memcpy(<void *>(&(self._ptr[0].certChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain(self):
 *         """~_numpy.uint8: (array of length 5120)."""
 */

  /* function exit code: error path releases all temporaries and records a
     traceback frame; L0 drops the local `arr` (the ndarray keeps the
     buffer alive via the buffer protocol). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.attestation_cert_chain.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":10996
 *         return _numpy.asarray(arr)
 * 
 *     @attestation_cert_chain.setter             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython descriptor entry point for the `attestation_cert_chain`
   property setter.  Casts `self` and delegates; returns 0 on success,
   -1 on error per the descriptor protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; the
     macro presumably compiles them away in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `attestation_cert_chain` property setter.
   Mirrors the `cert_chain` setter for the 5120-byte
   `attestationCertChain` field: rejects writes on read-only instances,
   stages `val` as uint8 in a temporary 5120-byte view.array via a
   full-slice assignment, then memcpy's it into the struct field.
   NOTE(review): memcpy length is sizeof(unsigned char)*len(val); the
   preceding `arr[:] = ...` should force len(val) == 5120, bounding the
   copy -- confirm against the memoryview assignment semantics. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":10998
 *     @attestation_cert_chain.setter
 *     def attestation_cert_chain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* Guard: mutating a read-only wrapper raises ValueError. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":10999
 *     def attestation_cert_chain(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuCertificate_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10999, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 10999, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":10998
 *     @attestation_cert_chain.setter
 *     def attestation_cert_chain(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":11000
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].attestationCertChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Staging buffer: an OWNED 5120-byte view.array (__pyx_tuple[6] is
     the interned (5120,) shape tuple). */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11000, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11000, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[6], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11000, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11000, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11000, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11000, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11000, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11001
 *             raise ValueError("This ConfComputeGpuCertificate instance is read-only")
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].attestationCertChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* arr[:] = numpy.asarray(val, dtype=numpy.uint8): coerces `val` and
     copies it into the staging buffer (shape mismatches raise here). */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11001, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 11001, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11001, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 11001, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11002
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].attestationCertChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Copy len(val) staged bytes into the struct field (see NOTE above). */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 11002, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).attestationCertChain)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":10996
 *         return _numpy.asarray(arr)
 * 
 *     @attestation_cert_chain.setter             # <<<<<<<<<<<<<<
 *     def attestation_cert_chain(self, val):
 *         if self._readonly:
 */

  /* function exit code: 0 on success; error path releases temporaries,
     records a traceback frame, and returns -1. */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.attestation_cert_chain.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11004
 *         memcpy(<void *>(&(self._ptr[0].attestationCertChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Fast-call wrapper for the static method ConfComputeGpuCertificate.from_data(data).
 * Unpacks exactly one argument ("data", positional or keyword) and dispatches to
 * the implementation function ..._12from_data below.  (Generated by Cython.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_12from_data, "ConfComputeGpuCertificate.from_data(data)\n\nCreate an ConfComputeGpuCertificate instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `conf_compute_gpu_certificate_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the "< 0" comparison must sit inside unlikely().  With the common
     * definition unlikely(x) == __builtin_expect(!!(x), 0), the previous form
     * "unlikely(__pyx_kwds_len) < 0" evaluated to (0 or 1) < 0 — always false —
     * so a negative (error) keyword count from __Pyx_NumKwargs_FASTCALL was
     * silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11004, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11004, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11004, __pyx_L3_error)
      /* verify that "data" was supplied either positionally or by keyword */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11004, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11004, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11004, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references held in values[] before propagating */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.from_data(data): fetches the
 * module-level global `conf_compute_gpu_certificate_dtype` and delegates to the
 * shared helper __from_data, which returns a new ConfComputeGpuCertificate
 * wrapping `data`.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11011
 *             data (_numpy.ndarray): a single-element array of dtype `conf_compute_gpu_certificate_dtype` holding the data.
 *         """
 *         return __from_data(data, "conf_compute_gpu_certificate_dtype", conf_compute_gpu_certificate_dtype, ConfComputeGpuCertificate)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* look up the dtype object in module globals (may raise NameError) */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_conf_compute_gpu_certificate_dty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11011, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_conf_compute_gpu_certificate_dty, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11011, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11004
 *         memcpy(<void *>(&(self._ptr[0].attestationCertChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11013
 *         return __from_data(data, "conf_compute_gpu_certificate_dtype", conf_compute_gpu_certificate_dtype, ConfComputeGpuCertificate)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given pointer.
*/

/* Python wrapper */
/* Fast-call wrapper for the static method
 * ConfComputeGpuCertificate.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks 1-3 arguments (positional or keyword), converts `ptr` to intptr_t and
 * `readonly` to a C int, then dispatches to ..._14from_ptr.  (Generated by Cython.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_14from_ptr, "ConfComputeGpuCertificate.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ConfComputeGpuCertificate instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the "< 0" comparison must sit inside unlikely().  With the common
     * definition unlikely(x) == __builtin_expect(!!(x), 0), the previous form
     * "unlikely(__pyx_kwds_len) < 0" evaluated to (0 or 1) < 0 — always false —
     * so a negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11013, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11013, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11013, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11013, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11013, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11014
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ConfComputeGpuCertificate instance wrapping the given pointer.
 * 
 */
      /* apply the default owner=None, then verify the one required arg (ptr) */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11013, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11013, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11013, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11013, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): conversion assumes intptr_t and Py_ssize_t share a
     * representation — holds on flat-address-space platforms; confirm for
     * exotic targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11014, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11014, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11013, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references held in values[] before propagating */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11013
 *         return __from_data(data, "conf_compute_gpu_certificate_dtype", conf_compute_gpu_certificate_dtype, ConfComputeGpuCertificate)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer, then either:
 *   owner is None -> malloc a fresh nvmlConfComputeGpuCertificate_t, deep-copy
 *                    the pointed-to struct into it, and mark the instance as
 *                    owning (freed later by the type's dealloc — not visible here);
 *   otherwise     -> alias the given pointer directly and hold a reference to
 *                    `owner`, which is expected to keep the memory alive.
 * Returns a new reference to the instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11022
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)
 */
  /* guard: a null address can be neither copied from nor aliased */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11023
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11023, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11023, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11022
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)
 */
  }

  /* "cuda/bindings/_nvml.pyx":11024
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 */
  /* allocate the instance via tp_new directly (bypasses __init__) */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11024, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11025
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11026
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 */
    /* no owner: take a private copy of the struct so the caller's memory
     * may be freed afterwards */
    __pyx_v_obj->_ptr = ((nvmlConfComputeGpuCertificate_t *)malloc((sizeof(nvmlConfComputeGpuCertificate_t))));

    /* "cuda/bindings/_nvml.pyx":11027
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuCertificate_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11028
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuCertificate_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11028, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGpuC};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11028, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11028, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11027
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuCertificate_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11029
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuCertificate_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* NOTE(review): `ptr` is trusted to point at a readable struct of this
     * exact size — the API contract documented in the docstring. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlConfComputeGpuCertificate_t))));

    /* "cuda/bindings/_nvml.pyx":11030
 *                 raise MemoryError("Error allocating ConfComputeGpuCertificate")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuCertificate_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11031
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuCertificate_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11025
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuCertificate obj = ConfComputeGpuCertificate.__new__(ConfComputeGpuCertificate)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>malloc(sizeof(nvmlConfComputeGpuCertificate_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":11033
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* owner provided: alias the memory; `owner` must outlive this instance */
    __pyx_v_obj->_ptr = ((nvmlConfComputeGpuCertificate_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11034
 *         else:
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11035
 *             obj._ptr = <nvmlConfComputeGpuCertificate_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11036
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11037
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11013
 *         return __from_data(data, "conf_compute_gpu_certificate_dtype", conf_compute_gpu_certificate_dtype, ConfComputeGpuCertificate)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for ConfComputeGpuCertificate.__reduce_cython__: rejects any
 * positional or keyword arguments, then dispatches to the implementation
 * (which always raises TypeError — instances are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_16__reduce_cython__, "ConfComputeGpuCertificate.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* the method takes no arguments beyond self */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: pickling is unsupported because the
 * instance wraps a raw C pointer (_ptr); unconditionally raises TypeError. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Wrapper for ConfComputeGpuCertificate.__setstate_cython__(self, __pyx_state):
 * unpacks the single state argument and dispatches to the implementation
 * (which always raises TypeError — instances are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_18__setstate_cython__, "ConfComputeGpuCertificate.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the "< 0" comparison must sit inside unlikely().  With the common
     * definition unlikely(x) == __builtin_expect(!!(x), 0), the previous form
     * "unlikely(__pyx_kwds_len) < 0" evaluated to (0 or 1) < 0 — always false —
     * so a negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* verify that "__pyx_state" was supplied either positionally or by keyword */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop the references held in values[] before propagating */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuCertificate.__setstate_cython__.
 * This is Cython's auto-generated "tree fragment" stub for a cdef class
 * that cannot be pickled: it ignores both arguments (CYTHON_UNUSED) and
 * unconditionally raises TypeError, so the function can only ever exit
 * through the __pyx_L1_error path and always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message string; the 0,0 arguments
   * mean no explicit traceback/cause is attached. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  /* Only exit path: record the traceback frame and return NULL to signal
   * the raised exception to the caller. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuCertificate.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11040
 * 
 * 
 * cdef _get_conf_compute_gpu_attestation_report_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGpuAttestationReport_t pod = nvmlConfComputeGpuAttestationReport_t()
 *     return _numpy.dtype({
*/

/* Builds and returns the numpy structured dtype that mirrors the in-memory
 * layout of nvmlConfComputeGpuAttestationReport_t.  It constructs a dict with
 * 'names', 'formats', 'offsets' and 'itemsize' keys and calls _numpy.dtype()
 * on it.  Field offsets are computed by taking the address difference between
 * each struct member of a stack-local instance (`pod`) and the instance
 * itself; only the addresses matter, never the member values.
 * Returns a new reference on success, NULL (with an exception set) on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_gpu_attestation_report_dtype_offsets(void) {
  nvmlConfComputeGpuAttestationReport_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlConfComputeGpuAttestationReport_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  size_t __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_conf_compute_gpu_attestation_report_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11041
 * 
 * cdef _get_conf_compute_gpu_attestation_report_dtype_offsets():
 *     cdef nvmlConfComputeGpuAttestationReport_t pod = nvmlConfComputeGpuAttestationReport_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['is_cec_attestation_report_present', 'attestation_report_size', 'cec_attestation_report_size', 'nonce', 'attestation_report', 'cec_attestation_report'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so pod holds
   * indeterminate bytes (Cython emits this for C++ value-initialization of a
   * POD temporary).  Harmless here because only &pod.member addresses are
   * used below, never the contents — but confirm against the Cython codegen
   * if this file is regenerated with a different compiler/config. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11042
 * cdef _get_conf_compute_gpu_attestation_report_dtype_offsets():
 *     cdef nvmlConfComputeGpuAttestationReport_t pod = nvmlConfComputeGpuAttestationReport_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['is_cec_attestation_report_present', 'attestation_report_size', 'cec_attestation_report_size', 'nonce', 'attestation_report', 'cec_attestation_report'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 */
  /* Look up the bound callable _numpy.dtype (t_5); t_3 stays NULL unless the
   * CYTHON_UNPACK_METHODS branch below detects a bound method. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11042, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11042, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11043
 *     cdef nvmlConfComputeGpuAttestationReport_t pod = nvmlConfComputeGpuAttestationReport_t()
 *     return _numpy.dtype({
 *         'names': ['is_cec_attestation_report_present', 'attestation_report_size', 'cec_attestation_report_size', 'nonce', 'attestation_report', 'cec_attestation_report'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 */
  /* Build the spec dict (t_4, presized for 4 keys) and the 6-element
   * 'names' list (t_6) from interned unicode constants. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11043, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11043, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_cec_attestation_report_presen);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_cec_attestation_report_presen);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_is_cec_attestation_report_presen) != (0)) __PYX_ERR(0, 11043, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_attestation_report_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_attestation_report_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_attestation_report_size) != (0)) __PYX_ERR(0, 11043, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cec_attestation_report_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cec_attestation_report_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_cec_attestation_report_size) != (0)) __PYX_ERR(0, 11043, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_nonce);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_nonce);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_nonce) != (0)) __PYX_ERR(0, 11043, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_attestation_report);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_attestation_report);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_attestation_report) != (0)) __PYX_ERR(0, 11043, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cec_attestation_report);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cec_attestation_report);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_cec_attestation_report) != (0)) __PYX_ERR(0, 11043, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11043, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11044
 *     return _numpy.dtype({
 *         'names': ['is_cec_attestation_report_present', 'attestation_report_size', 'cec_attestation_report_size', 'nonce', 'attestation_report', 'cec_attestation_report'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.isCecAttestationReportPresent)) - (<intptr_t>&pod),
 */
  /* Fetch the six scalar type objects (uint32 x3, uint8 x3) into t_7..t_12,
   * re-reading the _numpy module global before each attribute lookup. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Move t_7..t_12 into the 'formats' list; GIVEREF transfers ownership to
   * the list, then the temporaries are zeroed without DECREF. */
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11044, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11044, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 11044, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 11044, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 11044, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 11044, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11043, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11046
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.isCecAttestationReportPresent)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.attestationReportSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cecAttestationReportSize)) - (<intptr_t>&pod),
 */
  /* Each offset is offsetof()-style arithmetic: member address minus the
   * struct base address, boxed into a Python int. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isCecAttestationReportPresent)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11046, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11047
 *         'offsets': [
 *             (<intptr_t>&(pod.isCecAttestationReportPresent)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationReportSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.cecAttestationReportSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.nonce)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.attestationReportSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 11047, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":11048
 *             (<intptr_t>&(pod.isCecAttestationReportPresent)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationReportSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cecAttestationReportSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.nonce)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationReport)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.cecAttestationReportSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 11048, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":11049
 *             (<intptr_t>&(pod.attestationReportSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cecAttestationReportSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.nonce)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.attestationReport)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cecAttestationReport)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.nonce)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11049, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":11050
 *             (<intptr_t>&(pod.cecAttestationReportSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.nonce)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationReport)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.cecAttestationReport)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.attestationReport)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11050, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":11051
 *             (<intptr_t>&(pod.nonce)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationReport)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cecAttestationReport)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeGpuAttestationReport_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.cecAttestationReport)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11051, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11045
 *         'names': ['is_cec_attestation_report_present', 'attestation_report_size', 'cec_attestation_report_size', 'nonce', 'attestation_report', 'cec_attestation_report'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint8, _numpy.uint8, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isCecAttestationReportPresent)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attestationReportSize)) - (<intptr_t>&pod),
 */
  /* Pack the six boxed offsets into the 'offsets' list, same ownership
   * transfer pattern as 'formats' above. */
  __pyx_t_7 = PyList_New(6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11045, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11045, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_12) != (0)) __PYX_ERR(0, 11045, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_11) != (0)) __PYX_ERR(0, 11045, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 11045, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_9) != (0)) __PYX_ERR(0, 11045, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_8) != (0)) __PYX_ERR(0, 11045, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11043, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11053
 *             (<intptr_t>&(pod.cecAttestationReport)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeGpuAttestationReport_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the full C struct size, including any
   * trailing padding, so numpy arrays of this dtype match the C layout. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlConfComputeGpuAttestationReport_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11053, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11043, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall _numpy.dtype(spec_dict); t_13 is 1 unless the callable is a
   * bound method that gets unpacked (then self occupies callargs[0]). */
  __pyx_t_13 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_13 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_13, (2-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11042, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11040
 * 
 * 
 * cdef _get_conf_compute_gpu_attestation_report_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGpuAttestationReport_t pod = nvmlConfComputeGpuAttestationReport_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release every live temporary, record the traceback, and
   * return NULL; success path (L0) hands the new reference back. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_conf_compute_gpu_attestation_report_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11070
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeGpuAttestationReport_t *>calloc(1, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ConfComputeGpuAttestationReport.__init__.
 * Takes the raw (args, kwds) calling convention, verifies that NO positional
 * or keyword arguments were passed (the .pyx __init__ takes only self), then
 * forwards to the typed implementation below.  Returns 0 on success, -1 with
 * an exception set on bad arguments. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe macro variant is only used when the
   * build guarantees __pyx_args is a real tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Reject any positional or keyword arguments: signature is __init__(self). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuAttestationReport.__init__.
 * Zero-allocates one nvmlConfComputeGpuAttestationReport_t on the C heap
 * (calloc, so all fields start at 0), raising MemoryError on allocation
 * failure, then marks the instance as owning the buffer:
 *   _owner = None (no other Python object keeps the memory alive),
 *   _owned = True (the __dealloc__ below will free it),
 *   _readonly = False.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11071
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGpuAttestationReport_t *>calloc(1, sizeof(nvmlConfComputeGpuAttestationReport_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 */
  __pyx_v_self->_ptr = ((nvmlConfComputeGpuAttestationReport_t *)calloc(1, (sizeof(nvmlConfComputeGpuAttestationReport_t))));

  /* "cuda/bindings/_nvml.pyx":11072
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGpuAttestationReport_t *>calloc(1, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11073
 *         self._ptr = <nvmlConfComputeGpuAttestationReport_t *>calloc(1, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGpuAttestationReport")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(message) via the generic vectorcall path
     * (MemoryError is looked up as a global, so it honors shadowing). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11073, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGpuA};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11073, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11073, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11072
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGpuAttestationReport_t *>calloc(1, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":11074
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11075
 *             raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11076
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11070
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeGpuAttestationReport_t *>calloc(1, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11078
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeGpuAttestationReport_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level (tp_dealloc-invoked) wrapper for __dealloc__: casts self to
 * the extension-type struct and forwards to the implementation.  The
 * __pyx_kwvalues line references __pyx_args/__pyx_nargs which are not
 * parameters here — __Pyx_KwValues_VARARGS is a macro that (in this build
 * configuration) does not expand its arguments, so this compiles; the
 * result is explicitly CYTHON_UNUSED. */
static void __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of ConfComputeGpuAttestationReport.__dealloc__.
 * Frees the C-heap struct only when this instance owns it (_owned) and the
 * pointer is non-NULL.  The pointer is copied to a local and _ptr is set to
 * NULL *before* free() — so a stale _ptr can never be observed pointing at
 * freed memory.  Not-owned instances (views over someone else's buffer)
 * leave the memory alone. */
static void __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  nvmlConfComputeGpuAttestationReport_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlConfComputeGpuAttestationReport_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":11080
 *     def __dealloc__(self):
 *         cdef nvmlConfComputeGpuAttestationReport_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11081
 *         cdef nvmlConfComputeGpuAttestationReport_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":11082
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":11083
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11080
 *     def __dealloc__(self):
 *         cdef nvmlConfComputeGpuAttestationReport_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":11078
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeGpuAttestationReport_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":11085
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeGpuAttestationReport object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper (tp_repr slot) for ConfComputeGpuAttestationReport.__repr__:
 * casts self and forwards to the implementation.  As in the __dealloc__
 * wrapper, __Pyx_KwValues_VARARGS is a macro whose arguments are not
 * expanded in this configuration, so the __pyx_args/__pyx_nargs names need
 * not exist as parameters. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ConfComputeGpuAttestationReport.__repr__.
 * Compiled form of the f-string
 *   f"<{__name__}.ConfComputeGpuAttestationReport object at {hex(id(self))}>"
 * Each interpolation is rendered to unicode, then the five pieces (literal
 * "<", module name, literal ".ConfComputeGpuAttestationReport object at ",
 * hex(id(self)), literal ">") are joined in a single __Pyx_PyUnicode_Join
 * with a precomputed total length and max-char hint. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11086
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ConfComputeGpuAttestationReport object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: fetch the module global and format it as str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id(self) -> hex() -> str(). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Assemble the five segments and join them into the final repr string. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ConfComputeGpuAttestationReport;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 43 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11085
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeGpuAttestationReport object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11088
 *         return f"<{__name__}.ConfComputeGpuAttestationReport object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* CPython-visible wrapper for the `ptr` property getter.
 * Casts the generic PyObject* self to the extension-type struct and
 * dispatches to the implementation function; returns a new reference
 * (Python int) or NULL on error. Generated by Cython — do not hand-edit
 * logic here; change the .pyx source instead. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ConfComputeGpuAttestationReport.ptr.__get__`:
 * returns the address stored in self->_ptr as a Python int.
 * Corresponds to `return <intptr_t>(self._ptr)` at _nvml.pyx:11091. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11091
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw pointer address as a Python int (intptr_t via Py_ssize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11091, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11088
 *         return f"<{__name__}.ConfComputeGpuAttestationReport object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11093
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast-path accessor for the struct pointer (cdef `_get_ptr`):
 * returns self->_ptr as intptr_t without creating any Python objects,
 * so it cannot raise and needs no refcounting. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":11094
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11093
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11096
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython-visible wrapper for `__int__` (nb_int slot): downcasts self and
 * forwards to the implementation. Returns a new Python int reference or
 * NULL on error. Generated by Cython — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__int__`: same contract as the `ptr` getter —
 * the wrapped struct pointer's address boxed as a Python int
 * (`return <intptr_t>(self._ptr)` at _nvml.pyx:11097). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11097
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw pointer address as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11096
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11099
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGpuAttestationReport other_
 *         if not isinstance(other, ConfComputeGpuAttestationReport):
*/

/* Python wrapper */
/* CPython-visible wrapper for `__eq__`: downcasts self and forwards both
 * operands to the implementation. Returns a new reference (Py_True/Py_False
 * boxed by the impl) or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__eq__`: returns False when `other` is not a
 * ConfComputeGpuAttestationReport; otherwise compares the pointed-to
 * structs byte-for-byte with memcmp over sizeof(nvmlConfComputeGpuAttestationReport_t).
 * NOTE(review): memcmp dereferences both _ptr values; behavior if either
 * wrapper holds a NULL pointer is not guarded here — presumably the .pyx
 * class invariants prevent that. TODO confirm in the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11101
 *     def __eq__(self, other):
 *         cdef ConfComputeGpuAttestationReport other_
 *         if not isinstance(other, ConfComputeGpuAttestationReport):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11102
 *         cdef ConfComputeGpuAttestationReport other_
 *         if not isinstance(other, ConfComputeGpuAttestationReport):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuAttestationReport_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11101
 *     def __eq__(self, other):
 *         cdef ConfComputeGpuAttestationReport other_
 *         if not isinstance(other, ConfComputeGpuAttestationReport):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":11103
 *         if not isinstance(other, ConfComputeGpuAttestationReport):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuAttestationReport_t)) == 0)
 * 
 */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  /* Typed assignment `other_ = other`: re-checks the type (None allowed by
   * the cast check but already excluded by the TypeCheck above). */
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport))))) __PYX_ERR(0, 11103, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":11104
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuAttestationReport_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Value equality: raw byte comparison of the two underlying structs. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlConfComputeGpuAttestationReport_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11099
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGpuAttestationReport other_
 *         if not isinstance(other, ConfComputeGpuAttestationReport):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11106
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuAttestationReport_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
*/

/* Python wrapper */
/* CPython-visible wrapper for `__setitem__` (mp_ass_subscript slot):
 * downcasts self and forwards key/val to the implementation.
 * Returns 0 on success, -1 on error (exception set). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setitem__`. Two behaviors:
 *   - obj[0] = <numpy.ndarray>: malloc a fresh nvmlConfComputeGpuAttestationReport_t,
 *     memcpy the array's buffer (via val.ctypes.data) into it, take ownership
 *     (_owner = None, _owned = True) and mirror the array's writeable flag
 *     into _readonly.
 *   - any other key: falls through to setattr(self, key, val), i.e. the
 *     named-property setters.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the malloc path overwrites self->_ptr without freeing a
 * previously owned allocation — looks like a leak if item 0 is assigned
 * twice; if real, the fix belongs in the .pyx source, not this generated
 * file. TODO confirm against the .pyx class invariants. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":11107
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 11107, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 11107, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11108
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 */
    /* See NOTE(review) in the header: prior _ptr is not freed here. */
    __pyx_v_self->_ptr = ((nvmlConfComputeGpuAttestationReport_t *)malloc((sizeof(nvmlConfComputeGpuAttestationReport_t))));

    /* "cuda/bindings/_nvml.pyx":11109
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuAttestationReport_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11110
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError(...) via the vectorcall helper. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11110, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGpuA};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11110, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 11110, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11109
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuAttestationReport_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11111
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuAttestationReport_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data yields the array buffer's address as a Python int;
     * copy sizeof(struct) bytes from it. Assumes the array buffer is at
     * least that large — TODO confirm the .pyx validates the array size. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11111, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11111, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11111, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlConfComputeGpuAttestationReport_t))));

    /* "cuda/bindings/_nvml.pyx":11112
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11113
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11114
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag into the wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11114, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11114, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 11114, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":11107
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":11116
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 11116, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":11106
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGpuAttestationReport_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11118
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_cec_attestation_report_present(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-visible wrapper for the `is_cec_attestation_report_present`
 * property getter: downcasts self and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for `is_cec_attestation_report_present`: reads the
 * isCecAttestationReportPresent field of the wrapped struct and boxes it
 * as a Python int. Dereferences self->_ptr unchecked. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11121
 *     def is_cec_attestation_report_present(self):
 *         """int: """
 *         return self._ptr[0].isCecAttestationReportPresent             # <<<<<<<<<<<<<<
 * 
 *     @is_cec_attestation_report_present.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isCecAttestationReportPresent); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11121, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11118
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_cec_attestation_report_present(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.is_cec_attestation_report_present.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11123
 *         return self._ptr[0].isCecAttestationReportPresent
 * 
 *     @is_cec_attestation_report_present.setter             # <<<<<<<<<<<<<<
 *     def is_cec_attestation_report_present(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-visible wrapper for the `is_cec_attestation_report_present`
 * property setter: downcasts self, forwards val; 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for `is_cec_attestation_report_present`: raises ValueError when the
 * wrapper is read-only (_readonly set, e.g. from a non-writeable ndarray),
 * otherwise converts val to unsigned int and stores it into the struct field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11125
 *     @is_cec_attestation_report_present.setter
 *     def is_cec_attestation_report_present(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].isCecAttestationReportPresent = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11126
 *     def is_cec_attestation_report_present(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isCecAttestationReportPresent = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuAttestationRe};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11126, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11126, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11125
 *     @is_cec_attestation_report_present.setter
 *     def is_cec_attestation_report_present(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].isCecAttestationReportPresent = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11127
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].isCecAttestationReportPresent = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert val to unsigned int (raises OverflowError/TypeError on bad input). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11127, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isCecAttestationReportPresent = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11123
 *         return self._ptr[0].isCecAttestationReportPresent
 * 
 *     @is_cec_attestation_report_present.setter             # <<<<<<<<<<<<<<
 *     def is_cec_attestation_report_present(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.is_cec_attestation_report_present.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11129
 *         self._ptr[0].isCecAttestationReportPresent = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_report_size(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-visible wrapper for the `attestation_report_size` property getter:
 * downcasts self and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for `attestation_report_size`: reads the attestationReportSize
 * field of the wrapped struct and boxes it as a Python int.
 * Dereferences self->_ptr unchecked. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11132
 *     def attestation_report_size(self):
 *         """int: """
 *         return self._ptr[0].attestationReportSize             # <<<<<<<<<<<<<<
 * 
 *     @attestation_report_size.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).attestationReportSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11129
 *         self._ptr[0].isCecAttestationReportPresent = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_report_size(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.attestation_report_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11134
 *         return self._ptr[0].attestationReportSize
 * 
 *     @attestation_report_size.setter             # <<<<<<<<<<<<<<
 *     def attestation_report_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-visible wrapper for the `attestation_report_size` property setter:
 * downcasts self, forwards val; 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the attestation_report_size setter
 * (from cuda/bindings/_nvml.pyx:11134-11138).
 * Raises ValueError when the instance is flagged read-only; otherwise converts
 * `val` to unsigned int and stores it into the wrapped C struct's
 * attestationReportSize field. Returns 0 on success, -1 after setting a
 * Python exception. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11136
 *     @attestation_report_size.setter
 *     def attestation_report_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].attestationReportSize = val
 */
  /* read-only guard: mutation is forbidden when _readonly is set */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11137
 *     def attestation_report_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].attestationReportSize = val
 * 
 */
    /* build and raise ValueError(msg) via the vectorcall fast path;
     * __pyx_t_3 == 1 leaves slot 0 free so the callee may use
     * PY_VECTORCALL_ARGUMENTS_OFFSET */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuAttestationRe};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11137, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11137, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11136
 *     @attestation_report_size.setter
 *     def attestation_report_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].attestationReportSize = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11138
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].attestationReportSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* convert the Python value to unsigned int ((unsigned int)-1 plus a pending
   * exception signals conversion failure) and write it through to the struct */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11138, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).attestationReportSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11134
 *         return self._ptr[0].attestationReportSize
 * 
 *     @attestation_report_size.setter             # <<<<<<<<<<<<<<
 *     def attestation_report_size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.attestation_report_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11140
 *         self._ptr[0].attestationReportSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cec_attestation_report_size(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ wrapper for ConfComputeGpuAttestationReport.cec_attestation_report_size.
 * Downcasts the untyped self and forwards to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* cast to the concrete extension-type struct and delegate to the impl */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the cec_attestation_report_size getter
 * (from cuda/bindings/_nvml.pyx:11140-11143).
 * Reads the cecAttestationReportSize field of the wrapped C struct and boxes
 * it as a Python int. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11143
 *     def cec_attestation_report_size(self):
 *         """int: """
 *         return self._ptr[0].cecAttestationReportSize             # <<<<<<<<<<<<<<
 * 
 *     @cec_attestation_report_size.setter
 */
  /* box the unsigned int field as a Python int and return it */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).cecAttestationReportSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11140
 *         self._ptr[0].attestationReportSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cec_attestation_report_size(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.cec_attestation_report_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11145
 *         return self._ptr[0].cecAttestationReportSize
 * 
 *     @cec_attestation_report_size.setter             # <<<<<<<<<<<<<<
 *     def cec_attestation_report_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ wrapper for ConfComputeGpuAttestationReport.cec_attestation_report_size.
 * Downcasts the untyped self/value pair and forwards to the typed implementation.
 * Returns 0 on success, -1 on error (standard tp_descr_set convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* cast to the concrete extension-type struct and delegate to the impl */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the cec_attestation_report_size setter
 * (from cuda/bindings/_nvml.pyx:11145-11149).
 * Raises ValueError when the instance is flagged read-only; otherwise converts
 * `val` to unsigned int and stores it into the wrapped C struct's
 * cecAttestationReportSize field. Returns 0 on success, -1 after setting a
 * Python exception. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11147
 *     @cec_attestation_report_size.setter
 *     def cec_attestation_report_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].cecAttestationReportSize = val
 */
  /* read-only guard: mutation is forbidden when _readonly is set */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11148
 *     def cec_attestation_report_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].cecAttestationReportSize = val
 * 
 */
    /* build and raise ValueError(msg) via the vectorcall fast path */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuAttestationRe};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11148, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11148, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11147
 *     @cec_attestation_report_size.setter
 *     def cec_attestation_report_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].cecAttestationReportSize = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11149
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         self._ptr[0].cecAttestationReportSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* convert to unsigned int (error signalled by (unsigned int)-1 plus a
   * pending exception) and write through to the struct */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11149, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).cecAttestationReportSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11145
 *         return self._ptr[0].cecAttestationReportSize
 * 
 *     @cec_attestation_report_size.setter             # <<<<<<<<<<<<<<
 *     def cec_attestation_report_size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.cec_attestation_report_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11151
 *         self._ptr[0].cecAttestationReportSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def nonce(self):
 *         """~_numpy.uint8: (array of length 32)."""
*/

/* Python wrapper */
/* Descriptor __get__ wrapper for ConfComputeGpuAttestationReport.nonce.
 * Downcasts the untyped self and forwards to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* cast to the concrete extension-type struct and delegate to the impl */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the nonce getter
 * (from cuda/bindings/_nvml.pyx:11151-11156).
 * Builds a 32-element, 1-byte-per-item Cython `view.array` with
 * allocate_buffer=False, points its data at the struct's nonce field
 * (zero-copy), and returns numpy.asarray(arr) over it.
 * NOTE(review): the returned array aliases memory owned by self._ptr —
 * presumably callers must not outlive the owning object; confirm against
 * the .pyx source. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11154
 *     def nonce(self):
 *         """~_numpy.uint8: (array of length 32)."""
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].nonce))
 *         return _numpy.asarray(arr)
 */
  /* keyword-only vectorcall of view.array(shape=(32,), itemsize=1, format="B",
   * mode="c", allocate_buffer=False) — no buffer is allocated because the
   * data pointer is patched in below */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11154, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[7], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11154, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11154, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11154, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11154, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 11154, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11154, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11155
 *         """~_numpy.uint8: (array of length 32)."""
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].nonce))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* zero-copy: the view aliases the struct's nonce field directly */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).nonce));

  /* "cuda/bindings/_nvml.pyx":11156
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].nonce))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @nonce.setter
 */
  /* look up numpy.asarray at runtime and call it on the view */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11156, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11151
 *         self._ptr[0].cecAttestationReportSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def nonce(self):
 *         """~_numpy.uint8: (array of length 32)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.nonce.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11158
 *         return _numpy.asarray(arr)
 * 
 *     @nonce.setter             # <<<<<<<<<<<<<<
 *     def nonce(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ wrapper for ConfComputeGpuAttestationReport.nonce.
 * Downcasts the untyped self/value pair and forwards to the typed implementation.
 * Returns 0 on success, -1 on error (standard tp_descr_set convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* cast to the concrete extension-type struct and delegate to the impl */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the nonce setter
 * (from cuda/bindings/_nvml.pyx:11158-11164).
 * Raises ValueError when the instance is flagged read-only; otherwise:
 *   1. allocates a 32-byte staging view.array,
 *   2. slice-assigns numpy.asarray(val, dtype=uint8) into it (validating and
 *      converting the input),
 *   3. memcpy's len(val) bytes from the staging buffer into the struct's
 *      nonce field.
 * NOTE(review): the memcpy length is sizeof(unsigned char) * len(val), not a
 * fixed 32 — presumably the preceding slice assignment into the 32-element
 * view fails for mismatched lengths, bounding len(val); confirm in the .pyx
 * source. Returns 0 on success, -1 after setting a Python exception. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11160
 *     @nonce.setter
 *     def nonce(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* read-only guard: mutation is forbidden when _readonly is set */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11161
 *     def nonce(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    /* build and raise ValueError(msg) via the vectorcall fast path */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuAttestationRe};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11161, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11161, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11160
 *     @nonce.setter
 *     def nonce(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":11162
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].nonce)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* staging buffer: this view.array DOES allocate (no allocate_buffer=False
   * here, unlike the getter) */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11162, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11162, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[7], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11162, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11162, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11162, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11162, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11162, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11163
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].nonce)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* convert val to a uint8 numpy array and slice-assign it into the staging
   * buffer (arr[:] = ...), which validates/coerces the input */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11163, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 11163, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11163, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 11163, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11164
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].nonce)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* copy len(val) bytes from the staging buffer into the struct field */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 11164, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).nonce)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":11158
 *         return _numpy.asarray(arr)
 * 
 *     @nonce.setter             # <<<<<<<<<<<<<<
 *     def nonce(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.nonce.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11166
 *         memcpy(<void *>(&(self._ptr[0].nonce)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_report(self):
 *         """~_numpy.uint8: (array of length 8192)."""
*/

/* Python wrapper */
/* Descriptor __get__ wrapper for ConfComputeGpuAttestationReport.attestation_report.
 * Downcasts the untyped self and forwards to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* cast to the concrete extension-type struct and delegate to the impl */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the attestation_report getter
 * (from cuda/bindings/_nvml.pyx:11166-11171).
 * Same pattern as the nonce getter but over the 8192-byte attestationReport
 * field: builds an 8192-element uint8 view.array with allocate_buffer=False,
 * aliases it onto the struct field (zero-copy), and returns
 * numpy.asarray(arr). Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11169
 *     def attestation_report(self):
 *         """~_numpy.uint8: (array of length 8192)."""
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].attestationReport))
 *         return _numpy.asarray(arr)
 */
  /* keyword-only vectorcall of view.array(shape=(8192,), itemsize=1,
   * format="B", mode="c", allocate_buffer=False); shape tuple is the interned
   * __pyx_tuple[8] (8192,) */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11169, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11169, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[8], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11169, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11169, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11169, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11169, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 11169, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11169, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11170
 *         """~_numpy.uint8: (array of length 8192)."""
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].attestationReport))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* zero-copy: the view aliases the struct's attestationReport field directly */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).attestationReport));

  /* "cuda/bindings/_nvml.pyx":11171
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].attestationReport))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @attestation_report.setter
 */
  /* look up numpy.asarray at runtime and call it on the view */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11171, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11171, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11171, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11166
 *         memcpy(<void *>(&(self._ptr[0].nonce)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attestation_report(self):
 *         """~_numpy.uint8: (array of length 8192)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.attestation_report.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11173
 *         return _numpy.asarray(arr)
 * 
 *     @attestation_report.setter             # <<<<<<<<<<<<<<
 *     def attestation_report(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython descriptor entry point for `ConfComputeGpuAttestationReport.attestation_report`'s
 * setter: casts `self` to the extension-type struct and forwards `val` unchanged to the
 * Cython implementation function (`..._2__set__`). Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `attestation_report` setter (generated from _nvml.pyx:11173-11179).
 * Steps: (1) raise ValueError if the instance is read-only; (2) build a temporary
 * cython.view.array of 8192 bytes (format "B", C-contiguous); (3) slice-assign
 * `numpy.asarray(val, dtype=uint8)` into it; (4) memcpy the staged bytes into the
 * fixed-size `attestationReport` field of the wrapped NVML struct.
 * Generated temporaries (__pyx_t_*) follow Cython's usual own-then-clear discipline;
 * all error paths jump to __pyx_L1_error which releases every live temporary. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11175
 *     @attestation_report.setter
 *     def attestation_report(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* Guard: writes are rejected when the wrapper was constructed read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11176
 *     def attestation_report(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuAttestationRe};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11176, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11176, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11175
 *     @attestation_report.setter
 *     def attestation_report(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":11177
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].attestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Allocate the 8192-byte staging buffer via cython.view.array, calling the array
   * type through Cython's vectorcall keyword-builder (shape/itemsize/format/mode). */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11177, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[8], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11177, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11177, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11177, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11177, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11177, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11178
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].attestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* Evaluate numpy.asarray(val, dtype=numpy.uint8) (two module-global lookups:
   * `_numpy` and its `asarray`/`uint8` attributes), then slice-assign into arr[:]. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `asarray` resolved to a bound method, unpack self+function so the
   * vectorcall below can pass the receiver as the first positional arg. */
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11178, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 11178, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11178, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 11178, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11179
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].attestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): the copy length is len(val), not the fixed buffer size 8192.
   * This appears to rely on the slice assignment above having rejected any
   * length-mismatched `val`; confirm in the generating .pyx that oversized
   * inputs cannot reach this memcpy. */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 11179, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).attestationReport)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":11173
 *         return _numpy.asarray(arr)
 * 
 *     @attestation_report.setter             # <<<<<<<<<<<<<<
 *     def attestation_report(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release any temporaries still owned, record traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.attestation_report.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11181
 *         memcpy(<void *>(&(self._ptr[0].attestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cec_attestation_report(self):
 *         """~_numpy.uint8: (array of length 4096)."""
*/

/* Python wrapper */
/* CPython descriptor entry point for the `cec_attestation_report` getter:
 * casts `self` to the extension-type struct and delegates to the
 * implementation (`..._report___get__`). Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cec_attestation_report` getter (generated from _nvml.pyx:11181-11186).
 * Builds a cython.view.array of shape (4096,), format "B", with allocate_buffer=False,
 * then points its `data` member directly at the `cecAttestationReport` field of the
 * wrapped NVML struct, and returns numpy.asarray(arr) — i.e. a view over the struct's
 * memory, not a copy. NOTE(review): the returned ndarray aliases self->_ptr storage;
 * its validity presumably depends on the owning object staying alive — confirm the
 * buffer-export path in the .pyx keeps a reference. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11184
 *     def cec_attestation_report(self):
 *         """~_numpy.uint8: (array of length 4096)."""
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].cecAttestationReport))
 *         return _numpy.asarray(arr)
 */
  /* Construct the view.array with allocate_buffer=False so no backing memory is
   * allocated — the data pointer is patched in below. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11184, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[5], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11184, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11184, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11184, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11184, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 11184, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11184, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11185
 *         """~_numpy.uint8: (array of length 4096)."""
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].cecAttestationReport))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the unallocated view.array directly at the struct field (zero-copy). */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).cecAttestationReport));

  /* "cuda/bindings/_nvml.pyx":11186
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].cecAttestationReport))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @cec_attestation_report.setter
 */
  /* Wrap the view in a NumPy ndarray via numpy.asarray and return it. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11186, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11186, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11186, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11181
 *         memcpy(<void *>(&(self._ptr[0].attestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cec_attestation_report(self):
 *         """~_numpy.uint8: (array of length 4096)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.cec_attestation_report.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11188
 *         return _numpy.asarray(arr)
 * 
 *     @cec_attestation_report.setter             # <<<<<<<<<<<<<<
 *     def cec_attestation_report(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython descriptor entry point for the `cec_attestation_report` setter:
 * casts `self` and forwards `val` to the implementation (`..._2__set__`).
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cec_attestation_report` setter (generated from _nvml.pyx:11188-11194).
 * Mirrors the `attestation_report` setter above but stages through a 4096-byte buffer
 * and targets the `cecAttestationReport` struct field: (1) reject writes when read-only;
 * (2) build a cython.view.array(shape=(4096,), format="B", mode="c"); (3) slice-assign
 * numpy.asarray(val, dtype=uint8); (4) memcpy the staged bytes into the struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11190
 *     @cec_attestation_report.setter
 *     def cec_attestation_report(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* Guard: writes are rejected when the wrapper was constructed read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11191
 *     def cec_attestation_report(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGpuAttestationRe};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11191, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11191, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11190
 *     @cec_attestation_report.setter
 *     def cec_attestation_report(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":11192
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].cecAttestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Allocate the 4096-byte staging buffer via cython.view.array. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11192, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[5], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11192, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11192, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11192, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11192, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11192, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11193
 *             raise ValueError("This ConfComputeGpuAttestationReport instance is read-only")
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].cecAttestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* Evaluate numpy.asarray(val, dtype=numpy.uint8) and slice-assign into arr[:]. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  /* Bound-method unpacking so the vectorcall can pass the receiver positionally. */
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11193, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 11193, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11193, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 11193, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11194
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].cecAttestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* NOTE(review): copy length is len(val), not the fixed 4096; this appears to
   * rely on the slice assignment above rejecting mismatched lengths — confirm
   * in the generating .pyx. */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 11194, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).cecAttestationReport)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":11188
 *         return _numpy.asarray(arr)
 * 
 *     @cec_attestation_report.setter             # <<<<<<<<<<<<<<
 *     def cec_attestation_report(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release owned temporaries, record traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.cec_attestation_report.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11196
 *         memcpy(<void *>(&(self._ptr[0].cecAttestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Fastcall/METH_KEYWORDS entry point for the static method
 * `ConfComputeGpuAttestationReport.from_data(data)`. Parses exactly one
 * positional-or-keyword argument named "data" (keyword parsing only runs when
 * keywords are present), then delegates to the implementation (`..._12from_data`).
 * All parsed values are released on both success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_12from_data, "ConfComputeGpuAttestationReport.from_data(data)\n\nCreate an ConfComputeGpuAttestationReport instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `conf_compute_gpu_attestation_report_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 11196, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: accept at most one positional, then fill the rest from kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11196, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11196, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11196, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11196, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11196, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references collected into values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `ConfComputeGpuAttestationReport.from_data`
 * (generated by Cython 3.2.2 from cuda/bindings/_nvml.pyx:11196).
 * Delegates to the module-level __from_data() helper, passing the user `data`
 * object, the dtype name string, the module-global dtype object, and the
 * wrapping extension type.  Returns a new reference on success, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11203
 *             data (_numpy.ndarray): a single-element array of dtype `conf_compute_gpu_attestation_report_dtype` holding the data.
 *         """
 *         return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Look up the module-global dtype object (new reference in __pyx_t_1). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_conf_compute_gpu_attestation_rep); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* __from_data performs the dtype validation and constructs the wrapper. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_conf_compute_gpu_attestation_rep, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11196
 *         memcpy(<void *>(&(self._ptr[0].cecAttestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11205
 *         return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer.
*/

/* Python wrapper */
/* Fastcall wrapper for the static method `ConfComputeGpuAttestationReport.from_ptr`
 * (generated by Cython 3.2.2 from cuda/bindings/_nvml.pyx:11205).
 * Parses (ptr: intptr_t, readonly: bool = False, owner: object = None) from the
 * positional/keyword arguments, converts them to C values, and forwards to the
 * _14from_ptr implementation.  Returns NULL with an exception set on bad
 * arguments or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_14from_ptr, "ConfComputeGpuAttestationReport.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ConfComputeGpuAttestationReport instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  With the GCC/Clang
     * definition of unlikely(x) as __builtin_expect(!!(x), 0), the macro
     * yields only 0 or 1, so `< 0` could never be true and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * The comparison belongs inside the macro — compare the correctly
     * parenthesized form used in this file's __reduce_cython__ wrapper.
     * NOTE(review): this file is Cython-generated; upstream the fix to the
     * generator (or the .pyx toolchain) or it will be lost on regeneration. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11205, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11205, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11205, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11205, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11205, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11206
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Only `ptr` (index 0) is required; report it if still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11205, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11205, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11205, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11205, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert to C values: ptr -> intptr_t, readonly -> truthiness (default 0). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11206, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11206, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11205, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11205
 *         return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `ConfComputeGpuAttestationReport.from_ptr`
 * (generated by Cython 3.2.2 from cuda/bindings/_nvml.pyx:11205).
 * Behavior (matches the .pyx shown in the interleaved comments below):
 *   - ptr == 0 raises ValueError;
 *   - owner is None: the struct at `ptr` is COPIED into freshly malloc'd
 *     storage that the new object owns (_owned = True, freed elsewhere —
 *     presumably in dealloc; confirm against the type's tp_dealloc);
 *   - otherwise the object ALIASES `ptr` directly and holds a reference to
 *     `owner` to keep the underlying memory alive (_owned = False).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11214
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11215
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11215, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11215, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11214
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)
*/
  }

  /* "cuda/bindings/_nvml.pyx":11216
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
*/
  /* tp_new is called directly (bypassing __init__), so all C fields below
   * must be initialized explicitly before the object is returned. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11216, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11217
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11218
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
*/
    /* Copying branch: allocate owned storage, then memcpy from `ptr`. */
    __pyx_v_obj->_ptr = ((nvmlConfComputeGpuAttestationReport_t *)malloc((sizeof(nvmlConfComputeGpuAttestationReport_t))));

    /* "cuda/bindings/_nvml.pyx":11219
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuAttestationReport_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11220
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             obj._owner = None
*/
      /* MemoryError is resolved through the module globals (it may have been
       * shadowed), then called with the message via vectorcall. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11220, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGpuA};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11220, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11220, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11219
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuAttestationReport_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":11221
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuAttestationReport_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlConfComputeGpuAttestationReport_t))));

    /* "cuda/bindings/_nvml.pyx":11222
 *                 raise MemoryError("Error allocating ConfComputeGpuAttestationReport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11223
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11217
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>malloc(sizeof(nvmlConfComputeGpuAttestationReport_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":11225
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Aliasing branch: no copy; `owner` keeps the memory behind `ptr` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlConfComputeGpuAttestationReport_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11226
 *         else:
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11227
 *             obj._ptr = <nvmlConfComputeGpuAttestationReport_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11228
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11229
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11205
 *         return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* On error, dropping `obj` here triggers its dealloc, which is expected to
   * release the malloc'd storage when _owned is set. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Fastcall wrapper for `ConfComputeGpuAttestationReport.__reduce_cython__`.
 * Takes no arguments: any positional or keyword argument is rejected before
 * delegating to the implementation (which unconditionally raises TypeError,
 * making instances unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_16__reduce_cython__, "ConfComputeGpuAttestationReport.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Reject positionals, then keywords; note the comparison is correctly
   * placed INSIDE unlikely() here, unlike some sibling wrappers. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__reduce_cython__`: unconditionally raises TypeError
 * because the wrapped C pointer (_ptr) cannot be serialized, so instances
 * are deliberately unpicklable. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Fastcall wrapper for `ConfComputeGpuAttestationReport.__setstate_cython__`.
 * Parses the single required `__pyx_state` argument and forwards to the
 * implementation (which unconditionally raises TypeError — instances are
 * deliberately unpicklable). Returns NULL with an exception set on bad
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18__setstate_cython__, "ConfComputeGpuAttestationReport.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  With the GCC/Clang
     * definition of unlikely(x) as __builtin_expect(!!(x), 0), the macro
     * yields only 0 or 1, so `< 0` could never be true and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * The comparison belongs inside the macro (cf. the correct form in the
     * __reduce_cython__ wrapper above).  NOTE(review): this file is
     * Cython-generated; upstream the fix or it will be lost on regeneration. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* `__pyx_state` (index 0) is required; report it if still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setstate_cython__`: unconditionally raises TypeError
 * (the `__pyx_state` argument is ignored) because the wrapped C pointer
 * (_ptr) cannot be restored from a pickled state. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGpuAttestationReport.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11232
 * 
 * 
 * cdef _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t pod = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Build and return a numpy structured dtype mirroring the C layout of
 * nvmlConfComputeGetKeyRotationThresholdInfo_v1_t:
 *   names   = ['version', 'attacker_advantage']
 *   formats = [numpy.uint32, numpy.uint64]
 *   offsets = byte offsets of pod.version / pod.attackerAdvantage within pod
 *   itemsize = sizeof(the struct)
 * Offsets are computed offsetof-style via pointer arithmetic on a local
 * instance. Returns a new reference to the dtype, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets(void) {
  nvmlConfComputeGetKeyRotationThresholdInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlConfComputeGetKeyRotationThresholdInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11233
 * 
 * cdef _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets():
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t pod = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'attacker_advantage'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so pod holds
   * indeterminate bytes. Harmless here — only the ADDRESSES of pod's fields
   * are used below (for offset arithmetic), never their values. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11234
 * cdef _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets():
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t pod = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'attacker_advantage'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 */
  /* Look up numpy.dtype (t5 = the callable). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11235
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t pod = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'attacker_advantage'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 */
  /* t4 = the 4-key spec dict; t6 = the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 11235, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_attacker_advantage);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_attacker_advantage);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_attacker_advantage) != (0)) __PYX_ERR(0, 11235, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11236
 *     return _numpy.dtype({
 *         'names': ['version', 'attacker_advantage'],
 *         'formats': [_numpy.uint32, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': [numpy.uint32, numpy.uint64] — matches the two field types. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11236, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11236, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11238
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.attackerAdvantage)) - (<intptr_t>&pod),
 *         ],
 */
  /* offsetof(struct, version) via address subtraction. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11239
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attackerAdvantage)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t),
 */
  /* offsetof(struct, attackerAdvantage) via address subtraction. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.attackerAdvantage)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11237
 *         'names': ['version', 'attacker_advantage'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.attackerAdvantage)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11237, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11237, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11241
 *             (<intptr_t>&(pod.attackerAdvantage)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the full (padded) C struct size. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall fast path; if dtype turned out
   * to be a bound method, unpack self/function first (CYTHON_UNPACK_METHODS). */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11234, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11232
 * 
 * 
 * cdef _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t pod = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11258
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Python-level wrapper for ConfComputeGetKeyRotationThresholdInfo_v1.__init__.
 * Rejects any positional or keyword arguments (the __init__ takes none),
 * then delegates to the generated impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positionals; the non-ASSUME_SAFE_SIZE branch checks for failure. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Signature is "__init__(self)": any extra positional or keyword is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of ConfComputeGetKeyRotationThresholdInfo_v1.__init__:
 *   - calloc a zeroed nvmlConfComputeGetKeyRotationThresholdInfo_v1_t,
 *   - raise MemoryError if the allocation fails,
 *   - mark the wrapper as owning the buffer (_owner=None, _owned=True,
 *     _readonly=False) so __dealloc__ frees it.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11259
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *)calloc(1, (sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":11260
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11261
 *         self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(msg). The lookup goes through module
     * globals (MemoryError may be shadowed), and the call uses the same
     * vectorcall/unpack-method fast path as other generated call sites. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11261, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGetK};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11261, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11261, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11260
 *     def __init__(self):
 *         self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":11262
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No external owner: this object owns the buffer itself. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11263
 *             raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11264
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11258
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11266
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for __dealloc__: casts self and delegates to the impl.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this function;
 * the line below only compiles because __Pyx_KwValues_VARARGS is a macro that
 * discards its arguments — a generated-code quirk, do not "fix" by hand. */
static void __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Impl of __dealloc__ for ConfComputeGetKeyRotationThresholdInfo_v1.
 *
 * Releases the calloc'd NVML struct, but only when this wrapper owns it
 * (_owned) and the pointer is still live. The pointer field is cleared
 * BEFORE free() so a stale self->_ptr can never be observed pointing at
 * freed memory. Equivalent to the .pyx:
 *
 *     if self._owned and self._ptr != NULL:
 *         ptr = self._ptr
 *         self._ptr = NULL
 *         free(ptr)
 */
static void __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  /* Borrowed structs (_owned == 0) and already-cleared pointers are not ours
   * to free. */
  if (!__pyx_v_self->_owned) {
    return;
  }
  if (__pyx_v_self->_ptr == NULL) {
    return;
  }

  /* Detach first, then free — never leave a dangling self->_ptr. */
  nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *doomed = __pyx_v_self->_ptr;
  __pyx_v_self->_ptr = NULL;
  free(doomed);
}

/* "cuda/bindings/_nvml.pyx":11273
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for __repr__: casts self and delegates to the impl.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; the
 * __Pyx_KwValues_VARARGS macro discards its arguments, so this compiles —
 * generated-code quirk, leave as-is. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of __repr__: builds the f-string
 *   f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>"
 * using Cython's unicode-join fast path: format __name__, compute
 * hex(id(self)), then join five pieces (two interned literal fragments plus
 * the two computed substrings). Returns a new str, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11274
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t2 = str(__name__) via FormatSimple (f-string "{__name__}"). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<" + name + ".ConfComputeGetKeyRotationThresh... object at " + hex
   * + ">"; the length/max-char hints are precomputed by the code generator. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ConfComputeGetKeyRotationThresh;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 53 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11273
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11276
 *         return f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for the `ptr` property getter: casts self and
 * delegates. NOTE(review): `__pyx_args`/`__pyx_nargs` are undeclared; the
 * __Pyx_KwValues_VARARGS macro discards its arguments (generated-code quirk). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11279
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11276
 *         return f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11281
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path used by other cdef code: return the wrapped struct's
 * address as an intptr_t without touching the Python layer. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":11284
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
/* Python-level wrapper for __int__: casts self and delegates to the impl.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are undeclared; the
 * __Pyx_KwValues_VARARGS macro discards its arguments (generated-code quirk). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11285
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11285, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11284
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11287
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 other_
 *         if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* Python-level wrapper for __eq__: casts self and passes `other` through to
 * the impl. NOTE(review): `__pyx_args`/`__pyx_nargs` are undeclared; the
 * __Pyx_KwValues_VARARGS macro discards its arguments (generated-code quirk). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__ for ConfComputeGetKeyRotationThresholdInfo_v1.
 * Returns Py_False if `other` is not an instance of the same extension type;
 * otherwise compares the two wrapped nvmlConfComputeGetKeyRotationThresholdInfo_v1_t
 * structs bytewise with memcmp and returns the resulting Python bool.
 * On internal failure (e.g. the defensive TypeTest), returns NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11289
 *     def __eq__(self, other):
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 other_
 *         if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Exact-type / subtype check; non-instances compare unequal (return False),
   * they never raise TypeError or fall through to NotImplemented. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11290
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 other_
 *         if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11289
 *     def __eq__(self, other):
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 other_
 *         if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":11291
 *         if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) == 0)
 * 
 */
  /* Typed assignment `other_ = other`: the TypeTest mirrors the Cython cast
   * (None would also pass it, but the isinstance check above already
   * excluded None). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1))))) __PYX_ERR(0, 11291, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":11292
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Bytewise equality of the two underlying C structs. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11287
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 other_
 *         if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11294
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
*/

/* Python wrapper */
/* Python-level slot wrapper for ConfComputeGetKeyRotationThresholdInfo_v1.__setitem__.
 * Downcasts `self` and forwards (key, val) unchanged to the implementation
 * function (_10__setitem__). Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation with `self` cast to the C object struct. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__(key, val) for ConfComputeGetKeyRotationThresholdInfo_v1.
 * Two paths:
 *   - key == 0 AND val is a numpy.ndarray: malloc a fresh
 *     nvmlConfComputeGetKeyRotationThresholdInfo_v1_t, raise MemoryError on
 *     allocation failure, memcpy the array's buffer (val.ctypes.data) into it,
 *     clear _owner, set _owned = True, and set _readonly from
 *     `not val.flags.writeable`;
 *   - anything else: setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): on the ndarray path the previous self->_ptr is overwritten
 * without being freed even when _owned was already True — presumably the
 * object lifecycle (dealloc) elsewhere covers the common case, but confirm
 * repeated `obj[0] = arr` assignments don't leak. */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":11295
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 11295, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Resolve `_numpy.ndarray` dynamically from the module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 11295, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11296
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 */
    /* Allocate a fresh struct; see NOTE(review) above about the old pointer. */
    __pyx_v_self->_ptr = ((nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *)malloc((sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11297
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11298
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError(...) via a vectorcall of the class. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11298, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGetK};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11298, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 11298, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11297
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11299
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read the array's raw buffer address (val.ctypes.data, an int) and copy
     * exactly sizeof(struct) bytes from it into the freshly allocated struct.
     * Assumes the array buffer holds at least that many bytes — guaranteed by
     * the dtype contract documented on from_data; not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11299, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11299, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11299, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11300
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The data was copied, so no owner object needs to be kept alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11301
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11302
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 11302, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":11295
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":11304
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat `key` as an attribute name (e.g. obj["version"] = v). */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 11304, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":11294
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11306
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for ConfComputeGetKeyRotationThresholdInfo_v1.version.
 * Downcasts `self` and delegates to the implementation (_7version___get__). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation with `self` cast to the C object struct. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for the `version` property: reads the C struct field
 * self->_ptr[0].version (unsigned int) and boxes it as a Python int.
 * NOTE(review): _ptr is dereferenced without a NULL check — assumes the
 * wrapper is always constructed with a valid pointer; confirm against the
 * type's __cinit__ (outside this view). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11309
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11306
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11311
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for ConfComputeGetKeyRotationThresholdInfo_v1.version.
 * Downcasts `self` and delegates to the implementation (_7version_2__set__).
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation with `self` cast to the C object struct. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for the `version` property.
 * Raises ValueError when the instance is marked read-only (_readonly set by
 * __setitem__/from_data paths); otherwise converts `val` to unsigned int
 * (OverflowError/TypeError propagate from the conversion) and stores it into
 * self->_ptr[0].version. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11313
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11314
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct and raise ValueError(...) via a direct vectorcall of the
     * builtin exception type. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGetKeyRotationTh};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11314, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11314, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11313
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11315
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert first (may raise), then write the struct field. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11315, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11311
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11317
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attacker_advantage(self):
 *         """int: """
*/

/* Python wrapper */
/* Property getter wrapper for
 * ConfComputeGetKeyRotationThresholdInfo_v1.attacker_advantage.
 * Downcasts `self` and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation with `self` cast to the C object struct. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for the `attacker_advantage` property: reads the
 * C struct field self->_ptr[0].attackerAdvantage (unsigned long long) and
 * boxes it as a Python int. Dereferences _ptr without a NULL check, same as
 * the `version` getter. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11320
 *     def attacker_advantage(self):
 *         """int: """
 *         return self._ptr[0].attackerAdvantage             # <<<<<<<<<<<<<<
 * 
 *     @attacker_advantage.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).attackerAdvantage); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11317
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def attacker_advantage(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.attacker_advantage.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11322
 *         return self._ptr[0].attackerAdvantage
 * 
 *     @attacker_advantage.setter             # <<<<<<<<<<<<<<
 *     def attacker_advantage(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for
 * ConfComputeGetKeyRotationThresholdInfo_v1.attacker_advantage.
 * Downcasts `self` and delegates to the implementation below.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation with `self` cast to the C object struct. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for the `attacker_advantage` property.
 * Raises ValueError when the instance is read-only; otherwise converts `val`
 * to unsigned long long (conversion errors propagate) and stores it into
 * self->_ptr[0].attackerAdvantage. Returns 0 on success, -1 with an
 * exception set. Mirrors the `version` setter exactly except for field/type. */
static int __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11324
 *     @attacker_advantage.setter
 *     def attacker_advantage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")
 *         self._ptr[0].attackerAdvantage = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11325
 *     def attacker_advantage(self, val):
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].attackerAdvantage = val
 * 
 */
    /* Construct and raise ValueError(...) via a direct vectorcall of the
     * builtin exception type. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ConfComputeGetKeyRotationTh};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11325, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11325, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11324
 *     @attacker_advantage.setter
 *     def attacker_advantage(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")
 *         self._ptr[0].attackerAdvantage = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11326
 *         if self._readonly:
 *             raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only")
 *         self._ptr[0].attackerAdvantage = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert first (may raise), then write the struct field. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 11326, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).attackerAdvantage = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11322
 *         return self._ptr[0].attackerAdvantage
 * 
 *     @attacker_advantage.setter             # <<<<<<<<<<<<<<
 *     def attacker_advantage(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.attacker_advantage.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11328
 *         self._ptr[0].attackerAdvantage = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Fastcall wrapper for the staticmethod
 * ConfComputeGetKeyRotationThresholdInfo_v1.from_data(data).
 * Parses exactly one positional-or-keyword argument named "data" (raising
 * TypeError via __Pyx_RaiseArgtupleInvalid on arity mismatch), then delegates
 * to the implementation (_12from_data). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_12from_data, "ConfComputeGetKeyRotationThresholdInfo_v1.from_data(data)\n\nCreate an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `conf_compute_get_key_rotation_threshold_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 11328, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword args. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11328, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11328, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11328, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11328, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11328, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected values and re-raise. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ConfComputeGetKeyRotationThresholdInfo_v1.from_data(data)`.
 * Looks up the module-global dtype name, then delegates to the module-level
 * `__from_data` helper with (data, dtype-name, dtype, wrapper-type) — see the
 * quoted .pyx source below. Returns a new reference, or NULL with an exception
 * set on failure. Generated by Cython; do not hand-edit logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11335
 *             data (_numpy.ndarray): a single-element array of dtype `conf_compute_get_key_rotation_threshold_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "conf_compute_get_key_rotation_threshold_info_v1_dtype", conf_compute_get_key_rotation_threshold_info_v1_dtype, ConfComputeGetKeyRotationThresholdInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object stored under the (interned, truncated) global name. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_conf_compute_get_key_rotation_th); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_conf_compute_get_key_rotation_th, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11328
 *         self._ptr[0].attackerAdvantage = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11337
 *         return __from_data(data, "conf_compute_get_key_rotation_threshold_info_v1_dtype", conf_compute_get_key_rotation_threshold_info_v1_dtype, ConfComputeGetKeyRotationThresholdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer.
 */

/* Python wrapper: forward declaration, docstring, and method-table entry for
 * `from_ptr`. The wrapper is registered as METH_FASTCALL|METH_KEYWORDS, so it
 * receives either a C array of arguments (fastcall) or a tuple, selected by
 * CYTHON_METH_FASTCALL at compile time. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_14from_ptr, "ConfComputeGetKeyRotationThresholdInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_14from_ptr};
/* Argument-unpacking wrapper for `from_ptr(ptr, readonly=False, owner=None)`.
 * Decodes up to 3 positional args plus keywords, converts `ptr` to intptr_t and
 * `readonly` to a C int, then calls the `..._14from_ptr` implementation.
 * Returns a new reference or NULL with an exception set.
 *
 * FIX(review): the kwargs-length error check was misparenthesized as
 * `unlikely(__pyx_kwds_len) < 0`, which compares `!!(len) < 0` — always false —
 * so a negative (error) return from __Pyx_NumKwargs_FASTCALL was silently
 * ignored. Corrected to `unlikely(__pyx_kwds_len < 0)`, matching the form this
 * same file uses in the __reduce_cython__ wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11337, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals (fallthrough switch), then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11337, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11337, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11337, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11337, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11338
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* `ptr` (index 0) is the only required argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11337, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11337, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11337, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11337, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11338, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11338, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11337, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11337
 *         return __from_data(data, "conf_compute_get_key_rotation_threshold_info_v1_dtype", conf_compute_get_key_rotation_threshold_info_v1_dtype, ConfComputeGetKeyRotationThresholdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `from_ptr(ptr, readonly, owner)`.
 * Raises ValueError on a null pointer. With owner=None it mallocs a private
 * struct, memcpy's the pointee into it, and marks the object as owning the
 * copy (`_owned = 1`); otherwise it aliases the caller's pointer and keeps a
 * reference to `owner` to pin its lifetime (`_owned = 0`). The `_owned` flag is
 * presumably consulted by dealloc to decide whether to free() — dealloc is not
 * visible in this chunk; confirm there. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11346
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11347
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11347, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11347, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11346
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":11348
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 */
  /* Allocate the wrapper via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11348, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11349
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11350
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 */
    /* No owner: take a private copy of the pointee. */
    __pyx_v_obj->_ptr = ((nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *)malloc((sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11351
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11352
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             obj._owner = None
 */
      /* NOTE(review): MemoryError is looked up as a module-global name here
       * (it may be shadowed), unlike ValueError above which uses the C-level
       * PyExc_ValueError directly — faithful to the generated code. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11352, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ConfComputeGetK};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11352, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11352, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11351
 *         if owner is None:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11353
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11354
 *                 raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11355
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11349
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":11357
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the pointer and keep `owner` alive instead of copying. */
    __pyx_v_obj->_ptr = ((nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11358
 *         else:
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11359
 *             obj._ptr = <nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11360
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11361
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11337
 *         return __from_data(data, "conf_compute_get_key_rotation_threshold_info_v1_dtype", conf_compute_get_key_rotation_threshold_info_v1_dtype, ConfComputeGetKeyRotationThresholdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* On success __pyx_r holds its own reference, so dropping `obj` here is safe. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper for `__reduce_cython__` (pickle support stub): prototype,
 * docstring, method-table entry, and the no-argument wrapper itself. The
 * wrapper rejects any positional or keyword arguments, then delegates to the
 * `..._16__reduce_cython__` implementation, which always raises TypeError. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_16__reduce_cython__, "ConfComputeGetKeyRotationThresholdInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  /* Note: the length-error check here is correctly parenthesized (`len < 0` inside unlikely). */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__reduce_cython__`: unconditionally raises TypeError
 * because the wrapped C struct pointer (`self._ptr`) cannot be pickled.
 * Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

/* Python wrapper declarations for `__setstate_cython__` (pickle support stub):
 * prototype, docstring, and METH_FASTCALL|METH_KEYWORDS method-table entry. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18__setstate_cython__, "ConfComputeGetKeyRotationThresholdInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18__setstate_cython__};
/* Argument-unpacking wrapper for `__setstate_cython__(self, __pyx_state)`:
 * requires exactly one argument, then delegates to the implementation (which
 * always raises TypeError — these objects are not picklable).
 *
 * FIX(review): the kwargs-length error check was misparenthesized as
 * `unlikely(__pyx_kwds_len) < 0` (always false, masking a negative/error
 * return from __Pyx_NumKwargs_FASTCALL). Corrected to
 * `unlikely(__pyx_kwds_len < 0)`, matching the __reduce_cython__ wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setstate_cython__`: unconditionally raises TypeError
 * (the wrapped C struct pointer cannot be restored from pickled state).
 * `self` and `__pyx_state` are intentionally unused; always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11364
 * 
 * 
 * cdef _get_nvlink_supported_bw_modes_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkSupportedBwModes_v1_t pod = nvmlNvlinkSupportedBwModes_v1_t()
 *     return _numpy.dtype({
*/

/* Build and return a numpy structured dtype describing the C layout of
 * nvmlNvlinkSupportedBwModes_v1_t: field names ['version', 'bw_modes',
 * 'total_bw_modes'], formats [uint32, uint8, uint8], byte offsets computed
 * from a stack instance via pointer arithmetic, and itemsize = sizeof(struct).
 * Corresponds to _get_nvlink_supported_bw_modes_v1_dtype_offsets() at
 * cuda/bindings/_nvml.pyx:11364. Returns a new reference, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_supported_bw_modes_v1_dtype_offsets(void) {
  nvmlNvlinkSupportedBwModes_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlNvlinkSupportedBwModes_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_nvlink_supported_bw_modes_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11365
 * 
 * cdef _get_nvlink_supported_bw_modes_v1_dtype_offsets():
 *     cdef nvmlNvlinkSupportedBwModes_v1_t pod = nvmlNvlinkSupportedBwModes_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'bw_modes', 'total_bw_modes'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy. Only the
   * ADDRESSES of pod's fields are used below (for offset computation), so the
   * indeterminate contents do not affect the result — but reading an
   * uninitialized object is technically UB in C++; confirm this matches
   * Cython's intended output for a value-initialized POD temporary
   * (regenerate from the .pyx rather than patching by hand). */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11366
 * cdef _get_nvlink_supported_bw_modes_v1_dtype_offsets():
 *     cdef nvmlNvlinkSupportedBwModes_v1_t pod = nvmlNvlinkSupportedBwModes_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'bw_modes', 'total_bw_modes'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8],
 */
  /* Look up the bound callable _numpy.dtype once; t_5 holds it. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11367
 *     cdef nvmlNvlinkSupportedBwModes_v1_t pod = nvmlNvlinkSupportedBwModes_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'bw_modes', 'total_bw_modes'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 */
  /* t_4 = the 4-entry spec dict; t_6 = the 'names' list of interned strings. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 11367, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bw_modes);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bw_modes);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_bw_modes) != (0)) __PYX_ERR(0, 11367, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_total_bw_modes);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_total_bw_modes);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_total_bw_modes) != (0)) __PYX_ERR(0, 11367, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11367, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11368
 *     return _numpy.dtype({
 *         'names': ['version', 'bw_modes', 'total_bw_modes'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* Fetch numpy.uint32 / numpy.uint8 / numpy.uint8 into t_7/t_8/t_9 and
   * pack them into the 'formats' list (t_6). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11368, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11368, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 11368, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11367, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11370
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bwModes)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.totalBwModes)) - (<intptr_t>&pod),
 */
  /* Byte offset of each field = field address minus struct base address. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11371
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwModes)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.totalBwModes)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bwModes)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11371, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":11372
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwModes)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.totalBwModes)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkSupportedBwModes_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.totalBwModes)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11372, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11369
 *         'names': ['version', 'bw_modes', 'total_bw_modes'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwModes)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list from the three ints computed above. */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 11369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 11369, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11367, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11374
 *             (<intptr_t>&(pod.totalBwModes)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkSupportedBwModes_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' = full struct size so padding between fields is preserved. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlNvlinkSupportedBwModes_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11367, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall numpy.dtype(spec_dict); the bound-method unpacking below is
   * Cython's standard fast-call optimization (t_10 flags self-slot usage). */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11364
 * 
 * 
 * cdef _get_nvlink_supported_bw_modes_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkSupportedBwModes_v1_t pod = nvmlNvlinkSupportedBwModes_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code: error path drops every temporary and sets a traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_nvlink_supported_bw_modes_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11391
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>calloc(1, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for NvlinkSupportedBwModes_v1.__init__.
 * Rejects any positional or keyword arguments, then forwards to the
 * argument-free implementation. Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe fast path skips the error check. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: any positional or keyword arg is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NvlinkSupportedBwModes_v1.__init__ implementation.
 * Zero-allocates one nvmlNvlinkSupportedBwModes_v1_t via calloc and stores it
 * in self._ptr; raises MemoryError on allocation failure. On success marks the
 * object as owning the buffer (_owned = True), with no external owner
 * (_owner = None) and writable (_readonly = False).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11392
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>calloc(1, sizeof(nvmlNvlinkSupportedBwModes_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlNvlinkSupportedBwModes_v1_t *)calloc(1, (sizeof(nvmlNvlinkSupportedBwModes_v1_t))));

  /* "cuda/bindings/_nvml.pyx":11393
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>calloc(1, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11394
 *         self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>calloc(1, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
     * using Cython's vectorcall fast path (see method-unpack block below). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11394, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkSupported};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11394, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11394, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11393
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>calloc(1, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":11395
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (standard Cython
   * incref-new / decref-old attribute-store sequence). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11396
 *             raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11397
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11391
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>calloc(1, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11399
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkSupportedBwModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for NvlinkSupportedBwModes_v1.__dealloc__;
 * simply forwards to the implementation after a downcast.
 * NOTE(review): __Pyx_KwValues_VARARGS names __pyx_args/__pyx_nargs which are
 * not declared here — presumably the macro expands without evaluating its
 * arguments in this configuration; confirm against the Cython utility code. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* NvlinkSupportedBwModes_v1.__dealloc__ implementation.
 * Frees the calloc'd struct only when this object owns it (_owned) and the
 * pointer is non-NULL. _ptr is cleared BEFORE free() so a re-entrant or
 * repeated dealloc cannot double-free. */
static void __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  nvmlNvlinkSupportedBwModes_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlNvlinkSupportedBwModes_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":11401
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkSupportedBwModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: if _owned is false, skip the pointer check. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11402
 *         cdef nvmlNvlinkSupportedBwModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":11403
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Null out the member first so no live reference to freed memory remains. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":11404
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11401
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkSupportedBwModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":11399
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkSupportedBwModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":11406
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkSupportedBwModes_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for NvlinkSupportedBwModes_v1.__repr__; forwards to the
 * implementation after a downcast. (See NOTE in the __dealloc__ wrapper about
 * the argument-less __Pyx_KwValues_VARARGS expansion.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NvlinkSupportedBwModes_v1.__repr__ implementation.
 * Builds the f-string "<{__name__}.NvlinkSupportedBwModes_v1 object at
 * {hex(id(self))}>" by formatting the module name, hex()-ing id(self), and
 * joining five unicode fragments with Cython's optimized join helper.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11407
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.NvlinkSupportedBwModes_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) via FormatSimple (handles non-str module names). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + module + ".NvlinkSupportedBwModes_v1 object at " + hexaddr + ">"
   * with the total length and max char value precomputed for a single alloc. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_NvlinkSupportedBwModes_v1_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11406
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkSupportedBwModes_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11409
 *         return f"<{__name__}.NvlinkSupportedBwModes_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for NvlinkSupportedBwModes_v1.ptr; forwards to the
 * implementation after a downcast. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NvlinkSupportedBwModes_v1.ptr property getter.
 * Returns the raw struct pointer address as a Python int (may be 0 if the
 * pointer was cleared). New reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11412
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11409
 *         return f"<{__name__}.NvlinkSupportedBwModes_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11414
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (cdef _get_ptr, cuda/bindings/_nvml.pyx:11414):
 * return the wrapped nvmlNvlinkSupportedBwModes_v1_t pointer as an
 * integer address. Pure read; no Python objects, no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  return (intptr_t)(__pyx_v_self->_ptr);
}

/* "cuda/bindings/_nvml.pyx":11417
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for NvlinkSupportedBwModes_v1.__int__; forwards to the
 * implementation after a downcast. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NvlinkSupportedBwModes_v1.__int__ implementation.
 * int(obj) yields the raw struct pointer address, same value as the `ptr`
 * property. New reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11418
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11417
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11420
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkSupportedBwModes_v1 other_
 *         if not isinstance(other, NvlinkSupportedBwModes_v1):
*/

/* Python wrapper */
/* __eq__ wrapper for NvlinkSupportedBwModes_v1; forwards self (downcast) and
 * other (untouched PyObject*) to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSupportedBwModes_v1.__eq__:
 * returns False for non-NvlinkSupportedBwModes_v1 operands, otherwise
 * compares the two underlying nvmlNvlinkSupportedBwModes_v1_t structs
 * byte-for-byte with memcmp.
 * NOTE(review): the pyx source returns False (not NotImplemented) for
 * foreign types, and memcmp assumes both _ptr values are non-NULL —
 * presumably guaranteed by the class's initializers; confirm in the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11422
 *     def __eq__(self, other):
 *         cdef NvlinkSupportedBwModes_v1 other_
 *         if not isinstance(other, NvlinkSupportedBwModes_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance() lowered to an exact/subtype check against the cached type. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11423
 *         cdef NvlinkSupportedBwModes_v1 other_
 *         if not isinstance(other, NvlinkSupportedBwModes_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSupportedBwModes_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11422
 *     def __eq__(self, other):
 *         cdef NvlinkSupportedBwModes_v1 other_
 *         if not isinstance(other, NvlinkSupportedBwModes_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":11424
 *         if not isinstance(other, NvlinkSupportedBwModes_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSupportedBwModes_v1_t)) == 0)
 * 
 */
  /* Typed assignment: re-checks the type (allows None) before the cast. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1))))) __PYX_ERR(0, 11424, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":11425
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSupportedBwModes_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Structural equality: raw byte comparison of the two NVML structs. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlNvlinkSupportedBwModes_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11420
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkSupportedBwModes_v1 other_
 *         if not isinstance(other, NvlinkSupportedBwModes_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11427
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSupportedBwModes_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
*/

/* Python wrapper */
/* Cython-generated mp_ass_subscript-style entry point for
 * NvlinkSupportedBwModes_v1.__setitem__; casts the receiver and forwards
 * key/val to the implementation, returning 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSupportedBwModes_v1.__setitem__.
 * obj[0] = ndarray  -> allocate a fresh nvmlNvlinkSupportedBwModes_v1_t,
 * copy the array's buffer into it, take ownership (_owned=True, _owner=None)
 * and mirror the array's writeability into _readonly.
 * Any other key      -> falls through to setattr(self, key, val).
 * NOTE(review): the visible code does not free a previously owned _ptr
 * before overwriting it, and memcpy trusts that val's buffer holds at least
 * sizeof(nvmlNvlinkSupportedBwModes_v1_t) bytes — presumably guaranteed by
 * callers; confirm in the .pyx / calling code. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":11428
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: key == 0 is checked first; the isinstance branch
   * (and its _numpy module lookup) only runs when the key matched. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 11428, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 11428, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11429
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 */
    /* Unconditionally replaces _ptr; the previous pointer is not freed here. */
    __pyx_v_self->_ptr = ((nvmlNvlinkSupportedBwModes_v1_t *)malloc((sizeof(nvmlNvlinkSupportedBwModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11430
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11431
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             self._owner = None
 */
      /* MemoryError is looked up by name and called via Cython's fastcall
       * helper with the cached message string, then raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11431, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkSupported};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11431, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 11431, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11430
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11432
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSupportedBwModes_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is a Python int holding the array's base address;
     * it is converted back to a pointer and the struct is copied wholesale. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11432, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11432, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11432, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlNvlinkSupportedBwModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11433
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No foreign owner: this instance now holds its own copy of the data. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11434
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11435
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Read-only status is inherited from the source ndarray's writeability. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11435, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11435, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 11435, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":11428
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":11437
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback path: treat the key as an attribute name (e.g. obj["version"]
     * semantics are delegated to the property setters via setattr). */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 11437, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":11427
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSupportedBwModes_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11439
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated descriptor __get__ entry point for the `version`
 * property; casts the receiver and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for NvlinkSupportedBwModes_v1.version: reads the `version` field
 * (an unsigned int) out of the wrapped nvmlNvlinkSupportedBwModes_v1_t and
 * boxes it as a Python int.
 * NOTE(review): dereferences self._ptr without a NULL check — presumably
 * the pyx class guarantees _ptr is set before property access; confirm. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11442
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11442, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11439
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11444
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated descriptor __set__ entry point for the `version`
 * property; casts the receiver and forwards the new value. Returns 0 on
 * success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for NvlinkSupportedBwModes_v1.version: raises ValueError when the
 * instance is flagged read-only, otherwise converts the Python value to
 * unsigned int and stores it into the wrapped struct's `version` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11446
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11447
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Builds the ValueError via fastcall with the cached message and raises. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkSupportedBwModes_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11447, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11447, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11446
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11448
 *         if self._readonly:
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion raises OverflowError/TypeError on out-of-range or non-int. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11448, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11444
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11450
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bw_modes(self):
 *         """~_numpy.uint8: (array of length 23)."""
*/

/* Python wrapper */
/* Cython-generated descriptor __get__ entry point for the `bw_modes`
 * property; casts the receiver and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for NvlinkSupportedBwModes_v1.bw_modes: exposes the struct's
 * 23-element bwModes byte array as a numpy array. A Cython memoryview
 * `view.array` is created with allocate_buffer=False and pointed directly
 * at the struct field, then wrapped via numpy.asarray.
 * NOTE(review): this appears to be a zero-copy view into self._ptr's
 * memory — the returned array presumably must not outlive this instance;
 * confirm lifetime handling in the .pyx/callers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11453
 *     def bw_modes(self):
 *         """~_numpy.uint8: (array of length 23)."""
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].bwModes))
 *         return _numpy.asarray(arr)
 */
  /* Keyword-only vectorcall of view.array(shape=(23,), itemsize=1,
   * format="B", mode="c", allocate_buffer=False): no buffer is allocated
   * because `data` is assigned manually below. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11453, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[9], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11453, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11453, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11453, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11453, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 11453, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11453, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11454
 *         """~_numpy.uint8: (array of length 23)."""
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].bwModes))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the view directly at the bwModes field inside the NVML struct. */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).bwModes));

  /* "cuda/bindings/_nvml.pyx":11455
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].bwModes))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @bw_modes.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11455, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11450
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bw_modes(self):
 *         """~_numpy.uint8: (array of length 23)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.bw_modes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11457
 *         return _numpy.asarray(arr)
 * 
 *     @bw_modes.setter             # <<<<<<<<<<<<<<
 *     def bw_modes(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated descriptor __set__ entry point for the `bw_modes`
 * property; casts the receiver and forwards the new value. Returns 0 on
 * success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSupportedBwModes_v1.bw_modes property setter
 * (generated from cuda/bindings/_nvml.pyx:11457-11463).
 * Behavior: raises ValueError on read-only instances; otherwise stages `val`
 * through a temporary 23-element, C-contiguous, unsigned-char (format "B")
 * Cython view.array filled from numpy.asarray(val, dtype=numpy.uint8), then
 * memcpy's the bytes into self._ptr[0].bwModes.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11459
 *     @bw_modes.setter
 *     def bw_modes(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* Guard: writes are rejected on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11460
 *     def bw_modes(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkSupportedBwModes_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11460, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11460, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11459
 *     @bw_modes.setter
 *     def bw_modes(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":11461
 *         if self._readonly:
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].bwModes)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Construct the staging view.array(shape=(23,), itemsize=1, format="B",
   * mode="c") via a keyword-argument vectorcall builder. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[9], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 11461, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 11461, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 11461, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 11461, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11461, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11462
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].bwModes)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* Call numpy.asarray(val, dtype=numpy.uint8) and assign it into arr[:]. */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `asarray` resolved to a bound method, unpack self+function so the
   * vectorcall below can pass self as the first positional argument. */
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11462, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 11462, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11462, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 11462, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":11463
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].bwModes)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): copies sizeof(unsigned char) * len(val) bytes with no
   * visible check that len(val) <= 23 (the staging array's length), so an
   * oversized `val` would overflow bwModes. Confirm the bound is enforced
   * in the .pyx source or by callers. */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 11463, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).bwModes)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":11457
 *         return _numpy.asarray(arr)
 * 
 *     @bw_modes.setter             # <<<<<<<<<<<<<<
 *     def bw_modes(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.bw_modes.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11465
 *         memcpy(<void *>(&(self._ptr[0].bwModes)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def total_bw_modes(self):
 *         """int: """
*/

/* Python wrapper for the NvlinkSupportedBwModes_v1.total_bw_modes property
 * getter. Casts `self` to the extension-type struct and delegates to the
 * __pyx_pf_..._total_bw_modes___get__ implementation; returns a new
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSupportedBwModes_v1.total_bw_modes property
 * getter (generated from cuda/bindings/_nvml.pyx:11466-11468). Converts the
 * unsigned-char field self._ptr[0].totalBwModes to a Python int and returns
 * it; returns NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11468
 *     def total_bw_modes(self):
 *         """int: """
 *         return self._ptr[0].totalBwModes             # <<<<<<<<<<<<<<
 * 
 *     @total_bw_modes.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).totalBwModes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11468, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11465
 *         memcpy(<void *>(&(self._ptr[0].bwModes)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def total_bw_modes(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.total_bw_modes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11470
 *         return self._ptr[0].totalBwModes
 * 
 *     @total_bw_modes.setter             # <<<<<<<<<<<<<<
 *     def total_bw_modes(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the NvlinkSupportedBwModes_v1.total_bw_modes property
 * setter. Casts `self` to the extension-type struct and delegates to the
 * __pyx_pf_..._total_bw_modes_2__set__ implementation; returns 0 on
 * success and -1 (with a Python exception set) on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSupportedBwModes_v1.total_bw_modes property
 * setter (generated from cuda/bindings/_nvml.pyx:11470-11474). Raises
 * ValueError on read-only instances; otherwise converts `val` to an
 * unsigned char (raising on overflow or non-integer input) and stores it in
 * self._ptr[0].totalBwModes. Returns 0 on success, -1 with an exception set
 * on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11472
 *     @total_bw_modes.setter
 *     def total_bw_modes(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         self._ptr[0].totalBwModes = val
 */
  /* Guard: writes are rejected on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11473
 *     def total_bw_modes(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].totalBwModes = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkSupportedBwModes_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11473, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11473, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11472
 *     @total_bw_modes.setter
 *     def total_bw_modes(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         self._ptr[0].totalBwModes = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11474
 *         if self._readonly:
 *             raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only")
 *         self._ptr[0].totalBwModes = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert and store; (unsigned char)-1 doubles as the error sentinel, so
   * PyErr_Occurred() disambiguates a genuine 255 from a failed conversion. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 11474, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).totalBwModes = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11470
 *         return self._ptr[0].totalBwModes
 * 
 *     @total_bw_modes.setter             # <<<<<<<<<<<<<<
 *     def total_bw_modes(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.total_bw_modes.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11476
 *         self._ptr[0].totalBwModes = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper for the NvlinkSupportedBwModes_v1.from_data static method.
 * Parses the single required argument `data` (positional or keyword, via the
 * FASTCALL or tuple calling convention depending on CYTHON_METH_FASTCALL)
 * and forwards it to the __pyx_pf_..._12from_data implementation.
 * Returns a new reference, or NULL with an exception set on failure.
 *
 * Fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * which is always false, because unlikely(x) normalizes its operand to 0/1
 * (__builtin_expect(!!(x), 0)), so a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis now wraps
 * the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_12from_data, "NvlinkSupportedBwModes_v1.from_data(data)\n\nCreate an NvlinkSupportedBwModes_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `nvlink_supported_bw_modes_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: compare the count itself, not the 0/1 result of unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11476, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11476, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11476, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11476, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11476, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11476, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSupportedBwModes_v1.from_data static method
 * (generated from cuda/bindings/_nvml.pyx:11476-11483). Looks up the
 * module-global `nvlink_supported_bw_modes_v1_dtype` and delegates to the
 * shared __from_data helper, passing the dtype name, the dtype object, and
 * the NvlinkSupportedBwModes_v1 type. Returns a new reference, or NULL with
 * an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11483
 *             data (_numpy.ndarray): a single-element array of dtype `nvlink_supported_bw_modes_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "nvlink_supported_bw_modes_v1_dtype", nvlink_supported_bw_modes_v1_dtype, NvlinkSupportedBwModes_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_nvlink_supported_bw_modes_v1_dty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_nvlink_supported_bw_modes_v1_dty, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11476
 *         self._ptr[0].totalBwModes = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11485
 *         return __from_data(data, "nvlink_supported_bw_modes_v1_dtype", nvlink_supported_bw_modes_v1_dtype, NvlinkSupportedBwModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given pointer.
*/

/* Python wrapper for the NvlinkSupportedBwModes_v1.from_ptr static method.
 * Parses `ptr` (required, converted to intptr_t), `readonly` (optional
 * bool, default False) and `owner` (optional object, default None), then
 * forwards them to the __pyx_pf_..._14from_ptr implementation.
 * Returns a new reference, or NULL with an exception set on failure.
 *
 * Fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * which is always false, because unlikely(x) normalizes its operand to 0/1
 * (__builtin_expect(!!(x), 0)), so a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis now wraps
 * the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14from_ptr, "NvlinkSupportedBwModes_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an NvlinkSupportedBwModes_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: compare the count itself, not the 0/1 result of unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11485, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11485, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11485, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11485, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11485, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11486
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11485, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11485, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11485, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11485, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed Python values to their C counterparts. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11486, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11486, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11485, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11485
 *         return __from_data(data, "nvlink_supported_bw_modes_v1_dtype", nvlink_supported_bw_modes_v1_dtype, NvlinkSupportedBwModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvlinkSupportedBwModes_v1.from_ptr(ptr, readonly=False, owner=None)`.
 * Wraps a raw nvmlNvlinkSupportedBwModes_v1_t pointer in a new Python object:
 *   - owner is None  -> the struct at `ptr` is memcpy'd into a freshly malloc'd
 *     buffer and the new object owns it (_owned = True); presumably freed in
 *     the type's dealloc — not visible in this chunk, confirm there.
 *   - owner is given -> the object aliases `ptr` directly (_owned = False) and
 *     keeps a strong reference to `owner` so the backing memory stays alive.
 * Raises ValueError for a null pointer and MemoryError on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11494
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11495
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11495, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11495, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11494
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":11496
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 */
  /* Allocate the instance via tp_new directly, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11496, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11497
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* Owning branch: deep-copy the struct so the object's lifetime is
     * independent of the caller's pointer. */
    /* "cuda/bindings/_nvml.pyx":11498
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 */
    __pyx_v_obj->_ptr = ((nvmlNvlinkSupportedBwModes_v1_t *)malloc((sizeof(nvmlNvlinkSupportedBwModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11499
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11500
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             obj._owner = None
 */
      /* NOTE: MemoryError is looked up as a module global (allows shadowing),
       * not taken from the C-level PyExc_MemoryError. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11500, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkSupported};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11500, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11500, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11499
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11501
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSupportedBwModes_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlNvlinkSupportedBwModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11502
 *                 raise MemoryError("Error allocating NvlinkSupportedBwModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11503
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11497
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSupportedBwModes_v1 obj = NvlinkSupportedBwModes_v1.__new__(NvlinkSupportedBwModes_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>malloc(sizeof(nvmlNvlinkSupportedBwModes_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* Aliasing branch: keep `owner` alive instead of copying the data. */
  /* "cuda/bindings/_nvml.pyx":11505
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlNvlinkSupportedBwModes_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11506
 *         else:
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11507
 *             obj._ptr = <nvmlNvlinkSupportedBwModes_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11508
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11509
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11485
 *         return __from_data(data, "nvlink_supported_bw_modes_v1_dtype", nvlink_supported_bw_modes_v1_dtype, NvlinkSupportedBwModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release temporaries and report NULL; success path releases
   * only the local strong reference to obj (the return value holds its own). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_16__reduce_cython__, "NvlinkSupportedBwModes_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvlinkSupportedBwModes_v1.__reduce_cython__`.
 * Always raises TypeError: instances wrap a raw C pointer (_ptr) and are
 * deliberately not picklable. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  /* Unconditional-raise function: the error path is the only exit. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_18__setstate_cython__, "NvlinkSupportedBwModes_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvlinkSupportedBwModes_v1.__setstate_cython__`.
 * Always raises TypeError: instances wrap a raw C pointer (_ptr) and are
 * deliberately not unpicklable; the state argument is ignored. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  /* Unconditional-raise function: the error path is the only exit. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSupportedBwModes_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11512
 * 
 * 
 * cdef _get_nvlink_get_bw_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkGetBwMode_v1_t pod = nvmlNvlinkGetBwMode_v1_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype describing nvmlNvlinkGetBwMode_v1_t:
 * field names ['version', 'b_is_best', 'bw_mode'], formats
 * [uint32, uint32, uint8], byte offsets computed from a local `pod`
 * instance's field addresses, and itemsize = sizeof(nvmlNvlinkGetBwMode_v1_t).
 * Returns a new reference to the dtype, or NULL with an exception set.
 *
 * Review fix: `__pyx_t_1` was declared uninitialized and then copied into
 * `__pyx_v_pod` — copying an indeterminate automatic object is undefined
 * behavior in C++. The Cython source (`pod = nvmlNvlinkGetBwMode_v1_t()`)
 * implies value-initialization, so `__pyx_t_1` is now value-initialized.
 * Only the addresses of pod's fields are used afterward, so the computed
 * offsets are unchanged. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_get_bw_mode_v1_dtype_offsets(void) {
  nvmlNvlinkGetBwMode_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlNvlinkGetBwMode_v1_t __pyx_t_1 = {};  /* value-initialized; was indeterminate (UB on copy) */
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_nvlink_get_bw_mode_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11513
 * 
 * cdef _get_nvlink_get_bw_mode_v1_dtype_offsets():
 *     cdef nvmlNvlinkGetBwMode_v1_t pod = nvmlNvlinkGetBwMode_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'b_is_best', 'bw_mode'],
 */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11514
 * cdef _get_nvlink_get_bw_mode_v1_dtype_offsets():
 *     cdef nvmlNvlinkGetBwMode_v1_t pod = nvmlNvlinkGetBwMode_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'b_is_best', 'bw_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 */
  /* Resolve _numpy.dtype from module globals. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11515
 *     cdef nvmlNvlinkGetBwMode_v1_t pod = nvmlNvlinkGetBwMode_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'b_is_best', 'bw_mode'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 11515, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_is_best);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_is_best);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_b_is_best) != (0)) __PYX_ERR(0, 11515, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bw_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bw_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_bw_mode) != (0)) __PYX_ERR(0, 11515, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11515, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11516
 *     return _numpy.dtype({
 *         'names': ['version', 'b_is_best', 'bw_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11516, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11516, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 11516, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11515, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Offsets are derived from field addresses within `pod`; only addresses
   * are used here, never the (zeroed) field values. */
  /* "cuda/bindings/_nvml.pyx":11518
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bIsBest)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11519
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bIsBest)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bIsBest)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11519, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":11520
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bIsBest)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkGetBwMode_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bwMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11520, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11517
 *         'names': ['version', 'b_is_best', 'bw_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bIsBest)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11517, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 11517, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 11517, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11515, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11522
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkGetBwMode_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlNvlinkGetBwMode_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11515, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec_dict) via the vectorcall fast path. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11514, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11512
 * 
 * 
 * cdef _get_nvlink_get_bw_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkGetBwMode_v1_t pod = nvmlNvlinkGetBwMode_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_nvlink_get_bw_mode_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11539
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkGetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper */
/* tp_init wrapper for `NvlinkGetBwMode_v1.__init__(self)` (VARARGS calling
 * convention, as required for __init__). Rejects all positional and keyword
 * arguments, then delegates to the implementation. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Signature is (self) only: any extra argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkGetBwMode_v1.__init__:
 * zero-allocates one nvmlNvlinkGetBwMode_v1_t via calloc, raises MemoryError
 * on allocation failure, and marks the instance as the owner of the buffer
 * (_owner = None, _owned = True, _readonly = False). */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11540
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkGetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkGetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 */
  /* calloc (not malloc) so the NVML struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlNvlinkGetBwMode_v1_t *)calloc(1, (sizeof(nvmlNvlinkGetBwMode_v1_t))));

  /* "cuda/bindings/_nvml.pyx":11541
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkGetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11542
 *         self._ptr = <nvmlNvlinkGetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkGetBwMode_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* MemoryError is looked up through module globals/builtins (normal
       Cython name resolution), then called with the message string via the
       vectorcall fast path; the CYTHON_UNPACK_METHODS branch handles the
       (unlikely) case where the looked-up object is a bound method. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11542, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkGetBwMode};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11542, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11542, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11541
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkGetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":11543
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the existing _owner reference; presumably tp_new initialized
     _owner to None, so the unconditional DECREF is safe — standard Cython
     attribute-store sequence. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11544
 *             raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11545
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11539
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkGetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11547
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkGetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
/* CPython tp_dealloc-path wrapper for NvlinkGetBwMode_v1.__dealloc__.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here;
 * __Pyx_KwValues_VARARGS is presumably a macro that discards its arguments
 * in this configuration — confirm against the Cython utility-code macros. */
static void __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of NvlinkGetBwMode_v1.__dealloc__:
 * frees the wrapped nvmlNvlinkGetBwMode_v1_t only when this instance owns it
 * (_owned) and the pointer is non-NULL. The pointer is copied to a local and
 * nulled on the instance before free(), so a stale _ptr can never be observed
 * after release. Non-owned (view) pointers are left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  nvmlNvlinkGetBwMode_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlNvlinkGetBwMode_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":11549
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkGetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11550
 *         cdef nvmlNvlinkGetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":11551
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the field before freeing to guard against double-free. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":11552
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11549
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkGetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":11547
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkGetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":11554
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkGetBwMode_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
/* CPython tp_repr wrapper: casts self to the extension-type struct and
 * forwards to the implementation. (__pyx_args/__pyx_nargs in the macro call
 * are presumably discarded by __Pyx_KwValues_VARARGS — same pattern as the
 * other no-argument wrappers in this file.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkGetBwMode_v1.__repr__:
 * builds the f-string "<{__name__}.NvlinkGetBwMode_v1 object at {hex(id(self))}>"
 * by formatting __name__, computing hex(id(self)), then joining five unicode
 * pieces in one pass with a precomputed length and max-char estimate. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11555
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.NvlinkGetBwMode_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__} -> str-formatted module name */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))} -> id() then hex() then str() */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Literal/computed pieces in display order: "<", name, " .NvlinkGetBwMode_v1
     object at ", hex-id, ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_NvlinkGetBwMode_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11554
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkGetBwMode_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11557
 *         return f"<{__name__}.NvlinkGetBwMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Getter wrapper for the `ptr` property: forwards to the implementation,
 * which returns the wrapped struct's address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter:
 * converts the raw nvmlNvlinkGetBwMode_v1_t* address to a Python int.
 * Uses PyLong_FromSsize_t — presumably intptr_t and Py_ssize_t share a width
 * on the supported platforms; confirm if porting to an exotic target. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11560
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11557
 *         return f"<{__name__}.NvlinkGetBwMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11562
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* "cuda/bindings/_nvml.pyx":11562
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

/* C-level accessor for the wrapped struct's address as an integer.
 * Touches no Python objects, so no refcounting or error handling is
 * required; the goto/label scaffold of the generated original is
 * unnecessary for this single-expression body. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":11565
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
/* CPython nb_int wrapper: forwards to the implementation, which returns the
 * wrapped struct's address as a Python int (same value as the `ptr` property). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkGetBwMode_v1.__int__:
 * identical computation to the `ptr` property getter — the raw pointer value
 * converted to a Python int via PyLong_FromSsize_t. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11566
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11565
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11568
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkGetBwMode_v1 other_
 *         if not isinstance(other, NvlinkGetBwMode_v1):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* CPython __eq__ wrapper: forwards both operands to the implementation,
 * which compares the underlying struct contents bytewise. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkGetBwMode_v1.__eq__:
 * returns False for non-NvlinkGetBwMode_v1 operands, otherwise compares the
 * two wrapped structs bytewise with memcmp.
 * NOTE(review): the generated cast (`other_ = other`) also accepts None, and
 * neither side's _ptr is checked for NULL before memcmp — a NULL _ptr here
 * would be undefined behavior. Presumably unreachable in practice because
 * __init__/__setitem__ always populate _ptr; confirm against the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11570
 *     def __eq__(self, other):
 *         cdef NvlinkGetBwMode_v1 other_
 *         if not isinstance(other, NvlinkGetBwMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11571
 *         cdef NvlinkGetBwMode_v1 other_
 *         if not isinstance(other, NvlinkGetBwMode_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkGetBwMode_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11570
 *     def __eq__(self, other):
 *         cdef NvlinkGetBwMode_v1 other_
 *         if not isinstance(other, NvlinkGetBwMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":11572
 *         if not isinstance(other, NvlinkGetBwMode_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkGetBwMode_v1_t)) == 0)
 * 
 */
  /* Type-checked downcast; the TypeTest branch tolerates None by design of
     Cython's cdef-class assignment semantics. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1))))) __PYX_ERR(0, 11572, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":11573
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkGetBwMode_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlNvlinkGetBwMode_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11568
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkGetBwMode_v1 other_
 *         if not isinstance(other, NvlinkGetBwMode_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11575
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkGetBwMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
/* CPython mp_ass_subscript wrapper: forwards self/key/val to the
 * implementation. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkGetBwMode_v1.__setitem__:
 * - obj[0] = <numpy.ndarray>: allocate a fresh nvmlNvlinkGetBwMode_v1_t,
 *   copy sizeof(struct) bytes from the array's data pointer into it, take
 *   ownership, and inherit read-only-ness from the array's writeable flag.
 * - any other key: fall back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * FIX: the original overwrote self->_ptr with the new malloc result without
 * releasing a buffer the object already owned (e.g. the one allocated in
 * __init__), leaking one struct per assignment. We now free the owned buffer
 * first, using the same ownership check as __dealloc__. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":11576
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only the fast int-compare can fail (-1). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 11576, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 11576, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11577
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 */
    /* FIX: release any buffer this object already owns before replacing it;
       mirrors the `_owned && _ptr != NULL` check in __dealloc__. Without
       this, the calloc from __init__ (or a previous __setitem__ malloc)
       leaks on every assignment. Non-owned (view) pointers are dropped
       without freeing, as before. */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlNvlinkGetBwMode_v1_t *)malloc((sizeof(nvmlNvlinkGetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11578
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkGetBwMode_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11579
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             self._owner = None
 */
      /* Resolve MemoryError via module globals and raise it with the
         message; same vectorcall pattern as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11579, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkGetBwMode};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11579, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 11579, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11578
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkGetBwMode_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11580
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkGetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy struct-sized payload from the ndarray's data pointer. NOTE: the
       array's size is not validated here; presumably callers pass arrays
       produced from the matching dtype — confirm against the .pyx callers. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11580, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11580, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11580, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlNvlinkGetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11581
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11582
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11583
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate numpy's writeable flag as our read-only marker. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11583, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11583, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 11583, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":11576
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":11585
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 11585, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":11575
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkGetBwMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11587
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level getter slot for the `version` property of
 * NvlinkGetBwMode_v1 (generated from cuda/bindings/_nvml.pyx:11587).
 * Casts `self` to the extension-type struct and delegates to the
 * __pyx_pf_ implementation; returns a new reference or NULL on error.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here — the
 * __Pyx_KwValues_VARARGS macro presumably discards them; confirm against
 * the macro definition earlier in this generated file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter on NvlinkGetBwMode_v1
 * (generated from cuda/bindings/_nvml.pyx:11590): boxes the C field
 * `self._ptr[0].version` (unsigned int) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11590
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int struct field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11587
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11592
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter slot for the `version` property of
 * NvlinkGetBwMode_v1 (generated from cuda/bindings/_nvml.pyx:11592).
 * Casts `self` and delegates to the __pyx_pf_ implementation; returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter on NvlinkGetBwMode_v1
 * (generated from cuda/bindings/_nvml.pyx:11593-11596).  Raises ValueError
 * when the instance is flagged read-only; otherwise converts `val` to a C
 * `unsigned int` and stores it into `self._ptr[0].version`.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11594
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11595
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct ValueError(...) via the vectorcall fast-call helper, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkGetBwMode_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11595, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11595, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11594
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11596
 *         if self._readonly:
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int (raises OverflowError/TypeError on bad input). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11596, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11592
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11598
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_is_best(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level getter slot for the `b_is_best` property of
 * NvlinkGetBwMode_v1 (generated from cuda/bindings/_nvml.pyx:11598).
 * Casts `self` and delegates to the __pyx_pf_ implementation; returns a
 * new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `b_is_best` property getter on NvlinkGetBwMode_v1
 * (generated from cuda/bindings/_nvml.pyx:11601): boxes the C field
 * `self._ptr[0].bIsBest` (unsigned int) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11601
 *     def b_is_best(self):
 *         """int: """
 *         return self._ptr[0].bIsBest             # <<<<<<<<<<<<<<
 * 
 *     @b_is_best.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int struct field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bIsBest); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11601, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11598
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_is_best(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.b_is_best.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11603
 *         return self._ptr[0].bIsBest
 * 
 *     @b_is_best.setter             # <<<<<<<<<<<<<<
 *     def b_is_best(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter slot for the `b_is_best` property of
 * NvlinkGetBwMode_v1 (generated from cuda/bindings/_nvml.pyx:11603).
 * Casts `self` and delegates to the __pyx_pf_ implementation; returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `b_is_best` property setter on NvlinkGetBwMode_v1
 * (generated from cuda/bindings/_nvml.pyx:11604-11607).  Raises ValueError
 * when the instance is flagged read-only; otherwise converts `val` to a C
 * `unsigned int` and stores it into `self._ptr[0].bIsBest`.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11605
 *     @b_is_best.setter
 *     def b_is_best(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].bIsBest = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11606
 *     def b_is_best(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bIsBest = val
 * 
 */
    /* Construct ValueError(...) via the vectorcall fast-call helper, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkGetBwMode_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11606, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11606, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11605
 *     @b_is_best.setter
 *     def b_is_best(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].bIsBest = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11607
 *         if self._readonly:
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].bIsBest = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int (raises OverflowError/TypeError on bad input). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11607, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bIsBest = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11603
 *         return self._ptr[0].bIsBest
 * 
 *     @b_is_best.setter             # <<<<<<<<<<<<<<
 *     def b_is_best(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.b_is_best.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11609
 *         self._ptr[0].bIsBest = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bw_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level getter slot for the `bw_mode` property of
 * NvlinkGetBwMode_v1 (generated from cuda/bindings/_nvml.pyx:11609).
 * Casts `self` and delegates to the __pyx_pf_ implementation; returns a
 * new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `bw_mode` property getter on NvlinkGetBwMode_v1
 * (generated from cuda/bindings/_nvml.pyx:11612): boxes the C field
 * `self._ptr[0].bwMode` (unsigned char — note the narrower type than the
 * other fields) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11612
 *     def bw_mode(self):
 *         """int: """
 *         return self._ptr[0].bwMode             # <<<<<<<<<<<<<<
 * 
 *     @bw_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned char struct field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).bwMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11609
 *         self._ptr[0].bIsBest = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bw_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.bw_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11614
 *         return self._ptr[0].bwMode
 * 
 *     @bw_mode.setter             # <<<<<<<<<<<<<<
 *     def bw_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter slot for the `bw_mode` property of
 * NvlinkGetBwMode_v1 (generated from cuda/bindings/_nvml.pyx:11614).
 * Casts `self` and delegates to the __pyx_pf_ implementation; returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `bw_mode` property setter on NvlinkGetBwMode_v1
 * (generated from cuda/bindings/_nvml.pyx:11615-11618).  Raises ValueError
 * when the instance is flagged read-only; otherwise converts `val` to a C
 * `unsigned char` and stores it into `self._ptr[0].bwMode`.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11616
 *     @bw_mode.setter
 *     def bw_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].bwMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11617
 *     def bw_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bwMode = val
 * 
 */
    /* Construct ValueError(...) via the vectorcall fast-call helper, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkGetBwMode_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11617, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11617, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11616
 *     @bw_mode.setter
 *     def bw_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].bwMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11618
 *         if self._readonly:
 *             raise ValueError("This NvlinkGetBwMode_v1 instance is read-only")
 *         self._ptr[0].bwMode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox `val` to unsigned char (raises OverflowError/TypeError on bad input). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 11618, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bwMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11614
 *         return self._ptr[0].bwMode
 * 
 *     @bw_mode.setter             # <<<<<<<<<<<<<<
 *     def bw_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.bw_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11620
 *         self._ptr[0].bwMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* CPython entry point for the static method `NvlinkGetBwMode_v1.from_data`
 * (generated from cuda/bindings/_nvml.pyx:11620).  Parses exactly one
 * positional-or-keyword argument `data` under the METH_FASTCALL calling
 * convention when available (tuple+dict otherwise), then delegates to the
 * __pyx_pf_ implementation.  Returns a new reference, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_12from_data, "NvlinkGetBwMode_v1.from_data(data)\n\nCreate an NvlinkGetBwMode_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `nvlink_get_bw_mode_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  Under GCC/Clang
     * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which yields
     * only 0 or 1, so the misplaced parenthesis made this error check
     * unreachable and a negative result from __Pyx_NumKwargs_FASTCALL
     * would go undetected.  The whole comparison must be the macro arg. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11620, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11620, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11620, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11620, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11620, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11620, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `NvlinkGetBwMode_v1.from_data`
 * (generated from cuda/bindings/_nvml.pyx:11627): looks up the module-level
 * `nvlink_get_bw_mode_v1_dtype` global and forwards to the shared
 * __from_data helper together with the NvlinkGetBwMode_v1 type object.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11627
 *             data (_numpy.ndarray): a single-element array of dtype `nvlink_get_bw_mode_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "nvlink_get_bw_mode_v1_dtype", nvlink_get_bw_mode_v1_dtype, NvlinkGetBwMode_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module-global dtype object (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_nvlink_get_bw_mode_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_nvlink_get_bw_mode_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11620
 *         self._ptr[0].bwMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11629
 *         return __from_data(data, "nvlink_get_bw_mode_v1_dtype", nvlink_get_bw_mode_v1_dtype, NvlinkGetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython entry point for the static method `NvlinkGetBwMode_v1.from_ptr`
 * (generated from cuda/bindings/_nvml.pyx:11629-11630).  Parses up to three
 * arguments — required `ptr` (intptr_t), optional `readonly` (bool, default
 * False) and `owner` (object, default None) — then delegates to the
 * __pyx_pf_ implementation.  Returns a new reference, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_14from_ptr, "NvlinkGetBwMode_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an NvlinkGetBwMode_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  Under GCC/Clang
     * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which yields
     * only 0 or 1, so the misplaced parenthesis made this error check
     * unreachable and a negative result from __Pyx_NumKwargs_FASTCALL
     * would go undetected.  The whole comparison must be the macro arg. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11629, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11629, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11630
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11629, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11629, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts `ptr` with PyLong_AsSsize_t into an intptr_t —
     * presumably Py_ssize_t and intptr_t share a representation on the
     * supported platforms; confirm for any new target. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11630, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11630, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11629, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11629
 *         return __from_data(data, "nvlink_get_bw_mode_v1_dtype", nvlink_get_bw_mode_v1_dtype, NvlinkGetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated implementation of the Python static method
 * NvlinkGetBwMode_v1.from_ptr(ptr, readonly=False, owner=None).
 *
 * Wraps the raw pointer `ptr` (an nvmlNvlinkGetBwMode_v1_t *) in a new
 * NvlinkGetBwMode_v1 extension object:
 *   - owner is None  -> the struct at `ptr` is copied into freshly
 *     malloc'd storage that the new object owns (_owned = 1), so the
 *     caller's memory may be released afterwards;
 *   - owner is not None -> the object aliases the caller's memory
 *     (_owned = 0) and holds a reference to `owner` to keep it alive.
 * Raises ValueError for a null pointer and MemoryError on allocation
 * failure. Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11638
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)
 */
  /* Guard: refuse to wrap a null pointer. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11639
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11639, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11639, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11638
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":11640
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 */
  /* Allocate the extension object via tp_new (bypasses __init__, which
   * would calloc a buffer we are about to overwrite anyway). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11640, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11641
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11642
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 */
    /* No-owner path: take a private copy of the struct. */
    __pyx_v_obj->_ptr = ((nvmlNvlinkGetBwMode_v1_t *)malloc((sizeof(nvmlNvlinkGetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11643
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkGetBwMode_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11644
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11644, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkGetBwMode};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11644, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11644, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11643
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkGetBwMode_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11645
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkGetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlNvlinkGetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11646
 *                 raise MemoryError("Error allocating NvlinkGetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11647
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11641
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkGetBwMode_v1 obj = NvlinkGetBwMode_v1.__new__(NvlinkGetBwMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkGetBwMode_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":11649
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner path: alias the caller's memory and pin `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlNvlinkGetBwMode_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11650
 *         else:
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11651
 *             obj._ptr = <nvmlNvlinkGetBwMode_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11652
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11653
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11629
 *         return __from_data(data, "nvlink_get_bw_mode_v1_dtype", nvlink_get_bw_mode_v1_dtype, NvlinkGetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop any live temporaries and record the traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * Cython-generated Python wrapper for NvlinkGetBwMode_v1.__reduce_cython__.
 * Validates that no positional or keyword arguments were passed (beyond
 * self), then delegates to the implementation function, which always
 * raises TypeError because pointer-wrapping objects cannot be pickled.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_16__reduce_cython__, "NvlinkGetBwMode_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the non-fastcall ABI, recover the argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments at all. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvlinkGetBwMode_v1.__reduce_cython__: unconditionally
 * raises TypeError because the wrapped C pointer (self._ptr) cannot be
 * converted to a Python object for pickling. Always returns NULL with the
 * exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * Cython-generated Python wrapper for NvlinkGetBwMode_v1.__setstate_cython__.
 * Unpacks the single required argument `__pyx_state` (positional or
 * keyword), then delegates to the implementation, which always raises
 * TypeError (pointer-wrapping objects do not support pickling).
 *
 * Fix (review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`, which can never be true because
 * unlikely() normalizes its operand to 0/1 — so a negative (error) return
 * from __Pyx_NumKwargs_FASTCALL went undetected. Restored the form
 * `unlikely(__pyx_kwds_len < 0)` used by every other wrapper in this file,
 * and restored the `unlikely()` branch hint on the __pyx_kwds test for
 * consistency with those wrappers.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_18__setstate_cython__, "NvlinkGetBwMode_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the non-fastcall ABI, recover the argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: was `unlikely(__pyx_kwds_len) < 0`, which is always false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge
       * keywords and verify every required slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvlinkGetBwMode_v1.__setstate_cython__: the state
 * argument is ignored and a TypeError is raised unconditionally, because
 * the wrapped C pointer (self._ptr) cannot be restored from pickled data.
 * Always returns NULL with the exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkGetBwMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11656
 * 
 * 
 * cdef _get_nvlink_set_bw_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkSetBwMode_v1_t pod = nvmlNvlinkSetBwMode_v1_t()
 *     return _numpy.dtype({
*/

/*
 * Cython-generated helper: builds the NumPy structured dtype describing
 * nvmlNvlinkSetBwMode_v1_t. Field offsets are computed from a local stack
 * instance of the C struct, and the dtype is constructed from the dict
 * form {'names', 'formats', 'offsets', 'itemsize'} so the layout exactly
 * mirrors the C struct (including any padding). Returns a new reference
 * to the dtype object, or NULL with an exception set.
 *
 * Fix (review): the temporary `__pyx_t_1` was read uninitialized and
 * copied into `pod`. Only the *addresses* of pod's fields are used below,
 * so the values never mattered, but copying an indeterminate object is
 * undefined behavior; restored the value-initialization Cython emits for
 * `nvmlNvlinkSetBwMode_v1_t()` in C++ mode.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_set_bw_mode_v1_dtype_offsets(void) {
  nvmlNvlinkSetBwMode_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlNvlinkSetBwMode_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_nvlink_set_bw_mode_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11657
 * 
 * cdef _get_nvlink_set_bw_mode_v1_dtype_offsets():
 *     cdef nvmlNvlinkSetBwMode_v1_t pod = nvmlNvlinkSetBwMode_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'b_set_best', 'bw_mode'],
 */
  __pyx_t_1 = nvmlNvlinkSetBwMode_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11658
 * cdef _get_nvlink_set_bw_mode_v1_dtype_offsets():
 *     cdef nvmlNvlinkSetBwMode_v1_t pod = nvmlNvlinkSetBwMode_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'b_set_best', 'bw_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype once; the call happens at the bottom. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11659
 *     cdef nvmlNvlinkSetBwMode_v1_t pod = nvmlNvlinkSetBwMode_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'b_set_best', 'bw_mode'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 11659, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_set_best);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_set_best);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_b_set_best) != (0)) __PYX_ERR(0, 11659, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bw_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bw_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_bw_mode) != (0)) __PYX_ERR(0, 11659, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11659, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11660
 *     return _numpy.dtype({
 *         'names': ['version', 'b_set_best', 'bw_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11660, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11660, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 11660, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11659, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11662
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bSetBest)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),
 */
  /* Field offsets: byte distance from the start of the struct. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11662, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11663
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bSetBest)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bSetBest)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":11664
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bSetBest)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkSetBwMode_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bwMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11661
 *         'names': ['version', 'b_set_best', 'bw_mode'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bSetBest)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11661, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 11661, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 11661, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11659, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11666
 *             (<intptr_t>&(pod.bwMode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkSetBwMode_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlNvlinkSetBwMode_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11659, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec_dict) via the vectorcall fast path. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11658, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11656
 * 
 * 
 * cdef _get_nvlink_set_bw_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkSetBwMode_v1_t pod = nvmlNvlinkSetBwMode_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_nvlink_set_bw_mode_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11683
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkSetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * Cython-generated Python wrapper for NvlinkSetBwMode_v1.__init__.
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation, which calloc's the underlying nvmlNvlinkSetBwMode_v1_t.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSetBwMode_v1.__init__ (from _nvml.pyx:11683).
 * Zero-allocates one nvmlNvlinkSetBwMode_v1_t via calloc, raising MemoryError
 * if the allocation fails, then marks the instance as owning the buffer:
 * _owner = None, _owned = True, _readonly = False.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11684
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkSetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkSetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkSetBwMode_v1")
*/
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlNvlinkSetBwMode_v1_t *)calloc(1, (sizeof(nvmlNvlinkSetBwMode_v1_t))));

  /* "cuda/bindings/_nvml.pyx":11685
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkSetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11686
 *         self._ptr = <nvmlNvlinkSetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkSetBwMode_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating NvlinkSetBwMode_v1").
     * MemoryError is looked up as a module global (standard Cython codegen),
     * and called through the generic vectorcall fast path below. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11686, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkSetBwMode};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11686, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11686, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11685
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkSetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":11687
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* self._owner = None — no external object keeps this buffer alive. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11688
 *             raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  /* _owned = True: __dealloc__ will free() this buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11689
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11683
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkSetBwMode_v1_t *>calloc(1, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11691
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkSetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* CPython-visible wrapper for NvlinkSetBwMode_v1.__dealloc__ (called from
 * tp_dealloc). Simply forwards to the implementation; cannot fail.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
 * __Pyx_KwValues_VARARGS is presumably a macro that discards its arguments
 * in this configuration (otherwise this would not compile) — confirm against
 * the macro definition earlier in the file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of NvlinkSetBwMode_v1.__dealloc__ (from _nvml.pyx:11691).
 * Frees the wrapped nvmlNvlinkSetBwMode_v1_t only when this instance owns it
 * (_owned) and the pointer is non-NULL. The pointer is copied to a local and
 * self._ptr is cleared BEFORE free() so the object never holds a dangling
 * pointer, even transiently. */
static void __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  nvmlNvlinkSetBwMode_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlNvlinkSetBwMode_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":11693
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkSetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit `and`: only test the pointer when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11694
 *         cdef nvmlNvlinkSetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":11695
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    /* Clear before freeing so no code path ever sees a freed _ptr. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":11696
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11693
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkSetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":11691
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkSetBwMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":11698
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkSetBwMode_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython-visible wrapper for NvlinkSetBwMode_v1.__repr__ (tp_repr slot).
 * Casts self to the extension-type struct and forwards to the implementation.
 * Returns a new unicode reference, or NULL with an exception set.
 * NOTE(review): relies on __Pyx_KwValues_VARARGS being an argument-discarding
 * macro, since __pyx_args/__pyx_nargs are not in scope here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSetBwMode_v1.__repr__ (from _nvml.pyx:11698).
 * Builds the f-string f"<{__name__}.NvlinkSetBwMode_v1 object at {hex(id(self))}>"
 * by formatting the module __name__, computing hex(id(self)), and joining the
 * five fragments (two formatted values + three literal pieces) in one pass. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11699
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.NvlinkSetBwMode_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* Format the module-level __name__ with str() semantics (empty format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) — id() then hex(), then ensure a unicode object. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".NvlinkSetBwMode_v1 object at " + hex + ">".
   * The length/max-char hints let the join pre-size the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_NvlinkSetBwMode_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11698
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkSetBwMode_v1 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11701
 *         return f"<{__name__}.NvlinkSetBwMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* CPython-visible getter wrapper for the `ptr` property.
 * Forwards to the implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (from _nvml.pyx:11701).
 * Returns the raw address of the wrapped struct as a Python int
 * (intptr_t converted via PyLong_FromSsize_t). May be 0 if _ptr is NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11704
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  /* PyLong_FromSsize_t is valid here because Cython maps intptr_t to Py_ssize_t width. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11701
 *         return f"<{__name__}.NvlinkSetBwMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11706
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) fast path equivalent of the `ptr` property: returns the raw
 * struct address as intptr_t without creating a Python int. Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":11707
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11706
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11709
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython-visible wrapper for NvlinkSetBwMode_v1.__int__ (nb_int slot).
 * Forwards to the implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSetBwMode_v1.__int__ (from _nvml.pyx:11709).
 * Identical contract to the `ptr` property getter: returns the raw address
 * of the wrapped struct as a Python int (so int(obj) == obj.ptr). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11710
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11709
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11712
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkSetBwMode_v1 other_
 *         if not isinstance(other, NvlinkSetBwMode_v1):
*/

/* Python wrapper */
/* CPython-visible wrapper for NvlinkSetBwMode_v1.__eq__.
 * Forwards both operands to the implementation; returns a new reference
 * (Py_True/Py_False) or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSetBwMode_v1.__eq__ (from _nvml.pyx:11712).
 * Returns False for any non-NvlinkSetBwMode_v1 operand; otherwise compares
 * the two wrapped structs byte-for-byte with memcmp.
 * NOTE(review): the .pyx source assumes both _ptr values are non-NULL when
 * memcmp runs — passing a NULL _ptr here would be undefined behavior; any
 * guard belongs in the .pyx source, not this generated file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11714
 *     def __eq__(self, other):
 *         cdef NvlinkSetBwMode_v1 other_
 *         if not isinstance(other, NvlinkSetBwMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11715
 *         cdef NvlinkSetBwMode_v1 other_
 *         if not isinstance(other, NvlinkSetBwMode_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSetBwMode_v1_t)) == 0)
*/
    /* Type mismatch: return False rather than NotImplemented. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11714
 *     def __eq__(self, other):
 *         cdef NvlinkSetBwMode_v1 other_
 *         if not isinstance(other, NvlinkSetBwMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":11716
 *         if not isinstance(other, NvlinkSetBwMode_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSetBwMode_v1_t)) == 0)
 * 
*/
  /* Cast to the typed cdef variable; the TypeTest re-checks (None is allowed by it,
   * but the isinstance branch above already filtered non-instances). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1))))) __PYX_ERR(0, 11716, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":11717
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSetBwMode_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  /* Structural equality: raw byte comparison of the two structs. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlNvlinkSetBwMode_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11712
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkSetBwMode_v1 other_
 *         if not isinstance(other, NvlinkSetBwMode_v1):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11719
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSetBwMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
*/

/* Python wrapper */
/* CPython-visible wrapper for NvlinkSetBwMode_v1.__setitem__ (mp_ass_subscript).
 * Forwards key and value to the implementation; returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSetBwMode_v1.__setitem__ (from _nvml.pyx:11719).
 * Two behaviors:
 *   obj[0] = ndarray  -> mallocs a fresh struct, memcpy's sizeof(struct) bytes
 *                        from the array's data pointer, takes ownership, and
 *                        sets _readonly from `not val.flags.writeable`;
 *   obj[key] = val    -> any other key falls through to setattr(self, key, val).
 * NOTE(review): the ndarray branch overwrites self._ptr without freeing the
 * previous buffer — if the instance already owned one (as after __init__),
 * that buffer leaks. It also does not validate the array's size before the
 * fixed-size memcpy. Both issues originate in the .pyx source; fix there and
 * regenerate rather than patching this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":11720
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit `and`: test key == 0 first, then the ndarray isinstance. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 11720, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 11720, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11721
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
*/
    /* NOTE(review): previous self._ptr is not freed here (see header note). */
    __pyx_v_self->_ptr = ((nvmlNvlinkSetBwMode_v1_t *)malloc((sizeof(nvmlNvlinkSetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11722
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSetBwMode_v1_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11723
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             self._owner = None
*/
      /* Build and raise MemoryError("Error allocating NvlinkSetBwMode_v1"),
       * same module-global lookup + vectorcall pattern as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11723, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkSetBwMode};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11723, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 11723, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11722
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSetBwMode_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":11724
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* Read the array's base address via val.ctypes.data (a Python int),
     * convert it back to a pointer, and copy exactly sizeof(struct) bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11724, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11724, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11724, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlNvlinkSetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11725
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    /* Data was copied, so the array is not an owner: _owner = None. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11726
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11727
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11727, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11727, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 11727, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":11720
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":11729
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    /* Any other key: treat it as an attribute assignment. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 11729, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":11719
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkSetBwMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11731
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Thin getter wrapper for NvlinkSetBwMode_v1.version: casts the generic
 * PyObject* self to the extension-type struct and delegates to the
 * implementation function.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of a __get__
   * slot; the macro presumably discards its arguments here — generated code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSetBwMode_v1.version property getter:
 * reads the `version` field (unsigned int) out of the wrapped
 * nvmlNvlinkSetBwMode_v1_t struct at self->_ptr[0] and boxes it as a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11734
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int; only failure mode is OOM. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11731
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11736
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Thin setter wrapper for NvlinkSetBwMode_v1.version: casts self and
 * forwards to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSetBwMode_v1.version property setter:
 * raises ValueError when the instance is read-only, otherwise converts
 * `val` to a C unsigned int (with overflow/type checking) and stores it
 * into the wrapped struct.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11738
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11739
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkSetBwMode_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11739, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11739, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11738
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11740
 *         if self._readonly:
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates
   * a real conversion failure from a stored UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11740, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11736
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11742
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_set_best(self):
 *         """int: """
*/

/* Python wrapper */
/* Thin getter wrapper for NvlinkSetBwMode_v1.b_set_best: casts self and
 * delegates to the implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSetBwMode_v1.b_set_best property getter:
 * reads the `bSetBest` field (unsigned int) from the wrapped struct and
 * boxes it as a Python int.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11745
 *     def b_set_best(self):
 *         """int: """
 *         return self._ptr[0].bSetBest             # <<<<<<<<<<<<<<
 * 
 *     @b_set_best.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bSetBest); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11742
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_set_best(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.b_set_best.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11747
 *         return self._ptr[0].bSetBest
 * 
 *     @b_set_best.setter             # <<<<<<<<<<<<<<
 *     def b_set_best(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Thin setter wrapper for NvlinkSetBwMode_v1.b_set_best: casts self and
 * forwards to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSetBwMode_v1.b_set_best property setter:
 * raises ValueError when the instance is read-only, otherwise converts
 * `val` to a C unsigned int and stores it into the struct's bSetBest
 * field.  Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11749
 *     @b_set_best.setter
 *     def b_set_best(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].bSetBest = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11750
 *     def b_set_best(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bSetBest = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkSetBwMode_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11750, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11750, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11749
 *     @b_set_best.setter
 *     def b_set_best(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].bSetBest = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11751
 *         if self._readonly:
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].bSetBest = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates
   * a real conversion failure from a stored UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11751, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bSetBest = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11747
 *         return self._ptr[0].bSetBest
 * 
 *     @b_set_best.setter             # <<<<<<<<<<<<<<
 *     def b_set_best(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.b_set_best.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11753
 *         self._ptr[0].bSetBest = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bw_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* Thin getter wrapper for NvlinkSetBwMode_v1.bw_mode: casts self and
 * delegates to the implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSetBwMode_v1.bw_mode property getter:
 * reads the `bwMode` field (unsigned char, unlike the other fields which
 * are unsigned int) from the wrapped struct and boxes it as a Python int.
 * Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11756
 *     def bw_mode(self):
 *         """int: """
 *         return self._ptr[0].bwMode             # <<<<<<<<<<<<<<
 * 
 *     @bw_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).bwMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11753
 *         self._ptr[0].bSetBest = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bw_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.bw_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11758
 *         return self._ptr[0].bwMode
 * 
 *     @bw_mode.setter             # <<<<<<<<<<<<<<
 *     def bw_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Thin setter wrapper for NvlinkSetBwMode_v1.bw_mode: casts self and
 * forwards to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvlinkSetBwMode_v1.bw_mode property setter:
 * raises ValueError when the instance is read-only, otherwise converts
 * `val` to a C unsigned char (range-checked) and stores it into the
 * struct's bwMode field.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11760
 *     @bw_mode.setter
 *     def bw_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].bwMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11761
 *     def bw_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bwMode = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkSetBwMode_v1_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11761, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11761, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11760
 *     @bw_mode.setter
 *     def bw_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].bwMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11762
 *         if self._readonly:
 *             raise ValueError("This NvlinkSetBwMode_v1 instance is read-only")
 *         self._ptr[0].bwMode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned char)-1 is a legal value, so PyErr_Occurred() disambiguates
   * a real conversion failure from a stored 0xFF. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 11762, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bwMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11758
 *         return self._ptr[0].bwMode
 * 
 *     @bw_mode.setter             # <<<<<<<<<<<<<<
 *     def bw_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.bw_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11764
 *         self._ptr[0].bwMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the staticmethod NvlinkSetBwMode_v1.from_data(data).
 * Accepts exactly one positional-or-keyword argument `data`, then delegates to
 * the implementation.  Returns a new reference or NULL with an exception set.
 * Fix vs. generated code: the branch hint was written as
 * `unlikely(__pyx_kwds_len) < 0`, which hints on the length value rather than
 * on the comparison; it is now `unlikely(__pyx_kwds_len < 0)` — identical
 * truth value, correctly-placed hint. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_12from_data, "NvlinkSetBwMode_v1.from_data(data)\n\nCreate an NvlinkSetBwMode_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `nvlink_set_bw_mode_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* hint the whole predicate, not the raw length value */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11764, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11764, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11764, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11764, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11764, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11764, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkSetBwMode_v1.from_data(data): looks up the
 * module-level `nvlink_set_bw_mode_v1_dtype` global, then delegates to the
 * shared __from_data helper with the dtype name, the dtype object and the
 * NvlinkSetBwMode_v1 extension type.  Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11771
 *             data (_numpy.ndarray): a single-element array of dtype `nvlink_set_bw_mode_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "nvlink_set_bw_mode_v1_dtype", nvlink_set_bw_mode_v1_dtype, NvlinkSetBwMode_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_nvlink_set_bw_mode_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_nvlink_set_bw_mode_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11764
 *         self._ptr[0].bwMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11773
 *         return __from_data(data, "nvlink_set_bw_mode_v1_dtype", nvlink_set_bw_mode_v1_dtype, NvlinkSetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the staticmethod
 * NvlinkSetBwMode_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks 1..3 positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C bool, defaults `owner` to None, then delegates to the
 * implementation.  Returns a new reference or NULL with an exception set.
 * Fix vs. generated code: the branch hint was written as
 * `unlikely(__pyx_kwds_len) < 0`, which hints on the length value rather than
 * on the comparison; it is now `unlikely(__pyx_kwds_len < 0)` — identical
 * truth value, correctly-placed hint. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_14from_ptr, "NvlinkSetBwMode_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an NvlinkSetBwMode_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* hint the whole predicate, not the raw length value */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11773, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* keyword path: collect whatever positionals were passed, then merge kwargs */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11773, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11774
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11773, __pyx_L3_error) }
      }
    } else {
      /* positional-only path: 1..3 arguments */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11773, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts via PyLong_AsSsize_t — assumes intptr_t and
     * Py_ssize_t have the same width, true on supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11774, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11774, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11773, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11773
 *         return __from_data(data, "nvlink_set_bw_mode_v1_dtype", nvlink_set_bw_mode_v1_dtype, NvlinkSetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `NvlinkSetBwMode_v1.from_ptr(ptr, readonly=False, owner=None)`.
 * Wraps (owner given) or deep-copies (owner is None) a C `nvmlNvlinkSetBwMode_v1_t` struct
 * located at the integer address `ptr` into a new NvlinkSetBwMode_v1 Python object.
 * Raises ValueError for a null pointer and MemoryError if the copy allocation fails.
 * NOTE(review): Cython-generated code -- comments added for review only; regenerate from
 * cuda/bindings/_nvml.pyx rather than hand-editing the logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11782
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)
 */
  /* Guard against wrapping a null pointer: raise ValueError("ptr must not be null (0)"). */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11783
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      /* Vectorcall PyExc_ValueError with the interned message to build the exception instance. */
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11783, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11783, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11782
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":11784
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__, so no calloc happens here). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11784, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11785
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if obj._ptr == NULL:
 */
  /* owner is None -> take an owned copy of the struct; otherwise just alias the caller's memory. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11786
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 */
    __pyx_v_obj->_ptr = ((nvmlNvlinkSetBwMode_v1_t *)malloc((sizeof(nvmlNvlinkSetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11787
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSetBwMode_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11788
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      /* Look up "MemoryError" as a module-global name (falls back to builtins). */
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11788, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      /* If the looked-up callable is a bound method, unpack self+function to vectorcall it. */
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkSetBwMode};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11788, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11788, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11787
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSetBwMode_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11789
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSetBwMode_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the caller's struct into the freshly malloc'd buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlNvlinkSetBwMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":11790
 *                 raise MemoryError("Error allocating NvlinkSetBwMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11791
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>ptr
 */
    /* _owned = True: the dealloc path is responsible for freeing _ptr. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11785
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkSetBwMode_v1 obj = NvlinkSetBwMode_v1.__new__(NvlinkSetBwMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>malloc(sizeof(nvmlNvlinkSetBwMode_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":11793
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Alias the caller's memory directly; `owner` keeps the backing buffer alive. */
    __pyx_v_obj->_ptr = ((nvmlNvlinkSetBwMode_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11794
 *         else:
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11795
 *             obj._ptr = <nvmlNvlinkSetBwMode_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11796
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11797
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11773
 *         return __from_data(data, "nvlink_set_bw_mode_v1_dtype", nvlink_set_bw_mode_v1_dtype, NvlinkSetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop any live temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Release the local `obj` reference; on success __pyx_r already holds its own reference. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* `NvlinkSetBwMode_v1.__reduce_cython__(self)` -- generated pickling stub.
 * This wrapper only validates that no positional or keyword arguments were passed,
 * then forwards to the implementation (which unconditionally raises TypeError).
 * NOTE(review): Cython-generated code; comments added for review only. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_16__reduce_cython__, "NvlinkSetBwMode_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the tuple-based calling convention, recover the positional-argument count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__reduce_cython__`: the type holds a raw C pointer (`self._ptr`)
 * and therefore cannot be pickled; always raises TypeError.
 * NOTE(review): Cython-generated code; comments added for review only. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Unconditional raise; control always transfers to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* `NvlinkSetBwMode_v1.__setstate_cython__(self, __pyx_state)` -- generated pickling stub.
 * Parses exactly one argument (positional or keyword `__pyx_state`), then forwards to the
 * implementation (which unconditionally raises TypeError).
 * NOTE(review): Cython-generated code; comments added for review only.
 * BUGFIX below: the keyword-length error check was `unlikely(__pyx_kwds_len) < 0`,
 * which compares unlikely()'s 0/1 result against 0 and is therefore always false,
 * silently ignoring a negative (error) return from __Pyx_NumKwargs_FASTCALL.
 * Corrected to `unlikely(__pyx_kwds_len < 0)`, matching the other wrappers in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_18__setstate_cython__, "NvlinkSetBwMode_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the tuple-based calling convention, recover the positional-argument count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)` -- always false; see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument received a value. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release collected argument references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setstate_cython__`: the type holds a raw C pointer (`self._ptr`)
 * and therefore cannot be unpickled; always raises TypeError (state argument is ignored).
 * NOTE(review): Cython-generated code; comments added for review only. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Unconditional raise; control always transfers to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkSetBwMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11800
 * 
 * 
 * cdef _get_vgpu_version_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuVersion_t pod = nvmlVgpuVersion_t()
 *     return _numpy.dtype({
*/

/* Module-internal helper `_get_vgpu_version_dtype_offsets()`:
 * builds a numpy structured dtype mirroring the C layout of `nvmlVgpuVersion_t`
 * (fields `minVersion`/`maxVersion` as uint32, with offsets computed from a stack
 * instance via pointer arithmetic and itemsize = sizeof the C struct).
 * NOTE(review): Cython-generated code; comments added for review only.
 * BUGFIX below: the temporary `__pyx_t_1` was copied into `pod` without ever being
 * assigned (the `nvmlVgpuVersion_t()` construction was missing), reading an
 * indeterminate value. Harmless in practice -- only field *addresses* of `pod` are
 * used -- but it is still an uninitialized read; the construction is restored. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_version_dtype_offsets(void) {
  nvmlVgpuVersion_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuVersion_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_version_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11801
 * 
 * cdef _get_vgpu_version_dtype_offsets():
 *     cdef nvmlVgpuVersion_t pod = nvmlVgpuVersion_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['min_version', 'max_version'],
 */
  /* BUGFIX: value-initialize the temporary before copying it into `pod`
   * (previously `__pyx_t_1` was read uninitialized here). */
  __pyx_t_1 = nvmlVgpuVersion_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11802
 * cdef _get_vgpu_version_dtype_offsets():
 *     cdef nvmlVgpuVersion_t pod = nvmlVgpuVersion_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['min_version', 'max_version'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Resolve `_numpy.dtype` (module-global `_numpy`, attribute `dtype`). */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11802, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11802, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11803
 *     cdef nvmlVgpuVersion_t pod = nvmlVgpuVersion_t()
 *     return _numpy.dtype({
 *         'names': ['min_version', 'max_version'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the 4-key dtype-spec dict: 'names' first. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_min_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_min_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_min_version) != (0)) __PYX_ERR(0, 11803, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_max_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_max_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_max_version) != (0)) __PYX_ERR(0, 11803, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11804
 *     return _numpy.dtype({
 *         'names': ['min_version', 'max_version'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.minVersion)) - (<intptr_t>&pod),
 */
  /* 'formats': both fields are numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11804, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11804, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11804, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11806
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.minVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.maxVersion)) - (<intptr_t>&pod),
 *         ],
 */
  /* 'offsets': byte offsets of each field, computed as address differences on `pod`. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.minVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11807
 *         'offsets': [
 *             (<intptr_t>&(pod.minVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuVersion_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.maxVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11805
 *         'names': ['min_version', 'max_version'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.minVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.maxVersion)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11805, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11805, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11805, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11809
 *             (<intptr_t>&(pod.maxVersion)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuVersion_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': full C struct size, so the dtype matches the C layout including padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuVersion_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11809, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `_numpy.dtype` resolved to a bound method, unpack self+function to vectorcall it. */
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11802, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11800
 * 
 * 
 * cdef _get_vgpu_version_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuVersion_t pod = nvmlVgpuVersion_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: drop all live temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_version_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11826
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuVersion_t *>calloc(1, sizeof(nvmlVgpuVersion_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* `VgpuVersion.__init__(self)` wrapper (tp_init convention: returns 0 on success, -1 on error).
 * Validates that no positional or keyword arguments were passed, then forwards to the
 * implementation, which calloc's the backing `nvmlVgpuVersion_t` struct.
 * NOTE(review): Cython-generated code; comments added for review only. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives an args tuple; recover the positional count from it. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__init__ (from _nvml.pyx:11826-11832).
 * Heap-allocates one zero-initialized nvmlVgpuVersion_t that this object
 * owns (_owned = True, freed later in __dealloc__), with no external
 * owner (_owner = None) and writable contents (_readonly = False).
 * Raises MemoryError if calloc returns NULL.  Returns 0 on success,
 * -1 on error (standard tp_init convention). */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11827
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuVersion_t *>calloc(1, sizeof(nvmlVgpuVersion_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuVersion")
*/
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuVersion_t *)calloc(1, (sizeof(nvmlVgpuVersion_t))));

  /* "cuda/bindings/_nvml.pyx":11828
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuVersion_t *>calloc(1, sizeof(nvmlVgpuVersion_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuVersion")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11829
 *         self._ptr = <nvmlVgpuVersion_t *>calloc(1, sizeof(nvmlVgpuVersion_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuVersion")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Look up MemoryError in the module globals and call it with the
     * message string; the CYTHON_UNPACK_METHODS branch splits a bound
     * method into (self, function) for the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11829, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuVersion};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11829, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11829, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11828
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuVersion_t *>calloc(1, sizeof(nvmlVgpuVersion_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuVersion")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":11830
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuVersion")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* No external owner keeps the buffer alive: this object owns it. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11831
 *             raise MemoryError("Error allocating VgpuVersion")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11832
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11826
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuVersion_t *>calloc(1, sizeof(nvmlVgpuVersion_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11834
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuVersion_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuVersion.__dealloc__; simply forwards to
 * the implementation with the correctly typed self pointer.
 * NOTE(review): __Pyx_KwValues_VARARGS is presumably a macro that ignores
 * its arguments here -- __pyx_args/__pyx_nargs are not declared in this
 * function, so it can only compile if the macro discards them.  Confirm
 * against the Cython utility-code preamble. */
static void __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuVersion.__dealloc__ (from _nvml.pyx:11834-11839).
 * Frees the nvmlVgpuVersion_t buffer only when this object owns it
 * (_owned) and the pointer is non-NULL.  The pointer is first copied to
 * a local and cleared on the object before free(), so the object never
 * holds a dangling pointer during teardown. */
static void __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  nvmlVgpuVersion_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuVersion_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":11836
 *     def __dealloc__(self):
 *         cdef nvmlVgpuVersion_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit "and": only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11837
 *         cdef nvmlVgpuVersion_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":11838
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    /* Clear before free so the field never dangles. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":11839
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11836
 *     def __dealloc__(self):
 *         cdef nvmlVgpuVersion_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":11834
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuVersion_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":11841
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuVersion object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuVersion.__repr__; forwards to the
 * implementation with the correctly typed self pointer. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__repr__ (from _nvml.pyx:11841-11842).
 * Builds the f-string "<{__name__}.VgpuVersion object at {hex(id(self))}>"
 * by formatting the module __name__, computing hex(id(self)), and joining
 * five unicode fragments with a precomputed length and max-char hint. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11842
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuVersion object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__): module name formatted with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".VgpuVersion object at " + hexaddr + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuVersion_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 23 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11841
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuVersion object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11844
 *         return f"<{__name__}.VgpuVersion object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuVersion.ptr property getter;
 * forwards to the implementation with the correctly typed self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuVersion.ptr property getter
 * (from _nvml.pyx:11844-11847): returns the address held in _ptr as a
 * Python int (the pointer is cast to intptr_t and boxed with
 * PyLong_FromSsize_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11847
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11844
 *         return f"<{__name__}.VgpuVersion object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11849
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) accessor VgpuVersion._get_ptr
 * (from _nvml.pyx:11849-11850): returns the _ptr address as intptr_t
 * with no Python-object boxing -- the fast path for intra-module use. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_11VgpuVersion__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":11850
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11849
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11852
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuVersion.__int__; forwards to the
 * implementation with the correctly typed self pointer. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__int__ (from _nvml.pyx:11852-11853):
 * int(obj) yields the _ptr address, same value as the .ptr property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11853
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11853, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11852
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11855
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuVersion other_
 *         if not isinstance(other, VgpuVersion):
*/

/* Python wrapper */
/* Python-level wrapper for VgpuVersion.__eq__; forwards self and the
 * comparand to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__eq__ (from _nvml.pyx:11855-11860).
 * Returns False for any non-VgpuVersion comparand; otherwise compares
 * the two underlying nvmlVgpuVersion_t structs byte-for-byte with
 * memcmp (structural equality, not pointer identity).
 * NOTE(review): memcmp dereferences both _ptr values unconditionally;
 * if either side's _ptr is NULL this is undefined behavior -- confirm
 * that callers cannot reach __eq__ with an unallocated instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11857
 *     def __eq__(self, other):
 *         cdef VgpuVersion other_
 *         if not isinstance(other, VgpuVersion):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11858
 *         cdef VgpuVersion other_
 *         if not isinstance(other, VgpuVersion):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuVersion_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11857
 *     def __eq__(self, other):
 *         cdef VgpuVersion other_
 *         if not isinstance(other, VgpuVersion):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":11859
 *         if not isinstance(other, VgpuVersion):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuVersion_t)) == 0)
 * 
*/
  /* The TypeTest accepts None, but the isinstance gate above already
   * returned False for None, so this downcast is safe here. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion))))) __PYX_ERR(0, 11859, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":11860
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuVersion_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuVersion_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11860, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11855
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuVersion other_
 *         if not isinstance(other, VgpuVersion):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11862
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuVersion_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
*/

/* Python wrapper */
/* Python-level wrapper for VgpuVersion.__setitem__; forwards self,
 * key, and value to the implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__setitem__ (from _nvml.pyx:11862-11872).
 * Two behaviors:
 *   obj[0] = ndarray  -> malloc a fresh nvmlVgpuVersion_t, memcpy the
 *                        array's data into it, take ownership, and mirror
 *                        the array's writeable flag into _readonly;
 *   anything else     -> setattr(self, key, val).
 * NOTE(review): the ndarray branch overwrites self._ptr with a new
 * malloc without freeing the previously owned buffer, so an instance
 * that already owned memory (e.g. fresh from __init__) leaks it here.
 * The fix belongs in the .pyx source, not this generated file.
 * NOTE(review): the memcpy assumes val's buffer holds at least
 * sizeof(nvmlVgpuVersion_t) contiguous bytes -- no size/dtype check is
 * visible here; presumably callers pass a matching structured array. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":11863
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit "and": only probe numpy when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 11863, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 11863, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11864
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuVersion")
*/
    /* See NOTE(review) above: old _ptr is not freed before this. */
    __pyx_v_self->_ptr = ((nvmlVgpuVersion_t *)malloc((sizeof(nvmlVgpuVersion_t))));

    /* "cuda/bindings/_nvml.pyx":11865
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuVersion_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11866
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuVersion")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuVersion_t))
 *             self._owner = None
*/
      /* Same MemoryError call pattern as __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11866, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuVersion};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11866, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 11866, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11865
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuVersion_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":11867
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuVersion_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* val.ctypes.data is the array's base address as a Python int;
     * copy sizeof(nvmlVgpuVersion_t) bytes from it into the new buffer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11867, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuVersion_t))));

    /* "cuda/bindings/_nvml.pyx":11868
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuVersion_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11869
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuVersion_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11870
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* Mirror the source array's read-only state onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11870, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11870, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 11870, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":11863
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":11872
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 11872, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":11862
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuVersion_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11874
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_version(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuVersion.min_version property getter;
 * forwards to the implementation with the correctly typed self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.min_version.__get__:
   boxes the C `unsigned int` field self._ptr[0].minVersion into a
   Python int and returns it (new reference), or NULL with an exception
   set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11877
 *     def min_version(self):
 *         """int: """
 *         return self._ptr[0].minVersion             # <<<<<<<<<<<<<<
 * 
 *     @min_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the raw unsigned int to a Python integer object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).minVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11877, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11874
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def min_version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.min_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11879
 *         return self._ptr[0].minVersion
 * 
 *     @min_version.setter             # <<<<<<<<<<<<<<
 *     def min_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuVersion.min_version property setter.
   Casts self to the extension-type struct and delegates to the
   implementation; returns 0 on success, -1 with an exception set on
   failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.min_version.__set__:
   raises ValueError when the instance is flagged read-only, otherwise
   converts `val` to a C unsigned int and stores it in
   self._ptr[0].minVersion.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11881
 *     @min_version.setter
 *     def min_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuVersion instance is read-only")
 *         self._ptr[0].minVersion = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11882
 *     def min_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuVersion instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].minVersion = val
 * 
 */
    /* Build and raise ValueError via the vectorcall helper (one string arg). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuVersion_instance_is_rea};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11882, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11882, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11881
 *     @min_version.setter
 *     def min_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuVersion instance is read-only")
 *         self._ptr[0].minVersion = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11883
 *         if self._readonly:
 *             raise ValueError("This VgpuVersion instance is read-only")
 *         self._ptr[0].minVersion = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int; (unsigned int)-1 doubles as the error sentinel,
     so PyErr_Occurred() disambiguates a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11883, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).minVersion = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11879
 *         return self._ptr[0].minVersion
 * 
 *     @min_version.setter             # <<<<<<<<<<<<<<
 *     def min_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.min_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11885
 *         self._ptr[0].minVersion = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_version(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuVersion.max_version property getter.
   Casts self to the extension-type struct and delegates to the
   implementation function; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.max_version.__get__:
   boxes the C `unsigned int` field self._ptr[0].maxVersion into a
   Python int and returns it (new reference), or NULL with an exception
   set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11888
 *     def max_version(self):
 *         """int: """
 *         return self._ptr[0].maxVersion             # <<<<<<<<<<<<<<
 * 
 *     @max_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the raw unsigned int to a Python integer object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).maxVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11885
 *         self._ptr[0].minVersion = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def max_version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.max_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11890
 *         return self._ptr[0].maxVersion
 * 
 *     @max_version.setter             # <<<<<<<<<<<<<<
 *     def max_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuVersion.max_version property setter.
   Casts self to the extension-type struct and delegates to the
   implementation; returns 0 on success, -1 with an exception set on
   failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.max_version.__set__:
   raises ValueError when the instance is flagged read-only, otherwise
   converts `val` to a C unsigned int and stores it in
   self._ptr[0].maxVersion.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":11892
 *     @max_version.setter
 *     def max_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuVersion instance is read-only")
 *         self._ptr[0].maxVersion = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":11893
 *     def max_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuVersion instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].maxVersion = val
 * 
 */
    /* Build and raise ValueError via the vectorcall helper (one string arg). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuVersion_instance_is_rea};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11893, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 11893, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11892
 *     @max_version.setter
 *     def max_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuVersion instance is read-only")
 *         self._ptr[0].maxVersion = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":11894
 *         if self._readonly:
 *             raise ValueError("This VgpuVersion instance is read-only")
 *         self._ptr[0].maxVersion = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox to unsigned int; (unsigned int)-1 doubles as the error sentinel,
     so PyErr_Occurred() disambiguates a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11894, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).maxVersion = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":11890
 *         return self._ptr[0].maxVersion
 * 
 *     @max_version.setter             # <<<<<<<<<<<<<<
 *     def max_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.max_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11896
 *         self._ptr[0].maxVersion = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuVersion instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method VgpuVersion.from_data(data).
   Unpacks the single argument `data` (positional or keyword) and
   delegates to the implementation function.  Returns a new reference or
   NULL with an exception set.
   Fix vs. generated code: the kwargs-length error check below had the
   `< 0` comparison OUTSIDE the unlikely() macro; unlikely(x) normalizes
   its argument to 0/1 (via !!), so `unlikely(n) < 0` is always false and
   an error return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_12from_data, "VgpuVersion.from_data(data)\n\nCreate an VgpuVersion instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_version_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); was `unlikely(__pyx_kwds_len) < 0`,
       which can never be true, masking errors from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11896, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11896, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 11896, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 11896, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11896, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 11896, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method VgpuVersion.from_data(data):
   looks up the module-global `vgpu_version_dtype` and forwards to the
   shared helper __from_data together with the dtype name and the
   VgpuVersion type object.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":11903
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_version_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_version_dtype", vgpu_version_dtype, VgpuVersion)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the `vgpu_version_dtype` module global at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_version_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11903, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_version_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11903, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11896
 *         self._ptr[0].maxVersion = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuVersion instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11905
 *         return __from_data(data, "vgpu_version_dtype", vgpu_version_dtype, VgpuVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuVersion instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level wrapper for the static method
   VgpuVersion.from_ptr(ptr, readonly=False, owner=None).
   Unpacks up to three positional/keyword arguments, converts `ptr` to
   intptr_t and `readonly` to a C int (default 0), defaults `owner` to
   None, then delegates to the implementation function.
   Fix vs. generated code: the kwargs-length error check below had the
   `< 0` comparison OUTSIDE the unlikely() macro; unlikely(x) normalizes
   its argument to 0/1 (via !!), so `unlikely(n) < 0` is always false and
   an error return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_14from_ptr, "VgpuVersion.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuVersion instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); was `unlikely(__pyx_kwds_len) < 0`,
       which can never be true, masking errors from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 11905, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 11905, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":11906
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuVersion instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 11905, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 11905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 11905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 11905, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert `ptr` (Python int) to intptr_t; -1 plus PyErr_Occurred() is the error signal. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 11906, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 11906, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 11905, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":11905
 *         return __from_data(data, "vgpu_version_dtype", vgpu_version_dtype, VgpuVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuVersion instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method VgpuVersion.from_ptr(ptr, readonly, owner).
   Wraps a raw nvmlVgpuVersion_t address in a new VgpuVersion instance:
     - ptr == 0 raises ValueError;
     - owner is None: malloc a private nvmlVgpuVersion_t, memcpy the struct
       from `ptr`, and mark the object as owning the copy (_owned = 1;
       presumably released by the deallocator, which is not visible here);
     - otherwise: alias `ptr` directly and hold a reference to `owner` to
       keep the underlying memory alive (_owned = 0);
   finally records the `readonly` flag and returns the new instance
   (new reference), or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":11914
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11915
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)
 *         if owner is None:
 */
    /* Reject null pointers before any allocation happens. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11915, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11915, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11914
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)
 */
  }

  /* "cuda/bindings/_nvml.pyx":11916
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 */
  /* Allocate the instance directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuVersion(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11916, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":11917
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":11918
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuVersion")
 */
    __pyx_v_obj->_ptr = ((nvmlVgpuVersion_t *)malloc((sizeof(nvmlVgpuVersion_t))));

    /* "cuda/bindings/_nvml.pyx":11919
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuVersion_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":11920
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuVersion")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuVersion_t))
 *             obj._owner = None
 */
      /* NOTE(review): "MemoryError" is resolved as a module global here
         (presumably falling back to builtins), so a module-level name
         `MemoryError` would shadow the builtin — confirm intended. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11920, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuVersion};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11920, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 11920, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":11919
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuVersion_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":11921
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuVersion_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the struct: the new object is independent of `ptr`. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuVersion_t))));

    /* "cuda/bindings/_nvml.pyx":11922
 *                 raise MemoryError("Error allocating VgpuVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuVersion_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":11923
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuVersion_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuVersion_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":11917
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuVersion obj = VgpuVersion.__new__(VgpuVersion)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuVersion_t *>malloc(sizeof(nvmlVgpuVersion_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":11925
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuVersion_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Alias the caller's memory; `owner` keeps it alive via refcount. */
    __pyx_v_obj->_ptr = ((nvmlVgpuVersion_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":11926
 *         else:
 *             obj._ptr = <nvmlVgpuVersion_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":11927
 *             obj._ptr = <nvmlVgpuVersion_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":11928
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":11929
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11905
 *         return __from_data(data, "vgpu_version_dtype", vgpu_version_dtype, VgpuVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuVersion instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Forward declaration of the __reduce_cython__ wrapper for VgpuVersion.
   The parameter list differs by calling convention: vectorcall-style
   (args array + count + kwnames) under METH_FASTCALL, classic tuple/dict
   otherwise. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and PyMethodDef slot used to register the method on the type. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_16__reduce_cython__, "VgpuVersion.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_16__reduce_cython__};
/* Wrapper: rejects any positional or keyword arguments (the method takes
   only self), then delegates to the __pyx_pf_..._16__reduce_cython__
   implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple-based calling convention: recover the positional count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__reduce_cython__: this extension type wraps
   a raw nvmlVgpuVersion_t pointer and therefore cannot be pickled, so the
   method unconditionally raises TypeError. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError and jump straight to the error path below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Forward declaration of the __setstate_cython__ wrapper for VgpuVersion.
   Signature depends on the calling convention (METH_FASTCALL vs tuple/dict). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and PyMethodDef slot used to register the method on the type. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_18__setstate_cython__, "VgpuVersion.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_18__setstate_cython__};
/* Wrapper: unpacks exactly one argument (__pyx_state), passed positionally
   or by keyword, then delegates to the __pyx_pf_..._18__setstate_cython__
   implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple-based calling convention: recover the positional count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(); the original
       `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
       __builtin_expect(!!(x), 0) against 0 and is therefore always false,
       silently skipping the negative-length error check.  The corrected
       form matches the sibling wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword case: collect positionals, then parse
         keywords into any remaining slots. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuVersion.__setstate_cython__: unpickling is not
   supported for this pointer-wrapping type, so TypeError is raised
   unconditionally and the __pyx_state argument is ignored. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_11VgpuVersion_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError and jump straight to the error path below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuVersion.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11932
 * 
 * 
 * cdef _get_vgpu_metadata_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuMetadata_t pod = nvmlVgpuMetadata_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype mirroring the C layout of
   nvmlVgpuMetadata_t.  A stack instance `pod` is used purely for address
   arithmetic: each field offset is computed as &pod.field - &pod, so the
   dtype's 'offsets' and 'itemsize' exactly match the compiled struct,
   including any padding inserted by the compiler. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_metadata_dtype_offsets(void) {
  nvmlVgpuMetadata_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuMetadata_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  size_t __pyx_t_17;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_metadata_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":11933
 * 
 * cdef _get_vgpu_metadata_dtype_offsets():
 *     cdef nvmlVgpuMetadata_t pod = nvmlVgpuMetadata_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'revision', 'guest_info_state', 'guest_driver_version', 'host_driver_version', 'reserved', 'vgpu_virtualization_caps', 'guest_vgpu_version', 'opaque_data_size', 'opaque_data'],
 */
  /* NOTE(review): __pyx_t_1 is copied without a visible prior assignment
     (C++ default construction of the temporary).  The field values of pod
     are never read below — only field addresses — so this appears benign;
     confirm against the Cython code generator if in doubt. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":11934
 * cdef _get_vgpu_metadata_dtype_offsets():
 *     cdef nvmlVgpuMetadata_t pod = nvmlVgpuMetadata_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'revision', 'guest_info_state', 'guest_driver_version', 'host_driver_version', 'reserved', 'vgpu_virtualization_caps', 'guest_vgpu_version', 'opaque_data_size', 'opaque_data'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int8, _numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 */
  /* Look up _numpy.dtype; __pyx_t_5 holds the bound callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11934, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11934, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":11935
 *     cdef nvmlVgpuMetadata_t pod = nvmlVgpuMetadata_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'revision', 'guest_info_state', 'guest_driver_version', 'host_driver_version', 'reserved', 'vgpu_virtualization_caps', 'guest_vgpu_version', 'opaque_data_size', 'opaque_data'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int8, _numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
 */
  /* Build the dtype spec dict; first the 10-element 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_revision);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_revision);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_revision) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_guest_info_state);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_guest_info_state);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_guest_info_state) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_guest_driver_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_guest_driver_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_guest_driver_version) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_host_driver_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_host_driver_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_host_driver_version) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_reserved);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_reserved);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_reserved) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_virtualization_caps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_virtualization_caps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_vgpu_virtualization_caps) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_guest_vgpu_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_guest_vgpu_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_guest_vgpu_version) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_opaque_data_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_opaque_data_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_opaque_data_size) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_opaque_data);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_opaque_data);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_opaque_data) != (0)) __PYX_ERR(0, 11935, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 11935, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11936
 *     return _numpy.dtype({
 *         'names': ['version', 'revision', 'guest_info_state', 'guest_driver_version', 'host_driver_version', 'reserved', 'vgpu_virtualization_caps', 'guest_vgpu_version', 'opaque_data_size', 'opaque_data'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int8, _numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' list: fetch each numpy scalar type, one attribute lookup per
     entry (generated code does not cache repeated lookups). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Assemble the 'formats' list; ownership of each type object moves into
     the list (GIVEREF + clearing the temporaries below). */
  __pyx_t_6 = PyList_New(10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11936, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 11936, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 11935, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":11938
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int8, _numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestInfoState)) - (<intptr_t>&pod),
 */
  /* Each 'offsets' entry: byte offset of the field from the struct base,
     computed via pointer arithmetic on the stack instance. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 11938, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":11939
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.guestInfoState)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestDriverVersion)) - (<intptr_t>&pod),
 */
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.revision)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":11940
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestInfoState)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.guestDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 */
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.guestInfoState)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 11940, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":11941
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestInfoState)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestDriverVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 */
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.guestDriverVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 11941, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":11942
 *             (<intptr_t>&(pod.guestInfoState)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuVirtualizationCaps)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hostDriverVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 11942, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":11943
 *             (<intptr_t>&(pod.guestDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuVirtualizationCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestVgpuVersion)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.reserved)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 11943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":11944
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuVirtualizationCaps)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.guestVgpuVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuVirtualizationCaps)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 11944, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":11945
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuVirtualizationCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestVgpuVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.guestVgpuVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11945, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":11946
 *             (<intptr_t>&(pod.vgpuVirtualizationCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.guestVgpuVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.opaqueDataSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 11946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":11947
 *             (<intptr_t>&(pod.guestVgpuVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuMetadata_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.opaqueData)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 11947, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":11937
 *         'names': ['version', 'revision', 'guest_info_state', 'guest_driver_version', 'host_driver_version', 'reserved', 'vgpu_virtualization_caps', 'guest_vgpu_version', 'opaque_data_size', 'opaque_data'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int32, _numpy.int8, _numpy.int8, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list from the ten offset ints computed above. */
  __pyx_t_7 = PyList_New(10); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_16) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_15) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_14) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_13) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_11) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_10) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_9) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_8) != (0)) __PYX_ERR(0, 11937, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 11935, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":11949
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuMetadata_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype's total size to the compiled struct size,
     covering trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuMetadata_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 11949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 11935, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec).  If dtype turned out to be a bound method,
     unpack self + function to use the vectorcall fast path. */
  __pyx_t_17 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_17 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_17, (2-__pyx_t_17) | (__pyx_t_17*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11934, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11932
 * 
 * 
 * cdef _get_vgpu_metadata_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuMetadata_t pod = nvmlVgpuMetadata_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release every live temporary before returning NULL. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_metadata_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11966
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuMetadata_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuMetadata.__init__.
 * Rejects any positional or keyword arguments (the .pyx signature is
 * `def __init__(self)`), then delegates to the typed implementation
 * function. Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe (macro) path is used only when the
   * build assumes PyTuple_GET_SIZE cannot fail. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments beyond self: any positional arg is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  /* ...and any keyword arg is likewise rejected. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__init__ (.pyx lines 11966-11972).
 * Allocates a zero-filled nvmlVgpuMetadata_t on the C heap and records
 * ownership state: _owner = None (no foreign object keeps the buffer alive),
 * _owned = True (so __dealloc__ will free it), _readonly = False.
 * Raises MemoryError if calloc returns NULL. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":11967
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuMetadata_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuMetadata")
 */
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlVgpuMetadata_t *)calloc(1, (sizeof(nvmlVgpuMetadata_t))));

  /* "cuda/bindings/_nvml.pyx":11968
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuMetadata_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuMetadata")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":11969
 *         self._ptr = <nvmlVgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuMetadata_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuMetadata")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError as a module global and call it via the
     * vectorcall fast path to build the exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11969, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuMetadata};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11969, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 11969, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":11968
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuMetadata_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuMetadata")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":11970
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuMetadata")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace whatever _owner held with None (incref-new / decref-old order). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":11971
 *             raise MemoryError("Error allocating VgpuMetadata")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This instance owns its buffer: __dealloc__ will free() it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":11972
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":11966
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuMetadata_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11974
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuMetadata_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for VgpuMetadata.__dealloc__: casts the
 * PyObject* to the concrete extension-type struct and forwards to the
 * implementation. Cannot fail (returns void, raises nothing). */
static void __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuMetadata.__dealloc__ (.pyx lines 11974-11979).
 * Frees the heap buffer backing the struct, but only when this instance
 * owns it (_owned) and it has not already been released (_ptr != NULL).
 * The field is cleared before free() so _ptr never dangles. */
static void __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* Detach first, then release, matching the .pyx statement order. */
    nvmlVgpuMetadata_t *doomed = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(doomed);
  }
}

/* "cuda/bindings/_nvml.pyx":11981
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuMetadata object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr-level wrapper for VgpuMetadata.__repr__: casts self to the
 * concrete struct type and forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__repr__ (.pyx line 11982).
 * Builds the f-string f"<{__name__}.VgpuMetadata object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)) via builtin
 * calls, and joining the five pieces with __Pyx_PyUnicode_Join. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":11982
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuMetadata object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: fetch the module global and format it as str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id(self) then hex(), coerced to a unicode object. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and computed fragments: "<", name, " .VgpuMetadata
   * object at ", hex-id, ">". Length/max-char hints pre-size the result. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuMetadata_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 11982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11981
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuMetadata object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11984
 *         return f"<{__name__}.VgpuMetadata object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuMetadata.ptr: casts self and forwards
 * to the implementation that returns the buffer address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property (.pyx line 11987): returns the raw
 * address of the wrapped nvmlVgpuMetadata_t buffer as a Python int.
 * NOTE(review): the intptr_t address is boxed via PyLong_FromSsize_t, which
 * assumes intptr_t and Py_ssize_t have the same width — true on the
 * platforms this wheel targets, per the generator's choice. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":11987
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11987, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11984
 *         return f"<{__name__}.VgpuMetadata object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11989
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for the cdef method VgpuMetadata._get_ptr
 * (.pyx lines 11989-11990): the raw address of the wrapped
 * nvmlVgpuMetadata_t buffer as an intptr_t, with no Python boxing. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_12VgpuMetadata__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":11992
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int-level wrapper for VgpuMetadata.__int__: casts self and forwards
 * to the implementation returning the buffer address as a Python int. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__int__ (.pyx line 11993): int(obj)
 * yields the raw buffer address, identical in value to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":11993
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int (intptr_t assumed Py_ssize_t-sized). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11993, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11992
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":11995
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuMetadata other_
 *         if not isinstance(other, VgpuMetadata):
*/

/* Python wrapper */
/* Wrapper for VgpuMetadata.__eq__: casts self and forwards both operands
 * to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__eq__ (.pyx lines 11995-12000).
 * Returns False for any non-VgpuMetadata operand; otherwise compares the
 * two wrapped structs byte-for-byte with memcmp over sizeof(nvmlVgpuMetadata_t).
 * NOTE(review): if either _ptr is NULL the memcmp dereferences NULL —
 * the .pyx source assumes both objects are backed by live buffers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":11997
 *     def __eq__(self, other):
 *         cdef VgpuMetadata other_
 *         if not isinstance(other, VgpuMetadata):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":11998
 *         cdef VgpuMetadata other_
 *         if not isinstance(other, VgpuMetadata):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuMetadata_t)) == 0)
 */
    /* Wrong type: return False rather than NotImplemented, per the .pyx source. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":11997
 *     def __eq__(self, other):
 *         cdef VgpuMetadata other_
 *         if not isinstance(other, VgpuMetadata):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":11999
 *         if not isinstance(other, VgpuMetadata):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuMetadata_t)) == 0)
 * 
 */
  /* Downcast to the typed cdef local (None is tolerated by the type test). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata))))) __PYX_ERR(0, 11999, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":12000
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuMetadata_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Structural equality: raw byte comparison of the two wrapped structs. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuMetadata_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12000, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":11995
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuMetadata other_
 *         if not isinstance(other, VgpuMetadata):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12002
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuMetadata_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
*/

/* Python wrapper */
/* mp_ass_subscript-level wrapper for VgpuMetadata.__setitem__: casts self
 * and forwards key/value to the implementation. Returns 0 on success,
 * -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__setitem__ (.pyx lines 12002-12012).
 * obj[0] = ndarray copies sizeof(nvmlVgpuMetadata_t) bytes from the array's
 * data pointer into a freshly malloc'd owned buffer and records the array's
 * writeable flag as _readonly; any other key falls through to setattr.
 * Returns 0 on success, -1 with an exception set on error.
 *
 * FIX: the generated code assigned the new malloc'd buffer to self._ptr
 * without releasing the buffer the instance already owned (calloc'd in
 * __init__), leaking one nvmlVgpuMetadata_t per assignment. We now free
 * the previously owned buffer before replacing it. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* if key == 0 and isinstance(val, _numpy.ndarray): (.pyx line 12003) */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12003, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Resolve _numpy.ndarray lazily so the comparison short-circuits first. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12003, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12003, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 12003, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* FIX (leak): release any buffer this instance already owns before
     * overwriting _ptr; previously the old calloc'd buffer from __init__
     * was leaked here. Buffers not owned by us are left untouched. */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }

    /* self._ptr = <nvmlVgpuMetadata_t *>malloc(...) (.pyx line 12004) */
    __pyx_v_self->_ptr = ((nvmlVgpuMetadata_t *)malloc((sizeof(nvmlVgpuMetadata_t))));

    /* if self._ptr == NULL: raise MemoryError(...) (.pyx lines 12005-12006) */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12006, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuMetadata};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12006, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 12006, __pyx_L1_error)
    }

    /* memcpy(self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(...))
     * (.pyx line 12007): val.ctypes.data is the array's base address as an
     * int; copy one struct's worth of bytes into the new owned buffer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12007, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuMetadata_t))));

    /* self._owner = None (.pyx line 12008): the copy is independent of val. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* self._owned = True (.pyx line 12009): __dealloc__ frees the new buffer. */
    __pyx_v_self->_owned = 1;

    /* self._readonly = not val.flags.writeable (.pyx line 12010) */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12010, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12010, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 12010, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    goto __pyx_L3;
  }

  /* else: setattr(self, key, val) (.pyx line 12012) */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 12012, __pyx_L1_error)
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12014
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.version.__get__`: casts the generic
 * PyObject* self to the concrete extension-struct type and delegates to the
 * typed implementation below.  Returns a new reference, or NULL on error.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this function;
 * __Pyx_KwValues_VARARGS presumably expands without evaluating its arguments
 * in this build configuration — confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.version.__get__` (pyx line 12017):
 * boxes the C `unsigned int` field `self._ptr[0].version` into a Python int.
 * Returns a new reference, or NULL with an exception set on failure.
 * NOTE(review): `self->_ptr` is dereferenced without a NULL check; assumes it
 * was populated elsewhere (e.g. by __setitem__ / construction) — confirm. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12017
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int; only failure mode is allocation failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12014
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12019
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.version.__set__`: casts self and
 * forwards to the typed setter implementation.  Returns 0 on success, -1 on
 * error with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.version.__set__` (pyx line 12019):
 * raises ValueError when the instance is flagged read-only, otherwise
 * converts `val` to unsigned int and stores it into `self._ptr[0].version`.
 * Conversion errors from the int coercion propagate.  Returns 0 / -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12021
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].version = val
 */
  /* Guard: writes are rejected on read-only views (set in __setitem__). */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12022
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Vectorcall ValueError(msg) and raise it; always exits via L1_error. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12022, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12022, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12021
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12023
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Coerce the Python object to unsigned int; -1 + pending exception
   * signals a conversion failure (TypeError/OverflowError etc.). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12023, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12019
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12025
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def revision(self):
 *         """int: """
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.revision.__get__`: casts self and
 * delegates to the typed implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.revision.__get__` (pyx line 12028):
 * boxes the C `unsigned int` field `self._ptr[0].revision` into a Python
 * int.  Returns a new reference, or NULL with an exception set.
 * NOTE(review): `self->_ptr` is dereferenced without a NULL check — assumes
 * it was populated before first property access; confirm with callers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12028
 *     def revision(self):
 *         """int: """
 *         return self._ptr[0].revision             # <<<<<<<<<<<<<<
 * 
 *     @revision.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).revision); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12028, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12025
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def revision(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.revision.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12030
 *         return self._ptr[0].revision
 * 
 *     @revision.setter             # <<<<<<<<<<<<<<
 *     def revision(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.revision.__set__`: casts self and
 * forwards to the typed setter.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.revision.__set__` (pyx line 12030):
 * raises ValueError on read-only instances, otherwise coerces `val` to
 * unsigned int and stores it into `self._ptr[0].revision`.  Returns 0 / -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12032
 *     @revision.setter
 *     def revision(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].revision = val
 */
  /* Guard: writes are rejected on read-only views. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12033
 *     def revision(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].revision = val
 * 
 */
    /* Vectorcall ValueError(msg) and raise; always exits via L1_error. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12033, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12033, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12032
 *     @revision.setter
 *     def revision(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].revision = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12034
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].revision = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Coerce to unsigned int; -1 + pending exception signals failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12034, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).revision = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12030
 *         return self._ptr[0].revision
 * 
 *     @revision.setter             # <<<<<<<<<<<<<<
 *     def revision(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.revision.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12036
 *         self._ptr[0].revision = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def guest_info_state(self):
 *         """int: """
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.guest_info_state.__get__`: casts
 * self and delegates to the typed implementation.  New reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.guest_info_state.__get__` (pyx line 12039):
 * reads the enum field `self._ptr[0].guestInfoState`, casts it to int and
 * boxes it as a Python int.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12039
 *     def guest_info_state(self):
 *         """int: """
 *         return <int>(self._ptr[0].guestInfoState)             # <<<<<<<<<<<<<<
 * 
 *     @guest_info_state.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Enum -> int cast mirrors the `<int>` cast in the pyx source. */
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).guestInfoState)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12036
 *         self._ptr[0].revision = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def guest_info_state(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.guest_info_state.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12041
 *         return <int>(self._ptr[0].guestInfoState)
 * 
 *     @guest_info_state.setter             # <<<<<<<<<<<<<<
 *     def guest_info_state(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.guest_info_state.__set__`: casts
 * self and forwards to the typed setter.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.guest_info_state.__set__` (pyx line 12041):
 * raises ValueError on read-only instances, otherwise coerces `val` to int
 * and stores it (cast to nvmlVgpuGuestInfoState_t) into the struct field.
 * NOTE(review): the int value is not range-checked against the enum's valid
 * members before the cast — out-of-range values are stored as-is. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12043
 *     @guest_info_state.setter
 *     def guest_info_state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].guestInfoState = <nvmlVgpuGuestInfoState_t><int>val
 */
  /* Guard: writes are rejected on read-only views. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12044
 *     def guest_info_state(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].guestInfoState = <nvmlVgpuGuestInfoState_t><int>val
 * 
 */
    /* Vectorcall ValueError(msg) and raise; always exits via L1_error. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12044, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12044, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12043
 *     @guest_info_state.setter
 *     def guest_info_state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].guestInfoState = <nvmlVgpuGuestInfoState_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12045
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].guestInfoState = <nvmlVgpuGuestInfoState_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Coerce to int; -1 + pending exception signals conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12045, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).guestInfoState = ((nvmlVgpuGuestInfoState_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":12041
 *         return <int>(self._ptr[0].guestInfoState)
 * 
 *     @guest_info_state.setter             # <<<<<<<<<<<<<<
 *     def guest_info_state(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.guest_info_state.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12047
 *         self._ptr[0].guestInfoState = <nvmlVgpuGuestInfoState_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def guest_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.guest_driver_version.__get__`:
 * casts self and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.guest_driver_version.__get__` (pyx 12050):
 * decodes the NUL-terminated char[80] field `guestDriverVersion` into a
 * Python str via PyUnicode_FromString (UTF-8 decode, stops at first NUL).
 * NOTE(review): assumes the field is always NUL-terminated; a full 80-byte
 * unterminated value would read past the field — confirm NVML guarantees. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12050
 *     def guest_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].guestDriverVersion)             # <<<<<<<<<<<<<<
 * 
 *     @guest_driver_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).guestDriverVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12050, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12047
 *         self._ptr[0].guestInfoState = <nvmlVgpuGuestInfoState_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def guest_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.guest_driver_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12052
 *         return cpython.PyUnicode_FromString(self._ptr[0].guestDriverVersion)
 * 
 *     @guest_driver_version.setter             # <<<<<<<<<<<<<<
 *     def guest_driver_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.guest_driver_version.__set__`:
 * casts self and forwards to the typed setter.  Returns 0 / -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.guest_driver_version.__set__` (pyx 12052):
 * raises ValueError on read-only instances, encodes `val` (str) to bytes,
 * rejects strings of 80+ encoded bytes, and copies the string into the
 * fixed char[80] field `guestDriverVersion`.  Returns 0 on success, -1 with
 * an exception set on failure.
 *
 * FIX: the generated code did `memcpy(dst, ptr, 80)` unconditionally, but
 * `ptr` points into a bytes object holding only len(buf)+1 valid bytes and
 * the length check above guarantees len(buf) < 80 — so the original copy
 * read up to 79 bytes past the end of the bytes object's heap buffer
 * (undefined behavior; CERT ARR30-C).  We now copy exactly len(buf)+1 bytes
 * (CPython bytes objects carry a trailing NUL) and zero-fill the remainder
 * of the field so the stored value is deterministic and NUL-terminated.
 * NOTE(review): this C file is regenerated from _nvml.pyx — the same fix
 * belongs in the .pyx / generator template, and the sibling
 * host_driver_version setter has the identical defect. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12054
 *     @guest_driver_version.setter
 *     def guest_driver_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  /* Guard: writes are rejected on read-only views. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12055
 *     def guest_driver_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12055, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12055, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12054
 *     @guest_driver_version.setter
 *     def guest_driver_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":12056
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field guest_driver_version, max length is 79")
 */
  /* val.encode() — default UTF-8 encoding; result must be exactly bytes. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 12056, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12057
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field guest_driver_version, max length is 79")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 12057, __pyx_L1_error)
  }
  /* __pyx_t_4 == len(buf); guaranteed < 80 past this check and reused for
   * the bounded copy below. */
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12057, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 80);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":12058
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field guest_driver_version, max length is 79")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].guestDriverVersion), <void *>ptr, 80)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_guest};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12058, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12058, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12057
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field guest_driver_version, max length is 79")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":12059
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field guest_driver_version, max length is 79")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].guestDriverVersion), <void *>ptr, 80)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 12059, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 12059, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":12060
 *             raise ValueError("String too long for field guest_driver_version, max length is 79")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].guestDriverVersion), <void *>ptr, 80)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Bounded copy: len(buf)+1 bytes (string + NUL terminator; CPython bytes
   * objects are internally NUL-terminated), then zero-fill the tail of the
   * char[80] field.  The generated `memcpy(..., 80)` over-read the source
   * buffer — see the function header comment. */
  memset(((void *)(__pyx_v_self->_ptr[0]).guestDriverVersion), 0, 80);
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).guestDriverVersion), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4) + 1));

  /* "cuda/bindings/_nvml.pyx":12052
 *         return cpython.PyUnicode_FromString(self._ptr[0].guestDriverVersion)
 * 
 *     @guest_driver_version.setter             # <<<<<<<<<<<<<<
 *     def guest_driver_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.guest_driver_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12062
 *         memcpy(<void *>(self._ptr[0].guestDriverVersion), <void *>ptr, 80)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.host_driver_version.__get__`:
 * casts self and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.host_driver_version.__get__` (pyx 12065):
 * decodes the NUL-terminated char[80] field `hostDriverVersion` into a
 * Python str via PyUnicode_FromString (UTF-8 decode, stops at first NUL).
 * NOTE(review): assumes the field is NUL-terminated within its 80 bytes —
 * confirm against the NVML contract for nvmlVgpuMetadata_t. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12065
 *     def host_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].hostDriverVersion)             # <<<<<<<<<<<<<<
 * 
 *     @host_driver_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).hostDriverVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12062
 *         memcpy(<void *>(self._ptr[0].guestDriverVersion), <void *>ptr, 80)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.host_driver_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12067
 *         return cpython.PyUnicode_FromString(self._ptr[0].hostDriverVersion)
 * 
 *     @host_driver_version.setter             # <<<<<<<<<<<<<<
 *     def host_driver_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* tp_getset slot entry for `VgpuMetadata.host_driver_version.__set__`:
 * casts self and forwards to the typed setter.  Returns 0 / -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.host_driver_version.__set__ (generated from
 * _nvml.pyx:12068-12075).  Rejects writes on read-only instances, encodes
 * `val` via val.encode(), validates the result fits the fixed 80-char
 * hostDriverVersion array (max 79 chars + NUL), and copies it into the
 * wrapped C struct.  Returns 0 on success, -1 with an exception set on
 * error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;  /* holds len(buf); reused below to bound the copy */
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12069
 *     @host_driver_version.setter
 *     def host_driver_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12070
 *     def host_driver_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:
 */
    /* Build and raise ValueError(...) via Cython's vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12070, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12070, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12069
 *     @host_driver_version.setter
 *     def host_driver_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":12071
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field host_driver_version, max length is 79")
 */
  /* val.encode() with no arguments (UTF-8); result must be exactly bytes. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12071, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 12071, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12072
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field host_driver_version, max length is 79")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 12072, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12072, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 80);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":12073
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field host_driver_version, max length is 79")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_host_d};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12073, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12073, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12072
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field host_driver_version, max length is 79")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":12074
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field host_driver_version, max length is 79")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 12074, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 12074, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":12075
 *             raise ValueError("String too long for field host_driver_version, max length is 79")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* FIX(review): the generated code did memcpy(dst, ptr, 80), reading up to
   * 79 bytes past the end of the bytes object's heap buffer whenever
   * len(buf) < 79 (heap over-read) and copying nondeterministic garbage into
   * the struct.  Instead, zero-fill the 80-byte field and copy only
   * len(buf)+1 bytes — CPython guarantees the internal bytes buffer is
   * NUL-terminated, and the length check above ensures len(buf) <= 79, so
   * len+1 <= 80 fits the field.  The getter (PyUnicode_FromString) sees the
   * same NUL-terminated string as before.  Root cause is in the .pyx
   * generator template; keep in sync if this file is regenerated. */
  (void)(memset(((void *)(__pyx_v_self->_ptr[0]).hostDriverVersion), 0, 80));
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).hostDriverVersion), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4) + 1));

  /* "cuda/bindings/_nvml.pyx":12067
 *         return cpython.PyUnicode_FromString(self._ptr[0].hostDriverVersion)
 * 
 *     @host_driver_version.setter             # <<<<<<<<<<<<<<
 *     def host_driver_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.host_driver_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12077
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_virtualization_caps(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.vgpu_virtualization_caps
 * .__get__: casts self to the extension-type struct and delegates to the
 * implementation; returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.vgpu_virtualization_caps.__get__: boxes the
 * unsigned int vgpuVirtualizationCaps field of the wrapped C struct as a
 * Python int (new reference); returns NULL with an exception set on
 * allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12080
 *     def vgpu_virtualization_caps(self):
 *         """int: """
 *         return self._ptr[0].vgpuVirtualizationCaps             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_virtualization_caps.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).vgpuVirtualizationCaps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12080, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12077
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_virtualization_caps(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.vgpu_virtualization_caps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12082
 *         return self._ptr[0].vgpuVirtualizationCaps
 * 
 *     @vgpu_virtualization_caps.setter             # <<<<<<<<<<<<<<
 *     def vgpu_virtualization_caps(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.vgpu_virtualization_caps
 * .__set__: casts self to the extension-type struct and forwards to the
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.vgpu_virtualization_caps.__set__: raises
 * ValueError on read-only instances, otherwise converts `val` to unsigned int
 * (via __Pyx_PyLong_As_unsigned_int, which sets an exception on non-integer
 * or out-of-range values) and stores it in the wrapped struct.  Returns 0 on
 * success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12084
 *     @vgpu_virtualization_caps.setter
 *     def vgpu_virtualization_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].vgpuVirtualizationCaps = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12085
 *     def vgpu_virtualization_caps(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuVirtualizationCaps = val
 * 
 */
    /* Build and raise ValueError(...) via Cython's vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12085, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12085, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12084
 *     @vgpu_virtualization_caps.setter
 *     def vgpu_virtualization_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].vgpuVirtualizationCaps = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12086
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].vgpuVirtualizationCaps = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12086, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuVirtualizationCaps = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12082
 *         return self._ptr[0].vgpuVirtualizationCaps
 * 
 *     @vgpu_virtualization_caps.setter             # <<<<<<<<<<<<<<
 *     def vgpu_virtualization_caps(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.vgpu_virtualization_caps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12088
 *         self._ptr[0].vgpuVirtualizationCaps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def guest_vgpu_version(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.guest_vgpu_version.__get__:
 * casts self to the extension-type struct and delegates to the
 * implementation; returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.guest_vgpu_version.__get__: boxes the
 * unsigned int guestVgpuVersion field of the wrapped C struct as a Python
 * int (new reference); returns NULL with an exception set on allocation
 * failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12091
 *     def guest_vgpu_version(self):
 *         """int: """
 *         return self._ptr[0].guestVgpuVersion             # <<<<<<<<<<<<<<
 * 
 *     @guest_vgpu_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).guestVgpuVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12091, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12088
 *         self._ptr[0].vgpuVirtualizationCaps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def guest_vgpu_version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.guest_vgpu_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12093
 *         return self._ptr[0].guestVgpuVersion
 * 
 *     @guest_vgpu_version.setter             # <<<<<<<<<<<<<<
 *     def guest_vgpu_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.guest_vgpu_version.__set__:
 * casts self to the extension-type struct and forwards to the
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.guest_vgpu_version.__set__: raises
 * ValueError on read-only instances, otherwise converts `val` to unsigned int
 * (via __Pyx_PyLong_As_unsigned_int, which sets an exception on non-integer
 * or out-of-range values) and stores it in the wrapped struct.  Returns 0 on
 * success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12095
 *     @guest_vgpu_version.setter
 *     def guest_vgpu_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].guestVgpuVersion = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12096
 *     def guest_vgpu_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].guestVgpuVersion = val
 * 
 */
    /* Build and raise ValueError(...) via Cython's vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12096, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12096, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12095
 *     @guest_vgpu_version.setter
 *     def guest_vgpu_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].guestVgpuVersion = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12097
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].guestVgpuVersion = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12097, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).guestVgpuVersion = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12093
 *         return self._ptr[0].guestVgpuVersion
 * 
 *     @guest_vgpu_version.setter             # <<<<<<<<<<<<<<
 *     def guest_vgpu_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.guest_vgpu_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12099
 *         self._ptr[0].guestVgpuVersion = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.opaque_data_size.__get__:
 * casts self to the extension-type struct and delegates to the
 * implementation; returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.opaque_data_size.__get__: boxes the
 * unsigned int opaqueDataSize field of the wrapped C struct as a Python int
 * (new reference); returns NULL with an exception set on allocation
 * failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12102
 *     def opaque_data_size(self):
 *         """int: """
 *         return self._ptr[0].opaqueDataSize             # <<<<<<<<<<<<<<
 * 
 *     @opaque_data_size.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).opaqueDataSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12099
 *         self._ptr[0].guestVgpuVersion = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.opaque_data_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12104
 *         return self._ptr[0].opaqueDataSize
 * 
 *     @opaque_data_size.setter             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.opaque_data_size.__set__:
 * casts self to the extension-type struct and forwards to the
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.opaque_data_size.__set__: raises ValueError
 * on read-only instances, otherwise converts `val` to unsigned int (via
 * __Pyx_PyLong_As_unsigned_int, which sets an exception on non-integer or
 * out-of-range values) and stores it in the wrapped struct.  Returns 0 on
 * success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12106
 *     @opaque_data_size.setter
 *     def opaque_data_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].opaqueDataSize = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12107
 *     def opaque_data_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].opaqueDataSize = val
 * 
 */
    /* Build and raise ValueError(...) via Cython's vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12107, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12107, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12106
 *     @opaque_data_size.setter
 *     def opaque_data_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].opaqueDataSize = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12108
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         self._ptr[0].opaqueDataSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12108, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).opaqueDataSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12104
 *         return self._ptr[0].opaqueDataSize
 * 
 *     @opaque_data_size.setter             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.opaque_data_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12110
 *         self._ptr[0].opaqueDataSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data(self):
 *         """~_numpy.int8: (array of length 4)."""
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.opaque_data.__get__:
 * casts self to the extension-type struct and delegates to the
 * implementation; returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.opaque_data.__get__: decodes the opaqueData
 * char array of the wrapped C struct as a UTF-8 str via
 * PyUnicode_FromString; returns a new reference, or NULL with an exception
 * set on decode/allocation failure.
 * NOTE(review): the .pyx docstring says opaqueData is a 4-byte array;
 * PyUnicode_FromString reads until the first NUL and will read past the
 * field if it is not NUL-terminated within those 4 bytes, and will raise
 * if the bytes are not valid UTF-8 — confirm against the struct definition
 * and the generator. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12113
 *     def opaque_data(self):
 *         """~_numpy.int8: (array of length 4)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].opaqueData)             # <<<<<<<<<<<<<<
 * 
 *     @opaque_data.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).opaqueData); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12110
 *         self._ptr[0].opaqueDataSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data(self):
 *         """~_numpy.int8: (array of length 4)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.opaque_data.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12115
 *         return cpython.PyUnicode_FromString(self._ptr[0].opaqueData)
 * 
 *     @opaque_data.setter             # <<<<<<<<<<<<<<
 *     def opaque_data(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol entry point for VgpuMetadata.opaque_data.__set__:
 * casts self to the extension-type struct and forwards to the
 * implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; the
   * macro presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `opaque_data` setter (generated from _nvml.pyx:12116).
 * Behavior, as written in the .pyx source mirrored in the banners below:
 *   1. raise ValueError if this instance was created read-only;
 *   2. `buf = val.encode()` (so `val` must be a str-like object with an
 *      `encode` method returning bytes);
 *   3. raise ValueError if len(buf) >= 4 (field capacity is 4 chars,
 *      leaving room for a NUL terminator);
 *   4. memcpy 4 bytes from the bytes object into the fixed-size
 *      `opaqueData` array of the wrapped nvmlVgpuMetadata_t.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12117
 *     @opaque_data.setter
 *     def opaque_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12118
 *     def opaque_data(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuMetadata_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12118, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12118, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12117
 *     @opaque_data.setter
 *     def opaque_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":12119
 *         if self._readonly:
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 4:
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 */
  /* Dynamic `val.encode()` call; result must be exactly `bytes` (or None,
   * which the checks below reject) per the `cdef bytes buf` declaration. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12119, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 12119, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12120
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 12120, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12120, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 4);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":12121
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:
 *             raise ValueError("String too long for field opaque_data, max length is 3")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_opaque};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12121, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12121, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12120
 *             raise ValueError("This VgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":12122
 *         if len(buf) >= 4:
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 12122, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 12122, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":12123
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* NOTE(review): always copies 4 bytes even when len(buf) < 3. CPython
   * bytes objects store a trailing NUL, so len(buf) == 3 reads exactly
   * the buffer; for shorter inputs this reads past the terminator of the
   * bytes payload. A length-bounded copy in the .pyx source would be
   * safer — flagging here since this file is generated and the fix
   * belongs in _nvml.pyx:12123. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).opaqueData), ((void *)__pyx_v_ptr), 4));

  /* "cuda/bindings/_nvml.pyx":12115
 *         return cpython.PyUnicode_FromString(self._ptr[0].opaqueData)
 * 
 *     @opaque_data.setter             # <<<<<<<<<<<<<<
 *     def opaque_data(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.opaque_data.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12125
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuMetadata instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
 * `VgpuMetadata.from_data(data)`. Parses exactly one positional-or-keyword
 * argument named "data" (no default), then delegates to the impl function.
 * The PyMethodDef below is what gets installed on the type. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_12from_data, "VgpuMetadata.from_data(data)\n\nCreate an VgpuMetadata instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_metadata_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` — the paren placement
     * makes this condition constant-false (unlikely() yields 0/1); it was
     * presumably meant as `unlikely(__pyx_kwds_len < 0)`. Harmless only if
     * __Pyx_NumKwargs_FASTCALL cannot fail here — confirm against the
     * Cython 3.2.2 runtime. Generated code; any fix belongs upstream. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12125, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12125, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 12125, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 12125, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12125, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12125, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.from_data` (generated from _nvml.pyx:12125).
 * Looks up the module-level global `vgpu_metadata_dtype` and forwards
 * everything to the shared `__from_data` helper together with the dtype
 * name and the VgpuMetadata type object. Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":12132
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_metadata_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_metadata_dtype", vgpu_metadata_dtype, VgpuMetadata)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_metadata_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_metadata_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12125
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuMetadata instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12134
 *         return __from_data(data, "vgpu_metadata_dtype", vgpu_metadata_dtype, VgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuMetadata instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
 * `VgpuMetadata.from_ptr(ptr, readonly=False, owner=None)`. Parses up to
 * three positional-or-keyword arguments ("ptr" required, "readonly" and
 * "owner" optional), converts `ptr` to intptr_t and `readonly` to a C
 * bool, then delegates to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_14from_ptr, "VgpuMetadata.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuMetadata instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` is constant-false due
     * to paren placement (same pattern as the other generated wrappers in
     * this file); intended check was presumably `__pyx_kwds_len < 0`. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12134, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 12134, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":12135
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuMetadata instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 12134, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12134, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* ptr: converted via PyLong_AsSsize_t and stored into intptr_t —
     * relies on Py_ssize_t and intptr_t having the same width, which
     * holds on CPython's supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12135, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12135, __pyx_L3_error)
    } else {
      /* readonly defaults to False when not supplied */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 12134, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":12134
 *         return __from_data(data, "vgpu_metadata_dtype", vgpu_metadata_dtype, VgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuMetadata instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuMetadata.from_ptr` (generated from _nvml.pyx:12134).
 * Semantics (see the mirrored .pyx source in the banners below):
 *   - ptr == 0 -> ValueError;
 *   - owner is None -> allocate a fresh nvmlVgpuMetadata_t with malloc,
 *     COPY the pointed-to struct into it, and mark the object as owning
 *     the allocation (_owned = True, freed elsewhere by the type);
 *   - owner provided -> wrap the pointer directly without copying and
 *     keep a reference to `owner` to pin the backing memory's lifetime;
 *   - _readonly is set from the `readonly` flag either way.
 * Returns a new VgpuMetadata reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":12143
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12144
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12144, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12144, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12143
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)
 */
  }

  /* "cuda/bindings/_nvml.pyx":12145
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
 */
  /* tp_new is called directly (no __init__), yielding a blank instance. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuMetadata(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12145, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12146
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12147
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuMetadata")
 */
    /* No-owner path: take a private copy so the caller's buffer can go
     * away. malloc without memset is fine — the memcpy below overwrites
     * the entire struct before any field is read. */
    __pyx_v_obj->_ptr = ((nvmlVgpuMetadata_t *)malloc((sizeof(nvmlVgpuMetadata_t))));

    /* "cuda/bindings/_nvml.pyx":12148
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuMetadata_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12149
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuMetadata")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuMetadata_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (may be shadowed),
       * hence the dynamic lookup + generic call sequence here rather
       * than a direct PyErr_NoMemory(). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12149, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuMetadata};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12149, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 12149, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12148
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuMetadata_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":12150
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuMetadata_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Caller guarantees `ptr` addresses a readable nvmlVgpuMetadata_t. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuMetadata_t))));

    /* "cuda/bindings/_nvml.pyx":12151
 *                 raise MemoryError("Error allocating VgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuMetadata_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12152
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuMetadata_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuMetadata_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12146
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuMetadata obj = VgpuMetadata.__new__(VgpuMetadata)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuMetadata_t *>malloc(sizeof(nvmlVgpuMetadata_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":12154
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuMetadata_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner path: borrow the pointer; `owner` keeps the memory alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuMetadata_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":12155
 *         else:
 *             obj._ptr = <nvmlVgpuMetadata_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":12156
 *             obj._ptr = <nvmlVgpuMetadata_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":12157
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":12158
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12134
 *         return __from_data(data, "vgpu_metadata_dtype", vgpu_metadata_dtype, VgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuMetadata instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Entry point for `VgpuMetadata.__reduce_cython__` — the auto-generated
 * pickle hook. Rejects any positional or keyword arguments, then
 * delegates to the impl, which unconditionally raises TypeError (this
 * type is not picklable because it wraps a raw pointer). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_16__reduce_cython__, "VgpuMetadata.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__reduce_cython__: always raises
 * TypeError, because the object's underlying `self._ptr` C pointer has no
 * meaningful pickled representation.  Returns NULL on the (only) exit path. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for VgpuMetadata.__setstate_cython__ (METH_FASTCALL entry).
 * Parses exactly one argument (`__pyx_state`, positional or keyword) and
 * delegates to the implementation, which unconditionally raises TypeError
 * (the wrapped C pointer cannot be restored from a pickle). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_18__setstate_cython__, "VgpuMetadata.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: this check previously read `if (unlikely(__pyx_kwds_len) < 0)`.
     * unlikely(x) collapses its operand to 0/1, so that comparison could
     * never be true and a failed keyword count was silently ignored.  The
     * comparison belongs inside unlikely(), matching the other wrappers in
     * this file (see the __reduce_cython__ and __init__ wrappers). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuMetadata.__setstate_cython__: always raises
 * TypeError (the `__pyx_state` argument is accepted but unused, because the
 * underlying C pointer cannot be reconstructed from pickled state). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12VgpuMetadata_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuMetadata.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12161
 * 
 * 
 * cdef _get_vgpu_pgpu_compatibility_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuCompatibility_t pod = nvmlVgpuPgpuCompatibility_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype mirroring nvmlVgpuPgpuCompatibility_t:
 * field names, formats (int32 x2), byte offsets computed from a local POD
 * instance, and the exact C struct itemsize.  Returns a new reference to the
 * dtype object, or NULL with a Python exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_pgpu_compatibility_dtype_offsets(void) {
  nvmlVgpuPgpuCompatibility_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuPgpuCompatibility_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_pgpu_compatibility_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":12162
 * 
 * cdef _get_vgpu_pgpu_compatibility_dtype_offsets():
 *     cdef nvmlVgpuPgpuCompatibility_t pod = nvmlVgpuPgpuCompatibility_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['vgpu_vm_compatibility', 'compatibility_limit_code'],
 */
  /* BUGFIX: __pyx_t_1 was read here without ever being assigned — undefined
   * behavior.  Default-construct it first (C++ value-initialization), which
   * is the translation of the `nvmlVgpuPgpuCompatibility_t()` call in the
   * .pyx source.  Only field addresses of `pod` are used below, but an
   * uninitialized read is still UB and trips compiler/sanitizer warnings. */
  __pyx_t_1 = nvmlVgpuPgpuCompatibility_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":12163
 * cdef _get_vgpu_pgpu_compatibility_dtype_offsets():
 *     cdef nvmlVgpuPgpuCompatibility_t pod = nvmlVgpuPgpuCompatibility_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['vgpu_vm_compatibility', 'compatibility_limit_code'],
 *         'formats': [_numpy.int32, _numpy.int32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":12164
 *     cdef nvmlVgpuPgpuCompatibility_t pod = nvmlVgpuPgpuCompatibility_t()
 *     return _numpy.dtype({
 *         'names': ['vgpu_vm_compatibility', 'compatibility_limit_code'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int32, _numpy.int32],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_vm_compatibility);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_vm_compatibility);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_vgpu_vm_compatibility) != (0)) __PYX_ERR(0, 12164, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_compatibility_limit_code);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_compatibility_limit_code);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_compatibility_limit_code) != (0)) __PYX_ERR(0, 12164, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 12164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12165
 *     return _numpy.dtype({
 *         'names': ['vgpu_vm_compatibility', 'compatibility_limit_code'],
 *         'formats': [_numpy.int32, _numpy.int32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.vgpuVmCompatibility)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 12165, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12165, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 12164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12167
 *         'formats': [_numpy.int32, _numpy.int32],
 *         'offsets': [
 *             (<intptr_t>&(pod.vgpuVmCompatibility)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.compatibilityLimitCode)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuVmCompatibility)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":12168
 *         'offsets': [
 *             (<intptr_t>&(pod.vgpuVmCompatibility)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.compatibilityLimitCode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPgpuCompatibility_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.compatibilityLimitCode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":12166
 *         'names': ['vgpu_vm_compatibility', 'compatibility_limit_code'],
 *         'formats': [_numpy.int32, _numpy.int32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuVmCompatibility)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.compatibilityLimitCode)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12166, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12166, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 12164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":12170
 *             (<intptr_t>&(pod.compatibilityLimitCode)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPgpuCompatibility_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuPgpuCompatibility_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 12164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict); unpack bound methods when possible. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12163, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12161
 * 
 * 
 * cdef _get_vgpu_pgpu_compatibility_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuCompatibility_t pod = nvmlVgpuPgpuCompatibility_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_pgpu_compatibility_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12187
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPgpuCompatibility_t *>calloc(1, sizeof(nvmlVgpuPgpuCompatibility_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for VgpuPgpuCompatibility.__init__ (tp_init slot, VARARGS
 * calling convention): rejects all positional and keyword arguments, then
 * delegates to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuCompatibility.__init__: calloc's a zeroed
 * nvmlVgpuPgpuCompatibility_t, raising MemoryError on allocation failure,
 * and marks the instance as owning (and responsible for freeing) it.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":12188
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPgpuCompatibility_t *>calloc(1, sizeof(nvmlVgpuPgpuCompatibility_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPgpuCompatibility")
 */
  __pyx_v_self->_ptr = ((nvmlVgpuPgpuCompatibility_t *)calloc(1, (sizeof(nvmlVgpuPgpuCompatibility_t))));

  /* "cuda/bindings/_nvml.pyx":12189
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPgpuCompatibility_t *>calloc(1, sizeof(nvmlVgpuPgpuCompatibility_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12190
 *         self._ptr = <nvmlVgpuPgpuCompatibility_t *>calloc(1, sizeof(nvmlVgpuPgpuCompatibility_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPgpuCompatibility")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, call it with the message, and raise the result. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12190, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPgpuCompati};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12190, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12190, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12189
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPgpuCompatibility_t *>calloc(1, sizeof(nvmlVgpuPgpuCompatibility_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":12191
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None (swap in None, dropping the previous reference). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":12192
 *             raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":12193
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":12187
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPgpuCompatibility_t *>calloc(1, sizeof(nvmlVgpuPgpuCompatibility_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12195
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuPgpuCompatibility_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for VgpuPgpuCompatibility.__dealloc__; simply
 * forwards to the implementation below. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this line only compiles if __Pyx_KwValues_VARARGS is a macro that
   * discards its arguments (e.g. expands to NULL) — confirm against the
   * Cython utility-code definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuPgpuCompatibility.__dealloc__: frees the wrapped
 * struct, but only if this instance owns it (`_owned`) and the pointer is
 * non-NULL.  The pointer is nulled before free() so the object never holds
 * a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  nvmlVgpuPgpuCompatibility_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuPgpuCompatibility_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":12197
 *     def __dealloc__(self):
 *         cdef nvmlVgpuPgpuCompatibility_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12198
 *         cdef nvmlVgpuPgpuCompatibility_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":12199
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":12200
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":12197
 *     def __dealloc__(self):
 *         cdef nvmlVgpuPgpuCompatibility_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":12195
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuPgpuCompatibility_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":12202
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPgpuCompatibility object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for VgpuPgpuCompatibility.__repr__; forwards to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS being an argument-discarding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuCompatibility.__repr__: builds the f-string
 * "<{__name__}.VgpuPgpuCompatibility object at {hex(id(self))}>" by
 * formatting the module's __name__, hex(id(self)), and three literal
 * fragments, then joining them with a precomputed length/max-char hint. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":12203
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuPgpuCompatibility object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module __name__ as a str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to str. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".VgpuPgpuCompatibility object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuPgpuCompatibility_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12202
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPgpuCompatibility object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12205
 *         return f"<{__name__}.VgpuPgpuCompatibility object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for VgpuPgpuCompatibility.ptr; forwards to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS being an argument-discarding macro — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuPgpuCompatibility.ptr property getter: returns
 * the raw `self._ptr` address as a Python int (via intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12208
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12205
 *         return f"<{__name__}.VgpuPgpuCompatibility object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12210
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for the cdef method `_get_ptr`
 * ("cuda/bindings/_nvml.pyx":12210-12211): returns the wrapped struct
 * pointer as an intptr_t with no Python-object creation and no error
 * path. Auto-generated by Cython. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":12211
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12210
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12213
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for `__int__` (nb_int slot): casts `self` to the
 * extension-type struct and delegates to the implementation function.
 * The RefNanny macros are Cython's debug refcount tracing and are
 * typically no-ops unless CYTHON_REFNANNY is enabled. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot
   * function; __Pyx_KwValues_VARARGS is presumably a macro that does not
   * evaluate its arguments in this configuration — generated code,
   * leave as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__int__` ("cuda/bindings/_nvml.pyx":12213-12214):
 * identical semantics to the `ptr` property getter — returns the wrapped
 * pointer address as a Python int. Auto-generated by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":12214
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12213
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12216
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPgpuCompatibility other_
 *         if not isinstance(other, VgpuPgpuCompatibility):
*/

/* Python wrapper */
/* Python-level wrapper for `__eq__`: casts `self` to the extension-type
 * struct and forwards `other` unchanged to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__eq__` ("cuda/bindings/_nvml.pyx":12216-12221).
 * Returns False for non-VgpuPgpuCompatibility operands, otherwise
 * compares the two wrapped nvmlVgpuPgpuCompatibility_t structs by raw
 * memcmp of their storage. Auto-generated by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":12218
 *     def __eq__(self, other):
 *         cdef VgpuPgpuCompatibility other_
 *         if not isinstance(other, VgpuPgpuCompatibility):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":12219
 *         cdef VgpuPgpuCompatibility other_
 *         if not isinstance(other, VgpuPgpuCompatibility):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuCompatibility_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12218
 *     def __eq__(self, other):
 *         cdef VgpuPgpuCompatibility other_
 *         if not isinstance(other, VgpuPgpuCompatibility):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":12220
 *         if not isinstance(other, VgpuPgpuCompatibility):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuCompatibility_t)) == 0)
 * 
*/
  /* NOTE(review): the TypeTest below re-checks the type but also accepts
   * None; a None `other` would then dereference other_->_ptr in the
   * memcmp. The isinstance() guard above already rejects None, so this
   * path is unreachable in practice. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility))))) __PYX_ERR(0, 12220, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":12221
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuCompatibility_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuPgpuCompatibility_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12216
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPgpuCompatibility other_
 *         if not isinstance(other, VgpuPgpuCompatibility):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12223
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuCompatibility_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
*/

/* Python wrapper */
/* Python-level wrapper for `__setitem__` (mp_ass_subscript slot):
 * returns 0 on success, -1 on error per the CPython slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__setitem__` ("cuda/bindings/_nvml.pyx":12223-12233).
 * Two behaviors:
 *   obj[0] = ndarray  -> allocate a fresh nvmlVgpuPgpuCompatibility_t,
 *                        copy the array's bytes into it, take ownership
 *                        (_owned = True), and mirror the array's
 *                        writeable flag into _readonly;
 *   anything else     -> fall back to setattr(self, key, val).
 * NOTE(review): the malloc below overwrites self->_ptr without freeing a
 * previously owned allocation — presumably repeated obj[0] assignment
 * leaks; if confirmed, the fix belongs in the .pyx source, not in this
 * generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":12224
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if self._ptr == NULL:
*/
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12224, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* numpy.ndarray is looked up dynamically (module global), so numpy is
   * only required when this branch is actually evaluated. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 12224, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12225
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
*/
    __pyx_v_self->_ptr = ((nvmlVgpuPgpuCompatibility_t *)malloc((sizeof(nvmlVgpuPgpuCompatibility_t))));

    /* "cuda/bindings/_nvml.pyx":12226
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuCompatibility_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12227
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuCompatibility_t))
 *             self._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12227, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPgpuCompati};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12227, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 12227, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12226
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuCompatibility_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":12228
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuCompatibility_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* val.ctypes.data yields the array's base address as a Python int;
     * it is converted back to a pointer for the raw byte copy.
     * NOTE(review): assumes the array holds at least
     * sizeof(nvmlVgpuPgpuCompatibility_t) contiguous bytes — presumably
     * validated by the caller/dtype contract; not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12228, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12228, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12228, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuPgpuCompatibility_t))));

    /* "cuda/bindings/_nvml.pyx":12229
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuCompatibility_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12230
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuCompatibility_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12231
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12231, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12231, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 12231, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":12224
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":12233
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 12233, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":12223
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuCompatibility_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12235
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_vm_compatibility(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `vgpu_vm_compatibility` property getter:
 * casts `self` and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `vgpu_vm_compatibility` getter
 * ("cuda/bindings/_nvml.pyx":12235-12238): reads the struct field
 * `vgpuVmCompatibility` through self->_ptr, casts it to int, and boxes
 * it as a Python int. NOTE(review): dereferences _ptr unconditionally —
 * presumably _ptr is guaranteed non-NULL by the class's construction
 * paths; confirm in the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12238
 *     def vgpu_vm_compatibility(self):
 *         """int: """
 *         return <int>(self._ptr[0].vgpuVmCompatibility)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_vm_compatibility.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).vgpuVmCompatibility)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12235
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_vm_compatibility(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.vgpu_vm_compatibility.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12240
 *         return <int>(self._ptr[0].vgpuVmCompatibility)
 * 
 *     @vgpu_vm_compatibility.setter             # <<<<<<<<<<<<<<
 *     def vgpu_vm_compatibility(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `vgpu_vm_compatibility` property setter:
 * returns 0 on success, -1 on error per the descriptor slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `vgpu_vm_compatibility` setter
 * ("cuda/bindings/_nvml.pyx":12240-12244): raises ValueError if the
 * instance is read-only, otherwise converts `val` to int and stores it
 * into the struct field (cast to nvmlVgpuVmCompatibility_t). */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12242
 *     @vgpu_vm_compatibility.setter
 *     def vgpu_vm_compatibility(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")
 *         self._ptr[0].vgpuVmCompatibility = <nvmlVgpuVmCompatibility_t><int>val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12243
 *     def vgpu_vm_compatibility(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuVmCompatibility = <nvmlVgpuVmCompatibility_t><int>val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuCompatibility_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12243, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12243, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12242
 *     @vgpu_vm_compatibility.setter
 *     def vgpu_vm_compatibility(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")
 *         self._ptr[0].vgpuVmCompatibility = <nvmlVgpuVmCompatibility_t><int>val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12244
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")
 *         self._ptr[0].vgpuVmCompatibility = <nvmlVgpuVmCompatibility_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12244, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuVmCompatibility = ((nvmlVgpuVmCompatibility_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":12240
 *         return <int>(self._ptr[0].vgpuVmCompatibility)
 * 
 *     @vgpu_vm_compatibility.setter             # <<<<<<<<<<<<<<
 *     def vgpu_vm_compatibility(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.vgpu_vm_compatibility.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12246
 *         self._ptr[0].vgpuVmCompatibility = <nvmlVgpuVmCompatibility_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compatibility_limit_code(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `compatibility_limit_code` property
 * getter: casts `self` and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `compatibility_limit_code` getter
 * ("cuda/bindings/_nvml.pyx":12246-12249): reads the struct field
 * `compatibilityLimitCode` through self->_ptr, casts it to int, and
 * boxes it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12249
 *     def compatibility_limit_code(self):
 *         """int: """
 *         return <int>(self._ptr[0].compatibilityLimitCode)             # <<<<<<<<<<<<<<
 * 
 *     @compatibility_limit_code.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).compatibilityLimitCode)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12246
 *         self._ptr[0].vgpuVmCompatibility = <nvmlVgpuVmCompatibility_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def compatibility_limit_code(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.compatibility_limit_code.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12251
 *         return <int>(self._ptr[0].compatibilityLimitCode)
 * 
 *     @compatibility_limit_code.setter             # <<<<<<<<<<<<<<
 *     def compatibility_limit_code(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `compatibility_limit_code` property
 * setter: returns 0 on success, -1 on error per the slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `compatibility_limit_code` setter
 * ("cuda/bindings/_nvml.pyx":12251-12255): raises ValueError if the
 * instance is read-only, otherwise converts `val` to int and stores it
 * into the struct field (cast to
 * nvmlVgpuPgpuCompatibilityLimitCode_t). Mirrors the
 * `vgpu_vm_compatibility` setter above. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12253
 *     @compatibility_limit_code.setter
 *     def compatibility_limit_code(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12254
 *     def compatibility_limit_code(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuCompatibility_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12254, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12254, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12253
 *     @compatibility_limit_code.setter
 *     def compatibility_limit_code(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12255
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuCompatibility instance is read-only")
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12255, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).compatibilityLimitCode = ((nvmlVgpuPgpuCompatibilityLimitCode_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":12251
 *         return <int>(self._ptr[0].compatibilityLimitCode)
 * 
 *     @compatibility_limit_code.setter             # <<<<<<<<<<<<<<
 *     def compatibility_limit_code(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.compatibility_limit_code.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12257
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method VgpuPgpuCompatibility.from_data(data):
 * unpacks the single "data" argument (positional or keyword) and forwards it
 * to the __pyx_pf_..._12from_data implementation below.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_12from_data, "VgpuPgpuCompatibility.from_data(data)\n\nCreate an VgpuPgpuCompatibility instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_pgpu_compatibility_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was "unlikely(__pyx_kwds_len) < 0" -- unlikely() yields the 0/1
     * value of !!(x), so the comparison was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * The correctly parenthesized form matches the generated code in the
     * __reduce_cython__ wrapper in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12257, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 12257, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 12257, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12257, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12257, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuCompatibility.from_data(data).  Looks up the
 * module-level global `vgpu_pgpu_compatibility_dtype` and delegates to the
 * module-level __from_data helper together with the dtype name string and
 * the VgpuPgpuCompatibility extension type.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":12264
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_pgpu_compatibility_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_pgpu_compatibility_dtype", vgpu_pgpu_compatibility_dtype, VgpuPgpuCompatibility)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module state by name. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_pgpu_compatibility_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_pgpu_compatibility_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12257
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12266
 *         return __from_data(data, "vgpu_pgpu_compatibility_dtype", vgpu_pgpu_compatibility_dtype, VgpuPgpuCompatibility)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for the static method
 * VgpuPgpuCompatibility.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks up to three positional/keyword arguments, converts `ptr` to
 * intptr_t and `readonly` to a C int (truthiness), applies the owner=None
 * default, then forwards to the __pyx_pf_..._14from_ptr implementation.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_14from_ptr, "VgpuPgpuCompatibility.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuPgpuCompatibility instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was "unlikely(__pyx_kwds_len) < 0" -- unlikely() yields the 0/1
     * value of !!(x), so the comparison was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12266, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 12266, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":12267
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuPgpuCompatibility instance wrapping the given pointer.
 * 
 */
      /* Apply the owner=None default; readonly's default is applied below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 12266, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12266, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12267, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12267, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 12266, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":12266
 *         return __from_data(data, "vgpu_pgpu_compatibility_dtype", vgpu_pgpu_compatibility_dtype, VgpuPgpuCompatibility)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuCompatibility.from_ptr(ptr, readonly, owner).
 *
 *   - ptr == 0          -> raise ValueError("ptr must not be null (0)")
 *   - owner is None     -> malloc a private nvmlVgpuPgpuCompatibility_t,
 *                          memcpy the pointee into it, set _owner = None and
 *                          _owned = True (raises MemoryError if malloc fails)
 *   - owner is not None -> wrap the caller's pointer directly (no copy),
 *                          keep a reference to `owner`, _owned = False
 *
 * In both branches _readonly is set from `readonly`.
 * NOTE(review): _owned presumably tells the type's deallocator whether to
 * free(_ptr); the dealloc is outside this chunk -- confirm there.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":12275
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)
 */
  /* Reject a null pointer before doing any allocation. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12276
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12276, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12276, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12275
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)
 */
  }

  /* "cuda/bindings/_nvml.pyx":12277
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 */
  /* Allocate the wrapper object via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12277, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12278
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12279
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 */
    /* No owner: take a private copy of the pointee so the caller's buffer
     * may go away after this call returns. */
    __pyx_v_obj->_ptr = ((nvmlVgpuPgpuCompatibility_t *)malloc((sizeof(nvmlVgpuPgpuCompatibility_t))));

    /* "cuda/bindings/_nvml.pyx":12280
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuCompatibility_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12281
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuCompatibility_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12281, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPgpuCompati};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12281, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 12281, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12280
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuCompatibility_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":12282
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuCompatibility_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuPgpuCompatibility_t))));

    /* "cuda/bindings/_nvml.pyx":12283
 *                 raise MemoryError("Error allocating VgpuPgpuCompatibility")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuCompatibility_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12284
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuCompatibility_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12278
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuCompatibility obj = VgpuPgpuCompatibility.__new__(VgpuPgpuCompatibility)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>malloc(sizeof(nvmlVgpuPgpuCompatibility_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":12286
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner provided: wrap the caller's memory in place and keep the owner
   * alive via a strong reference. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuPgpuCompatibility_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":12287
 *         else:
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":12288
 *             obj._ptr = <nvmlVgpuPgpuCompatibility_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":12289
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":12290
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12266
 *         return __from_data(data, "vgpu_pgpu_compatibility_dtype", vgpu_pgpu_compatibility_dtype, VgpuPgpuCompatibility)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for VgpuPgpuCompatibility.__reduce_cython__(self).
 * Rejects any positional or keyword arguments, then forwards to the
 * implementation below, which unconditionally raises TypeError (pickling
 * is disabled for this type). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_16__reduce_cython__, "VgpuPgpuCompatibility.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: unconditionally raises TypeError,
 * making instances unpicklable (the wrapped raw C pointer `self._ptr`
 * cannot be serialized).  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for VgpuPgpuCompatibility.__setstate_cython__(self, __pyx_state).
 * Unpacks the single "__pyx_state" argument and forwards to the
 * implementation, which unconditionally raises TypeError (pickling is
 * disabled for this type).  Returns NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_18__setstate_cython__, "VgpuPgpuCompatibility.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was "unlikely(__pyx_kwds_len) < 0" -- unlikely() yields the 0/1
     * value of !!(x), so the comparison was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuCompatibility.__setstate_cython__ (Cython-generated
 * "(tree fragment)"). Pickling of this extension type is deliberately disabled:
 * the call unconditionally raises TypeError, so both parameters are unused.
 * Always returns NULL with the TypeError set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned "self._ptr cannot be converted..." message,
   * then jump straight to the error path — there is no success path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Record the traceback frame for the raised TypeError and return NULL. */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuCompatibility.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12293
 * 
 * 
 * cdef _get_gpu_instance_placement_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstancePlacement_t pod = nvmlGpuInstancePlacement_t()
 *     return _numpy.dtype({
*/

/* cdef helper _get_gpu_instance_placement_dtype_offsets():
 * Builds and returns a numpy structured dtype that mirrors the C layout of
 * nvmlGpuInstancePlacement_t. The dict passed to numpy.dtype() contains:
 *   'names'    -> ['start', 'size_']        (Python-side field names)
 *   'formats'  -> [numpy.uint32, numpy.uint32]
 *   'offsets'  -> byte offsets of pod.start / pod.size within the struct,
 *                 computed via pointer arithmetic on a local instance
 *   'itemsize' -> sizeof(nvmlGpuInstancePlacement_t)
 * Returns a new reference to the dtype object, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_placement_dtype_offsets(void) {
  nvmlGpuInstancePlacement_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpuInstancePlacement_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_instance_placement_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":12294
 * 
 * cdef _get_gpu_instance_placement_dtype_offsets():
 *     cdef nvmlGpuInstancePlacement_t pod = nvmlGpuInstancePlacement_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['start', 'size_'],
 */
  /* NOTE(review): __pyx_t_1 is copied here without a visible explicit
   * initialization (Cython's C++ default-construction pattern); pod is only
   * ever used below for taking field addresses, never for its values, so the
   * indeterminate contents are harmless — confirm against Cython's codegen
   * for C++ default-constructed PODs if this is ever flagged by a sanitizer. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":12295
 * cdef _get_gpu_instance_placement_dtype_offsets():
 *     cdef nvmlGpuInstancePlacement_t pod = nvmlGpuInstancePlacement_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['start', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up _numpy.dtype (module global "numpy" bound as _numpy in the .pyx). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":12296
 *     cdef nvmlGpuInstancePlacement_t pod = nvmlGpuInstancePlacement_t()
 *     return _numpy.dtype({
 *         'names': ['start', 'size_'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the 4-entry spec dict; first entry: 'names' -> ['start', 'size_']. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_start);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_start);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_start) != (0)) __PYX_ERR(0, 12296, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_size_2);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_size_2);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_size_2) != (0)) __PYX_ERR(0, 12296, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 12296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12297
 *     return _numpy.dtype({
 *         'names': ['start', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),
 */
  /* Second entry: 'formats' -> [numpy.uint32, numpy.uint32]; numpy is looked
   * up once per attribute access because globals may be rebound. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 12297, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12297, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 12296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12299
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 *         ],
 */
  /* offsetof-style computation: byte offset of the 'start' field. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.start)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12299, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":12300
 *         'offsets': [
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstancePlacement_t),
 */
  /* Byte offset of the C field 'size' (exposed as Python name 'size_'). */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.size)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12300, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":12298
 *         'names': ['start', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 */
  /* Third entry: 'offsets' -> [offset_of_start, offset_of_size]. */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12298, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12298, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 12296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":12302
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstancePlacement_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* Fourth entry: 'itemsize' pins the dtype to the exact struct size,
   * including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuInstancePlacement_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 12296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via the fastcall protocol; the method-unpack
   * branch shifts the argument window when dtype is a bound method. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12295, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12293
 * 
 * 
 * cdef _get_gpu_instance_placement_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstancePlacement_t pod = nvmlGpuInstancePlacement_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release all live temporaries and return NULL with the
   * exception (and traceback frame) set. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_instance_placement_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12324
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=gpu_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstancePlacement.__init__(self, size=1).
 * Unpacks positional args and keywords into values[0] ('size', defaulting to
 * the interned int 1), then dispatches to the implementation function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments (safe/unsafe variants per build config). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12324, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: take up to one positional arg, then merge keywords;
       * fall back to the default (int 1) when 'size' was not supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12324, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 12324, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path: same unpacking without keyword parsing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12324, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  /* Too many positional arguments: raise the standard TypeError. */
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 12324, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.__init__(self, size=1):
 *   1. arr = numpy.empty(size, dtype=gpu_instance_placement_dtype)
 *   2. self._data = arr.view(numpy.recarray)
 *   3. assert self._data.itemsize == sizeof(nvmlGpuInstancePlacement_t)
 * The assert guards against the structured dtype drifting out of sync with
 * the C struct layout. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":12325
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=gpu_instance_placement_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlGpuInstancePlacement_t),
 */
  /* Resolve numpy.empty and the module-level gpu_instance_placement_dtype,
   * then vectorcall numpy.empty(size, dtype=...) via the kwargs builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_gpu_instance_placement_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12325, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 12325, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12325, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12326
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=gpu_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlGpuInstancePlacement_t),
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGpuInstancePlacement_t) }"
 */
  /* self._data = arr.view(numpy.recarray): fast method call, then swap the
   * new reference into the _data slot, releasing the previous value. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12326, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12327
 *         arr = _numpy.empty(size, dtype=gpu_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlGpuInstancePlacement_t),              # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGpuInstancePlacement_t) }"
 * 
 */
  /* Layout sanity check, skipped entirely when assertions are disabled. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12327, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuInstancePlacement_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12327, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12327, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 12327, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":12328
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlGpuInstancePlacement_t),
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGpuInstancePlacement_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Mismatch: build the f-string message via unicode-join of four parts
       * and raise AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12328, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12328, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlGpuInstancePlacement_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12328, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12328, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 12327, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 12327, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":12324
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=gpu_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12330
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGpuInstancePlacement_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Thin tp_repr slot wrapper for GpuInstancePlacement.__repr__: no arguments
 * to parse, just casts self and forwards to the implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is invoked with identifiers that are
 * not declared here; in Cython's utility code this expands to a constant for
 * no-arg wrappers, so the names are never evaluated — confirm against the
 * generated macro definitions if the build config changes. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.__repr__:
 * if self._data.size > 1 returns
 *   f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 * otherwise
 *   f"<{__name__}.GpuInstancePlacement object at {hex(id(self))}>"
 * Each f-string is assembled via __Pyx_PyUnicode_Join over literal and
 * formatted fragments. Returns a new unicode reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":12331
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Branch on self._data.size > 1 to pick the array-style vs scalar repr. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12331, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12331, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 12331, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":12332
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.GpuInstancePlacement object at {hex(id(self))}>"
 */
    /* Array case: format __name__, _data.size and hex(id(self)), then join
     * seven fragments (literals interleaved with the formatted values). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_GpuInstancePlacement_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 28 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12331
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":12334
 *             return f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.GpuInstancePlacement object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar case: same construction with five fragments (no size segment). */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12334, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12334, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12334, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12334, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12334, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_GpuInstancePlacement_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 32 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12334, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":12330
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGpuInstancePlacement_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.GpuInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12336
 *             return f"<{__name__}.GpuInstancePlacement object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-descriptor getter wrapper for GpuInstancePlacement.ptr: no
 * arguments to parse, casts self and forwards to the implementation.
 * NOTE(review): as with the __repr__ wrapper, __Pyx_KwValues_VARARGS here
 * references undeclared identifiers that the macro is expected not to
 * evaluate for no-arg wrappers — confirm in the generated macro definitions. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of the `ptr` property getter (pyx line 12339): evaluates
 * self._data.ctypes.data and returns it as a new reference (a Python int
 * holding the numpy buffer's address).  On attribute-lookup failure the
 * code jumps to __pyx_L1_error, which releases temporaries and records a
 * traceback before returning NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12339
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_1 = self._data.ctypes */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* t_2 = t_1.data — the buffer address as a Python int */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* transfer ownership of t_2 to the return slot */
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12336
 *             return f"<{__name__}.GpuInstancePlacement object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12341
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-only `cdef intptr_t _get_ptr(self)` (pyx line 12341): fetches
 * self._data.ctypes.data and converts the resulting Python int to a C
 * intptr_t via PyLong_AsSsize_t.  NOTE(review): on error this returns 0
 * with a Python exception set — callers must check PyErr_Occurred(),
 * since 0 is not distinguishable from a genuine null address here. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20GpuInstancePlacement__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":12342
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Python int -> C intptr_t; -1 with an exception pending signals failure */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12342, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12341
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12344
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* Python slot wrapper for __int__ (nb_int): casts self to the extension
 * struct and delegates to the C-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of __int__ (pyx line 12344): raises TypeError when
 * self._data.size > 1 (arrays must use .ptr instead), otherwise returns
 * self._data.ctypes.data as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":12345
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* t_3 = bool(self._data.size > 1) */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12345, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 12345, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":12346
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* construct and raise TypeError(<interned message>) via vectorcall */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12346, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12346, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12345
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":12348
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* single element: return the buffer address (self._data.ctypes.data) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12344
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12350
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* Python slot wrapper for __len__ (sq_length/mp_length): casts self to the
 * extension struct and delegates; returns -1 on error per slot contract. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of __len__ (pyx line 12350): returns self._data.size
 * converted to Py_ssize_t; -1 with an exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":12351
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12351, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* index-convert the Python size object to a C Py_ssize_t */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 12351, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12350
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12353
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Python slot wrapper for __eq__: casts self and forwards both operands to
 * the C-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of __eq__ (pyx line 12353): returns False unless `other` is
 * a GpuInstancePlacement whose _data matches self._data in size and dtype;
 * otherwise returns bool((self._data == other._data).all()).  The three
 * short-circuit conditions share the label __pyx_L4_bool_binop_done. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":12354
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* hoist self._data into a local with its own strong reference */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12355
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* condition 1: not isinstance(other, GpuInstancePlacement) */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* condition 2: self_data.size != other._data.size */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* condition 3: self_data.dtype != other._data.dtype */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 12355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":12356
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12355
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":12357
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* element-wise compare, then call .all() on the result and booleanize */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12357, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12357, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12357, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* double negation normalizes truthiness to 0/1 for PyBool */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12353
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GpuInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12359
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for the `start` property getter: casts self and
 * delegates to the C-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of the `start` property getter (pyx line 12359): for a
 * single-element record (self._data.size == 1) returns
 * int(self._data.start[0]); otherwise returns the `start` field array
 * unchanged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12362
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.start[0])
 *         return self._data.start
 */
  /* optimized equality against the interned int 1 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12362, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":12363
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.start[0])             # <<<<<<<<<<<<<<
 *         return self._data.start
 * 
 */
    /* scalar case: index element 0 and coerce to a plain Python int */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_start); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12363, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12363, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12363, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12362
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.start[0])
 *         return self._data.start
 */
  }

  /* "cuda/bindings/_nvml.pyx":12364
 *         if self._data.size == 1:
 *             return int(self._data.start[0])
 *         return self._data.start             # <<<<<<<<<<<<<<
 * 
 *     @start.setter
 */
  /* array case: hand back the field view as-is */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_start); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12359
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.start.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12366
 *         return self._data.start
 * 
 *     @start.setter             # <<<<<<<<<<<<<<
 *     def start(self, val):
 *         self._data.start = val
*/

/* Python wrapper */
/* Python slot wrapper for the `start` property setter: casts self and
 * forwards the new value; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of the `start` property setter (pyx line 12366): performs
 * self._data.start = val via setattr; 0 on success, -1 with traceback on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":12368
 *     @start.setter
 *     def start(self, val):
 *         self._data.start = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_start, __pyx_v_val) < (0)) __PYX_ERR(0, 12368, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":12366
 *         return self._data.start
 * 
 *     @start.setter             # <<<<<<<<<<<<<<
 *     def start(self, val):
 *         self._data.start = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.start.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12370
 *         self._data.start = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Python slot wrapper for the `size_` property getter (trailing underscore
 * in the pyx avoids clashing with ndarray.size): casts self and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of the `size_` property getter (pyx line 12370): mirrors the
 * `start` getter — for a single-element record returns
 * int(self._data.size_[0]); otherwise returns the `size_` field array.
 * Note the distinct interned names: __pyx_n_u_size is the ndarray attribute
 * "size", __pyx_n_u_size_2 is the struct field "size_". */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12373
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.size_[0])
 *         return self._data.size_
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12373, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":12374
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.size_[0])             # <<<<<<<<<<<<<<
 *         return self._data.size_
 * 
 */
    /* scalar case: index element 0 and coerce to a plain Python int */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12374, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12373
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.size_[0])
 *         return self._data.size_
 */
  }

  /* "cuda/bindings/_nvml.pyx":12375
 *         if self._data.size == 1:
 *             return int(self._data.size_[0])
 *         return self._data.size_             # <<<<<<<<<<<<<<
 * 
 *     @size_.setter
 */
  /* array case: hand back the field view as-is */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12370
 *         self._data.start = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.size_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12377
 *         return self._data.size_
 * 
 *     @size_.setter             # <<<<<<<<<<<<<<
 *     def size_(self, val):
 *         self._data.size_ = val
*/

/* Python wrapper */
/* Python slot wrapper for the `size_` property setter: casts self and
 * forwards the new value; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level body of the `size_` property setter (pyx line 12377): performs
 * self._data.size_ = val via setattr; 0 on success, -1 with traceback on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":12379
 *     @size_.setter
 *     def size_(self, val):
 *         self._data.size_ = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size_2, __pyx_v_val) < (0)) __PYX_ERR(0, 12379, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":12377
 *         return self._data.size_
 * 
 *     @size_.setter             # <<<<<<<<<<<<<<
 *     def size_(self, val):
 *         self._data.size_ = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.size_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12381
 *         self._data.size_ = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python slot wrapper for __getitem__ (mp_subscript): casts self and
 * forwards the key object to the C-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.__getitem__ (pyx lines 12381-12395).
 *
 * Integer key path: key and self._data.size are converted to Py_ssize_t,
 * the key is bounds-checked (IndexError if key_ >= size or key_ <= -(size+1)),
 * negative keys are wrapped by adding size, and a new GpuInstancePlacement
 * is returned wrapping the one-element slice self._data[key_:key_+1].
 *
 * Any other key: self._data[key] is fetched; if the result is a
 * numpy.recarray with dtype gpu_instance_placement_dtype it is re-wrapped
 * via GpuInstancePlacement.from_data, otherwise it is returned as-is.
 *
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":12384
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* isinstance(key, int) — PyLong_Check also accepts int subclasses */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12385
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 12385, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":12386
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12386, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 12386, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":12387
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* short-circuit `or`: second comparison only evaluated if first is false */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12388
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      /* raise IndexError("index is out of bounds") via vectorcall */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12388, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 12388, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12387
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":12389
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":12390
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* normalize negative index into [0, size) */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":12389
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":12391
 *             if key_ < 0:
 *                 key_ += size
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == gpu_instance_placement_dtype:
 */
    /* slice self._data[key_:key_+1] and wrap it via the from_data staticmethod */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12391, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12391, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12384
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":12392
 *                 key_ += size
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == gpu_instance_placement_dtype:
 *             return GpuInstancePlacement.from_data(out)
 */
  /* non-integer key (e.g. slice or field name): generic subscript on _data */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12392, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":12393
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == gpu_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             return GpuInstancePlacement.from_data(out)
 *         return out
 */
  /* short-circuit `and`: dtype comparison only if out is a recarray */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_gpu_instance_placement_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 12393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12394
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == gpu_instance_placement_dtype:
 *             return GpuInstancePlacement.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12394, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12393
 *             return GpuInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == gpu_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             return GpuInstancePlacement.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":12395
 *         if isinstance(out, _numpy.recarray) and out.dtype == gpu_instance_placement_dtype:
 *             return GpuInstancePlacement.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* fall-through: return the raw indexing result unchanged */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12381
 *         self._data.size_ = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  /* error path: release any live temporaries, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12397
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* mp_ass_subscript slot thunk for GpuInstancePlacement.__setitem__: casts
 * self and forwards to the typed implementation.  Returns 0 on success,
 * -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this slot;
   * relies on __Pyx_KwValues_VARARGS not evaluating its arguments here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.__setitem__ (pyx line 12398):
 *     self._data[key] = val
 * Delegates the store directly to the wrapped recarray via the generic
 * PyObject_SetItem protocol.  Returns 0 on success; on failure records the
 * traceback and returns -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":12398
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 12398, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":12397
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12400
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstancePlacement instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the staticmethod `GpuInstancePlacement.from_data(data)`.
 * Unpacks exactly one positional-or-keyword argument ("data") from the
 * METH_FASTCALL calling convention, then dispatches to the typed
 * implementation __pyx_pf_..._14from_data.  Returns a new reference or
 * NULL with TypeError on bad arity / unexpected keywords. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_14from_data, "GpuInstancePlacement.from_data(data)\n\nCreate an GpuInstancePlacement instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `gpu_instance_placement_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX(review): this previously read `if (unlikely(__pyx_kwds_len) < 0)`.
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which only yields 0
     * or 1, so `unlikely(...) < 0` was always false and a negative (error)
     * result from __Pyx_NumKwargs_FASTCALL was silently ignored.  The
     * parenthesis belongs around the whole comparison. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12400, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* keyword arguments present: accept at most one positional, then let
       * __Pyx_ParseKeywords fill/validate the rest against "data" */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12400, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 12400, __pyx_L3_error)
      /* ensure the required argument was supplied one way or another */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 12400, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* fast path: exactly one positional argument, no keywords */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12400, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12400, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* unpacking failed: drop any argument references taken so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.from_data (pyx lines 12400-12416).
 *
 * Allocates a fresh GpuInstancePlacement via tp_new, then validates `data`:
 *   - TypeError  unless isinstance(data, numpy.ndarray)
 *   - ValueError unless data.ndim == 1
 *   - ValueError unless data.dtype == gpu_instance_placement_dtype
 * On success stores data.view(numpy.recarray) into obj->_data (no copy; the
 * new object aliases the caller's buffer) and returns the new instance.
 * Returns NULL with an exception set on any failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":12407
 *             data (_numpy.ndarray): a 1D array of dtype `gpu_instance_placement_dtype` holding the data.
 *         """
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* direct tp_new call bypasses __init__ */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstancePlacement(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12407, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12408
 *         """
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 12408, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":12409
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12409, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12409, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12408
 *         """
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":12410
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != gpu_instance_placement_dtype:
 */
  /* data.ndim != 1 — optimized int comparison against the cached constant 1 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12410, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 12410, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":12411
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != gpu_instance_placement_dtype:
 *             raise ValueError("data array must be of dtype gpu_instance_placement_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12411, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12411, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12410
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != gpu_instance_placement_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":12412
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != gpu_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype gpu_instance_placement_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* rich-compare data.dtype != module-global gpu_instance_placement_dtype */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_instance_placement_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 12412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":12413
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != gpu_instance_placement_dtype:
 *             raise ValueError("data array must be of dtype gpu_instance_placement_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_gpu};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12413, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 12413, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12412
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != gpu_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype gpu_instance_placement_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":12414
 *         if data.dtype != gpu_instance_placement_dtype:
 *             raise ValueError("data array must be of dtype gpu_instance_placement_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* call data.view(numpy.recarray) and swap the result into obj->_data,
   * releasing whatever _data previously held */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12414, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12416
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12400
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstancePlacement instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* error path: release temporaries and the half-built obj, record traceback */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12418
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GpuInstancePlacement instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython entry point for the static method GpuInstancePlacement.from_ptr.
 * Parses (ptr, size=1, readonly=False) from positional and keyword
 * arguments — via METH_FASTCALL vectors when available, otherwise via a
 * classic args tuple — converts them to C values (intptr_t, size_t, int)
 * and forwards them to the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_16from_ptr, "GpuInstancePlacement.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an GpuInstancePlacement instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] collect borrowed/new refs for ptr, size, readonly;
   * NULL means "not supplied" so the C default can be used. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12418, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: first gather positional args (the
       * fall-through switch fills values[] from the back), then merge
       * keywords and check that all required parameters were given. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 12418, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 12418, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path: 1..3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12418, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12418, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert the collected Python objects to C values, applying the
     * declared defaults (size=1, readonly=False) when absent. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12419, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 12419, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12419, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":12419
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an GpuInstancePlacement instance wrapping the given pointer.
 * 
*/
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 12418, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error during unpacking: release any argument references gathered so far. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":12418
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GpuInstancePlacement instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.from_ptr: validates that `ptr`
 * is non-null, allocates a new GpuInstancePlacement, wraps the caller's
 * memory in a memoryview (read-only or writable per `readonly`), builds a
 * numpy ndarray of `size` structs over that buffer with the module's
 * gpu_instance_placement_dtype, and stores a recarray view in obj._data.
 * NOTE(review): the object does not own the memory at `ptr`; the caller
 * must keep it alive for the lifetime of the returned object — presumably
 * documented at the .pyx level; confirm against the Cython source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":12427
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
*/
  /* Reject null pointers up front: a NULL-backed memoryview would crash later. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12428
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12428, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12428, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12427
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
*/
  }

  /* "cuda/bindings/_nvml.pyx":12429
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the wrapper object via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstancePlacement(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12429, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12430
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlGpuInstancePlacement_t) * size, flag)
*/
  /* Choose the buffer access mode for the memoryview. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12430, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12430, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12432
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlGpuInstancePlacement_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=gpu_instance_placement_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12432, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":12431
 *         cdef GpuInstancePlacement obj = GpuInstancePlacement.__new__(GpuInstancePlacement)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlGpuInstancePlacement_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=gpu_instance_placement_dtype)
*/
  /* Wrap the raw memory (size structs) in a zero-copy memoryview. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlGpuInstancePlacement_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12431, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12433
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlGpuInstancePlacement_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=gpu_instance_placement_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* Vectorcall _numpy.ndarray(size, buffer=buf, dtype=gpu_instance_placement_dtype). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_gpu_instance_placement_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12433, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 12433, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 12433, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12433, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12434
 *             <char*>ptr, sizeof(nvmlGpuInstancePlacement_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=gpu_instance_placement_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* Store data.view(_numpy.recarray) into obj._data (replacing any prior value). */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12434, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12436
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12418
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GpuInstancePlacement instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path releases every live temporary; L0 releases the locals. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12320
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Python wrapper for the read-only `_data` attribute getter: delegates
 * straight to the pf implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
   * this compiles only because __Pyx_KwValues_VARARGS is a macro that
   * expands to NULL without evaluating its arguments — confirm against the
   * Cython utility-code definitions before touching this line. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for `GpuInstancePlacement._data`: returns a new strong
 * reference to the stored `_data` object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* Hand the stored object back with its reference count bumped. */
  __pyx_r = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_r);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* CPython wrapper for GpuInstancePlacement.__reduce_cython__ (pickle
 * support): enforces that no positional or keyword arguments are passed,
 * then calls the pf implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_18__reduce_cython__, "GpuInstancePlacement.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstancePlacement.__reduce_cython__ (pickle
 * protocol).  Builds state = (self._data,) (+ __dict__ if non-empty) and
 * returns either
 *   (__pyx_unpickle_GpuInstancePlacement, (type, checksum, None), state)
 * when __setstate__ must run, or the compact
 *   (__pyx_unpickle_GpuInstancePlacement, (type, checksum, state))
 * form when it need not. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self._data is not None
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self._data is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* BUG FIX: this branch previously evaluated the truthiness of a
     * constant non-empty tuple (('self._data is not None',)), which is
     * always true — so use_setstate was unconditionally set and the
     * compact 2-tuple reduce form below was unreachable.  Test _data
     * against None directly, as the Cython tree fragment specifies. */
    __pyx_t_2 = (__pyx_v_self->_data != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self._data is not None
 *     if use_setstate:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, state)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GpuInstancePlacem); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_GpuInstancePlacement__set_state(self, __pyx_state)
*/
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GpuInstancePlacem); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_GpuInstancePlacement__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* CPython wrapper for GpuInstancePlacement.__setstate_cython__ (pickle
 * support): parses the single required `__pyx_state` argument (positional
 * or keyword) and forwards it to the pf implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] collects the __pyx_state argument; NULL means missing. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: merge positionals and keywords, then verify the
       * required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error during unpacking: release gathered argument references. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20GpuInstancePlacement_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_GpuInstancePlacement__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
*/
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GpuInstancePlacement__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_GpuInstancePlacement__set_state(self, __pyx_state)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstancePlacement.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12439
 * 
 * 
 * cdef _get_gpu_instance_profile_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceProfileInfo_v2_t pod = nvmlGpuInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_profile_info_v2_dtype_offsets(void) {
  nvmlGpuInstanceProfileInfo_v2_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpuInstanceProfileInfo_v2_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  PyObject *__pyx_t_18 = NULL;
  PyObject *__pyx_t_19 = NULL;
  size_t __pyx_t_20;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_instance_profile_info_v2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":12440
 * 
 * cdef _get_gpu_instance_profile_info_v2_dtype_offsets():
 *     cdef nvmlGpuInstanceProfileInfo_v2_t pod = nvmlGpuInstanceProfileInfo_v2_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'is_p2p_supported', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":12441
 * cdef _get_gpu_instance_profile_info_v2_dtype_offsets():
 *     cdef nvmlGpuInstanceProfileInfo_v2_t pod = nvmlGpuInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'id', 'is_p2p_supported', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":12442
 *     cdef nvmlGpuInstanceProfileInfo_v2_t pod = nvmlGpuInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'is_p2p_supported', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12442, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(13); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12442, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_p2p_supported);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_p2p_supported);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_is_p2p_supported) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_slice_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_instance_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_multiprocessor_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_copy_engine_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_copy_engine_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_copy_engine_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_decoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_decoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_decoder_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_encoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_encoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_encoder_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_jpeg_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_jpeg_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_jpeg_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ofa_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ofa_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_mstate_global->__pyx_n_u_ofa_count) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_memory_size_mb);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_memory_size_mb);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_mstate_global->__pyx_n_u_memory_size_mb) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 12, __pyx_mstate_global->__pyx_n_u_name) != (0)) __PYX_ERR(0, 12442, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 12442, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12443
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'is_p2p_supported', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_19);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(13); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_t_17) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_t_18) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_19);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 12, __pyx_t_19) != (0)) __PYX_ERR(0, 12443, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  __pyx_t_18 = 0;
  __pyx_t_19 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 12442, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12445
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isP2pSupported)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":12446
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isP2pSupported)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
*/
  __pyx_t_19 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 12446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_19);

  /* "cuda/bindings/_nvml.pyx":12447
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isP2pSupported)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
*/
  __pyx_t_18 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isP2pSupported)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 12447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);

  /* "cuda/bindings/_nvml.pyx":12448
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isP2pSupported)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
*/
  __pyx_t_17 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sliceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 12448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);

  /* "cuda/bindings/_nvml.pyx":12449
 *             (<intptr_t>&(pod.isP2pSupported)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
*/
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.instanceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":12450
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
*/
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.multiprocessorCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 12450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":12451
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.copyEngineCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 12451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":12452
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 12452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":12453
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.encoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 12453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":12454
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.jpegCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 12454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":12455
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ofaCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":12456
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memorySizeMB)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":12457
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstanceProfileInfo_v2_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.name)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":12444
 *         'names': ['version', 'id', 'is_p2p_supported', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_19);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_19) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_18) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_17) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_16) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_15) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_14) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_13) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_12) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_11) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 10, __pyx_t_10) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 11, __pyx_t_9) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 12, __pyx_t_8) != (0)) __PYX_ERR(0, 12444, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_19 = 0;
  __pyx_t_18 = 0;
  __pyx_t_17 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 12442, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":12459
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstanceProfileInfo_v2_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuInstanceProfileInfo_v2_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12459, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 12442, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_20 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_20 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_20, (2-__pyx_t_20) | (__pyx_t_20*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12441, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12439
 * 
 * 
 * cdef _get_gpu_instance_profile_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceProfileInfo_v2_t pod = nvmlGpuInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_XDECREF(__pyx_t_18);
  __Pyx_XDECREF(__pyx_t_19);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_instance_profile_info_v2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12476
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v2.__init__ (from _nvml.pyx:12476).
 * Heap-allocates a zero-initialized nvmlGpuInstanceProfileInfo_v2_t and marks
 * this wrapper as its owner (_owned = True, so __dealloc__ will free it).
 * Raises MemoryError when calloc fails. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":12477
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 */
  /* calloc zero-fills the struct, so all NVML fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlGpuInstanceProfileInfo_v2_t *)calloc(1, (sizeof(nvmlGpuInstanceProfileInfo_v2_t))));

  /* "cuda/bindings/_nvml.pyx":12478
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12479
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, call it via the vectorcall fast path with the
     * error message, and raise the resulting exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12479, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceProf};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12479, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12479, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12478
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":12480
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Drop the previous _owner reference and store None (no external owner). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":12481
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This object owns the allocation; __dealloc__ will free it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":12482
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":12476
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release any live temporaries and record a traceback frame. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12484
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuInstanceProfileInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstanceProfileInfo_v2.__dealloc__ (invoked
 * from the type's tp_dealloc); casts self and delegates to the impl. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here, so
   * __Pyx_KwValues_VARARGS is presumably a macro that does not evaluate
   * its arguments in this configuration — confirm against the runtime
   * support code if this wrapper is ever edited by hand. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of GpuInstanceProfileInfo_v2.__dealloc__ (from
 * _nvml.pyx:12484). Frees the wrapped struct only when this object owns it
 * (_owned) and the pointer is non-NULL; _ptr is cleared before free() so a
 * stale pointer is never left behind. */
static void __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  nvmlGpuInstanceProfileInfo_v2_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGpuInstanceProfileInfo_v2_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":12486
 *     def __dealloc__(self):
 *         cdef nvmlGpuInstanceProfileInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only check _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12487
 *         cdef nvmlGpuInstanceProfileInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":12488
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Null out the member before freeing the saved copy. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":12489
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":12486
 *     def __dealloc__(self):
 *         cdef nvmlGpuInstanceProfileInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":12484
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuInstanceProfileInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":12491
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstanceProfileInfo_v2.__repr__; casts self
 * and delegates to the impl. Returns a new unicode reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are not in scope; the macro presumably ignores them. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v2.__repr__ (from _nvml.pyx:12491):
 * builds f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * by joining five unicode fragments. Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":12492
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) formatted with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and computed fragments; the size/max-char hints let the
   * runtime preallocate the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpuInstanceProfileInfo_v2_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12491
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12494
 *         return f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* CPython-level wrapper for the `ptr` property getter; casts self and
 * delegates to the impl. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are not in scope; the macro presumably ignores them. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (from _nvml.pyx:12497):
 * returns the wrapped struct's address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12497
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer value through intptr_t (same width as Py_ssize_t
   * on the supported platforms) into a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12494
 *         return f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12499
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for GpuInstanceProfileInfo_v2._get_ptr (from
 * _nvml.pyx:12499): expose the wrapped struct pointer as an integer
 * address, mirroring the Python-level `ptr` property. Touches no Python
 * objects, so no refnanny or exception bookkeeping is needed. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  /* A direct cast of the member is the entire function body. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":12502
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstanceProfileInfo_v2.__int__; casts self
 * and delegates to the impl. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are not in scope; the macro presumably ignores them. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v2.__int__ (from _nvml.pyx:12502):
 * int(obj) yields the wrapped struct's address, same value as the `ptr`
 * property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":12503
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12503, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12502
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12505
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceProfileInfo_v2 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v2):
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstanceProfileInfo_v2.__eq__; casts self and
 * passes `other` through to the impl. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are not in scope; the macro presumably ignores them. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v2.__eq__ (from _nvml.pyx:12505).
 * Returns False for non-GpuInstanceProfileInfo_v2 operands; otherwise
 * compares the two wrapped structs byte-for-byte with memcmp.
 * NOTE(review): this assumes both _ptr values are valid non-NULL pointers
 * to fully-sized structs — confirm callers never compare a dealloc'd or
 * uninitialized instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":12507
 *     def __eq__(self, other):
 *         cdef GpuInstanceProfileInfo_v2 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":12508
 *         cdef GpuInstanceProfileInfo_v2 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v2_t)) == 0)
 */
    /* Mismatched type: not equal (returns False rather than NotImplemented). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12507
 *     def __eq__(self, other):
 *         cdef GpuInstanceProfileInfo_v2 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":12509
 *         if not isinstance(other, GpuInstanceProfileInfo_v2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v2_t)) == 0)
 * 
 */
  /* Downcast `other` to the extension type (type already verified above). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2))))) __PYX_ERR(0, 12509, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":12510
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v2_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Equality is a raw byte comparison of the two underlying structs. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpuInstanceProfileInfo_v2_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12505
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceProfileInfo_v2 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v2):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12512
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstanceProfileInfo_v2.__setitem__; casts self
 * and forwards key/val to the impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are not in scope; the macro presumably ignores them. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v2.__setitem__ (from
 * _nvml.pyx:12512). Special-cases `obj[0] = ndarray`: allocates a fresh
 * struct, memcpy's sizeof(nvmlGpuInstanceProfileInfo_v2_t) bytes from the
 * array's data pointer, and marks the copy as owned; _readonly mirrors the
 * array's writeable flag. Any other key falls back to setattr(self, key, val).
 * NOTE(review): in the ndarray path the previous self->_ptr is overwritten
 * without being freed — if __init__ already allocated it, that allocation
 * leaks; confirm against the .pyx source whether this is intentional. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":12513
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: evaluate key == 0 first, isinstance only if true. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12513, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 12513, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12514
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 */
    __pyx_v_self->_ptr = ((nvmlGpuInstanceProfileInfo_v2_t *)malloc((sizeof(nvmlGpuInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":12515
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12516
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             self._owner = None
 */
      /* Look up MemoryError, vectorcall it with the message, and raise. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12516, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceProf};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12516, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 12516, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12515
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":12517
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (the array's base address as a Python int),
     * convert it to intptr_t, and copy the struct bytes from it.
     * assumes the array holds at least sizeof(nvmlGpuInstanceProfileInfo_v2_t)
     * bytes — TODO confirm callers guarantee this. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12517, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12517, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12517, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpuInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":12518
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* Drop the previous _owner reference and store None (copied data, no owner). */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12519
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12520
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate read-only-ness from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12520, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12520, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 12520, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":12513
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":12522
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat the key as an attribute name. */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 12522, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":12512
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12524
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter entry point for the `version` property: casts the raw PyObject*
   to the extension-type struct and delegates to the __pyx_pf_* impl.
   Cython-generated; do not hand-edit -- regenerate from _nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the
     __Pyx_KwValues_VARARGS macro presumably ignores its arguments in this
     build configuration -- confirm against the Cython utility headers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ impl for GpuInstanceProfileInfo_v2.version: reads the `version`
   field of the wrapped nvmlGpuInstanceProfileInfo_v2_t (via self->_ptr) and
   boxes it as a Python int. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12527
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the C unsigned int field to a PyLong; jumps to L1_error on
     allocation failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12527, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12524
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12529
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter entry point for the `version` property: casts self and forwards
   (self, val) to the __pyx_pf_* impl. Returns 0 on success, -1 on error.
   Cython-generated; do not hand-edit -- regenerate from _nvml.pyx. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ impl for GpuInstanceProfileInfo_v2.version: raises ValueError if
   the instance is flagged read-only (self->_readonly), otherwise converts
   `val` to unsigned int and stores it into the wrapped struct's `version`
   field. Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12531
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12532
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build ValueError(message) via a one-argument vectorcall; __pyx_t_3 == 1
       offsets past the unused bound-object slot in __pyx_callargs. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12532, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12532, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12531
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12533
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert val -> unsigned int; OverflowError/TypeError from the conversion
     propagates through the (-1 && PyErr_Occurred()) check. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12533, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12529
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12535
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter entry point for the `id` property: casts self and delegates to the
   __pyx_pf_* impl. Cython-generated; regenerate from _nvml.pyx to change. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ impl for GpuInstanceProfileInfo_v2.id: boxes the wrapped struct's
   `id` field (unsigned int) as a Python int. New reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12538
 *     def id(self):
 *         """int: """
 *         return self._ptr[0].id             # <<<<<<<<<<<<<<
 * 
 *     @id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12535
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12540
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter entry point for the `id` property: casts self and forwards
   (self, val) to the __pyx_pf_* impl. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ impl for GpuInstanceProfileInfo_v2.id: ValueError if read-only,
   else converts `val` to unsigned int and stores into the struct's `id`.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12542
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].id = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12543
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].id = val
 * 
 */
    /* One-argument vectorcall of ValueError with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12543, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12543, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12542
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].id = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12544
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12544, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).id = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12540
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12546
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_p2p_supported(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter entry point for the `is_p2p_supported` property: casts self and
   delegates to the __pyx_pf_* impl. Cython-generated; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ impl for GpuInstanceProfileInfo_v2.is_p2p_supported: boxes the
   struct's `isP2pSupported` field (unsigned int) as a Python int.
   Note the Python-side snake_case name maps to the C camelCase field. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12549
 *     def is_p2p_supported(self):
 *         """int: """
 *         return self._ptr[0].isP2pSupported             # <<<<<<<<<<<<<<
 * 
 *     @is_p2p_supported.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isP2pSupported); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12546
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_p2p_supported(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.is_p2p_supported.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12551
 *         return self._ptr[0].isP2pSupported
 * 
 *     @is_p2p_supported.setter             # <<<<<<<<<<<<<<
 *     def is_p2p_supported(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter entry point for the `is_p2p_supported` property: casts self and
   forwards (self, val) to the __pyx_pf_* impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ impl for GpuInstanceProfileInfo_v2.is_p2p_supported: ValueError if
   read-only, else converts `val` to unsigned int and stores into the
   struct's `isP2pSupported`. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12553
 *     @is_p2p_supported.setter
 *     def is_p2p_supported(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].isP2pSupported = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12554
 *     def is_p2p_supported(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isP2pSupported = val
 * 
 */
    /* One-argument vectorcall of ValueError with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12554, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12554, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12553
 *     @is_p2p_supported.setter
 *     def is_p2p_supported(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].isP2pSupported = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12555
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].isP2pSupported = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12555, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isP2pSupported = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12551
 *         return self._ptr[0].isP2pSupported
 * 
 *     @is_p2p_supported.setter             # <<<<<<<<<<<<<<
 *     def is_p2p_supported(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.is_p2p_supported.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12557
 *         self._ptr[0].isP2pSupported = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter entry point for the `slice_count` property: casts self and
   delegates to the __pyx_pf_* impl. Cython-generated; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ impl for GpuInstanceProfileInfo_v2.slice_count: boxes the struct's
   `sliceCount` field (unsigned int) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12560
 *     def slice_count(self):
 *         """int: """
 *         return self._ptr[0].sliceCount             # <<<<<<<<<<<<<<
 * 
 *     @slice_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sliceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12557
 *         self._ptr[0].isP2pSupported = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.slice_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12562
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter entry point for the `slice_count` property: casts self and
   forwards (self, val) to the __pyx_pf_* impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ impl for GpuInstanceProfileInfo_v2.slice_count: ValueError if
   read-only, else converts `val` to unsigned int and stores into the
   struct's `sliceCount`. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12564
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sliceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12565
 *     def slice_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sliceCount = val
 * 
 */
    /* One-argument vectorcall of ValueError with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12565, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12565, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12564
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sliceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12566
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sliceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12566, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sliceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12562
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.slice_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12568
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter entry point for the `instance_count` property: casts self and
   delegates to the __pyx_pf_* impl. Cython-generated; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ impl for GpuInstanceProfileInfo_v2.instance_count: boxes the
   struct's `instanceCount` field (unsigned int) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12571
 *     def instance_count(self):
 *         """int: """
 *         return self._ptr[0].instanceCount             # <<<<<<<<<<<<<<
 * 
 *     @instance_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).instanceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12568
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.instance_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12573
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter entry point for the `instance_count` property: casts self and
   forwards (self, val) to the __pyx_pf_* impl. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ impl for GpuInstanceProfileInfo_v2.instance_count: ValueError if
   read-only, else converts `val` to unsigned int and stores into the
   struct's `instanceCount`. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12575
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].instanceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12576
 *     def instance_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].instanceCount = val
 * 
 */
    /* One-argument vectorcall of ValueError with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12576, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12576, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12575
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].instanceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12577
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].instanceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12577, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).instanceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12573
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.instance_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12579
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter entry point for the `multiprocessor_count` property: casts self and
   delegates to the __pyx_pf_* impl. Cython-generated; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12582
 *     def multiprocessor_count(self):
 *         """int: """
 *         return self._ptr[0].multiprocessorCount             # <<<<<<<<<<<<<<
 * 
 *     @multiprocessor_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).multiprocessorCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12579
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.multiprocessor_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12584
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12586
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12587
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].multiprocessorCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12587, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12587, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12586
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12588
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12588, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).multiprocessorCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12584
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.multiprocessor_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12590
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12593
 *     def copy_engine_count(self):
 *         """int: """
 *         return self._ptr[0].copyEngineCount             # <<<<<<<<<<<<<<
 * 
 *     @copy_engine_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).copyEngineCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12593, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12590
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.copy_engine_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12595
 *         return self._ptr[0].copyEngineCount
 * 
 *     @copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12597
 *     @copy_engine_count.setter
 *     def copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].copyEngineCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12598
 *     def copy_engine_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].copyEngineCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12598, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12598, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12597
 *     @copy_engine_count.setter
 *     def copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].copyEngineCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12599
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].copyEngineCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12599, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).copyEngineCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12595
 *         return self._ptr[0].copyEngineCount
 * 
 *     @copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.copy_engine_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12601
 *         self._ptr[0].copyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def decoder_count(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12604
 *     def decoder_count(self):
 *         """int: """
 *         return self._ptr[0].decoderCount             # <<<<<<<<<<<<<<
 * 
 *     @decoder_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).decoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12601
 *         self._ptr[0].copyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def decoder_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.decoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12606
 *         return self._ptr[0].decoderCount
 * 
 *     @decoder_count.setter             # <<<<<<<<<<<<<<
 *     def decoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12608
 *     @decoder_count.setter
 *     def decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].decoderCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12609
 *     def decoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].decoderCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12609, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12609, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12608
 *     @decoder_count.setter
 *     def decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].decoderCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12610
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].decoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12610, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).decoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12606
 *         return self._ptr[0].decoderCount
 * 
 *     @decoder_count.setter             # <<<<<<<<<<<<<<
 *     def decoder_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.decoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12612
 *         self._ptr[0].decoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def encoder_count(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12615
 *     def encoder_count(self):
 *         """int: """
 *         return self._ptr[0].encoderCount             # <<<<<<<<<<<<<<
 * 
 *     @encoder_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).encoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12612
 *         self._ptr[0].decoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def encoder_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.encoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12617
 *         return self._ptr[0].encoderCount
 * 
 *     @encoder_count.setter             # <<<<<<<<<<<<<<
 *     def encoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12619
 *     @encoder_count.setter
 *     def encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].encoderCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12620
 *     def encoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].encoderCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12620, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12620, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12619
 *     @encoder_count.setter
 *     def encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].encoderCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12621
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].encoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12621, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).encoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12617
 *         return self._ptr[0].encoderCount
 * 
 *     @encoder_count.setter             # <<<<<<<<<<<<<<
 *     def encoder_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.encoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12623
 *         self._ptr[0].encoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpeg_count(self):
 *         """int: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12626
 *     def jpeg_count(self):
 *         """int: """
 *         return self._ptr[0].jpegCount             # <<<<<<<<<<<<<<
 * 
 *     @jpeg_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).jpegCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12626, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12623
 *         self._ptr[0].encoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpeg_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.jpeg_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12628
 *         return self._ptr[0].jpegCount
 * 
 *     @jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def jpeg_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12630
 *     @jpeg_count.setter
 *     def jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].jpegCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12631
 *     def jpeg_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].jpegCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12631, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12631, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12630
 *     @jpeg_count.setter
 *     def jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].jpegCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":12632
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].jpegCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12632, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).jpegCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12628
 *         return self._ptr[0].jpegCount
 * 
 *     @jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def jpeg_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.jpeg_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12634
 *         self._ptr[0].jpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-facing wrapper for the `ofa_count` property getter.
 * Sets up RefNanny bookkeeping, downcasts the generic PyObject* `self`
 * to the concrete extension-type struct, and delegates all work to the
 * implementation function (`..._9ofa_count___get__`) below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation with `self` cast to the wrapped struct type. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ofa_count` property getter: boxes the wrapped
 * struct's `ofaCount` field (C `unsigned int`) as a new Python int.
 * `self->_ptr` is assumed valid for the wrapper's lifetime (owned or
 * borrowed elsewhere in this module — not visible here). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12637
 *     def ofa_count(self):
 *         """int: """
 *         return self._ptr[0].ofaCount             # <<<<<<<<<<<<<<
 * 
 *     @ofa_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a new Python int reference (may fail on OOM). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).ofaCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12634
 *         self._ptr[0].jpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.ofa_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12639
 *         return self._ptr[0].ofaCount
 * 
 *     @ofa_count.setter             # <<<<<<<<<<<<<<
 *     def ofa_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing wrapper for the `ofa_count` property setter.
 * Downcasts `self` and forwards `val` to the implementation below;
 * returns 0 on success, -1 on error (standard descriptor protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ofa_count` property setter.
 * Raises ValueError when the wrapper is read-only, otherwise converts
 * `val` to C `unsigned int` and stores it into the wrapped struct.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12641
 *     @ofa_count.setter
 *     def ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].ofaCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12642
 *     def ofa_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ofaCount = val
 * 
 */
    /* Build and raise ValueError(msg) via the vectorcall fast path
       (one positional argument: the interned message string). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12642, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12642, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12641
 *     @ofa_count.setter
 *     def ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].ofaCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12643
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].ofaCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is the conversion-error sentinel; PyErr_Occurred()
     disambiguates it from a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12643, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ofaCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12639
 *         return self._ptr[0].ofaCount
 * 
 *     @ofa_count.setter             # <<<<<<<<<<<<<<
 *     def ofa_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.ofa_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12645
 *         self._ptr[0].ofaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-facing wrapper for the `memory_size_mb` property getter;
 * casts `self` to the extension-type struct and delegates to the
 * implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `memory_size_mb` property getter: boxes the
 * wrapped struct's `memorySizeMB` field (C `unsigned long long`) as a
 * new Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12648
 *     def memory_size_mb(self):
 *         """int: """
 *         return self._ptr[0].memorySizeMB             # <<<<<<<<<<<<<<
 * 
 *     @memory_size_mb.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the 64-bit unsigned field into a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).memorySizeMB); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12648, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12645
 *         self._ptr[0].ofaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.memory_size_mb.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12650
 *         return self._ptr[0].memorySizeMB
 * 
 *     @memory_size_mb.setter             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing wrapper for the `memory_size_mb` property setter;
 * forwards to the implementation below. Returns 0 / -1 per the
 * descriptor protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `memory_size_mb` property setter.
 * Raises ValueError when the wrapper is read-only, otherwise converts
 * `val` to C `unsigned long long` and stores it into the wrapped
 * struct. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12652
 *     @memory_size_mb.setter
 *     def memory_size_mb(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].memorySizeMB = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12653
 *     def memory_size_mb(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].memorySizeMB = val
 * 
 */
    /* Build and raise ValueError(msg) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12653, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12653, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12652
 *     @memory_size_mb.setter
 *     def memory_size_mb(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].memorySizeMB = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12654
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].memorySizeMB = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned PY_LONG_LONG)-1 is the conversion-error sentinel;
     PyErr_Occurred() disambiguates it from a legitimate max value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12654, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).memorySizeMB = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12650
 *         return self._ptr[0].memorySizeMB
 * 
 *     @memory_size_mb.setter             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.memory_size_mb.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12656
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* CPython-facing wrapper for the `name` property getter; casts `self`
 * and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `name` property getter: decodes the wrapped
 * struct's `name` field (char array) into a Python str via
 * PyUnicode_FromString, i.e. up to the first NUL byte.
 * NOTE(review): assumes the field is NUL-terminated UTF-8; a field
 * filled to capacity without a terminator would over-read — TODO
 * confirm NVML guarantees termination for this field. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12659
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)             # <<<<<<<<<<<<<<
 * 
 *     @name.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12656
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12661
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing wrapper for the `name` property setter; forwards to
 * the implementation below. Returns 0 / -1 per the descriptor
 * protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `name` property setter.
 *
 * Behavior: raises ValueError if the wrapper is read-only; encodes
 * `val` via .encode() (must yield bytes); raises ValueError if the
 * payload is 96 bytes or longer (field is char[96], one byte reserved
 * for the NUL terminator); then copies the payload into the struct.
 *
 * FIX(review): the generated code originally did
 *     memcpy(self._ptr[0].name, ptr, 96)
 * unconditionally. The length check only guarantees len(buf) < 96, so
 * for shorter strings this read up to ~95 bytes past the end of the
 * Python bytes object's buffer (undefined behavior / heap OOB read)
 * and copied nondeterministic heap garbage into the struct. Fixed by
 * zeroing the destination field and copying exactly len(buf) bytes;
 * the memset also guarantees NUL termination and deterministic
 * padding. Observable Python behavior is unchanged (the getter stops
 * at the first NUL).
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12663
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12664
 *     def name(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    /* Build and raise ValueError(msg) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v2_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12664, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12664, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12663
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":12665
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 */
  /* buf = val.encode() — bound method call via vectorcall; the result
     must be exactly `bytes` (or None), enforced just below. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12665, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 12665, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12666
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 12666, __pyx_L1_error)
  }
  /* __pyx_t_4 holds len(buf) from here on; reused below as the copy length. */
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12666, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":12667
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_name_m};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12667, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12667, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12666
 *             raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":12668
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 12668, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 12668, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":12669
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Zero the whole 96-byte field, then copy only len(buf) bytes
     (__pyx_t_4 < 96 is guaranteed by the check above). This replaces
     the original fixed-size memcpy of 96 bytes, which read past the
     end of the bytes object when len(buf) < 95. */
  (void)(memset(((void *)(__pyx_v_self->_ptr[0]).name), 0, 96));
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).name), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4)));

  /* "cuda/bindings/_nvml.pyx":12661
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12671
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* CPython-facing wrapper (FASTCALL + keywords) for the static method
 * GpuInstanceProfileInfo_v2.from_data(data). Parses exactly one
 * argument (positional or keyword `data`) and delegates to the
 * implementation function.
 *
 * FIX(review): the generated negative-length check was written as
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0),
 * whose `!!` collapses the value to 0/1, making `< 0` always false —
 * a dead error check. In Limited-API builds __Pyx_NumKwargs_FASTCALL
 * can return -1 on failure, which would then be treated as a valid
 * count. Corrected to `if (unlikely(__pyx_kwds_len < 0))`.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_12from_data, "GpuInstanceProfileInfo_v2.from_data(data)\n\nCreate an GpuInstanceProfileInfo_v2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpu_instance_profile_info_v2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall fallback: recover nargs from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesize the comparison inside unlikely() so a
       negative (error) keyword count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12671, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then
         fill the remainder from keywords and check completeness. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12671, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 12671, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 12671, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12671, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12671, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method from_data(data): looks up the
 * module-level dtype object `gpu_instance_profile_info_v2_dtype` and
 * delegates to the module's shared `__from_data` helper together with
 * the dtype name and the GpuInstanceProfileInfo_v2 extension type. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":12678
 *             data (_numpy.ndarray): a single-element array of dtype `gpu_instance_profile_info_v2_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpu_instance_profile_info_v2_dtype", gpu_instance_profile_info_v2_dtype, GpuInstanceProfileInfo_v2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module globals (new reference in t_1). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_instance_profile_info_v2_dty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_profile_info_v2_dty, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12671
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12680
 *         return __from_data(data, "gpu_instance_profile_info_v2_dtype", gpu_instance_profile_info_v2_dtype, GpuInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* CPython entry point for the @staticmethod `GpuInstanceProfileInfo_v2.from_ptr`.
 * Cython-generated argument-parsing wrapper: unpacks (ptr, readonly=False,
 * owner=None) from either the METH_FASTCALL or the tuple/dict calling
 * convention, converts them to C types (intptr_t / bint / object) and
 * forwards to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14from_ptr, "GpuInstanceProfileInfo_v2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpuInstanceProfileInfo_v2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0`, which compares the
     * 0/1 result of unlikely() and can never be true, making the error
     * check dead code.  Parenthesize the whole comparison (matching the
     * correct form used by other generated wrappers in this file) so a
     * negative keyword count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12680, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12680, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12680, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12680, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 12680, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":12681
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 12680, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12680, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12680, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12680, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12681, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12681, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 12680, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":12680
 *         return __from_data(data, "gpu_instance_profile_info_v2_dtype", gpu_instance_profile_info_v2_dtype, GpuInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v2.from_ptr: wraps a raw
 * nvmlGpuInstanceProfileInfo_v2_t* in a new Python object.
 *
 * Behavior (as generated from _nvml.pyx:12680-12704):
 *   - ptr == 0            -> raise ValueError
 *   - owner is None       -> malloc a private copy of the struct, memcpy from
 *                            ptr, mark _owned = 1 (the copy is presumably
 *                            freed by the type's dealloc, defined elsewhere
 *                            in this file — not visible here)
 *   - owner is not None   -> borrow ptr directly, keep a reference to owner
 *                            alive in _owner, mark _owned = 0
 *   - _readonly is set from the `readonly` flag in both cases.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":12689
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)
*/
  /* Reject NULL pointers up front — wrapping address 0 is always an error. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12690
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12690, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12690, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12689
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)
*/
  }

  /* "cuda/bindings/_nvml.pyx":12691
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
*/
  /* Allocate the Python object via tp_new directly (i.e. __new__ without
   * calling __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12691, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12692
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12693
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
*/
    /* No owner: copy the pointed-to struct into storage this object owns. */
    __pyx_v_obj->_ptr = ((nvmlGpuInstanceProfileInfo_v2_t *)malloc((sizeof(nvmlGpuInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":12694
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12695
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (so a shadowed name is
       * honored) rather than using PyExc_MemoryError directly. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12695, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceProf};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12695, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 12695, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12694
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":12696
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpuInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":12697
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12698
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12692
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":12700
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Owner supplied: borrow the pointer and keep `owner` alive instead of
   * copying the struct. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlGpuInstanceProfileInfo_v2_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":12701
 *         else:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":12702
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v2_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":12703
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":12704
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12680
 *         return __from_data(data, "gpu_instance_profile_info_v2_dtype", gpu_instance_profile_info_v2_dtype, GpuInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython wrapper for GpuInstanceProfileInfo_v2.__reduce_cython__.
 * Takes no arguments: any positional or keyword argument is rejected before
 * delegating to the impl (which unconditionally raises TypeError — pickling
 * of pointer-wrapping objects is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16__reduce_cython__, "GpuInstanceProfileInfo_v2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positionals or keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because the
 * object's _ptr (a raw C pointer) cannot be pickled.  Never returns a value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython wrapper for GpuInstanceProfileInfo_v2.__setstate_cython__.
 * Parses the single `__pyx_state` argument (positional or keyword) and
 * forwards to the impl, which unconditionally raises TypeError (pickling
 * of pointer-wrapping objects is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_18__setstate_cython__, "GpuInstanceProfileInfo_v2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0`, which compares the
     * 0/1 result of unlikely() and can never be true, making the error
     * check dead code.  Parenthesize the whole comparison (matching the
     * correct form used by other generated wrappers in this file) so a
     * negative keyword count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: always raises TypeError because the
 * object's _ptr (a raw C pointer) cannot be restored from pickled state.
 * The `__pyx_state` argument is ignored.  Never returns a value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12707
 * 
 * 
 * cdef _get_gpu_instance_profile_info_v3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceProfileInfo_v3_t pod = nvmlGpuInstanceProfileInfo_v3_t()
 *     return _numpy.dtype({
*/

/* Generated by Cython from cuda/bindings/_nvml.pyx:12707.
 *
 * Builds and returns a numpy structured dtype mirroring the in-memory layout
 * of nvmlGpuInstanceProfileInfo_v3_t: it assembles a dict with the keys
 * 'names', 'formats', 'offsets' and 'itemsize' (field byte offsets are
 * computed via address arithmetic on a stack instance of the struct) and
 * passes it to numpy.dtype().  Returns a new reference to the dtype object,
 * or NULL with a Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_profile_info_v3_dtype_offsets(void) {
  nvmlGpuInstanceProfileInfo_v3_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Fix: value-initialize the temporary.  The generated code copied
   * __pyx_t_1 into __pyx_v_pod without ever assigning it, which reads an
   * indeterminate value (undefined behavior in C++), even though only the
   * addresses of pod's members are used afterwards. */
  nvmlGpuInstanceProfileInfo_v3_t __pyx_t_1 = {};
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  PyObject *__pyx_t_18 = NULL;
  PyObject *__pyx_t_19 = NULL;
  size_t __pyx_t_20;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_instance_profile_info_v3_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":12708
 * 
 * cdef _get_gpu_instance_profile_info_v3_dtype_offsets():
 *     cdef nvmlGpuInstanceProfileInfo_v3_t pod = nvmlGpuInstanceProfileInfo_v3_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name', 'capabilities'],
 */
  /* pod's value is irrelevant below — only member addresses are taken. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":12709
 * cdef _get_gpu_instance_profile_info_v3_dtype_offsets():
 *     cdef nvmlGpuInstanceProfileInfo_v3_t pod = nvmlGpuInstanceProfileInfo_v3_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name', 'capabilities'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8, _numpy.uint32],
 */
  /* Look up _numpy.dtype (t_5) for the final call. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12709, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12709, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":12710
 *     cdef nvmlGpuInstanceProfileInfo_v3_t pod = nvmlGpuInstanceProfileInfo_v3_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name', 'capabilities'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the dtype-spec dict (t_4) and its 13-element 'names' list (t_6). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(13); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_slice_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_instance_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_multiprocessor_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_copy_engine_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_copy_engine_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_copy_engine_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_decoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_decoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_decoder_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_encoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_encoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_encoder_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_jpeg_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_jpeg_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_jpeg_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ofa_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ofa_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_ofa_count) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_memory_size_mb);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_memory_size_mb);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_mstate_global->__pyx_n_u_memory_size_mb) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_mstate_global->__pyx_n_u_name) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_capabilities);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_capabilities);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 12, __pyx_mstate_global->__pyx_n_u_capabilities) != (0)) __PYX_ERR(0, 12710, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 12710, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12711
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name', 'capabilities'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* Fetch one numpy scalar type per field for the 'formats' list
   * (t_7..t_19): ten uint32, then uint64, int8 (for 'name'), uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_19);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Pack the 13 formats into a list and store under 'formats'. */
  __pyx_t_6 = PyList_New(13); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_t_17) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_t_18) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_19);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 12, __pyx_t_19) != (0)) __PYX_ERR(0, 12711, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  __pyx_t_18 = 0;
  __pyx_t_19 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 12710, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Compute each field's byte offset as (&pod.field - &pod), one Python
   * int per field, in the same order as 'names'/'formats'. */

  /* "cuda/bindings/_nvml.pyx":12713
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":12714
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 */
  __pyx_t_19 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 12714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_19);

  /* "cuda/bindings/_nvml.pyx":12715
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 */
  __pyx_t_18 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sliceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 12715, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);

  /* "cuda/bindings/_nvml.pyx":12716
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 */
  __pyx_t_17 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.instanceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 12716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);

  /* "cuda/bindings/_nvml.pyx":12717
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 */
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.multiprocessorCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":12718
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 */
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.copyEngineCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 12718, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":12719
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 */
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 12719, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":12720
 *             (<intptr_t>&(pod.copyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.encoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 12720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":12721
 *             (<intptr_t>&(pod.decoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.jpegCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 12721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":12722
 *             (<intptr_t>&(pod.encoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ofaCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 12722, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":12723
 *             (<intptr_t>&(pod.jpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.capabilities)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memorySizeMB)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":12724
 *             (<intptr_t>&(pod.ofaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.capabilities)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.name)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":12725
 *             (<intptr_t>&(pod.memorySizeMB)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.capabilities)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstanceProfileInfo_v3_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.capabilities)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":12712
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name', 'capabilities'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.int8, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 */
  /* Pack the 13 offset ints into a list and store under 'offsets'. */
  __pyx_t_7 = PyList_New(13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12712, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_19);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_19) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_18) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_17) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_16) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_15) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_14) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_13) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_12) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_11) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 10, __pyx_t_10) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 11, __pyx_t_9) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 12, __pyx_t_8) != (0)) __PYX_ERR(0, 12712, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_19 = 0;
  __pyx_t_18 = 0;
  __pyx_t_17 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 12710, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":12727
 *             (<intptr_t>&(pod.capabilities)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstanceProfileInfo_v3_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* Total struct size so the dtype itemsize matches the C layout exactly
   * (including any trailing padding). */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuInstanceProfileInfo_v3_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 12710, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via the fast-call protocol, unpacking a
   * bound method into (self, function) when possible. */
  __pyx_t_20 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_20 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_20, (2-__pyx_t_20) | (__pyx_t_20*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12709, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12707
 * 
 * 
 * cdef _get_gpu_instance_profile_info_v3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceProfileInfo_v3_t pod = nvmlGpuInstanceProfileInfo_v3_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release every temporary, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_XDECREF(__pyx_t_18);
  __Pyx_XDECREF(__pyx_t_19);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_instance_profile_info_v3_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12744
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython-level wrapper for GpuInstanceProfileInfo_v3.__init__ (generated by
 * Cython).  __init__ takes no arguments, so this wrapper only validates the
 * call: any positional or keyword argument raises TypeError (return -1)
 * before the implementation function is invoked. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-safe branch also checks for failure. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero positional and zero keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  /* Arguments validated: delegate to the generated implementation. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of GpuInstanceProfileInfo_v3.__init__
 * (cuda/bindings/_nvml.pyx:12744-12750).  Zero-allocates the wrapped
 * nvmlGpuInstanceProfileInfo_v3_t on the heap, raises MemoryError on
 * allocation failure, and initializes ownership flags:
 * _owner = None, _owned = True, _readonly = False.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":12745
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 */
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlGpuInstanceProfileInfo_v3_t *)calloc(1, (sizeof(nvmlGpuInstanceProfileInfo_v3_t))));

  /* "cuda/bindings/_nvml.pyx":12746
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12747
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError as a module global (the .pyx may shadow the
     * builtin), build the instance via the vectorcall fast path, raise. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12747, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceProf_2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12747, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12747, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12746
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":12748
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: standard Cython attribute store — incref the new
   * value before decref'ing the old one. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":12749
 *             raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":12750
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":12744
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12752
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuInstanceProfileInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-visible wrapper for GpuInstanceProfileInfo_v3.__dealloc__
 * (cuda/bindings/_nvml.pyx:12752); simply forwards to the C-level impl.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
 * this compiles only because __Pyx_KwValues_VARARGS is a macro that does not
 * evaluate its arguments — confirm against the Cython utility code before
 * touching this line. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* C-level implementation of GpuInstanceProfileInfo_v3.__dealloc__
 * (cuda/bindings/_nvml.pyx:12752-12757).
 *
 * Frees the heap-allocated nvmlGpuInstanceProfileInfo_v3_t, but only when
 * this wrapper object owns it (_owned) and the pointer is non-NULL.
 * The _ptr field is cleared before free() so it is never left dangling. */
static void __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  nvmlGpuInstanceProfileInfo_v3_t *owned_ptr;

  /* Not ours to free (borrowed/externally owned buffer): nothing to do. */
  if (!__pyx_v_self->_owned) {
    return;
  }
  owned_ptr = __pyx_v_self->_ptr;
  if (owned_ptr == NULL) {
    return;
  }
  /* Null out the field first, then release the buffer. */
  __pyx_v_self->_ptr = NULL;
  free(owned_ptr);
}

/* "cuda/bindings/_nvml.pyx":12759
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-visible wrapper for GpuInstanceProfileInfo_v3.__repr__
 * (cuda/bindings/_nvml.pyx:12759); forwards to the C-level impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of GpuInstanceProfileInfo_v3.__repr__
 * (cuda/bindings/_nvml.pyx:12759-12760).  Builds the f-string
 * "<{__name__}.GpuInstanceProfileInfo_v3 object at {hex(id(self))}>" by
 * formatting __name__, computing hex(id(self)), and joining the five pieces
 * with a single pre-sized unicode join. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":12760
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpuInstanceProfileInfo_v3 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ into a str (t_2). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) as a str (t_1). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", __name__, ".GpuInstanceProfileInfo_v3 object at ", hex-id, ">"
   * — total length and max char value are precomputed for a one-shot build. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpuInstanceProfileInfo_v3_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12759
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12762
 *         return f"<{__name__}.GpuInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-visible getter wrapper for the GpuInstanceProfileInfo_v3.ptr
 * property (cuda/bindings/_nvml.pyx:12762); forwards to the C-level impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of the ptr property getter
 * (cuda/bindings/_nvml.pyx:12763-12765): returns the address of the wrapped
 * struct as a Python int (via intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12765
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t fits in Py_ssize_t on supported platforms, hence FromSsize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12762
 *         return f"<{__name__}.GpuInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12767
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for GpuInstanceProfileInfo_v3._get_ptr
 * (cuda/bindings/_nvml.pyx:12767-12768): returns the raw address of the
 * wrapped nvmlGpuInstanceProfileInfo_v3_t struct as an intptr_t. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":12770
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-visible wrapper for GpuInstanceProfileInfo_v3.__int__
 * (cuda/bindings/_nvml.pyx:12770); forwards to the C-level impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of __int__ (cuda/bindings/_nvml.pyx:12770-12771):
 * int(obj) yields the address of the wrapped struct, same value as the
 * ptr property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":12771
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12770
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12773
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceProfileInfo_v3 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v3):
*/

/* Python wrapper */
/* Python-visible wrapper for GpuInstanceProfileInfo_v3.__eq__
 * (cuda/bindings/_nvml.pyx:12773); forwards both operands to the C impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of __eq__ (cuda/bindings/_nvml.pyx:12773-12778).
 * Returns False for non-GpuInstanceProfileInfo_v3 operands, otherwise
 * compares the wrapped structs byte-for-byte with memcmp.
 * NOTE(review): assumes both _ptr values are non-NULL (set up by __init__ /
 * __setitem__); memcmp on a NULL pointer would be UB — confirm no code path
 * leaves _ptr NULL on a live instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":12775
 *     def __eq__(self, other):
 *         cdef GpuInstanceProfileInfo_v3 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":12776
 *         cdef GpuInstanceProfileInfo_v3 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v3):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v3_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":12775
 *     def __eq__(self, other):
 *         cdef GpuInstanceProfileInfo_v3 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":12777
 *         if not isinstance(other, GpuInstanceProfileInfo_v3):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v3_t)) == 0)
 * 
 */
  /* Typed assignment: runtime type test again (accepts None per cdef rules). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3))))) __PYX_ERR(0, 12777, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":12778
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v3_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpuInstanceProfileInfo_v3_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12778, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12773
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceProfileInfo_v3 other_
 *         if not isinstance(other, GpuInstanceProfileInfo_v3):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12780
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v3_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
*/

/* Python wrapper */
/* Python-visible wrapper for GpuInstanceProfileInfo_v3.__setitem__
 * (cuda/bindings/_nvml.pyx:12780); forwards key and value to the C impl. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of __setitem__ (cuda/bindings/_nvml.pyx:12780-12790).
 * Special case: obj[0] = <numpy.ndarray> mallocs a fresh struct and memcpys
 * the array's buffer (val.ctypes.data) into it, taking ownership and
 * deriving _readonly from the array's writeable flag.  Any other key falls
 * back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the malloc path overwrites self._ptr without freeing a
 * previously owned buffer — potential leak.  The fix belongs in the .pyx
 * source (this file is Cython-generated); confirm against __init__/__dealloc__
 * ownership semantics before changing. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":12781
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit 'and': only resolve _numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 12781, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12781, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12781, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 12781, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12782
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 */
    __pyx_v_self->_ptr = ((nvmlGpuInstanceProfileInfo_v3_t *)malloc((sizeof(nvmlGpuInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":12783
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12784
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             self._owner = None
 */
      /* Same MemoryError construction pattern as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12784, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceProf_2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12784, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 12784, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12783
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":12785
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12785, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12785, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12785, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpuInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":12786
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12787
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12788
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12788, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12788, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 12788, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":12781
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":12790
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 12790, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":12780
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v3_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12792
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.version.__get__:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.version.__get__: reads the
 * `version` field of the wrapped nvmlGpuInstanceProfileInfo_v3_t through
 * self->_ptr and returns it boxed as a new Python int.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12795
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field into a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12792
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12797
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.version.__set__:
 * casts self to the extension-type struct and delegates to the
 * __pyx_pf_* implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.version.__set__: raises
 * ValueError if the instance is flagged read-only (self->_readonly);
 * otherwise converts `val` to a C unsigned int and stores it into the
 * wrapped struct's `version` field. Returns 0 on success, -1 with a
 * Python exception set on failure (read-only or conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12799
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12800
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12800, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12800, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12799
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12801
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python value to unsigned int (raises on overflow/type error). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12801, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12797
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12803
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.id.__get__:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.id.__get__: reads the
 * `id` field of the wrapped nvmlGpuInstanceProfileInfo_v3_t through
 * self->_ptr and returns it boxed as a new Python int.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12806
 *     def id(self):
 *         """int: """
 *         return self._ptr[0].id             # <<<<<<<<<<<<<<
 * 
 *     @id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field into a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12803
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12808
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.id.__set__:
 * casts self to the extension-type struct and delegates to the
 * __pyx_pf_* implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.id.__set__: raises
 * ValueError if the instance is flagged read-only (self->_readonly);
 * otherwise converts `val` to a C unsigned int and stores it into the
 * wrapped struct's `id` field. Returns 0 on success, -1 with a
 * Python exception set on failure (read-only or conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12810
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].id = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12811
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].id = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12811, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12811, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12810
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].id = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12812
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python value to unsigned int (raises on overflow/type error). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12812, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).id = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12808
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12814
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.slice_count.__get__:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.slice_count.__get__: reads
 * the `sliceCount` field of the wrapped nvmlGpuInstanceProfileInfo_v3_t
 * through self->_ptr and returns it boxed as a new Python int.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12817
 *     def slice_count(self):
 *         """int: """
 *         return self._ptr[0].sliceCount             # <<<<<<<<<<<<<<
 * 
 *     @slice_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field into a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sliceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12817, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12814
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.slice_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12819
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.slice_count.__set__:
 * casts self to the extension-type struct and delegates to the
 * __pyx_pf_* implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.slice_count.__set__: raises
 * ValueError if the instance is flagged read-only (self->_readonly);
 * otherwise converts `val` to a C unsigned int and stores it into the
 * wrapped struct's `sliceCount` field. Returns 0 on success, -1 with a
 * Python exception set on failure (read-only or conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12821
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sliceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12822
 *     def slice_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sliceCount = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12822, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12822, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12821
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sliceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12823
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sliceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python value to unsigned int (raises on overflow/type error). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12823, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sliceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12819
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.slice_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12825
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.instance_count.__get__:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.instance_count.__get__: reads
 * the `instanceCount` field of the wrapped nvmlGpuInstanceProfileInfo_v3_t
 * through self->_ptr and returns it boxed as a new Python int.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12828
 *     def instance_count(self):
 *         """int: """
 *         return self._ptr[0].instanceCount             # <<<<<<<<<<<<<<
 * 
 *     @instance_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field into a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).instanceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12828, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12825
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.instance_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12830
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.instance_count.__set__:
 * casts self to the extension-type struct and delegates to the
 * __pyx_pf_* implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.instance_count.__set__: raises
 * ValueError if the instance is flagged read-only (self->_readonly);
 * otherwise converts `val` to a C unsigned int and stores it into the
 * wrapped struct's `instanceCount` field. Returns 0 on success, -1 with a
 * Python exception set on failure (read-only or conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12832
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].instanceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12833
 *     def instance_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].instanceCount = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12833, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12833, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12832
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].instanceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12834
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].instanceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python value to unsigned int (raises on overflow/type error). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12834, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).instanceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12830
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.instance_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12836
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.multiprocessor_count.__get__:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.multiprocessor_count.__get__:
 * reads the `multiprocessorCount` field of the wrapped
 * nvmlGpuInstanceProfileInfo_v3_t through self->_ptr and returns it boxed
 * as a new Python int. Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12839
 *     def multiprocessor_count(self):
 *         """int: """
 *         return self._ptr[0].multiprocessorCount             # <<<<<<<<<<<<<<
 * 
 *     @multiprocessor_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field into a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).multiprocessorCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12839, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12836
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.multiprocessor_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12841
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.multiprocessor_count.__set__:
 * casts self to the extension-type struct and delegates to the
 * __pyx_pf_* implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.multiprocessor_count.__set__:
 * raises ValueError if the instance is flagged read-only (self->_readonly);
 * otherwise converts `val` to a C unsigned int and stores it into the
 * wrapped struct's `multiprocessorCount` field. Returns 0 on success, -1
 * with a Python exception set on failure (read-only or conversion error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12843
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12844
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].multiprocessorCount = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12844, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12844, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12843
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12845
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python value to unsigned int (raises on overflow/type error). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12845, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).multiprocessorCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12841
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.multiprocessor_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12847
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for GpuInstanceProfileInfo_v3.copy_engine_count.__get__:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.copy_engine_count.__get__ (from
 * _nvml.pyx:12850): reads the copyEngineCount field of the wrapped C struct
 * through self->_ptr and boxes it as a Python int.  Returns a new reference,
 * or NULL with a traceback entry recorded if boxing fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12850
 *     def copy_engine_count(self):
 *         """int: """
 *         return self._ptr[0].copyEngineCount             # <<<<<<<<<<<<<<
 * 
 *     @copy_engine_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).copyEngineCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12850, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12847
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.copy_engine_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12852
 *         return self._ptr[0].copyEngineCount
 * 
 *     @copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level setter
 * GpuInstanceProfileInfo_v3.copy_engine_count.__set__: downcasts self to the
 * extension-type struct and delegates to the typed impl below.  Returns 0 on
 * success, -1 on error (exception set).  Machine-generated by Cython 3.2.2 —
 * do not hand-edit; regenerate from cuda/bindings/_nvml.pyx instead. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope; this
   * relies on __Pyx_KwValues_VARARGS discarding its arguments in this build
   * configuration — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.copy_engine_count.__set__ (from
 * _nvml.pyx:12853-12856): raises ValueError when self._readonly is set,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].copyEngineCount.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12854
 *     @copy_engine_count.setter
 *     def copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].copyEngineCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12855
 *     def copy_engine_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].copyEngineCount = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise the
     * resulting exception instance and jump to the error label. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12855, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12855, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12854
 *     @copy_engine_count.setter
 *     def copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].copyEngineCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12856
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].copyEngineCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel;
   * PyErr_Occurred() disambiguates a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12856, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).copyEngineCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12852
 *         return self._ptr[0].copyEngineCount
 * 
 *     @copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def copy_engine_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.copy_engine_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12858
 *         self._ptr[0].copyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def decoder_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level getter
 * GpuInstanceProfileInfo_v3.decoder_count.__get__: downcasts self to the
 * extension-type struct and delegates to the typed impl below.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.decoder_count.__get__ (from
 * _nvml.pyx:12861): boxes self->_ptr[0].decoderCount (unsigned int) as a
 * Python int.  Returns a new reference, or NULL with a traceback recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12861
 *     def decoder_count(self):
 *         """int: """
 *         return self._ptr[0].decoderCount             # <<<<<<<<<<<<<<
 * 
 *     @decoder_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).decoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12861, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12858
 *         self._ptr[0].copyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def decoder_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.decoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12863
 *         return self._ptr[0].decoderCount
 * 
 *     @decoder_count.setter             # <<<<<<<<<<<<<<
 *     def decoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level setter
 * GpuInstanceProfileInfo_v3.decoder_count.__set__: downcasts self and
 * delegates to the typed impl below.  Returns 0 on success, -1 on error.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.decoder_count.__set__ (from
 * _nvml.pyx:12864-12867): raises ValueError when self._readonly is set,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].decoderCount.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12865
 *     @decoder_count.setter
 *     def decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].decoderCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12866
 *     def decoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].decoderCount = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12866, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12866, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12865
 *     @decoder_count.setter
 *     def decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].decoderCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12867
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].decoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel;
   * PyErr_Occurred() disambiguates a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12867, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).decoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12863
 *         return self._ptr[0].decoderCount
 * 
 *     @decoder_count.setter             # <<<<<<<<<<<<<<
 *     def decoder_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.decoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12869
 *         self._ptr[0].decoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def encoder_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level getter
 * GpuInstanceProfileInfo_v3.encoder_count.__get__: downcasts self and
 * delegates to the typed impl below.  Machine-generated by Cython 3.2.2 —
 * do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.encoder_count.__get__ (from
 * _nvml.pyx:12872): boxes self->_ptr[0].encoderCount (unsigned int) as a
 * Python int.  Returns a new reference, or NULL with a traceback recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12872
 *     def encoder_count(self):
 *         """int: """
 *         return self._ptr[0].encoderCount             # <<<<<<<<<<<<<<
 * 
 *     @encoder_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).encoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12872, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12869
 *         self._ptr[0].decoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def encoder_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.encoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12874
 *         return self._ptr[0].encoderCount
 * 
 *     @encoder_count.setter             # <<<<<<<<<<<<<<
 *     def encoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level setter
 * GpuInstanceProfileInfo_v3.encoder_count.__set__: downcasts self and
 * delegates to the typed impl below.  Returns 0 on success, -1 on error.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.encoder_count.__set__ (from
 * _nvml.pyx:12875-12878): raises ValueError when self._readonly is set,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].encoderCount.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12876
 *     @encoder_count.setter
 *     def encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].encoderCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12877
 *     def encoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].encoderCount = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12877, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12877, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12876
 *     @encoder_count.setter
 *     def encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].encoderCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12878
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].encoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel;
   * PyErr_Occurred() disambiguates a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12878, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).encoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12874
 *         return self._ptr[0].encoderCount
 * 
 *     @encoder_count.setter             # <<<<<<<<<<<<<<
 *     def encoder_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.encoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12880
 *         self._ptr[0].encoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpeg_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level getter
 * GpuInstanceProfileInfo_v3.jpeg_count.__get__: downcasts self and delegates
 * to the typed impl below.  Machine-generated by Cython 3.2.2 — do not
 * hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.jpeg_count.__get__ (from
 * _nvml.pyx:12883): boxes self->_ptr[0].jpegCount (unsigned int) as a Python
 * int.  Returns a new reference, or NULL with a traceback recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12883
 *     def jpeg_count(self):
 *         """int: """
 *         return self._ptr[0].jpegCount             # <<<<<<<<<<<<<<
 * 
 *     @jpeg_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).jpegCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12880
 *         self._ptr[0].encoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpeg_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.jpeg_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12885
 *         return self._ptr[0].jpegCount
 * 
 *     @jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def jpeg_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level setter
 * GpuInstanceProfileInfo_v3.jpeg_count.__set__: downcasts self and delegates
 * to the typed impl below.  Returns 0 on success, -1 on error.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.jpeg_count.__set__ (from
 * _nvml.pyx:12886-12889): raises ValueError when self._readonly is set,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].jpegCount.  Returns 0 on success, -1 with a Python exception
 * set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12887
 *     @jpeg_count.setter
 *     def jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].jpegCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12888
 *     def jpeg_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].jpegCount = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12888, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12888, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12887
 *     @jpeg_count.setter
 *     def jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].jpegCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12889
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].jpegCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel;
   * PyErr_Occurred() disambiguates a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12889, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).jpegCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12885
 *         return self._ptr[0].jpegCount
 * 
 *     @jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def jpeg_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.jpeg_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12891
 *         self._ptr[0].jpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level getter
 * GpuInstanceProfileInfo_v3.ofa_count.__get__: downcasts self and delegates
 * to the typed impl below.  Machine-generated by Cython 3.2.2 — do not
 * hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.ofa_count.__get__ (from
 * _nvml.pyx:12894): boxes self->_ptr[0].ofaCount (unsigned int) as a Python
 * int.  Returns a new reference, or NULL with a traceback recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12894
 *     def ofa_count(self):
 *         """int: """
 *         return self._ptr[0].ofaCount             # <<<<<<<<<<<<<<
 * 
 *     @ofa_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).ofaCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12891
 *         self._ptr[0].jpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.ofa_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12896
 *         return self._ptr[0].ofaCount
 * 
 *     @ofa_count.setter             # <<<<<<<<<<<<<<
 *     def ofa_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level setter
 * GpuInstanceProfileInfo_v3.ofa_count.__set__: downcasts self and delegates
 * to the typed impl below.  Returns 0 on success, -1 on error.
 * Machine-generated by Cython 3.2.2 — do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GpuInstanceProfileInfo_v3.ofa_count.__set__ (from
 * _nvml.pyx:12897-12900): raises ValueError when self._readonly is set,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].ofaCount.  Returns 0 on success, -1 with a Python exception
 * set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12898
 *     @ofa_count.setter
 *     def ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].ofaCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12899
 *     def ofa_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ofaCount = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12899, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12899, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12898
 *     @ofa_count.setter
 *     def ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].ofaCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12900
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].ofaCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel;
   * PyErr_Occurred() disambiguates a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12900, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ofaCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12896
 *         return self._ptr[0].ofaCount
 * 
 *     @ofa_count.setter             # <<<<<<<<<<<<<<
 *     def ofa_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.ofa_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12902
 *         self._ptr[0].ofaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated entry point for the Python-level getter
 * GpuInstanceProfileInfo_v3.memory_size_mb.__get__: downcasts self and
 * delegates to the typed impl below.  Machine-generated by Cython 3.2.2 —
 * do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS discarding its arguments
   * (__pyx_args/__pyx_nargs are not in scope) — generated code, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the memory_size_mb property getter: boxes the C
 * `unsigned PY_LONG_LONG` struct field `memorySizeMB` into a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12905
 *     def memory_size_mb(self):
 *         """int: """
 *         return self._ptr[0].memorySizeMB             # <<<<<<<<<<<<<<
 * 
 *     @memory_size_mb.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* convert the raw field to a Python int; fails only on allocation error */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).memorySizeMB); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12902
 *         self._ptr[0].ofaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.memory_size_mb.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12907
 *         return self._ptr[0].memorySizeMB
 * 
 *     @memory_size_mb.setter             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot wrapper for GpuInstanceProfileInfo_v3.memory_size_mb:
 * downcasts `self` and forwards `val` to the implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the memory_size_mb property setter.
 * Raises ValueError if the instance is read-only; otherwise converts `val`
 * to `unsigned PY_LONG_LONG` (raising OverflowError/TypeError on bad input)
 * and stores it into the wrapped struct's `memorySizeMB` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12909
 *     @memory_size_mb.setter
 *     def memory_size_mb(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].memorySizeMB = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12910
 *     def memory_size_mb(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].memorySizeMB = val
 * 
 */
    /* build and raise ValueError via the vectorcall fast path */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12910, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12910, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12909
 *     @memory_size_mb.setter
 *     def memory_size_mb(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].memorySizeMB = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12911
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].memorySizeMB = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* unbox the Python int; (unsigned PY_LONG_LONG)-1 doubles as the error
   * sentinel, so PyErr_Occurred() disambiguates a legitimate max value */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12911, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).memorySizeMB = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12907
 *         return self._ptr[0].memorySizeMB
 * 
 *     @memory_size_mb.setter             # <<<<<<<<<<<<<<
 *     def memory_size_mb(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.memory_size_mb.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12913
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* Getter slot wrapper for GpuInstanceProfileInfo_v3.name: downcasts `self`
 * and delegates to the implementation. Returns new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the name property getter: decodes the NUL-terminated
 * `char name[96]` struct field into a Python str via PyUnicode_FromString
 * (reads up to the first NUL). Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12916
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)             # <<<<<<<<<<<<<<
 * 
 *     @name.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* fails (returns NULL) on invalid UTF-8 in the field or allocation error */
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12916, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12913
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12918
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot wrapper for GpuInstanceProfileInfo_v3.name: downcasts `self`
 * and forwards `val` to the implementation. Returns 0 on success, -1 on
 * error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the name property setter.
 * Raises ValueError if the instance is read-only; encodes `val` to bytes
 * (`val.encode()`), rejects encodings of 96+ bytes, and copies the encoded
 * payload into the fixed `char name[96]` struct field.
 * Returns 0 on success, -1 with an exception set on failure.
 *
 * BUGFIX: the original ended with `memcpy(dst, ptr, 96)`, copying a fixed 96
 * bytes out of a bytes object that only holds len(buf)+1 bytes (payload +
 * NUL, len(buf) <= 95) -- a heap over-read of up to 95 bytes. We now zero-
 * fill the destination and copy only len(buf)+1 bytes. Observable behavior
 * is unchanged: the getter reads the field via PyUnicode_FromString, which
 * stops at the first NUL either way.
 * NOTE(review): this file is generated by Cython; the durable fix belongs in
 * cuda/bindings/_nvml.pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12920
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12921
 *     def name(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12921, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12921, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12920
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":12922
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 */
  /* call val.encode() and require an exact `bytes` result (or None) */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12922, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 12922, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":12923
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 12923, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12923, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":12924
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_name_m};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12924, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12924, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12923
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":12925
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 12925, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 12925, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":12926
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* zero-fill then copy only the len(buf)+1 bytes (payload + NUL) that the
   * bytes object actually owns; __pyx_t_4 still holds len(buf) < 96 */
  memset(((void *)(__pyx_v_self->_ptr[0]).name), 0, 96);
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).name), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4) + 1));

  /* "cuda/bindings/_nvml.pyx":12918
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12928
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def capabilities(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter slot wrapper for GpuInstanceProfileInfo_v3.capabilities: downcasts
 * `self` and delegates to the implementation. Returns new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the capabilities property getter: boxes the C
 * `unsigned int` struct field `capabilities` into a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":12931
 *     def capabilities(self):
 *         """int: """
 *         return self._ptr[0].capabilities             # <<<<<<<<<<<<<<
 * 
 *     @capabilities.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).capabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12931, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12928
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def capabilities(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.capabilities.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12933
 *         return self._ptr[0].capabilities
 * 
 *     @capabilities.setter             # <<<<<<<<<<<<<<
 *     def capabilities(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot wrapper for GpuInstanceProfileInfo_v3.capabilities: downcasts
 * `self` and forwards `val` to the implementation. Returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the capabilities property setter.
 * Raises ValueError if the instance is read-only; otherwise converts `val`
 * to `unsigned int` (raising OverflowError/TypeError on bad input) and
 * stores it into the wrapped struct's `capabilities` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":12935
 *     @capabilities.setter
 *     def capabilities(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].capabilities = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":12936
 *     def capabilities(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].capabilities = val
 * 
 */
    /* build and raise ValueError via the vectorcall fast path */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceProfileInfo_v3_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12936, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 12936, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12935
 *     @capabilities.setter
 *     def capabilities(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].capabilities = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":12937
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].capabilities = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 doubles as the error sentinel; PyErr_Occurred()
   * disambiguates a legitimate UINT_MAX value */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12937, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).capabilities = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":12933
 *         return self._ptr[0].capabilities
 * 
 *     @capabilities.setter             # <<<<<<<<<<<<<<
 *     def capabilities(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.capabilities.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12939
 *         self._ptr[0].capabilities = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the staticmethod GpuInstanceProfileInfo_v3.from_data:
 * parses the single positional-or-keyword argument `data` from the
 * fastcall/vararg calling convention and forwards it to the implementation.
 * Returns a new reference or NULL with an exception set.
 *
 * BUGFIX: the keyword-count error check read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose value is 0 or
 * 1, so `< 0` could never be true and a failing __Pyx_NumKwargs_FASTCALL
 * went undetected. The comparison now sits inside the macro argument.
 * NOTE(review): this file is generated by Cython; the durable fix belongs
 * upstream (Cython argument-parsing code generation). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12from_data, "GpuInstanceProfileInfo_v3.from_data(data)\n\nCreate an GpuInstanceProfileInfo_v3 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpu_instance_profile_info_v3_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which is always false */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12939, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* slow path: keyword arguments present */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12939, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 12939, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 12939, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* fast path: exactly one positional argument, no keywords */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12939, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12939, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.from_data: looks up the
 * module-level `gpu_instance_profile_info_v3_dtype` global and delegates to
 * the module helper __from_data with the dtype name, the dtype object and
 * the GpuInstanceProfileInfo_v3 type. Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":12946
 *             data (_numpy.ndarray): a single-element array of dtype `gpu_instance_profile_info_v3_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpu_instance_profile_info_v3_dtype", gpu_instance_profile_info_v3_dtype, GpuInstanceProfileInfo_v3)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* fails with NameError if the dtype global is missing */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_instance_profile_info_v3_dty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_profile_info_v3_dty, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12939
 *         self._ptr[0].capabilities = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12948
 *         return __from_data(data, "gpu_instance_profile_info_v3_dtype", gpu_instance_profile_info_v3_dtype, GpuInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for the static method GpuInstanceProfileInfo_v3.from_ptr.
 * Unpacks (ptr, readonly=False, owner=None) from either the fastcall or the
 * classic tuple/dict calling convention and dispatches to the implementation
 * function __pyx_pf_..._14from_ptr.
 *
 * Fix (review): the keyword-count error check was written as
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * `unlikely(x)` expands to __builtin_expect(!!(x), 0), which evaluates to 0 or
 * 1, so the `< 0` comparison was always false and a negative (error) result
 * from __Pyx_NumKwargs_FASTCALL was silently ignored.  The parenthesis is
 * moved so the check actually fires: unlikely(__pyx_kwds_len < 0). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14from_ptr, "GpuInstanceProfileInfo_v3.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpuInstanceProfileInfo_v3 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};  /* owned refs to (ptr, readonly, owner) */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved inside unlikely() so a negative error return is detected */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12948, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 12948, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":12949
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default, then verify the one required arg (ptr). */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 12948, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1..3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 12948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 12948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12948, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python-level values to C: ptr -> intptr_t, readonly -> bint. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 12949, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12949, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 12948, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":12948
 *         return __from_data(data, "gpu_instance_profile_info_v3_dtype", gpu_instance_profile_info_v3_dtype, GpuInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceProfileInfo_v3.from_ptr.
 * Wraps the raw address `ptr` in a new GpuInstanceProfileInfo_v3 object:
 *   - rejects ptr == 0 with ValueError;
 *   - if owner is None, malloc's a private nvmlGpuInstanceProfileInfo_v3_t,
 *     memcpy's the pointed-to struct into it and marks the object as owning
 *     the copy (_owned = 1);
 *   - otherwise borrows `ptr` directly and keeps a strong reference to
 *     `owner` to hold the underlying memory alive (_owned = 0);
 *   - records the `readonly` flag on the new object.
 * NOTE(review): the MemoryError on malloc failure leaks nothing, but the
 * just-allocated `obj` is released on the error path via the final
 * __Pyx_XDECREF(__pyx_v_obj), which also frees the struct in tp_dealloc —
 * generated-code invariant, do not reorder. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":12957
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)
*/
  /* Guard: a null address can never be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":12958
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)
 *         if owner is None:
*/
    /* Vectorcall ValueError("ptr must not be null (0)") and raise it. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12958, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 12958, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":12957
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)
*/
  }

  /* "cuda/bindings/_nvml.pyx":12959
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
*/
  /* Allocate the wrapper object via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12959, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":12960
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":12961
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
*/
    /* No owner: deep-copy the struct into memory this object owns. */
    __pyx_v_obj->_ptr = ((nvmlGpuInstanceProfileInfo_v3_t *)malloc((sizeof(nvmlGpuInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":12962
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":12963
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up by name (module global may shadow the
       * builtin), then called through the generic vectorcall helper. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12963, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceProf_2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12963, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 12963, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":12962
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":12964
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct; `ptr` is not retained after this. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpuInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":12965
 *                 raise MemoryError("Error allocating GpuInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":12966
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":12960
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceProfileInfo_v3 obj = GpuInstanceProfileInfo_v3.__new__(GpuInstanceProfileInfo_v3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlGpuInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":12968
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Borrowing path: hold a strong reference to `owner` instead of copying. */
    __pyx_v_obj->_ptr = ((nvmlGpuInstanceProfileInfo_v3_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":12969
 *         else:
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":12970
 *             obj._ptr = <nvmlGpuInstanceProfileInfo_v3_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":12971
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":12972
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12948
 *         return __from_data(data, "gpu_instance_profile_info_v3_dtype", gpu_instance_profile_info_v3_dtype, GpuInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for GpuInstanceProfileInfo_v3.__reduce_cython__.
 * Takes no arguments: any positional or keyword argument is rejected before
 * dispatching to the implementation (which unconditionally raises TypeError,
 * making instances unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_16__reduce_cython__, "GpuInstanceProfileInfo_v3.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positional or keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because the
 * wrapped `_ptr` C pointer cannot be serialized, so instances are
 * intentionally unpicklable. Never returns a value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with a cached message string; control never passes this. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for GpuInstanceProfileInfo_v3.__setstate_cython__.
 * Parses the single required `__pyx_state` argument and dispatches to the
 * implementation (which unconditionally raises TypeError).
 *
 * Fix (review): as in the other generated wrappers, the keyword-count error
 * check was `if (unlikely(__pyx_kwds_len) < 0)` — always false because
 * unlikely(x) yields !!(x). The parenthesis is moved inside so a negative
 * (error) count from __Pyx_NumKwargs_FASTCALL is no longer ignored. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_18__setstate_cython__, "GpuInstanceProfileInfo_v3.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved inside unlikely() so a negative error return is detected */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: at most one positional arg, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: always raises TypeError because the
 * wrapped `_ptr` C pointer cannot be restored from a pickled state; the
 * `__pyx_state` argument is ignored. Never returns a value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with a cached message string; control never passes this. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceProfileInfo_v3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":12975
 * 
 * 
 * cdef _get_compute_instance_placement_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstancePlacement_t pod = nvmlComputeInstancePlacement_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype describing nvmlComputeInstancePlacement_t:
 * fields 'start' and 'size_' as uint32, with 'offsets' computed from a stack
 * instance's field addresses and 'itemsize' = sizeof the C struct, so the
 * dtype's layout matches the C ABI exactly.
 *
 * Fix (review): the original assigned `__pyx_v_pod = __pyx_t_1;` with
 * __pyx_t_1 never initialized — copying an object with indeterminate value
 * (UB in C++, even though only field addresses are read afterwards). The
 * value-initialization `nvmlComputeInstancePlacement_t()` called for in the
 * .pyx source (`pod = nvmlComputeInstancePlacement_t()`) is restored; the
 * file is compiled as C++ per its distutils metadata, so this is well-formed
 * for the C struct type. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_placement_dtype_offsets(void) {
  nvmlComputeInstancePlacement_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlComputeInstancePlacement_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_compute_instance_placement_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":12976
 * 
 * cdef _get_compute_instance_placement_dtype_offsets():
 *     cdef nvmlComputeInstancePlacement_t pod = nvmlComputeInstancePlacement_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['start', 'size_'],
*/
  /* FIX: value-initialize the temporary before copying (was uninitialized). */
  __pyx_t_1 = nvmlComputeInstancePlacement_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":12977
 * cdef _get_compute_instance_placement_dtype_offsets():
 *     cdef nvmlComputeInstancePlacement_t pod = nvmlComputeInstancePlacement_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['start', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
*/
  /* Resolve _numpy.dtype once; the spec dict is built below and passed to it. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":12978
 *     cdef nvmlComputeInstancePlacement_t pod = nvmlComputeInstancePlacement_t()
 *     return _numpy.dtype({
 *         'names': ['start', 'size_'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  /* Spec dict: 'names' -> ['start', 'size_'] ('size_' avoids shadowing). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_start);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_start);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_start) != (0)) __PYX_ERR(0, 12978, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_size_2);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_size_2);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_size_2) != (0)) __PYX_ERR(0, 12978, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 12978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12979
 *     return _numpy.dtype({
 *         'names': ['start', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),
*/
  /* 'formats' -> [numpy.uint32, numpy.uint32], looked up at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 12979, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12979, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 12978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":12981
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 *         ],
*/
  /* 'offsets': field address minus base address gives the struct offset. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.start)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":12982
 *         'offsets': [
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlComputeInstancePlacement_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.size)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":12980
 *         'names': ['start', 'size_'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.start)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12980, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12980, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 12980, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 12978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":12984
 *             (<intptr_t>&(pod.size)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlComputeInstancePlacement_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  /* 'itemsize' pins the dtype to the exact C struct size (incl. padding). */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlComputeInstancePlacement_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 12978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the generic vectorcall helper. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12977, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":12975
 * 
 * 
 * cdef _get_compute_instance_placement_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstancePlacement_t pod = nvmlComputeInstancePlacement_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_compute_instance_placement_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13006
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=compute_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Python wrapper for ComputeInstancePlacement.__init__(self, size=1).
 * Parses at most one positional argument plus the optional "size" keyword
 * (defaulting to the interned int 1) into values[0], then forwards to the
 * implementation function.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and therefore never
     * < 0, so a -1 error return from __Pyx_NumKwargs_VARARGS (PyDict_Size)
     * was silently ignored.  The comparison belongs inside the unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13006, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13006, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < 0) __PYX_ERR(0, 13006, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13006, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13006, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* release any argument references already taken before reporting */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__init__(self, size=1):
 * allocates `arr = numpy.empty(size, dtype=compute_instance_placement_dtype)`,
 * stores `arr.view(numpy.recarray)` into self._data, and asserts that the
 * dtype itemsize equals sizeof(nvmlComputeInstancePlacement_t).
 * Cython-generated code; returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":13007
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=compute_instance_placement_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlComputeInstancePlacement_t), \
 */
  /* Look up _numpy.empty and the module-level dtype object. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_compute_instance_placement_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  /* __pyx_t_5: argument offset — 1 for a plain callable (slot 0 unused),
   * 0 when a bound method is unpacked into (self, args...) below. */
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    /* Vectorcall numpy.empty(size, dtype=compute_instance_placement_dtype). */
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 13007, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":13008
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=compute_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlComputeInstancePlacement_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlComputeInstancePlacement_t) }"
 */
  /* self._data = arr.view(numpy.recarray) — fastcall of the "view" method. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13008, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13008, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Swap the new recarray view into the _data slot (old value released). */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":13009
 *         arr = _numpy.empty(size, dtype=compute_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlComputeInstancePlacement_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlComputeInstancePlacement_t) }"
 * 
 */
  /* assert self._data.itemsize == sizeof(nvmlComputeInstancePlacement_t);
   * compiled out entirely when CYTHON_WITHOUT_ASSERTIONS is defined and
   * skipped at runtime when Python assertions are disabled (python -O). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlComputeInstancePlacement_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13009, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13009, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 13009, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":13010
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlComputeInstancePlacement_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlComputeInstancePlacement_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string assertion message by joining the four parts. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlComputeInstancePlacement_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13010, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 13009, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 13009, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":13006
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=compute_instance_placement_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13012
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlComputeInstancePlacement_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3__repr__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ComputeInstancePlacement.__repr__ (unary tp_repr slot:
 * only `self` is passed; there is no argument tuple and no keywords).
 * Fix: the previous revision declared __pyx_kwvalues and initialised it
 * from __pyx_args/__pyx_nargs — identifiers that do not exist in this
 * function's scope — which cannot compile.  Both lines are removed. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3__repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__repr__: when self._data.size
 * is greater than 1, returns
 * f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>",
 * otherwise f"<{__name__}.ComputeInstancePlacement object at {hex(id(self))}>".
 * Cython-generated f-string code: each interpolation is formatted to a
 * unicode object and the fragments are joined with __Pyx_PyUnicode_Join. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":13013
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate self._data.size > 1 via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13013, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13013, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13013, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":13014
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.ComputeInstancePlacement object at {hex(id(self))}>"
 */
    /* Array repr: format __name__, self._data.size and hex(id(self)),
     * then join 7 fragments (3 literal parts + 3 interpolations + ">"). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_ComputeInstancePlacement_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 32 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13014, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13013
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":13016
 *             return f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.ComputeInstancePlacement object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar repr: join 5 fragments (no size interpolation). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_ComputeInstancePlacement_object;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 36 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13016, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":13012
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlComputeInstancePlacement_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.ComputeInstancePlacement_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13018
 *             return f"<{__name__}.ComputeInstancePlacement object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for the ComputeInstancePlacement.ptr property getter
 * (descriptor __get__: only `self` is passed; no argument tuple).
 * Fix: the previous revision declared __pyx_kwvalues and initialised it
 * from __pyx_args/__pyx_nargs — identifiers that do not exist in this
 * function's scope — which cannot compile.  Both lines are removed. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3ptr_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ComputeInstancePlacement.ptr property getter:
 * returns self._data.ctypes.data (the NumPy array's data pointer) as a
 * Python int object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13021
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Chained attribute lookup: self._data.ctypes, then .data. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13018
 *             return f"<{__name__}.ComputeInstancePlacement object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13023
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (cdef) helper: returns self._data.ctypes.data as an intptr_t.
 * Unlike the ptr property, this returns a raw C integer for internal use.
 * On error it returns 0 with a Python exception set (Cython cdef
 * convention: callers must check PyErr_Occurred, since 0 is also a
 * legal pointer value). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24ComputeInstancePlacement__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13024
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* NOTE(review): converts via PyLong_AsSsize_t — assumes intptr_t and
   * Py_ssize_t have the same width on supported platforms; confirm for
   * any new target ABI. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13024, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13023
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13026
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5__int__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ComputeInstancePlacement.__int__ (unary nb_int slot:
 * only `self` is passed; there is no argument tuple and no keywords).
 * Fix: the previous revision declared __pyx_kwvalues and initialised it
 * from __pyx_args/__pyx_nargs — identifiers that do not exist in this
 * function's scope — which cannot compile.  Both lines are removed. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5__int__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__int__: raises TypeError when
 * self._data.size > 1 (use .ptr for arrays), otherwise returns
 * self._data.ctypes.data (the data pointer) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":13027
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate self._data.size > 1 via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13027, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13027, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":13028
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Construct and raise TypeError(<interned message>). */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13028, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13028, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13027
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":13030
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13026
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13032
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_7__len__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ComputeInstancePlacement.__len__ (sq_length slot:
 * only `self` is passed; there is no argument tuple and no keywords).
 * Fix: the previous revision declared __pyx_kwvalues and initialised it
 * from __pyx_args/__pyx_nargs — identifiers that do not exist in this
 * function's scope — which cannot compile.  Both lines are removed. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_7__len__(PyObject *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__len__: returns
 * self._data.size coerced to Py_ssize_t; returns -1 with an exception
 * set on failure (CPython sq_length convention). */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":13033
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 13033, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13032
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13035
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Slot-level wrapper for __eq__: casts the PyObject* self to the
 * extension-type struct and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__eq__ (see the .pyx source
 * echoed inline below).  Returns Py_False when `other` is not a
 * ComputeInstancePlacement or the two `_data` objects differ in `size` or
 * `dtype`; otherwise returns bool((self._data == other._data).all()).
 * Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":13036
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
*/
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":13037
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  /* Short-circuiting three-way `or`: type check, then size, then dtype.
   * __pyx_n_u_data_2 is the interned name for the `_data` attribute. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 13037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13038
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13037
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  }

  /* "cuda/bindings/_nvml.pyx":13039
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Element-wise rich compare, then call .all() on the result and coerce
   * the truth value to a canonical Python bool via double negation. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13039, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13039, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13039, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13039, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13035
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, ComputeInstancePlacement)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13041
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Getter-slot wrapper for the `start` property: casts self and delegates
 * to the typed __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for the `start` property.  When self._data.size
 * == 1 it returns int(self._data.start[0]) (a scalar); otherwise it
 * returns the `start` attribute object itself.  NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13044
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.start[0])
 *         return self._data.start
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13044, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13045
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.start[0])             # <<<<<<<<<<<<<<
 *         return self._data.start
 * 
*/
    /* Single-element case: index element 0 of the `start` field and
     * convert it to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_start); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13045, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13044
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.start[0])
 *         return self._data.start
*/
  }

  /* "cuda/bindings/_nvml.pyx":13046
 *         if self._data.size == 1:
 *             return int(self._data.start[0])
 *         return self._data.start             # <<<<<<<<<<<<<<
 * 
 *     @start.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_start); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13046, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13041
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def start(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.start.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13048
 *         return self._data.start
 * 
 *     @start.setter             # <<<<<<<<<<<<<<
 *     def start(self, val):
 *         self._data.start = val
*/

/* Python wrapper */
/* Setter-slot wrapper for the `start` property: casts self and delegates
 * to the typed __set__ implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for the `start` property: performs
 * setattr(self._data, "start", val).  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":13050
 *     @start.setter
 *     def start(self, val):
 *         self._data.start = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_start, __pyx_v_val) < (0)) __PYX_ERR(0, 13050, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":13048
 *         return self._data.start
 * 
 *     @start.setter             # <<<<<<<<<<<<<<
 *     def start(self, val):
 *         self._data.start = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.start.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13052
 *         self._data.start = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Getter-slot wrapper for the `size_` property: casts self and delegates
 * to the typed __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for the `size_` property (the record field named
 * `size_`, distinct from the array's own `size`).  When self._data.size
 * == 1 it returns int(self._data.size_[0]); otherwise the `size_`
 * attribute object itself.  NULL on error.
 * Note: __pyx_n_u_size is the interned name "size" (array length), while
 * __pyx_n_u_size_2 is the interned name for the `size_` field. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13055
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.size_[0])
 *         return self._data.size_
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13055, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13056
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.size_[0])             # <<<<<<<<<<<<<<
 *         return self._data.size_
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13056, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13055
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.size_[0])
 *         return self._data.size_
*/
  }

  /* "cuda/bindings/_nvml.pyx":13057
 *         if self._data.size == 1:
 *             return int(self._data.size_[0])
 *         return self._data.size_             # <<<<<<<<<<<<<<
 * 
 *     @size_.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13057, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13052
 *         self._data.start = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def size_(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.size_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13059
 *         return self._data.size_
 * 
 *     @size_.setter             # <<<<<<<<<<<<<<
 *     def size_(self, val):
 *         self._data.size_ = val
*/

/* Python wrapper */
/* Setter-slot wrapper for the `size_` property: casts self and delegates
 * to the typed __set__ implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for the `size_` property: performs
 * setattr(self._data, "size_", val) via the interned name
 * __pyx_n_u_size_2.  Returns 0 on success, -1 with an exception set on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":13061
 *     @size_.setter
 *     def size_(self, val):
 *         self._data.size_ = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size_2, __pyx_v_val) < (0)) __PYX_ERR(0, 13061, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":13059
 *         return self._data.size_
 * 
 *     @size_.setter             # <<<<<<<<<<<<<<
 *     def size_(self, val):
 *         self._data.size_ = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.size_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13063
 *         self._data.size_ = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Subscript-slot wrapper for __getitem__: casts self and delegates to the
 * typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__getitem__ (pyx source
 * echoed inline below).  Integer keys are bounds-checked against
 * self._data.size (raising IndexError), negative keys are normalized, and
 * a length-1 slice _data[key_:key_+1] is wrapped via
 * ComputeInstancePlacement.from_data.  Any other key indexes _data
 * directly; if the result is a numpy.recarray with the
 * compute_instance_placement_dtype it is likewise wrapped, else returned
 * as-is.  NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":13066
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13067
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
*/
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 13067, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":13068
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
*/
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13068, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 13068, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":13069
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    /* Bounds check: valid Python indices are -size .. size-1. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13070
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
*/
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13070, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 13070, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13069
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    }

    /* "cuda/bindings/_nvml.pyx":13071
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])
*/
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":13072
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
*/
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":13071
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])
*/
    }

    /* "cuda/bindings/_nvml.pyx":13073
 *             if key_ < 0:
 *                 key_ += size
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype:
*/
    /* Take a length-1 slice (keeps array-ness) and wrap it in a new
     * ComputeInstancePlacement via the classmethod `from_data`. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13073, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13073, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13066
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  }

  /* "cuda/bindings/_nvml.pyx":13074
 *                 key_ += size
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype:
 *             return ComputeInstancePlacement.from_data(out)
*/
  /* Non-int key (e.g. slice or field name): delegate to _data's own
   * __getitem__. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":13075
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             return ComputeInstancePlacement.from_data(out)
 *         return out
*/
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_compute_instance_placement_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 13075, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13076
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype:
 *             return ComputeInstancePlacement.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13076, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13075
 *             return ComputeInstancePlacement.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             return ComputeInstancePlacement.from_data(out)
 *         return out
*/
  }

  /* "cuda/bindings/_nvml.pyx":13077
 *         if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype:
 *             return ComputeInstancePlacement.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13063
 *         self._data.size_ = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13079
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Subscript-assignment-slot wrapper for __setitem__: casts self and
 * delegates to the typed implementation.  Returns 0 on success, -1 on
 * error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__setitem__: forwards the item
 * assignment to the wrapped numpy recarray via self._data[key] = val.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":13080
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Delegate to the recarray's own __setitem__; any TypeError/IndexError
   * from numpy propagates unchanged to the Python caller. */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 13080, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":13079
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13082
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstancePlacement instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL entry point for the staticmethod ComputeInstancePlacement.from_data(data).
 * Unpacks exactly one positional-or-keyword argument ("data") from either the
 * fastcall argument vector or a legacy args tuple, then delegates to the
 * __pyx_pf_* implementation.  The values[] array owns temporary references
 * that are released on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_14from_data, "ComputeInstancePlacement.from_data(data)\n\nCreate an ComputeInstancePlacement instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `compute_instance_placement_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): the `< 0` here compares the result of unlikely(), not the
     * raw length; with the usual __builtin_expect(!!(x),0) definition this
     * branch can never be taken — verify the unlikely() definition used by
     * this Cython version before relying on this error check. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13082, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 13082, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 13082, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13082, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13082, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any references collected into values[] before raising. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.from_data(data):
 *   1. allocate a fresh ComputeInstancePlacement via the type's tp_new;
 *   2. validate that `data` is a numpy.ndarray, is 1-D, and has dtype
 *      compute_instance_placement_dtype (TypeError/ValueError otherwise);
 *   3. store data.view(numpy.recarray) into obj._data and return obj.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":13089
 *             data (_numpy.ndarray): a 1D array of dtype `compute_instance_placement_dtype` holding the data.
 *         """
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Direct tp_new call: bypasses __init__, so obj._data holds the default
   * value set by tp_new until assigned below. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstancePlacement(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13089, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":13090
 *         """
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* Look up numpy.ndarray dynamically from the module globals each call. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 13090, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":13091
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13091, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13091, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13090
 *         """
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":13092
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != compute_instance_placement_dtype:
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13092, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 13092, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":13093
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != compute_instance_placement_dtype:
 *             raise ValueError("data array must be of dtype compute_instance_placement_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13093, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13093, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13092
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != compute_instance_placement_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":13094
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != compute_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype compute_instance_placement_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* dtype comparison goes through rich comparison (Py_NE) because numpy
   * dtype equality is object-level, not pointer identity. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_compute_instance_placement_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13094, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 13094, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":13095
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != compute_instance_placement_dtype:
 *             raise ValueError("data array must be of dtype compute_instance_placement_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_comp};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13095, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 13095, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13094
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != compute_instance_placement_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype compute_instance_placement_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":13096
 *         if data.dtype != compute_instance_placement_dtype:
 *             raise ValueError("data array must be of dtype compute_instance_placement_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* data.view(numpy.recarray) shares the caller's buffer; no copy is made. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13096, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Swap the new view into obj->_data, releasing the value tp_new stored. */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13098
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13082
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstancePlacement instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ComputeInstancePlacement instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL entry point for the staticmethod
 * ComputeInstancePlacement.from_ptr(ptr, size=1, readonly=False).
 * Unpacks up to three positional-or-keyword arguments, converts them to
 * intptr_t / size_t / bint, applies the defaults for missing optional
 * arguments, and delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_16from_ptr, "ComputeInstancePlacement.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an ComputeInstancePlacement instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `< 0` applies to the result of unlikely(); with the usual
     * __builtin_expect(!!(x),0) expansion this branch cannot fire — confirm
     * against the unlikely() definition in this file's preamble. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13100, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path; fallthrough switch collects the
       * positionals that were supplied, keywords fill the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 13100, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 13100, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1..3 arguments, ptr is mandatory. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13100, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13100, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13101, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 13101, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13101, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":13101
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an ComputeInstancePlacement instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 13100, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any references held in values[] before raising. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":13100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ComputeInstancePlacement instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.from_ptr(ptr, size, readonly):
 *   1. reject a NULL pointer (ValueError);
 *   2. allocate a fresh ComputeInstancePlacement via tp_new;
 *   3. wrap the raw memory at `ptr` in a memoryview of
 *      sizeof(nvmlComputeInstancePlacement_t) * size bytes, read-only or
 *      writable per `readonly`;
 *   4. build a numpy.ndarray over that buffer with the struct dtype, view it
 *      as a recarray, store it into obj._data and return obj.
 * The caller retains ownership of the underlying memory; the returned object
 * only references it through the memoryview.  Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13109
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13110
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13110, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13110, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13109
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 */
  }

  /* "cuda/bindings/_nvml.pyx":13111
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Direct tp_new call: bypasses __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstancePlacement(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13111, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13112
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag)
 */
  /* flag is kept as a Python int (cdef object in the .pyx), hence the
   * box here and the unbox at line 13114 below. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13112, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13112, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13114
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=compute_instance_placement_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13114, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":13113
 *         cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=compute_instance_placement_dtype)
 */
  /* The memoryview does NOT own the memory; the caller must keep the
   * pointed-to buffer alive for the lifetime of the returned object. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlComputeInstancePlacement_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13115
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=compute_instance_placement_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* Vectorcall of numpy.ndarray(size, buffer=buf, dtype=...) with the two
   * keyword arguments assembled through the kwnames builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_compute_instance_placement_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13115, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 13115, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 13115, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13115, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13116
 *             <char*>ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=compute_instance_placement_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13116, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Swap the recarray view into obj->_data, releasing tp_new's value. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13118
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ComputeInstancePlacement instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13002
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Forward declaration of the '_data' property getter wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
/* CPython getter wrapper for the readonly attribute
 * ComputeInstancePlacement._data: casts the generic PyObject* self to the
 * extension-type struct and delegates to the Cython-generated implementation.
 * Generated by Cython — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in this
   * build configuration (the generated code compiles) — confirm against the
   * Cython 3.2.2 utility-code definition before touching. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ComputeInstancePlacement._data getter.
 * Returns a new strong reference to the object stored in the instance's
 * _data slot (populated elsewhere, e.g. from_ptr stores a numpy recarray
 * view there per the .pyx source echoed above). Cannot fail. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* incref before handing the slot's object out to the caller */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Forward declaration of the __reduce_cython__ wrapper (pickle support).
 * Signature differs by build: fastcall (vectorcall-style args) vs classic
 * (args tuple + kwargs dict). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_18__reduce_cython__, "ComputeInstancePlacement.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_18__reduce_cython__};
/* CPython wrapper for ComputeInstancePlacement.__reduce_cython__: rejects any
 * positional or keyword arguments, then delegates to the implementation.
 * Generated by Cython — do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size can report an error (< 0); propagate it to the caller */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__reduce_cython__ (pickle
 * protocol). Builds the state tuple (self._data[, __dict__]) and returns
 * either
 *   (__pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, None), state)
 * when __setstate__ should be used, or
 *   (__pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state))
 * otherwise. 0xa75e18a (== 175497610) is the generated layout checksum that
 * the unpickler validates. Generated by Cython — do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  /* state = (self._data,) — a 1-tuple holding the only C-level attribute */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  /* getattr with a None default: instances without __dict__ yield None */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* short-circuit 'and': only truth-test _dict if it is not None */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    /* append the instance __dict__ to the state tuple */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* NOTE(review): per the source echo above this truth-tests the constant
     * tuple ('self._data is not None',); a non-empty tuple is always truthy,
     * so use_setstate becomes 1 on this branch as well. Generated code. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state)
 */
    /* 3-tuple form: (callable, (type, checksum, None), state) — state is
     * delivered via __setstate__ by the pickle machinery */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ComputeInstancePl); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ComputeInstancePlacement__set_state(self, __pyx_state)
 */
  /*else*/ {
    /* 2-tuple form: (callable, (type, checksum, state)) — state passed
     * directly to the unpickle callable */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ComputeInstancePl); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  /* error path: release temporaries, record traceback, return NULL */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ComputeInstancePlacement__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Forward declaration of the __setstate_cython__ wrapper (unpickle support). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_20__setstate_cython__, "ComputeInstancePlacement.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_20__setstate_cython__};
/* CPython wrapper for ComputeInstancePlacement.__setstate_cython__:
 * unpacks the single required argument __pyx_state (positional or keyword),
 * then delegates to the implementation. Generated by Cython — do not
 * hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size can report an error (< 0); propagate it */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* keyword path: collect positionals first, then parse keywords and
       * verify the required argument was supplied */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* fast path: exactly one positional argument, no keywords */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* argument-unpacking error: drop collected values and report */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstancePlacement.__setstate_cython__: validates
 * that __pyx_state is a tuple (rejecting None and other types with
 * TypeError), then restores the instance via the generated
 * __pyx_unpickle_ComputeInstancePlacement__set_state helper. Returns None on
 * success, NULL with an exception set on failure. Generated by Cython — do
 * not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_ComputeInstancePlacement__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* type check: the helper is declared to take a 'tuple not None' */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ComputeInstancePlacement__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* the helper's return value is discarded; this method returns None */
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ComputeInstancePlacement__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstancePlacement.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13121
 * 
 * 
 * cdef _get_compute_instance_profile_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_profile_info_v2_dtype_offsets(void) {
  nvmlComputeInstanceProfileInfo_v2_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlComputeInstanceProfileInfo_v2_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  size_t __pyx_t_18;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_compute_instance_profile_info_v2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":13122
 * 
 * cdef _get_compute_instance_profile_info_v2_dtype_offsets():
 *     cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":13123
 * cdef _get_compute_instance_profile_info_v2_dtype_offsets():
 *     cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":13124
 *     cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(11); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_slice_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_instance_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_multiprocessor_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_copy_engine_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_copy_engine_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_shared_copy_engine_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_decoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_decoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_shared_decoder_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_encoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_encoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_shared_encoder_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_jpeg_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_jpeg_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_shared_jpeg_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_ofa_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_ofa_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_shared_ofa_count) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_mstate_global->__pyx_n_u_name) != (0)) __PYX_ERR(0, 13124, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 13124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13125
 *     return _numpy.dtype({
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(11); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_t_17) != (0)) __PYX_ERR(0, 13125, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 13124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13127
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13127, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":13128
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
*/
  __pyx_t_17 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 13128, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);

  /* "cuda/bindings/_nvml.pyx":13129
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
*/
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sliceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13129, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);

  /* "cuda/bindings/_nvml.pyx":13130
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
*/
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.instanceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 13130, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);

  /* "cuda/bindings/_nvml.pyx":13131
 *             (<intptr_t>&(pod.sliceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.multiprocessorCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 13131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":13132
 *             (<intptr_t>&(pod.instanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedCopyEngineCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 13132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":13133
 *             (<intptr_t>&(pod.multiprocessorCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedDecoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 13133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":13134
 *             (<intptr_t>&(pod.sharedCopyEngineCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedEncoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":13135
 *             (<intptr_t>&(pod.sharedDecoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedJpegCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":13136
 *             (<intptr_t>&(pod.sharedEncoderCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedOfaCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":13137
 *             (<intptr_t>&(pod.sharedJpegCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sharedOfaCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlComputeInstanceProfileInfo_v2_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.name)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":13126
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(11); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_17) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_16) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_15) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_14) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_13) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_12) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_11) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_10) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_9) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 10, __pyx_t_8) != (0)) __PYX_ERR(0, 13126, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_17 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 13124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":13139
 *             (<intptr_t>&(pod.name)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlComputeInstanceProfileInfo_v2_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlComputeInstanceProfileInfo_v2_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 13124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_18 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_18 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_18, (2-__pyx_t_18) | (__pyx_t_18*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13123, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13121
 * 
 * 
 * cdef _get_compute_instance_profile_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_compute_instance_profile_info_v2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13156
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython tp_init entry point for ComputeInstanceProfileInfo_v2.
 * The pyx-level __init__ takes only `self`, so this wrapper rejects any
 * positional or keyword arguments before delegating to the Cython-level
 * implementation. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unchecked GET_SIZE form is used only when the
   * build guarantees __pyx_args is a real tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) accepts no positional arguments... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ...and no keyword arguments either. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.__init__:
 * zero-allocates one nvmlComputeInstanceProfileInfo_v2_t via calloc, marks
 * this wrapper as the owner of the buffer (so __dealloc__ frees it), and
 * raises MemoryError if the allocation fails.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":13157
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 */
  /* calloc zero-initializes the whole struct. */
  __pyx_v_self->_ptr = ((nvmlComputeInstanceProfileInfo_v2_t *)calloc(1, (sizeof(nvmlComputeInstanceProfileInfo_v2_t))));

  /* "cuda/bindings/_nvml.pyx":13158
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13159
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError at module level (not the C builtin slot), call it
     * with the error string, and raise the resulting instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    /* Unpack bound methods so the vectorcall below can pass `self` inline. */
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13159, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13159, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13158
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":13160
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: incref the new value before dropping the old one. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":13161
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":13162
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":13156
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: drop any temporaries still held, record the traceback. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13164
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlComputeInstanceProfileInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* CPython tp_dealloc-time wrapper for __dealloc__: casts `self` to the
 * extension-type struct and delegates. Note: __pyx_args/__pyx_nargs are not
 * parameters here; __Pyx_KwValues_VARARGS is a macro that ignores its
 * arguments in this configuration, so the line compiles as generated. */
static void __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of ComputeInstanceProfileInfo_v2.__dealloc__:
 * if this wrapper owns its POD buffer and the pointer is still live, free it
 * exactly once. The pointer is cleared *before* free() so any re-entrant
 * observer sees NULL rather than a dangling pointer. Buffers borrowed from
 * another owner (_owned == false) are left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":13165-13169 — short-circuit of
   *   if self._owned and self._ptr != NULL: ...free...  */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlComputeInstanceProfileInfo_v2_t *__pyx_doomed = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;  /* clear before free: defends against double-free */
    free(__pyx_doomed);
  }
}

/* "cuda/bindings/_nvml.pyx":13171
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython tp_repr wrapper: casts `self` and delegates to the Cython-level
 * __repr__ implementation. Returns a new unicode reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__: builds the f-string
 *   f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * by formatting the two dynamic pieces and joining five unicode fragments
 * in one pass. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":13172
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: look up the module's __name__ and str-format it. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id(self) -> hex(...) -> str(...). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the 5 fragments: "<", __name__, ".ComputeInstanceProfileInfo_v2
   * object at ", hex-string, ">". The extra args pre-size the result
   * (length estimate and max char value) for a single allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ComputeInstanceProfileInfo_v2_o;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 41 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13171
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record the traceback. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13174
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for `ptr`: casts `self` and delegates to the
 * Cython-level __get__ implementation. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property: returns the address of the wrapped
 * struct as a Python int (via intptr_t). Returns NULL with an exception set
 * only if the int object cannot be allocated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13177
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value; PyLong_FromSsize_t is valid here since intptr_t
   * and Py_ssize_t share a representation on supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13174
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13179
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for the `ptr` property: returns the address of the
 * wrapped nvmlComputeInstanceProfileInfo_v2_t as an intptr_t, with no
 * Python-object boxing. Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":13180 — return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":13182
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython nb_int wrapper for __int__: casts `self` and delegates to the
 * Cython-level implementation. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__: int(obj) yields the wrapped struct's address,
 * identical to the `ptr` property. Returns NULL with an exception set only
 * if the int object cannot be allocated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":13183
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13183, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13182
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13185
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceProfileInfo_v2 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v2):
*/

/* Python wrapper */
/* CPython tp_richcompare-style wrapper for __eq__: casts `self`, forwards
 * `other` unchanged, and delegates. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of ComputeInstanceProfileInfo_v2.__eq__.
 * Returns False for non-ComputeInstanceProfileInfo_v2 operands; otherwise
 * compares the pointed-to nvmlComputeInstanceProfileInfo_v2_t structs
 * bytewise with memcmp and returns the resulting bool.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":13187
 *     def __eq__(self, other):
 *         cdef ComputeInstanceProfileInfo_v2 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  /* isinstance(other, ComputeInstanceProfileInfo_v2) check. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13188
 *         cdef ComputeInstanceProfileInfo_v2 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13187
 *     def __eq__(self, other):
 *         cdef ComputeInstanceProfileInfo_v2 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":13189
 *         if not isinstance(other, ComputeInstanceProfileInfo_v2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0)
 * 
*/
  /* Downcast other to the extension-type struct (None also passes TypeTest). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2))))) __PYX_ERR(0, 13189, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":13190
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  /* Bytewise struct comparison of the two wrapped NVML structs. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlComputeInstanceProfileInfo_v2_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13185
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceProfileInfo_v2 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v2):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13192
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/

/* Python wrapper */
/* Cython-generated mp_ass_subscript slot wrapper for __setitem__: casts self
 * and forwards (key, val) to the _10__setitem__ implementation.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of ComputeInstanceProfileInfo_v2.__setitem__.
 * Two behaviors, matching the .pyx source:
 *   - obj[0] = <numpy.ndarray>: malloc a fresh nvmlComputeInstanceProfileInfo_v2_t,
 *     memcpy the array's buffer (val.ctypes.data) into it, mark the struct as
 *     owned (freed by this object) with no external owner, and mirror the
 *     array's writeable flag into self._readonly.
 *   - any other key: fall back to setattr(self, key, val) so named fields can
 *     be assigned through subscript syntax.
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":13193
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit "key == 0 and isinstance(val, numpy.ndarray)". */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13193, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 13193, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13194
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
*/
    /* NOTE(review): any previously owned _ptr is overwritten here without
     * free; presumably ownership bookkeeping elsewhere prevents a leak —
     * confirm against the .pyx dealloc path. */
    __pyx_v_self->_ptr = ((nvmlComputeInstanceProfileInfo_v2_t *)malloc((sizeof(nvmlComputeInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":13195
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13196
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             self._owner = None
*/
      /* Raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
       * via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13196, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13196, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 13196, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13195
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":13197
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* Copy sizeof(struct) bytes from the ndarray buffer (val.ctypes.data,
     * an int address) into the freshly malloc'd struct. Assumes the array
     * buffer holds at least sizeof(nvmlComputeInstanceProfileInfo_v2_t)
     * contiguous bytes — caller contract from the .pyx. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13197, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13197, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13197, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlComputeInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":13198
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    /* No external object keeps the buffer alive; we copied it. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13199
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    /* This object now owns (and must free) the malloc'd struct. */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13200
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    /* _readonly = not val.flags.writeable */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13200, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13200, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 13200, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":13193
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":13202
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    /* Non-buffer keys are treated as attribute names. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 13202, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":13192
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13204
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter slot wrapper for the `version` property: casts
 * self and delegates to the implementation function.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for ComputeInstanceProfileInfo_v2.version:
 * returns self._ptr[0].version (unsigned int) as a Python int.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13207
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13207, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13204
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13209
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter slot wrapper for the `version` property: casts
 * self and forwards val to the implementation function.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated setter for ComputeInstanceProfileInfo_v2.version:
 * raises ValueError if the instance is read-only, otherwise converts val
 * to unsigned int and stores it into self._ptr[0].version.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13211
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13212
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
*/
    /* Build and raise the ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13212, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13212, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13211
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13213
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Overflow/type errors from the int conversion propagate as exceptions. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13213, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13209
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13215
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter slot wrapper for the `id` property: casts self
 * and delegates to the implementation function.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for ComputeInstanceProfileInfo_v2.id:
 * returns self._ptr[0].id (unsigned int) as a Python int.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13218
 *     def id(self):
 *         """int: """
 *         return self._ptr[0].id             # <<<<<<<<<<<<<<
 * 
 *     @id.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13215
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13220
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter slot wrapper for the `id` property: casts self
 * and forwards val to the implementation function.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated setter for ComputeInstanceProfileInfo_v2.id:
 * raises ValueError if the instance is read-only, otherwise converts val
 * to unsigned int and stores it into self._ptr[0].id.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13222
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].id = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13223
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].id = val
 * 
*/
    /* Build and raise the ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13223, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13223, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13222
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].id = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13224
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Overflow/type errors from the int conversion propagate as exceptions. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13224, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).id = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13220
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13226
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated getter slot wrapper for the `slice_count` property:
 * casts self and delegates to the implementation function.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter for ComputeInstanceProfileInfo_v2.slice_count:
 * returns self._ptr[0].sliceCount (unsigned int) as a Python int.
 * NOTE: generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13229
 *     def slice_count(self):
 *         """int: """
 *         return self._ptr[0].sliceCount             # <<<<<<<<<<<<<<
 * 
 *     @slice_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sliceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13226
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.slice_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13231
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter slot wrapper for the `slice_count` property:
 * casts self and forwards val to the implementation function.
 * NOTE: generated code — edit the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.slice_count.__set__:
 * raises ValueError when the wrapper was created read-only (`_readonly`),
 * otherwise converts `val` to `unsigned int` (may raise OverflowError /
 * TypeError) and stores it into the wrapped struct's sliceCount field.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13233
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sliceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13234
 *     def slice_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sliceCount = val
 * 
 */
    /* Build ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13234, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13234, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13233
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sliceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13235
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sliceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 doubles as the error
     sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13235, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sliceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13231
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.slice_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13237
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython `__get__` descriptor-slot wrapper for the `instance_count`
 * property: casts `self` to the extension-type struct and forwards to the
 * implementation. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.instance_count.__get__:
 * boxes the C `unsigned int` instanceCount field of the wrapped struct as a
 * Python int. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13240
 *     def instance_count(self):
 *         """int: """
 *         return self._ptr[0].instanceCount             # <<<<<<<<<<<<<<
 * 
 *     @instance_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).instanceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13240, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13237
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.instance_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13242
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` descriptor-slot wrapper for the `instance_count`
 * property: casts `self` to the extension-type struct and delegates to the
 * implementation. Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.instance_count.__set__:
 * raises ValueError when the wrapper was created read-only (`_readonly`),
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped
 * struct's instanceCount field. Returns 0 on success, -1 on failure.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13244
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].instanceCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13245
 *     def instance_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].instanceCount = val
 * 
 */
    /* Build ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13245, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13245, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13244
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].instanceCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13246
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].instanceCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 doubles as the error
     sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13246, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).instanceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13242
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.instance_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13248
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython `__get__` descriptor-slot wrapper for the `multiprocessor_count`
 * property: casts `self` to the extension-type struct and forwards to the
 * implementation. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.multiprocessor_count.__get__:
 * boxes the C `unsigned int` multiprocessorCount field of the wrapped struct
 * as a Python int. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13251
 *     def multiprocessor_count(self):
 *         """int: """
 *         return self._ptr[0].multiprocessorCount             # <<<<<<<<<<<<<<
 * 
 *     @multiprocessor_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).multiprocessorCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13251, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13248
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.multiprocessor_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13253
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` descriptor-slot wrapper for the `multiprocessor_count`
 * property: casts `self` to the extension-type struct and delegates to the
 * implementation. Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.multiprocessor_count.__set__:
 * raises ValueError when the wrapper was created read-only (`_readonly`),
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped
 * struct's multiprocessorCount field. Returns 0 on success, -1 on failure.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13255
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13256
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].multiprocessorCount = val
 * 
 */
    /* Build ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13256, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13256, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13255
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13257
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 doubles as the error
     sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13257, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).multiprocessorCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13253
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.multiprocessor_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13259
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython `__get__` descriptor-slot wrapper for the
 * `shared_copy_engine_count` property: casts `self` to the extension-type
 * struct and forwards to the implementation. Returns a new reference, or
 * NULL with an exception set.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of
 * ComputeInstanceProfileInfo_v2.shared_copy_engine_count.__get__:
 * boxes the C `unsigned int` sharedCopyEngineCount field of the wrapped
 * struct as a Python int. Returns a new reference, or NULL with an
 * exception set.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13262
 *     def shared_copy_engine_count(self):
 *         """int: """
 *         return self._ptr[0].sharedCopyEngineCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_copy_engine_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedCopyEngineCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13262, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13259
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_copy_engine_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13264
 *         return self._ptr[0].sharedCopyEngineCount
 * 
 *     @shared_copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` descriptor-slot wrapper for the
 * `shared_copy_engine_count` property: casts `self` to the extension-type
 * struct and delegates to the implementation. Returns 0 on success,
 * -1 with an exception set on failure.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of
 * ComputeInstanceProfileInfo_v2.shared_copy_engine_count.__set__:
 * raises ValueError when the wrapper was created read-only (`_readonly`),
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped
 * struct's sharedCopyEngineCount field. Returns 0 on success, -1 on failure.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13266
 *     @shared_copy_engine_count.setter
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13267
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 */
    /* Build ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13267, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13267, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13266
 *     @shared_copy_engine_count.setter
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13268
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 doubles as the error
     sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13268, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedCopyEngineCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13264
 *         return self._ptr[0].sharedCopyEngineCount
 * 
 *     @shared_copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_copy_engine_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13270
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython `__get__` descriptor-slot wrapper for the `shared_decoder_count`
 * property: casts `self` to the extension-type struct and forwards to the
 * implementation. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.shared_decoder_count.__get__:
 * boxes the C `unsigned int` sharedDecoderCount field of the wrapped struct
 * as a Python int. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13273
 *     def shared_decoder_count(self):
 *         """int: """
 *         return self._ptr[0].sharedDecoderCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_decoder_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedDecoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13273, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13270
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_decoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13275
 *         return self._ptr[0].sharedDecoderCount
 * 
 *     @shared_decoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` descriptor-slot wrapper for the `shared_decoder_count`
 * property: casts `self` to the extension-type struct and delegates to the
 * implementation. Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.shared_decoder_count.__set__:
 * raises ValueError when the wrapper was created read-only (`_readonly`),
 * otherwise converts `val` to `unsigned int` and stores it into the wrapped
 * struct's sharedDecoderCount field. Returns 0 on success, -1 on failure.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13277
 *     @shared_decoder_count.setter
 *     def shared_decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13278
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedDecoderCount = val
 * 
 */
    /* Build ValueError(msg) via vectorcall and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13278, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13278, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13277
 *     @shared_decoder_count.setter
 *     def shared_decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13279
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox `val` to unsigned int; (unsigned int)-1 doubles as the error
     sentinel, disambiguated by PyErr_Occurred(). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13279, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedDecoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13275
 *         return self._ptr[0].sharedDecoderCount
 * 
 *     @shared_decoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_decoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13281
 *         self._ptr[0].sharedDecoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython `__get__` descriptor-slot wrapper for the `shared_encoder_count`
 * property: casts `self` to the extension-type struct and forwards to the
 * implementation. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated code — change the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.shared_encoder_count.__get__:
 * boxes the C `unsigned int` sharedEncoderCount field of the wrapped struct
 * as a Python int. Returns a new reference, or NULL with an exception set.
 * NOTE(review): Cython-generated — regenerate from the .pyx to change. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13284
 *     def shared_encoder_count(self):
 *         """int: """
 *         return self._ptr[0].sharedEncoderCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_encoder_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedEncoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13284, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13281
 *         self._ptr[0].sharedDecoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_encoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13286
 *         return self._ptr[0].sharedEncoderCount
 * 
 *     @shared_encoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot wrapper for shared_encoder_count: casts `self`
 * to the extension type and delegates to the implementation function.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this slot
 * function; `__Pyx_KwValues_VARARGS` is presumably a macro that ignores its
 * arguments — confirm against the Cython utility-code preamble. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property setter implementation for
 * ComputeInstanceProfileInfo_v2.shared_encoder_count: raises ValueError when
 * the wrapper is flagged read-only (`self._readonly`), otherwise converts
 * `val` to `unsigned int` and stores it into the wrapped NVML struct.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13288
 *     @shared_encoder_count.setter
 *     def shared_encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13289
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedEncoderCount = val
 * 
*/
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13289, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13289, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13288
 *     @shared_encoder_count.setter
 *     def shared_encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13290
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox `val` as unsigned int ((unsigned int)-1 + live exception signals failure). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13290, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedEncoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13286
 *         return self._ptr[0].sharedEncoderCount
 * 
 *     @shared_encoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_encoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13292
 *         self._ptr[0].sharedEncoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ slot wrapper for shared_jpeg_count: casts `self` to
 * the extension type and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property getter implementation for
 * ComputeInstanceProfileInfo_v2.shared_jpeg_count: reads the
 * `sharedJpegCount` field of the wrapped NVML struct (`self._ptr[0]`)
 * and boxes it as a Python int.
 * Returns a new reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13295
 *     def shared_jpeg_count(self):
 *         """int: """
 *         return self._ptr[0].sharedJpegCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_jpeg_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the C `unsigned int` field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedJpegCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13292
 *         self._ptr[0].sharedEncoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_jpeg_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13297
 *         return self._ptr[0].sharedJpegCount
 * 
 *     @shared_jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot wrapper for shared_jpeg_count: casts `self` to
 * the extension type and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property setter implementation for
 * ComputeInstanceProfileInfo_v2.shared_jpeg_count: raises ValueError when
 * the wrapper is flagged read-only (`self._readonly`), otherwise converts
 * `val` to `unsigned int` and stores it into the wrapped NVML struct.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13299
 *     @shared_jpeg_count.setter
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedJpegCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13300
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedJpegCount = val
 * 
*/
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13300, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13300, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13299
 *     @shared_jpeg_count.setter
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedJpegCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13301
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedJpegCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox `val` as unsigned int ((unsigned int)-1 + live exception signals failure). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13301, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedJpegCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13297
 *         return self._ptr[0].sharedJpegCount
 * 
 *     @shared_jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_jpeg_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13303
 *         self._ptr[0].sharedJpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ slot wrapper for shared_ofa_count: casts `self` to
 * the extension type and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property getter implementation for
 * ComputeInstanceProfileInfo_v2.shared_ofa_count: reads the
 * `sharedOfaCount` field of the wrapped NVML struct (`self._ptr[0]`)
 * and boxes it as a Python int.
 * Returns a new reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13306
 *     def shared_ofa_count(self):
 *         """int: """
 *         return self._ptr[0].sharedOfaCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_ofa_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the C `unsigned int` field as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedOfaCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13306, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13303
 *         self._ptr[0].sharedJpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_ofa_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13308
 *         return self._ptr[0].sharedOfaCount
 * 
 *     @shared_ofa_count.setter             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot wrapper for shared_ofa_count: casts `self` to
 * the extension type and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property setter implementation for
 * ComputeInstanceProfileInfo_v2.shared_ofa_count: raises ValueError when
 * the wrapper is flagged read-only (`self._readonly`), otherwise converts
 * `val` to `unsigned int` and stores it into the wrapped NVML struct.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13310
 *     @shared_ofa_count.setter
 *     def shared_ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedOfaCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13311
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedOfaCount = val
 * 
*/
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13311, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13311, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13310
 *     @shared_ofa_count.setter
 *     def shared_ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedOfaCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13312
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only")
 *         self._ptr[0].sharedOfaCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Unbox `val` as unsigned int ((unsigned int)-1 + live exception signals failure). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13312, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedOfaCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13308
 *         return self._ptr[0].sharedOfaCount
 * 
 *     @shared_ofa_count.setter             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.shared_ofa_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13314
 *         self._ptr[0].sharedOfaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
/* Python-level __get__ slot wrapper for `name`: casts `self` to the
 * extension type and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property getter implementation for ComputeInstanceProfileInfo_v2.name:
 * decodes the fixed-size `name` char array of the wrapped NVML struct as a
 * NUL-terminated UTF-8 string into a Python str.
 * NOTE(review): PyUnicode_FromString stops at the first NUL — assumes the
 * field is always NUL-terminated; the setter enforces len < 96 which keeps
 * at least one terminator byte when written through Python.
 * Returns a new reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13317
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)             # <<<<<<<<<<<<<<
 * 
 *     @name.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13314
 *         self._ptr[0].sharedOfaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13319
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot wrapper for `name`: casts `self` to the
 * extension type and delegates to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Property setter implementation for ComputeInstanceProfileInfo_v2.name:
 * rejects writes when the wrapper is read-only (ValueError), encodes `val`
 * to UTF-8 bytes, validates len < 96, and writes it into the fixed-size
 * `name[96]` field of the wrapped NVML struct.
 * Returns 0 on success, -1 with a Python exception set on error.
 *
 * FIX(review): the generated code copied a fixed 96 bytes out of `buf`
 * (`memcpy(dst, ptr, 96)`), reading past the end of bytes objects shorter
 * than 96 — a heap over-read. We now copy exactly len(buf)+1 bytes (CPython
 * bytes objects are always NUL-terminated) and zero-fill the remainder of
 * the field, so no stale/garbage bytes land in the struct either. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* Read-only guard: raise ValueError if the wrapper forbids writes. */
  if (unlikely(__pyx_v_self->_readonly)) {
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13322, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13322, __pyx_L1_error)
  }

  /* buf = val.encode() — must produce a bytes object (or None, rejected below). */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13323, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 13323, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* Length check: the encoded string plus its NUL terminator must fit in
   * the 96-byte field, i.e. len(buf) <= 95. */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 13324, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13324, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_name_m};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13325, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13325, __pyx_L1_error)
  }

  /* ptr = internal buffer of `buf` (borrowed; valid while `buf` is alive). */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 13326, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 13326, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* Copy only len(buf)+1 bytes (data + NUL terminator) instead of a fixed
   * 96 — the fixed-size copy over-read `buf` when it was shorter than 96.
   * Zero-fill the tail so the field holds no leftover bytes. __pyx_t_4
   * still holds len(buf), proven < 96 above. */
  memcpy((void *)(__pyx_v_self->_ptr[0]).name, (const void *)__pyx_v_ptr, (size_t)__pyx_t_4 + 1);
  if (((size_t)__pyx_t_4 + 1) < 96) {
    memset((char *)(__pyx_v_self->_ptr[0]).name + (__pyx_t_4 + 1), 0, 96 - ((size_t)__pyx_t_4 + 1));
  }

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13329
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method
 * ComputeInstanceProfileInfo_v2.from_data(data): parses exactly one
 * positional-or-keyword argument `data` and delegates to the implementation.
 * Returns a new reference, or NULL with a Python exception set on failure.
 *
 * FIX(review): the generated kwargs-length check read
 * `if (unlikely(__pyx_kwds_len) < 0)`. `unlikely(x)` expands to
 * `__builtin_expect(!!(x), 0)`, which yields 0 or 1, so `< 0` was always
 * false and the negative-count error path was dead. Corrected to
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_12from_data, "ComputeInstanceProfileInfo_v2.from_data(data)\n\nCreate an ComputeInstanceProfileInfo_v2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesization — `unlikely(x) < 0` was always false, making
       this error check dead; the comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13329, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13329, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 13329, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 13329, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13329, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13329, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.from_data(data).
 *
 * Looks up the module-global dtype object `compute_instance_profile_info_v2_dtype`
 * and delegates to the shared C helper __from_data(), passing the data array,
 * the dtype name string, the dtype object, and the extension type so the helper
 * can construct a wrapping instance.  Returns a new reference, or NULL with a
 * Python exception set (traceback annotated with the method's qualified name). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":13336
 *             data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v2_dtype` holding the data.
 *         """
 *         return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the dtype object from module globals; raises NameError-style lookup
   * failure if the module was not fully initialized. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_compute_instance_profile_info_v2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_compute_instance_profile_info_v2, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13329
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13338
 *         return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level wrapper for ComputeInstanceProfileInfo_v2.from_ptr(ptr, readonly=False, owner=None).
 *
 * Unpacks up to three positional arguments and/or keywords (ptr, readonly, owner),
 * converts `ptr` to intptr_t and `readonly` to a C truth value, defaults owner to
 * None, and dispatches to the C implementation.  On any unpacking error, all
 * collected argument references are released and NULL is returned with an
 * exception set.
 *
 * FIX (review): the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which expands to `__builtin_expect(!!(len), 0) < 0` — the !! collapses the
 * value to 0/1, so the comparison was always false and a negative (error)
 * return from __Pyx_NumKwargs_FASTCALL was never caught here.  The parenthesis
 * is moved inside: `unlikely(__pyx_kwds_len < 0)`, making the check live. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14from_ptr, "ComputeInstanceProfileInfo_v2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: check the *length* for a negative error code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13338, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13338, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13338, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13338, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 13338, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":13339
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Only `ptr` (index 0) is mandatory; report it if still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 13338, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13338, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13338, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13338, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts via PyLong_AsSsize_t into an intptr_t; the two
     * types are assumed interconvertible here — holds on common LP64/LLP64
     * platforms where both match pointer width. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13339, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13339, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 13338, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":13338
 *         return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v2.from_ptr(ptr, readonly, owner).
 *
 * Behavior (as visible below):
 *   - raises ValueError if ptr == 0;
 *   - allocates a new ComputeInstanceProfileInfo_v2 instance;
 *   - if owner is None: mallocs a fresh nvmlComputeInstanceProfileInfo_v2_t,
 *     raises MemoryError on allocation failure, memcpys the struct from `ptr`
 *     (the instance OWNS the copy, _owned = True);
 *   - otherwise: stores `ptr` directly and keeps a reference to `owner` so the
 *     underlying memory stays alive (_owned = False);
 *   - records the readonly flag and returns the new instance.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13347
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13348
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      /* Vectorcall ValueError("ptr must not be null (0)") and raise it. */
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13348, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13348, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13347
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)
*/
  }

  /* "cuda/bindings/_nvml.pyx":13349
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/
  /* Direct tp_new call — bypasses __init__, fields are set explicitly below. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13349, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13350
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13351
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
*/
    /* No owner: take an owned copy of the struct so the caller's memory can go away. */
    __pyx_v_obj->_ptr = ((nvmlComputeInstanceProfileInfo_v2_t *)malloc((sizeof(nvmlComputeInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":13352
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13353
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (allows shadowing), then
       * called via the generic fast-call path, handling a bound-method case. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13353, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13353, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 13353, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13352
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":13354
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* NOTE(review): `ptr` comes from Python and is trusted to point at a
     * readable struct of at least sizeof(nvmlComputeInstanceProfileInfo_v2_t)
     * bytes — an invalid address here is undefined behavior by contract. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlComputeInstanceProfileInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":13355
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13356
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13350
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":13358
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: wrap the caller's memory and keep `owner` alive. */
    __pyx_v_obj->_ptr = ((nvmlComputeInstanceProfileInfo_v2_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13359
 *         else:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":13360
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v2_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":13361
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":13362
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13338
 *         return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python-level wrapper for ComputeInstanceProfileInfo_v2.__reduce_cython__(self).
 * Rejects any positional or keyword arguments, then forwards `self` to the
 * implementation (which unconditionally raises TypeError: the type is not
 * picklable because it holds a raw C pointer). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16__reduce_cython__, "ComputeInstanceProfileInfo_v2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because the
 * wrapped `self._ptr` C pointer cannot be serialized; returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python-level wrapper for ComputeInstanceProfileInfo_v2.__setstate_cython__(self, __pyx_state).
 *
 * Unpacks exactly one required argument (`__pyx_state`, positional or keyword)
 * and forwards it with `self` to the implementation (which always raises
 * TypeError — the type is not picklable).
 *
 * FIX (review): the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which expands to `__builtin_expect(!!(len), 0) < 0` and is therefore always
 * false, so a negative (error) return from __Pyx_NumKwargs_FASTCALL was never
 * caught.  The parenthesis is moved inside: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_18__setstate_cython__, "ComputeInstanceProfileInfo_v2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: check the *length* for a negative error code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: always raises TypeError because the
 * wrapped `self._ptr` C pointer cannot be restored from pickled state; the
 * `__pyx_state` argument is intentionally unused.  Returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13365
 * 
 * 
 * cdef _get_compute_instance_profile_info_v3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceProfileInfo_v3_t pod = nvmlComputeInstanceProfileInfo_v3_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype describing the in-memory layout of
 * nvmlComputeInstanceProfileInfo_v3_t: field names, numpy formats, byte
 * offsets (computed by pointer arithmetic on a local instance), and the
 * total itemsize. Returns a new reference to the numpy.dtype object, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_profile_info_v3_dtype_offsets(void) {
  nvmlComputeInstanceProfileInfo_v3_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlComputeInstanceProfileInfo_v3_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  PyObject *__pyx_t_18 = NULL;
  size_t __pyx_t_19;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_compute_instance_profile_info_v3_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":13366
 * 
 * cdef _get_compute_instance_profile_info_v3_dtype_offsets():
 *     cdef nvmlComputeInstanceProfileInfo_v3_t pod = nvmlComputeInstanceProfileInfo_v3_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'formats': [_numpy.uint32, ...],
 */
  /* BUGFIX: __pyx_t_1 was previously copied into pod while still
   * uninitialized (indeterminate value — undefined behavior to copy in
   * C++14). Value-initialize it first, matching the
   * `nvmlComputeInstanceProfileInfo_v3_t()` call in the Cython source.
   * Only member *addresses* of pod are used below, but the copy itself
   * must not read indeterminate bytes. */
  __pyx_t_1 = nvmlComputeInstanceProfileInfo_v3_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":13367
 * cdef _get_compute_instance_profile_info_v3_dtype_offsets():
 *     cdef nvmlComputeInstanceProfileInfo_v3_t pod = nvmlComputeInstanceProfileInfo_v3_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 */
  /* Look up _numpy.dtype; held in __pyx_t_5 until the final call. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":13368
 *         'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name', 'capabilities'],             # <<<<<<<<<<<<<<
 */
  /* Dict spec for the dtype: 'names', 'formats', 'offsets', 'itemsize'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_slice_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_slice_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_instance_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_instance_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_multiprocessor_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_multiprocessor_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_copy_engine_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_copy_engine_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_shared_copy_engine_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_decoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_decoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_shared_decoder_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_encoder_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_encoder_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_shared_encoder_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_jpeg_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_jpeg_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_mstate_global->__pyx_n_u_shared_jpeg_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_shared_ofa_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_shared_ofa_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_mstate_global->__pyx_n_u_shared_ofa_count) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_mstate_global->__pyx_n_u_name) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_capabilities);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_capabilities);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_mstate_global->__pyx_n_u_capabilities) != (0)) __PYX_ERR(0, 13368, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 13368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13369
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.int8, _numpy.uint32],             # <<<<<<<<<<<<<<
 */
  /* 'formats': ten uint32 scalars, int8 for the char[] name field,
   * uint32 for capabilities. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 8, __pyx_t_15) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 9, __pyx_t_16) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 10, __pyx_t_17) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 11, __pyx_t_18) != (0)) __PYX_ERR(0, 13369, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  __pyx_t_15 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  __pyx_t_18 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 13368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* 'offsets': byte offset of each member, computed as the difference of
   * member address and struct base address (offsetof equivalent). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13371, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_18 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 13372, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_18);
  __pyx_t_17 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sliceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 13373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_17);
  __pyx_t_16 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.instanceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_15 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.multiprocessorCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 13375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedCopyEngineCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 13376, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedDecoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 13377, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedEncoderCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 13378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedJpegCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sharedOfaCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13380, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.name)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.capabilities)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* Assemble the 12 offsets into a list (same field order as 'names'). */
  __pyx_t_7 = PyList_New(12); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_18);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_18) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_17);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_17) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_16);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_16) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_15);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_15) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_14) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_12) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 8, __pyx_t_11) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 9, __pyx_t_10) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 10, __pyx_t_9) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 11, __pyx_t_8) != (0)) __PYX_ERR(0, 13370, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_18 = 0;
  __pyx_t_17 = 0;
  __pyx_t_16 = 0;
  __pyx_t_15 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 13368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* 'itemsize': total struct size so numpy honors trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlComputeInstanceProfileInfo_v3_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13384, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 13368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall protocol; if dtype is a
   * bound method, unpack it so self becomes the first positional arg. */
  __pyx_t_19 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_19 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_19, (2-__pyx_t_19) | (__pyx_t_19*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13367, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  /* Error path: release every temporary that may still hold a reference,
   * record the traceback, and return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_XDECREF(__pyx_t_18);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_compute_instance_profile_info_v3_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13401
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ComputeInstanceProfileInfo_v3.__init__.
 * Validates that the call passed no positional and no keyword arguments
 * (the Cython __init__ takes only self) and then delegates to the
 * implementation function. Returns 0 on success, -1 with an exception
 * set on bad arguments or failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unchecked PyTuple_GET_SIZE fast path is
   * only used when CYTHON_ASSUME_SAFE_SIZE is enabled. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) accepts zero extra arguments: reject any positional
   * or keyword arguments with a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v3.__init__.
 * Allocates a zero-initialized nvmlComputeInstanceProfileInfo_v3_t on the
 * heap via calloc and stores it in self._ptr; raises MemoryError if the
 * allocation fails. Marks the instance as owning the allocation
 * (_owned = True, freed in __dealloc__), with no external owner object
 * (_owner = None) and writable (_readonly = False).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":13402
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 */
  /* calloc zero-fills the struct, so all NVML fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlComputeInstanceProfileInfo_v3_t *)calloc(1, (sizeof(nvmlComputeInstanceProfileInfo_v3_t))));

  /* "cuda/bindings/_nvml.pyx":13403
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13404
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build the MemoryError instance by calling the (module-global)
     * MemoryError with the cached message string, using the vectorcall
     * protocol; the PyMethod_Check branch unpacks a bound method so
     * self is prepended as the first positional argument. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13404, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance_2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13404, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13404, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13403
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":13405
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (INCREF new before
   * DECREF old, per CPython refcounting convention). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":13406
 *             raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":13407
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":13401
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13409
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlComputeInstanceProfileInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper for ComputeInstanceProfileInfo_v3.__dealloc__ (the
 * tp_dealloc-style hook): simply forwards to the implementation which
 * frees self._ptr when owned. */
static void __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3__dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* BUGFIX: removed the `__pyx_kwvalues` declaration and its
   * `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)` assignment:
   * __pyx_args and __pyx_nargs are not parameters of this wrapper (its
   * only argument is self), so both identifiers were undeclared here —
   * a compile error — and the computed value was never used. */
  __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__ (_nvml.pyx:13409-13414): if this wrapper owns
   its heap-allocated nvmlComputeInstanceProfileInfo_v3_t (_owned) and _ptr is
   non-NULL, stash the pointer, NULL out _ptr first, then free() it. Clearing
   _ptr before free() leaves the object in a safe state for any re-entrant
   access during teardown. */
static void __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  nvmlComputeInstanceProfileInfo_v3_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlComputeInstanceProfileInfo_v3_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":13411
 *     def __dealloc__(self):
 *         cdef nvmlComputeInstanceProfileInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13412
 *         cdef nvmlComputeInstanceProfileInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":13413
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":13414
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13411
 *     def __dealloc__(self):
 *         cdef nvmlComputeInstanceProfileInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":13409
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlComputeInstanceProfileInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":13416
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Slot wrapper for ComputeInstanceProfileInfo_v3.__repr__: downcasts self and
   delegates to the implementation (…_4__repr__); returns its new reference,
   or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__ (_nvml.pyx:13417): builds the f-string
   "<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>"
   by formatting the module __name__, computing hex(id(self)), then joining
   five unicode segments. Returns a new str reference, or NULL with an
   exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":13417
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, ""), t_1 = str(hex(id(self))). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Five-part join: "<", formatted __name__, ".ComputeInstanceProfileInfo_v3
     object at ", hex string, ">". The second join argument precomputes the
     total length (1*2 literal brackets + 41 constant chars + dynamic parts). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ComputeInstanceProfileInfo_v3_o;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 41 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13416
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13419
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the `ptr` property: downcasts self and delegates to the
   implementation (…_3ptr___get__). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (_nvml.pyx:13422): returns the
   address held in self._ptr as a Python int (via intptr_t). NOTE(review):
   PyLong_FromSsize_t is used for an intptr_t value — on mainstream platforms
   Py_ssize_t and intptr_t are the same width, but this is an assumption of the
   generated code, not something visible here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13422
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13419
 *         return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13424
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for `_get_ptr` (cdef method, _nvml.pyx:13424-13425):
   returns the raw _ptr address as intptr_t with no Python-object boxing and
   no error path. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":13425
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13424
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13427
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Slot wrapper for __int__: downcasts self and delegates to the
   implementation (…_6__int__). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__ (_nvml.pyx:13428): same behavior as the `ptr`
   property getter — boxes the _ptr address as a Python int, so
   int(obj) == obj.ptr. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":13428
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13427
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13430
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceProfileInfo_v3 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v3):
*/

/* Python wrapper */
/* Slot wrapper for __eq__: downcasts self and passes `other` through
   untouched to the implementation (…_8__eq__). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__ (_nvml.pyx:13430-13435): returns False for
   non-ComputeInstanceProfileInfo_v3 operands, otherwise compares the two
   underlying structs byte-for-byte with memcmp.
   NOTE(review): memcmp dereferences both _ptr values; if either object's _ptr
   is NULL (e.g. never allocated) this is undefined behavior — a guard belongs
   in the .pyx source, not this generated file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":13432
 *     def __eq__(self, other):
 *         cdef ComputeInstanceProfileInfo_v3 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13433
 *         cdef ComputeInstanceProfileInfo_v3 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v3):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13432
 *     def __eq__(self, other):
 *         cdef ComputeInstanceProfileInfo_v3 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":13434
 *         if not isinstance(other, ComputeInstanceProfileInfo_v3):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0)
 * 
 */
  /* Re-check the cast for the typed local (accepts None per Cython rules). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3))))) __PYX_ERR(0, 13434, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":13435
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlComputeInstanceProfileInfo_v3_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13430
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceProfileInfo_v3 other_
 *         if not isinstance(other, ComputeInstanceProfileInfo_v3):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13437
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
*/

/* Python wrapper */
/* Slot wrapper for __setitem__: downcasts self and forwards key/val to the
   implementation (…_10__setitem__); returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__ (_nvml.pyx:13437-13447). Two paths:
   - obj[0] = ndarray: malloc a fresh struct, copy sizeof(struct) bytes from
     val.ctypes.data, take ownership (_owned=True), and mirror the array's
     writeable flag into _readonly;
   - anything else: fall back to setattr(self, key, val), so string keys hit
     the generated properties.
   NOTE(review): the ndarray path assigns self._ptr = malloc(...) without
   freeing a previously owned buffer — assigning obj[0] twice leaks the first
   allocation. The fix belongs in the .pyx source, not this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":13438
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up _numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13438, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 13438, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13439
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 */
    __pyx_v_self->_ptr = ((nvmlComputeInstanceProfileInfo_v3_t *)malloc((sizeof(nvmlComputeInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":13440
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13441
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (vectorcall), then
         raised with the fixed message string. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13441, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance_2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13441, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 13441, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13440
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":13442
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data → integer address → raw source pointer for memcpy.
       Assumes the array buffer holds at least sizeof(struct) bytes —
       not validated here; TODO confirm callers guarantee this. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13442, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13442, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13442, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlComputeInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":13443
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13444
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13445
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 13445, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":13438
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":13447
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 13447, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":13437
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13449
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter wrapper for the `version` property: downcasts self and delegates to
   the implementation (…_7version___get__). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter (_nvml.pyx:13452): boxes
   the struct's unsigned-int `version` field as a Python int.
   NOTE(review): dereferences self._ptr unconditionally — assumes _ptr is
   non-NULL (guaranteed by __init__'s calloc path, but not after external
   manipulation). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13452
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13449
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13454
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for the `version` property: downcasts self and forwards the
   new value to the implementation (…_7version_2__set__); returns 0 on
   success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for ComputeInstanceProfileInfo_v3.version: raises
 * ValueError when the instance is flagged read-only (self->_readonly),
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].version. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13456
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].version = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13457
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 *
*/
    /* Build and raise ValueError via vectorcall with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13457, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13457, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13456
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].version = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13458
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 *
 *     @property
*/
  /* (unsigned int)-1 is the converter's error sentinel; PyErr_Occurred()
   * distinguishes it from a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13458, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13454
 *         return self._ptr[0].version
 *
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13460
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated CPython getter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.id: casts `self` to the extension-type
 * struct and forwards to the typed implementation; returns a new reference
 * or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for ComputeInstanceProfileInfo_v3.id: boxes the C
 * `unsigned int` field self->_ptr[0].id as a Python int. Returns a new
 * reference, or NULL with an exception set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13463
 *     def id(self):
 *         """int: """
 *         return self._ptr[0].id             # <<<<<<<<<<<<<<
 *
 *     @id.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13463, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13460
 *         self._ptr[0].version = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13465
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated CPython setter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.id: casts arguments and forwards to the
 * typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for ComputeInstanceProfileInfo_v3.id: raises
 * ValueError when the instance is flagged read-only (self->_readonly),
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].id. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13467
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].id = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13468
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].id = val
 *
*/
    /* Build and raise ValueError via vectorcall with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13468, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13468, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13467
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].id = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13469
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].id = val             # <<<<<<<<<<<<<<
 *
 *     @property
*/
  /* (unsigned int)-1 is the converter's error sentinel; PyErr_Occurred()
   * distinguishes it from a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13469, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).id = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13465
 *         return self._ptr[0].id
 *
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13471
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated CPython getter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.slice_count: casts `self` to the
 * extension-type struct and forwards to the typed implementation; returns
 * a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for ComputeInstanceProfileInfo_v3.slice_count:
 * boxes the C `unsigned int` field self->_ptr[0].sliceCount as a Python
 * int. Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13474
 *     def slice_count(self):
 *         """int: """
 *         return self._ptr[0].sliceCount             # <<<<<<<<<<<<<<
 *
 *     @slice_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sliceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13474, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13471
 *         self._ptr[0].id = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def slice_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.slice_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13476
 *         return self._ptr[0].sliceCount
 * 
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated CPython setter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.slice_count: casts arguments and forwards
 * to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for ComputeInstanceProfileInfo_v3.slice_count:
 * raises ValueError when the instance is flagged read-only
 * (self->_readonly), otherwise converts `val` to unsigned int and stores
 * it into self->_ptr[0].sliceCount. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13478
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sliceCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13479
 *     def slice_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sliceCount = val
 *
*/
    /* Build and raise ValueError via vectorcall with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13479, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13479, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13478
 *     @slice_count.setter
 *     def slice_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sliceCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13480
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sliceCount = val             # <<<<<<<<<<<<<<
 *
 *     @property
*/
  /* (unsigned int)-1 is the converter's error sentinel; PyErr_Occurred()
   * distinguishes it from a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13480, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sliceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13476
 *         return self._ptr[0].sliceCount
 *
 *     @slice_count.setter             # <<<<<<<<<<<<<<
 *     def slice_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.slice_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13482
 *         self._ptr[0].sliceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated CPython getter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.instance_count: casts `self` to the
 * extension-type struct and forwards to the typed implementation; returns
 * a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for ComputeInstanceProfileInfo_v3.instance_count:
 * boxes the C `unsigned int` field self->_ptr[0].instanceCount as a Python
 * int. Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13485
 *     def instance_count(self):
 *         """int: """
 *         return self._ptr[0].instanceCount             # <<<<<<<<<<<<<<
 *
 *     @instance_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).instanceCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13482
 *         self._ptr[0].sliceCount = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def instance_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.instance_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13487
 *         return self._ptr[0].instanceCount
 * 
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated CPython setter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.instance_count: casts arguments and
 * forwards to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for ComputeInstanceProfileInfo_v3.instance_count:
 * raises ValueError when the instance is flagged read-only
 * (self->_readonly), otherwise converts `val` to unsigned int and stores
 * it into self->_ptr[0].instanceCount. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13489
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].instanceCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13490
 *     def instance_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].instanceCount = val
 *
*/
    /* Build and raise ValueError via vectorcall with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13490, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13490, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13489
 *     @instance_count.setter
 *     def instance_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].instanceCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13491
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].instanceCount = val             # <<<<<<<<<<<<<<
 *
 *     @property
*/
  /* (unsigned int)-1 is the converter's error sentinel; PyErr_Occurred()
   * distinguishes it from a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13491, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).instanceCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13487
 *         return self._ptr[0].instanceCount
 *
 *     @instance_count.setter             # <<<<<<<<<<<<<<
 *     def instance_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.instance_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13493
 *         self._ptr[0].instanceCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated CPython getter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.multiprocessor_count: casts `self` to the
 * extension-type struct and forwards to the typed implementation; returns
 * a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for
 * ComputeInstanceProfileInfo_v3.multiprocessor_count: boxes the C
 * `unsigned int` field self->_ptr[0].multiprocessorCount as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13496
 *     def multiprocessor_count(self):
 *         """int: """
 *         return self._ptr[0].multiprocessorCount             # <<<<<<<<<<<<<<
 *
 *     @multiprocessor_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).multiprocessorCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13493
 *         self._ptr[0].instanceCount = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.multiprocessor_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13498
 *         return self._ptr[0].multiprocessorCount
 * 
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated CPython setter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.multiprocessor_count: casts arguments and
 * forwards to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for
 * ComputeInstanceProfileInfo_v3.multiprocessor_count: raises ValueError
 * when the instance is flagged read-only (self->_readonly), otherwise
 * converts `val` to unsigned int and stores it into
 * self->_ptr[0].multiprocessorCount. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13500
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13501
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].multiprocessorCount = val
 *
*/
    /* Build and raise ValueError via vectorcall with the interned message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13501, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13501, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13500
 *     @multiprocessor_count.setter
 *     def multiprocessor_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13502
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].multiprocessorCount = val             # <<<<<<<<<<<<<<
 *
 *     @property
*/
  /* (unsigned int)-1 is the converter's error sentinel; PyErr_Occurred()
   * distinguishes it from a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13502, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).multiprocessorCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13498
 *         return self._ptr[0].multiprocessorCount
 *
 *     @multiprocessor_count.setter             # <<<<<<<<<<<<<<
 *     def multiprocessor_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.multiprocessor_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13504
 *         self._ptr[0].multiprocessorCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Generated CPython getter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.shared_copy_engine_count: casts `self` to
 * the extension-type struct and forwards to the typed implementation;
 * returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation for
 * ComputeInstanceProfileInfo_v3.shared_copy_engine_count: boxes the C
 * `unsigned int` field self->_ptr[0].sharedCopyEngineCount as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13507
 *     def shared_copy_engine_count(self):
 *         """int: """
 *         return self._ptr[0].sharedCopyEngineCount             # <<<<<<<<<<<<<<
 *
 *     @shared_copy_engine_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedCopyEngineCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13507, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13504
 *         self._ptr[0].multiprocessorCount = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_copy_engine_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13509
 *         return self._ptr[0].sharedCopyEngineCount
 * 
 *     @shared_copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Generated CPython setter-slot wrapper for
 * ComputeInstanceProfileInfo_v3.shared_copy_engine_count: casts arguments
 * and forwards to the typed implementation; returns 0 on success, -1 on
 * error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * the __Pyx_KwValues_VARARGS macro presumably ignores its arguments in
   * this build configuration -- confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13511
 *     @shared_copy_engine_count.setter
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13512
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13512, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13512, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13511
 *     @shared_copy_engine_count.setter
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13513
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedCopyEngineCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13513, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedCopyEngineCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13509
 *         return self._ptr[0].sharedCopyEngineCount
 * 
 *     @shared_copy_engine_count.setter             # <<<<<<<<<<<<<<
 *     def shared_copy_engine_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_copy_engine_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13515
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __get__ slot wrapper for the
 * shared_decoder_count property; casts self and delegates to the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): getter implementation: boxes the unsigned int field
 * self->_ptr[0].sharedDecoderCount as a new Python int reference;
 * returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13518
 *     def shared_decoder_count(self):
 *         """int: """
 *         return self._ptr[0].sharedDecoderCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_decoder_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedDecoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13515
 *         self._ptr[0].sharedCopyEngineCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_decoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13520
 *         return self._ptr[0].sharedDecoderCount
 * 
 *     @shared_decoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __set__ slot wrapper: casts self/val and
 * delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): setter implementation: raises ValueError when read-only,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].sharedDecoderCount. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13522
 *     @shared_decoder_count.setter
 *     def shared_decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13523
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedDecoderCount = val
 * 
*/
    /* Vectorcall ValueError(<interned message>) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13523, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13523, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13522
 *     @shared_decoder_count.setter
 *     def shared_decoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13524
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedDecoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13524, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedDecoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13520
 *         return self._ptr[0].sharedDecoderCount
 * 
 *     @shared_decoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_decoder_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_decoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13526
 *         self._ptr[0].sharedDecoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __get__ slot wrapper for the
 * shared_encoder_count property; delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): getter implementation: boxes the unsigned int field
 * self->_ptr[0].sharedEncoderCount as a new Python int reference;
 * returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13529
 *     def shared_encoder_count(self):
 *         """int: """
 *         return self._ptr[0].sharedEncoderCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_encoder_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedEncoderCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13529, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13526
 *         self._ptr[0].sharedDecoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_encoder_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13531
 *         return self._ptr[0].sharedEncoderCount
 * 
 *     @shared_encoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __set__ slot wrapper: casts self/val and
 * delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): setter implementation: raises ValueError when read-only,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].sharedEncoderCount. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13533
 *     @shared_encoder_count.setter
 *     def shared_encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13534
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedEncoderCount = val
 * 
*/
    /* Vectorcall ValueError(<interned message>) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13534, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13534, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13533
 *     @shared_encoder_count.setter
 *     def shared_encoder_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13535
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedEncoderCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13535, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedEncoderCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13531
 *         return self._ptr[0].sharedEncoderCount
 * 
 *     @shared_encoder_count.setter             # <<<<<<<<<<<<<<
 *     def shared_encoder_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_encoder_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13537
 *         self._ptr[0].sharedEncoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __get__ slot wrapper for the
 * shared_jpeg_count property; delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): getter implementation: boxes the unsigned int field
 * self->_ptr[0].sharedJpegCount as a new Python int reference;
 * returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13540
 *     def shared_jpeg_count(self):
 *         """int: """
 *         return self._ptr[0].sharedJpegCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_jpeg_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedJpegCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13537
 *         self._ptr[0].sharedEncoderCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_jpeg_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13542
 *         return self._ptr[0].sharedJpegCount
 * 
 *     @shared_jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __set__ slot wrapper: casts self/val and
 * delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): setter implementation: raises ValueError when read-only,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].sharedJpegCount. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13544
 *     @shared_jpeg_count.setter
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedJpegCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13545
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedJpegCount = val
 * 
*/
    /* Vectorcall ValueError(<interned message>) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13545, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13545, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13544
 *     @shared_jpeg_count.setter
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedJpegCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13546
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedJpegCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13546, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedJpegCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13542
 *         return self._ptr[0].sharedJpegCount
 * 
 *     @shared_jpeg_count.setter             # <<<<<<<<<<<<<<
 *     def shared_jpeg_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_jpeg_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13548
 *         self._ptr[0].sharedJpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self):
 *         """int: """
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __get__ slot wrapper for the
 * shared_ofa_count property; delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): getter implementation: boxes the unsigned int field
 * self->_ptr[0].sharedOfaCount as a new Python int reference;
 * returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13551
 *     def shared_ofa_count(self):
 *         """int: """
 *         return self._ptr[0].sharedOfaCount             # <<<<<<<<<<<<<<
 * 
 *     @shared_ofa_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).sharedOfaCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13551, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13548
 *         self._ptr[0].sharedJpegCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_ofa_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13553
 *         return self._ptr[0].sharedOfaCount
 * 
 *     @shared_ofa_count.setter             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* NOTE(review): Cython-generated __set__ slot wrapper: casts self/val and
 * delegates to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): setter implementation: raises ValueError when read-only,
 * otherwise converts `val` to unsigned int and stores it into
 * self->_ptr[0].sharedOfaCount. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13555
 *     @shared_ofa_count.setter
 *     def shared_ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedOfaCount = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13556
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sharedOfaCount = val
 * 
*/
    /* Vectorcall ValueError(<interned message>) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13556, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13556, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13555
 *     @shared_ofa_count.setter
 *     def shared_ofa_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedOfaCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":13557
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].sharedOfaCount = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13557, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sharedOfaCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13553
 *         return self._ptr[0].sharedOfaCount
 * 
 *     @shared_ofa_count.setter             # <<<<<<<<<<<<<<
 *     def shared_ofa_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.shared_ofa_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13559
 *         self._ptr[0].sharedOfaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `name` property getter: decodes the fixed-size
 * char[96] struct field into a Python str via PyUnicode_FromString.
 * NOTE(review): PyUnicode_FromString requires the field to be
 * NUL-terminated; this relies on the setter/initializer guaranteeing a
 * terminator within the 96 bytes. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13562
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)             # <<<<<<<<<<<<<<
 * 
 *     @name.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13562, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13559
 *         self._ptr[0].sharedOfaCount = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def name(self):
 *         """~_numpy.int8: (array of length 96)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13564
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter thunk for ComputeInstanceProfileInfo_v3.name: casts the receiver
 * and forwards (self, val) to the implementation. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `name` property setter: rejects writes on
 * read-only instances, encodes `val` to bytes, validates it fits the
 * fixed char[96] struct field (max 95 chars + NUL), and copies it in.
 * Returns 0 on success, -1 with an exception set on failure.
 *
 * BUGFIX: the original code always copied a fixed 96 bytes from the
 * encoded bytes object (`memcpy(..., 96)`), reading past the end of the
 * bytes buffer whenever len(buf) < 95 — a heap over-read that also left
 * nondeterministic garbage after the terminator. We now zero-fill the
 * destination and copy only the validated len(buf) bytes; the field is
 * guaranteed NUL-terminated because len(buf) < 96 was enforced above.
 * NOTE(review): this file is generated by Cython; the same fix should be
 * applied to the generator template / _nvml.pyx so it survives
 * regeneration. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13566
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13567
 *     def name(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13567, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13567, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13566
 *     @name.setter
 *     def name(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":13568
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13568, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 13568, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":13569
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 13569, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13569, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 96);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":13570
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_name_m};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13570, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13570, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13569
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 96:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":13571
 *         if len(buf) >= 96:
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 13571, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 13571, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":13572
 *             raise ValueError("String too long for field name, max length is 95")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* BUGFIX: copy only len(buf) bytes (see function header); __pyx_t_4
   * still holds the validated length from the check above. Zero-fill
   * first so the remainder of the 96-byte field is deterministic and the
   * string is NUL-terminated. */
  (void)(memset(((void *)(__pyx_v_self->_ptr[0]).name), 0, 96));
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).name), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4)));

  /* "cuda/bindings/_nvml.pyx":13564
 *         return cpython.PyUnicode_FromString(self._ptr[0].name)
 * 
 *     @name.setter             # <<<<<<<<<<<<<<
 *     def name(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13574
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def capabilities(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter thunk for ComputeInstanceProfileInfo_v3.capabilities: casts the
 * generic PyObject* receiver to the extension-type struct and forwards to
 * the implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `capabilities` property getter: boxes the
 * unsigned int struct field into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13577
 *     def capabilities(self):
 *         """int: """
 *         return self._ptr[0].capabilities             # <<<<<<<<<<<<<<
 * 
 *     @capabilities.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).capabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13574
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def capabilities(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.capabilities.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13579
 *         return self._ptr[0].capabilities
 * 
 *     @capabilities.setter             # <<<<<<<<<<<<<<
 *     def capabilities(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter thunk for ComputeInstanceProfileInfo_v3.capabilities: casts the
 * receiver and forwards (self, val) to the implementation. Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `capabilities` property setter: rejects writes on
 * read-only instances (raises ValueError), otherwise converts `val` to
 * unsigned int (OverflowError/TypeError propagate from the conversion)
 * and stores it in the wrapped struct. Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13581
 *     @capabilities.setter
 *     def capabilities(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].capabilities = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13582
 *     def capabilities(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].capabilities = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceProfileInfo_2};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13582, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13582, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13581
 *     @capabilities.setter
 *     def capabilities(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].capabilities = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13583
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only")
 *         self._ptr[0].capabilities = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13583, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).capabilities = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13579
 *         return self._ptr[0].capabilities
 * 
 *     @capabilities.setter             # <<<<<<<<<<<<<<
 *     def capabilities(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.capabilities.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13585
 *         self._ptr[0].capabilities = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * ComputeInstanceProfileInfo_v3.from_data(data): accepts exactly one
 * positional-or-keyword argument `data` (via the fastcall or tuple
 * calling convention, selected at compile time), then forwards it to the
 * implementation function. Returns NULL with an exception set on bad
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12from_data, "ComputeInstanceProfileInfo_v3.from_data(data)\n\nCreate an ComputeInstanceProfileInfo_v3 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v3_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` normalizes the value
     * before the comparison, so this check can never fire; generated by
     * Cython as-is — confirm against the upstream Cython template before
     * changing. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13585, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13585, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 13585, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 13585, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13585, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13585, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of from_data(data): delegates to the module-level
 * __from_data helper, passing the dtype name, the module-global dtype
 * object looked up at call time, and the ComputeInstanceProfileInfo_v3
 * type object. Returns the new wrapper instance or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":13592
 *             data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v3_dtype` holding the data.
 *         """
 *         return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_compute_instance_profile_info_v3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13592, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_compute_instance_profile_info_v3, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13592, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13585
 *         self._ptr[0].capabilities = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13594
 *         return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * ComputeInstanceProfileInfo_v3.from_ptr(ptr, readonly=False, owner=None):
 * parses 1-3 positional-or-keyword arguments, converts `ptr` to intptr_t
 * and `readonly` to a C bool, defaults `owner` to None, then forwards to
 * the implementation. Returns NULL with an exception set on bad
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14from_ptr, "ComputeInstanceProfileInfo_v3.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` normalizes the value
     * before the comparison, so this check can never fire; generated by
     * Cython as-is — confirm against the upstream Cython template before
     * changing. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13594, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional slots, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13594, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13594, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13594, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 13594, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":13595
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 13594, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13594, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13594, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13594, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed PyObject slots to C values; `readonly` defaults to 0. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13595, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13595, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 13594, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":13594
 *         return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of
 * ComputeInstanceProfileInfo_v3.from_ptr(ptr, readonly=False, owner=None).
 *
 * Wraps an existing nvmlComputeInstanceProfileInfo_v3_t pointer:
 *   - raises ValueError when ptr == 0;
 *   - owner is None: copies the struct at `ptr` into freshly malloc'd storage
 *     (obj._owned = True; MemoryError raised if malloc fails);
 *   - owner is not None: aliases `ptr` directly and holds a reference to
 *     `owner` to keep the underlying memory alive (obj._owned = False).
 * Returns a new reference, or NULL with an exception set.
 * Do not edit by hand; regenerate from cuda/bindings/_nvml.pyx instead. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13603
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13604
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13604, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13604, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13603
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)
 */
  }

  /* "cuda/bindings/_nvml.pyx":13605
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 */
  /* tp_new bypasses __init__, so no struct is allocated yet at this point. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13605, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13606
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13607
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 */
    /* Owning path: deep-copy the caller's struct so its lifetime is ours. */
    __pyx_v_obj->_ptr = ((nvmlComputeInstanceProfileInfo_v3_t *)malloc((sizeof(nvmlComputeInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":13608
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13609
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (may be shadowed), hence
       * the GetModuleGlobalName + possible bound-method unpacking below. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13609, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance_2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13609, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 13609, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13608
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":13610
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlComputeInstanceProfileInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":13611
 *                 raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13612
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13606
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":13614
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Non-owning path: alias the pointer and keep `owner` alive via a reference. */
    __pyx_v_obj->_ptr = ((nvmlComputeInstanceProfileInfo_v3_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13615
 *         else:
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":13616
 *             obj._ptr = <nvmlComputeInstanceProfileInfo_v3_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":13617
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":13618
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13594
 *         return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for
 * ComputeInstanceProfileInfo_v3.__reduce_cython__(self).
 * Validates that no positional or keyword arguments were passed, then
 * delegates to the implementation function below.  Generated code: do not
 * edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16__reduce_cython__, "ComputeInstanceProfileInfo_v3.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the non-fastcall build, derive the positional-argument count from the
   * args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v3.__reduce_cython__.
 * Unconditionally raises TypeError: objects holding a raw _ptr cannot be
 * pickled.  Always returns NULL with the exception set.  Generated code. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Cython-generated Python-level wrapper for
 * ComputeInstanceProfileInfo_v3.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one positional-or-keyword argument (__pyx_state) and
 * delegates to the implementation function.
 *
 * Fix: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`,
 * which is dead code because unlikely(x) expands to __builtin_expect(!!(x), 0)
 * and therefore only yields 0 or 1 — never negative.  The paren now encloses
 * the comparison, matching every sibling wrapper in this file, so a negative
 * return from __Pyx_NumKwargs_FASTCALL propagates as an error again. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_18__setstate_cython__, "ComputeInstanceProfileInfo_v3.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison must be inside unlikely() for the check to be live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceProfileInfo_v3.__setstate_cython__.
 * Unconditionally raises TypeError (the __pyx_state argument is ignored):
 * objects holding a raw _ptr cannot be unpickled.  Always returns NULL with
 * the exception set.  Generated code. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceProfileInfo_v3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13621
 * 
 * 
 * cdef _get_gpm_support_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpmSupport_t pod = nvmlGpmSupport_t()
 *     return _numpy.dtype({
*/

/* Cython-generated helper building a numpy structured dtype that mirrors the
 * C layout of nvmlGpmSupport_t: field names, uint32 formats, byte offsets
 * computed from a stack instance `pod`, and itemsize = sizeof(nvmlGpmSupport_t).
 * Returns a new reference to the dtype, or 0 with an exception set.
 *
 * Fix: the temporary __pyx_t_1 was copied into __pyx_v_pod without ever being
 * assigned, copying an indeterminate value (undefined behavior).  The
 * value-initialization `__pyx_t_1 = nvmlGpmSupport_t();` that Cython emits for
 * `pod = nvmlGpmSupport_t()` (this file is compiled as C++) is restored. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpm_support_dtype_offsets(void) {
  nvmlGpmSupport_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpmSupport_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpm_support_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":13622
 * 
 * cdef _get_gpm_support_dtype_offsets():
 *     cdef nvmlGpmSupport_t pod = nvmlGpmSupport_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'is_supported_device'],
 */
  /* Value-initialize the temporary before copying it into pod (was missing). */
  __pyx_t_1 = nvmlGpmSupport_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":13623
 * cdef _get_gpm_support_dtype_offsets():
 *     cdef nvmlGpmSupport_t pod = nvmlGpmSupport_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'is_supported_device'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":13624
 *     cdef nvmlGpmSupport_t pod = nvmlGpmSupport_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'is_supported_device'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13624, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13624, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 13624, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_supported_device);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_supported_device);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_is_supported_device) != (0)) __PYX_ERR(0, 13624, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 13624, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13625
 *     return _numpy.dtype({
 *         'names': ['version', 'is_supported_device'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13625, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13625, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 13624, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13627
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isSupportedDevice)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":13628
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isSupportedDevice)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpmSupport_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isSupportedDevice)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":13626
 *         'names': ['version', 'is_supported_device'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isSupportedDevice)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13626, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13626, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13626, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 13624, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":13630
 *             (<intptr_t>&(pod.isSupportedDevice)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpmSupport_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpmSupport_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13630, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 13624, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via vectorcall, unpacking bound methods. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13623, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13621
 * 
 * 
 * cdef _get_gpm_support_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpmSupport_t pod = nvmlGpmSupport_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpm_support_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13647
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpmSupport_t *>calloc(1, sizeof(nvmlGpmSupport_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Cython-generated tp_init wrapper for GpmSupport.__init__(self).
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation below.  Returns 0 on success, -1 on error.  Generated
 * code: do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpmSupport.__init__ implementation: zero-allocates the wrapped
   nvmlGpmSupport_t struct and marks this Python object as its owner
   (_owned = True), with no external owner (_owner = None) and writable
   contents (_readonly = False). Raises MemoryError if calloc fails.
   Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":13648
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpmSupport_t *>calloc(1, sizeof(nvmlGpmSupport_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpmSupport")
 */
  /* calloc zero-initializes the struct's fields. */
  __pyx_v_self->_ptr = ((nvmlGpmSupport_t *)calloc(1, (sizeof(nvmlGpmSupport_t))));

  /* "cuda/bindings/_nvml.pyx":13649
 *     def __init__(self):
 *         self._ptr = <nvmlGpmSupport_t *>calloc(1, sizeof(nvmlGpmSupport_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpmSupport")
 *             self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13650
 *         self._ptr = <nvmlGpmSupport_t *>calloc(1, sizeof(nvmlGpmSupport_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpmSupport")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in the module globals, call it with the message,
       and raise the resulting instance. The CYTHON_UNPACK_METHODS branch
       is the standard generated fast path for bound-method calls. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13650, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpmSupport};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13650, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13650, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13649
 *     def __init__(self):
 *         self._ptr = <nvmlGpmSupport_t *>calloc(1, sizeof(nvmlGpmSupport_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpmSupport")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":13651
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpmSupport")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the current _owner reference with None (refcount-balanced). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":13652
 *             raise MemoryError("Error allocating GpmSupport")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True: __dealloc__ will free() _ptr. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":13653
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":13647
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpmSupport_t *>calloc(1, sizeof(nvmlGpmSupport_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13655
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpmSupport_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for GpmSupport.__dealloc__, invoked from
   tp_dealloc; delegates to the implementation below. */
static void __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
     __Pyx_KwValues_VARARGS presumably expands without evaluating its
     arguments in this configuration — confirm against the Cython 3.2
     utility code if this file fails to compile in another mode. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* GpmSupport.__dealloc__ implementation: frees the underlying
   nvmlGpmSupport_t buffer, but only if this object owns it (_owned)
   and the pointer is non-NULL. Borrowed pointers are left alone. */
static void __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  nvmlGpmSupport_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGpmSupport_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":13657
 *     def __dealloc__(self):
 *         cdef nvmlGpmSupport_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13658
 *         cdef nvmlGpmSupport_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":13659
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the member before freeing so no dangling pointer remains. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":13660
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13657
 *     def __dealloc__(self):
 *         cdef nvmlGpmSupport_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":13655
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpmSupport_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":13662
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpmSupport object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for GpmSupport.__repr__; delegates to the
   implementation below. Returns a new unicode reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpmSupport.__repr__ implementation: builds
   f"<{__name__}.GpmSupport object at {hex(id(self))}>" by formatting
   the module name and hex(id(self)) and joining five unicode pieces. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":13663
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpmSupport object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) formatted with empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and interpolated pieces; the length argument is a
     precomputed hint from the literal segments plus both dynamic lengths. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpmSupport_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 22 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13662
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpmSupport object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13665
 *         return f"<{__name__}.GpmSupport object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the GpmSupport.ptr property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpmSupport.ptr getter: returns the raw struct address as a Python int.
   Note: converts via PyLong_FromSsize_t, which assumes intptr_t and
   Py_ssize_t have the same width — true on platforms CPython supports. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13668
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13668, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13665
 *         return f"<{__name__}.GpmSupport object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13670
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) accessor: returns the raw nvmlGpmSupport_t pointer as an
   intptr_t, without entering the Python object protocol. Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_10GpmSupport__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":13671
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13670
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13673
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for GpmSupport.__int__; delegates below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpmSupport.__int__ implementation: int(obj) yields the raw struct
   address, mirroring the `ptr` property getter above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":13674
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13674, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13673
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13676
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpmSupport other_
 *         if not isinstance(other, GpmSupport):
*/

/* Python wrapper */
/* Python-level wrapper for GpmSupport.__eq__; delegates below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpmSupport.__eq__ implementation: returns False for non-GpmSupport
   operands; otherwise compares the two wrapped structs byte-for-byte
   with memcmp. NOTE(review): no NULL check on either _ptr — an object
   created via __new__ without __init__ would pass a NULL pointer to
   memcmp (undefined behavior); confirm that cannot happen from Python. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":13678
 *     def __eq__(self, other):
 *         cdef GpmSupport other_
 *         if not isinstance(other, GpmSupport):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13679
 *         cdef GpmSupport other_
 *         if not isinstance(other, GpmSupport):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpmSupport_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13678
 *     def __eq__(self, other):
 *         cdef GpmSupport other_
 *         if not isinstance(other, GpmSupport):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":13680
 *         if not isinstance(other, GpmSupport):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpmSupport_t)) == 0)
 * 
 */
  /* Downcast is safe here: the isinstance check above already passed
     (the None branch in the type test is unreachable in practice). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport))))) __PYX_ERR(0, 13680, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":13681
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpmSupport_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpmSupport_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13676
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpmSupport other_
 *         if not isinstance(other, GpmSupport):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13683
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpmSupport_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
*/

/* Python wrapper */
/* Python-level wrapper for GpmSupport.__setitem__; delegates below.
   Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpmSupport.__setitem__ implementation.
 *
 *   obj[0] = ndarray  -> allocate a fresh nvmlGpmSupport_t, memcpy the
 *                        array's buffer into it, and take ownership;
 *                        _readonly mirrors (not ndarray.flags.writeable).
 *   obj[key] = val    -> any other key falls back to setattr(self, key, val).
 *
 * Returns 0 on success, -1 with a Python exception set on error.
 *
 * BUGFIX(review): the generated code overwrote self->_ptr with a new
 * malloc() without releasing the buffer calloc'd in __init__, leaking
 * sizeof(nvmlGpmSupport_t) bytes on every `obj[0] = array` assignment.
 * We now free the previously owned buffer first (guarded by _owned so a
 * borrowed pointer is never freed). The root-cause fix belongs in
 * cuda/bindings/_nvml.pyx line 13685; keep the two in sync on regen. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* if key == 0 and isinstance(val, _numpy.ndarray): (short-circuit `and`) */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13684, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 13684, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* Release any buffer this object already owns before replacing it;
       otherwise the calloc'd buffer from __init__ leaks (free(NULL) is
       a no-op, so no extra NULL guard is needed). */
    if (__pyx_v_self->_owned) {
      free(__pyx_v_self->_ptr);
    }

    /* self._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t)) */
    __pyx_v_self->_ptr = ((nvmlGpmSupport_t *)malloc((sizeof(nvmlGpmSupport_t))));

    /* if self._ptr == NULL: raise MemoryError("Error allocating GpmSupport") */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13687, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpmSupport};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13687, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 13687, __pyx_L1_error)
    }

    /* memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data,
       sizeof(nvmlGpmSupport_t)) — copies the ndarray's backing buffer.
       NOTE(review): assumes val.ctypes.data points at at least
       sizeof(nvmlGpmSupport_t) contiguous bytes; the caller must pass an
       appropriately sized/contiguous array. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13688, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13688, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13688, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpmSupport_t))));

    /* self._owner = None (refcount-balanced member replacement) */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* self._owned = True: __dealloc__ will free() the new buffer. */
    __pyx_v_self->_owned = 1;

    /* self._readonly = not val.flags.writeable */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13691, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13691, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 13691, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    goto __pyx_L3;
  }

  /* else: setattr(self, key, val) */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 13693, __pyx_L1_error)
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13695
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: Set to NVML_GPM_SUPPORT_VERSION."""
*/

/* Python wrapper */
/* Python-level wrapper for the GpmSupport.version property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.version.__get__.
   Reads the `version` field of the wrapped nvmlGpmSupport_t struct
   (self._ptr[0].version, an unsigned int) and returns it boxed as a
   Python int; returns NULL with an exception set on boxing failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13698
 *     def version(self):
 *         """int: IN: Set to NVML_GPM_SUPPORT_VERSION."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13695
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: Set to NVML_GPM_SUPPORT_VERSION."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13700
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper for GpmSupport.version.__set__.
   Casts self to the concrete GpmSupport struct and forwards self/val to
   the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.version.__set__.
   Raises ValueError if the instance is read-only (self._readonly),
   otherwise converts `val` to a C unsigned int and stores it into the
   wrapped struct's `version` field. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13702
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpmSupport instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13703
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpmSupport instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpmSupport_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13703, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13703, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13702
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpmSupport instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13704
 *         if self._readonly:
 *             raise ValueError("This GpmSupport instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python value to unsigned int; OverflowError/TypeError propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13704, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13700
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13706
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_supported_device(self):
 *         """int: OUT: Indicates device support."""
*/

/* Python wrapper for GpmSupport.is_supported_device.__get__.
   Casts self to the concrete GpmSupport struct and forwards to the typed
   implementation; no arguments are parsed here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.is_supported_device.__get__.
   Reads the `isSupportedDevice` field of the wrapped nvmlGpmSupport_t
   struct (unsigned int) and returns it boxed as a Python int; returns
   NULL with an exception set on boxing failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13709
 *     def is_supported_device(self):
 *         """int: OUT: Indicates device support."""
 *         return self._ptr[0].isSupportedDevice             # <<<<<<<<<<<<<<
 * 
 *     @is_supported_device.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int field as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isSupportedDevice); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13709, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13706
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_supported_device(self):
 *         """int: OUT: Indicates device support."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.is_supported_device.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13711
 *         return self._ptr[0].isSupportedDevice
 * 
 *     @is_supported_device.setter             # <<<<<<<<<<<<<<
 *     def is_supported_device(self, val):
 *         if self._readonly:
*/

/* Python wrapper for GpmSupport.is_supported_device.__set__.
   Casts self to the concrete GpmSupport struct and forwards self/val to
   the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.is_supported_device.__set__.
   Raises ValueError if the instance is read-only (self._readonly),
   otherwise converts `val` to a C unsigned int and stores it into the
   wrapped struct's `isSupportedDevice` field. Returns 0 on success,
   -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13713
 *     @is_supported_device.setter
 *     def is_supported_device(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpmSupport instance is read-only")
 *         self._ptr[0].isSupportedDevice = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13714
 *     def is_supported_device(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpmSupport instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isSupportedDevice = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpmSupport_instance_is_read};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13714, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13714, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13713
 *     @is_supported_device.setter
 *     def is_supported_device(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpmSupport instance is read-only")
 *         self._ptr[0].isSupportedDevice = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13715
 *         if self._readonly:
 *             raise ValueError("This GpmSupport instance is read-only")
 *         self._ptr[0].isSupportedDevice = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python value to unsigned int; OverflowError/TypeError propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13715, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isSupportedDevice = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13711
 *         return self._ptr[0].isSupportedDevice
 * 
 *     @is_supported_device.setter             # <<<<<<<<<<<<<<
 *     def is_supported_device(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.is_supported_device.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13717
 *         self._ptr[0].isSupportedDevice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpmSupport instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method GpmSupport.from_data(data).
   Parses the single positional-or-keyword argument "data" (FASTCALL or
   tuple calling convention depending on CYTHON_METH_FASTCALL) and
   forwards it to the implementation function.

   Fix: the keyword-count error check previously read
       if (unlikely(__pyx_kwds_len) < 0) ...
   which wraps only the value in unlikely(). On GCC/Clang, unlikely(x)
   expands to __builtin_expect(!!(x), 0), whose result is 0 or 1 and can
   never be negative, so a negative (error) count returned by
   __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis is
   moved so the whole comparison is inside unlikely(), matching the
   correct form used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_12from_data, "GpmSupport.from_data(data)\n\nCreate an GpmSupport instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpm_support_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); `unlikely(x) < 0` is
       always false when unlikely() is __builtin_expect(!!(x), 0). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13717, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13717, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 13717, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 13717, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13717, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13717, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.from_data(data).
   Looks up the module-level `gpm_support_dtype` object and delegates to
   the shared helper __from_data(), which wraps the given NumPy array in
   a new GpmSupport instance. Returns the new instance or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":13724
 *             data (_numpy.ndarray): a single-element array of dtype `gpm_support_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpm_support_dtype", gpm_support_dtype, GpmSupport)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the module-global dtype object before delegating. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpm_support_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpm_support_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13717
 *         self._ptr[0].isSupportedDevice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpmSupport instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13726
 *         return __from_data(data, "gpm_support_dtype", gpm_support_dtype, GpmSupport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpmSupport instance wrapping the given pointer.
*/

/* Python wrapper for the static method
   GpmSupport.from_ptr(intptr_t ptr, bint readonly=False, object owner=None).
   Parses up to three positional-or-keyword arguments (defaults:
   readonly=False, owner=None) and forwards them to the implementation.

   Fix: the keyword-count error check previously read
       if (unlikely(__pyx_kwds_len) < 0) ...
   which wraps only the value in unlikely(). On GCC/Clang, unlikely(x)
   expands to __builtin_expect(!!(x), 0), whose result is 0 or 1 and can
   never be negative, so a negative (error) count returned by
   __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis is
   moved so the whole comparison is inside unlikely(), matching the
   correct form used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_14from_ptr, "GpmSupport.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpmSupport instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(); `unlikely(x) < 0` is
       always false when unlikely() is __builtin_expect(!!(x), 0). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13726, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 13726, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":13727
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpmSupport instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 13726, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13726, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13726, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed Python values to C: ptr -> intptr_t, readonly -> bint. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13727, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13727, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 13726, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":13726
 *         return __from_data(data, "gpm_support_dtype", gpm_support_dtype, GpmSupport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpmSupport instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.from_ptr(ptr, readonly=False, owner=None).
   Raises ValueError for a null pointer. When owner is None, the struct
   data at `ptr` is copied into a freshly malloc'ed nvmlGpmSupport_t that
   the new instance owns (_owned = True); otherwise the instance aliases
   `ptr` directly and keeps a reference to `owner` so the memory outlives
   the wrapper (_owned = False). Returns the new GpmSupport instance, or
   NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13735
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13736
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13736, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13736, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13735
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)
 */
  }

  /* "cuda/bindings/_nvml.pyx":13737
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
 */
  /* Allocate the instance via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpmSupport(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13737, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13738
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13739
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)
 *         if owner is None:
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpmSupport")
 */
    /* No owner: take a private copy of the struct so the wrapper owns it. */
    __pyx_v_obj->_ptr = ((nvmlGpmSupport_t *)malloc((sizeof(nvmlGpmSupport_t))));

    /* "cuda/bindings/_nvml.pyx":13740
 *         if owner is None:
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpmSupport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpmSupport_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13741
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpmSupport")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpmSupport_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (may be shadowed),
         then raised via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13741, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpmSupport};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13741, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 13741, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13740
 *         if owner is None:
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpmSupport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpmSupport_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":13742
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpmSupport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpmSupport_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpmSupport_t))));

    /* "cuda/bindings/_nvml.pyx":13743
 *                 raise MemoryError("Error allocating GpmSupport")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpmSupport_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13744
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpmSupport_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpmSupport_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13738
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpmSupport obj = GpmSupport.__new__(GpmSupport)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpmSupport_t *>malloc(sizeof(nvmlGpmSupport_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":13746
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpmSupport_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: alias the caller's memory and hold a reference to owner. */
    __pyx_v_obj->_ptr = ((nvmlGpmSupport_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13747
 *         else:
 *             obj._ptr = <nvmlGpmSupport_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":13748
 *             obj._ptr = <nvmlGpmSupport_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":13749
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":13750
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13726
 *         return __from_data(data, "gpm_support_dtype", gpm_support_dtype, GpmSupport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpmSupport instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython entry point for GpmSupport.__reduce_cython__.
 * Validates that the call carries no positional and no keyword arguments,
 * then dispatches to the __pyx_pf_..._16__reduce_cython__ implementation.
 * Signature is fastcall or tuple-based depending on CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_16__reduce_cython__, "GpmSupport.__reduce_cython__(self)");
/* Method-table entry binding the wrapper and its docstring under the name
 * "__reduce_cython__". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In tuple-calling mode the positional count must be recovered from the
   * args tuple; fastcall mode already receives it as a parameter. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject positionals
   * and keywords with the standard TypeError helpers. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.__reduce_cython__.
 * Unconditionally raises TypeError: GpmSupport wraps a raw C pointer
 * (self._ptr) and therefore cannot be pickled. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message string, then jump to the
   * error path; there is no success path in this function. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for GpmSupport.__setstate_cython__(self, __pyx_state).
 * Parses exactly one argument (positional or keyword "__pyx_state"), then
 * dispatches to the __pyx_pf_..._18__setstate_cython__ implementation.
 *
 * Fix: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * Since unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is
 * always 0 or 1, that comparison could never be true and a negative count
 * from __Pyx_NumKwargs_FASTCALL (i.e. an error) was silently ignored. The
 * parenthesization is corrected to match the sibling wrappers in this file
 * (e.g. the __reduce_cython__ and __init__ wrappers). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_18__setstate_cython__, "GpmSupport.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Recover the positional count in tuple-calling mode. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative keyword count signals an error from the keyword helper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then merge
       * keywords and verify the single required argument is present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional call with exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpmSupport.__setstate_cython__.
 * Unconditionally raises TypeError: GpmSupport wraps a raw C pointer
 * (self._ptr) and therefore cannot be unpickled. The __pyx_state argument
 * is intentionally unused. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10GpmSupport_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the interned message string; no success path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpmSupport.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13753
 * 
 * 
 * cdef _get_device_capabilities_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceCapabilities_v1_t pod = nvmlDeviceCapabilities_v1_t()
 *     return _numpy.dtype({
*/

/* Module-internal (cdef) helper: builds and returns the numpy structured
 * dtype describing nvmlDeviceCapabilities_v1_t, i.e.
 *   numpy.dtype({'names': ['version', 'cap_mask'],
 *                'formats': [numpy.uint32, numpy.uint32],
 *                'offsets': [offsetof(version), offsetof(capMask)],
 *                'itemsize': sizeof(nvmlDeviceCapabilities_v1_t)})
 * Field offsets are computed from a stack instance `pod` by pointer
 * arithmetic against its base address. Returns a new reference, or NULL
 * with a Python exception set on failure.
 *
 * Fix: __pyx_t_1 was previously copied into __pyx_v_pod without ever being
 * initialized (the constructor call for `nvmlDeviceCapabilities_v1_t()` in
 * the .pyx source was missing), so an indeterminate struct was read. Only
 * member *addresses* of pod are used afterwards, so the values never
 * mattered in practice, but the read was still formally invalid; the
 * value-initialization is restored. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_capabilities_v1_dtype_offsets(void) {
  nvmlDeviceCapabilities_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlDeviceCapabilities_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_device_capabilities_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":13754
 * 
 * cdef _get_device_capabilities_v1_dtype_offsets():
 *     cdef nvmlDeviceCapabilities_v1_t pod = nvmlDeviceCapabilities_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'cap_mask'],
*/
  __pyx_t_1 = nvmlDeviceCapabilities_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":13755
 * cdef _get_device_capabilities_v1_dtype_offsets():
 *     cdef nvmlDeviceCapabilities_v1_t pod = nvmlDeviceCapabilities_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'cap_mask'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
*/
  /* Look up numpy.dtype (held in __pyx_t_5) for the final call. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":13756
 *     cdef nvmlDeviceCapabilities_v1_t pod = nvmlDeviceCapabilities_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'cap_mask'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  /* __pyx_t_4 is the dtype-spec dict; populate 'names' first. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 13756, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cap_mask);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cap_mask);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_cap_mask) != (0)) __PYX_ERR(0, 13756, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 13756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13757
 *     return _numpy.dtype({
 *         'names': ['version', 'cap_mask'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  /* 'formats': both fields are numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13757, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13757, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13757, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 13756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13759
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.capMask)) - (<intptr_t>&pod),
 *         ],
*/
  /* Byte offset of pod.version from the struct base. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13759, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":13760
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.capMask)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlDeviceCapabilities_v1_t),
*/
  /* Byte offset of pod.capMask from the struct base. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.capMask)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13760, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":13758
 *         'names': ['version', 'cap_mask'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.capMask)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13758, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13758, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 13756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":13762
 *             (<intptr_t>&(pod.capMask)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlDeviceCapabilities_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlDeviceCapabilities_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13762, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 13756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict); the bound-method unpacking below is the
   * standard Cython vectorcall optimization. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13755, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13753
 * 
 * 
 * cdef _get_device_capabilities_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceCapabilities_v1_t pod = nvmlDeviceCapabilities_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_device_capabilities_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13779
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceCapabilities_v1_t *>calloc(1, sizeof(nvmlDeviceCapabilities_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython tp_init entry point for DeviceCapabilities_v1.__init__.
 * Takes no arguments beyond self: rejects positionals and keywords, then
 * dispatches to the __pyx_pf_...___init__ implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives a tuple; recover the positional count. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCapabilities_v1.__init__.
 * Allocates a zeroed nvmlDeviceCapabilities_v1_t via calloc and stores it
 * in self._ptr; raises MemoryError if allocation fails. On success marks
 * the instance as owning the buffer (_owned = True, _owner = None,
 * _readonly = False), so __dealloc__ will free it.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":13780
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceCapabilities_v1_t *>calloc(1, sizeof(nvmlDeviceCapabilities_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceCapabilities_v1")
*/
  /* calloc zero-initializes the struct. */
  __pyx_v_self->_ptr = ((nvmlDeviceCapabilities_v1_t *)calloc(1, (sizeof(nvmlDeviceCapabilities_v1_t))));

  /* "cuda/bindings/_nvml.pyx":13781
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceCapabilities_v1_t *>calloc(1, sizeof(nvmlDeviceCapabilities_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceCapabilities_v1")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13782
 *         self._ptr = <nvmlDeviceCapabilities_v1_t *>calloc(1, sizeof(nvmlDeviceCapabilities_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceCapabilities_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating DeviceCapabilities_v1");
     * the unpack-methods branch is the standard Cython vectorcall path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13782, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceCapabilit};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13782, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13782, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13781
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceCapabilities_v1_t *>calloc(1, sizeof(nvmlDeviceCapabilities_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceCapabilities_v1")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":13783
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceCapabilities_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":13784
 *             raise MemoryError("Error allocating DeviceCapabilities_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":13785
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":13779
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceCapabilities_v1_t *>calloc(1, sizeof(nvmlDeviceCapabilities_v1_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13787
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceCapabilities_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for DeviceCapabilities_v1.__dealloc__; forwards
 * straight to the __pyx_pf_..._2__dealloc__ implementation. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * function — presumably __Pyx_KwValues_VARARGS expands without using its
   * arguments in this configuration; confirm against the Cython utility
   * macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of DeviceCapabilities_v1.__dealloc__.
 * Frees the heap-allocated nvmlDeviceCapabilities_v1_t only when this
 * instance owns it (_owned) and the pointer is non-NULL. The pointer is
 * copied to a local and _ptr is nulled before free(), so _ptr never
 * dangles. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  nvmlDeviceCapabilities_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlDeviceCapabilities_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":13789
 *     def __dealloc__(self):
 *         cdef nvmlDeviceCapabilities_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit `and`: if _owned is false the pointer test is skipped. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13790
 *         cdef nvmlDeviceCapabilities_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":13791
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    /* Null out _ptr before freeing to avoid leaving a dangling pointer. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":13792
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13789
 *     def __dealloc__(self):
 *         cdef nvmlDeviceCapabilities_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":13787
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceCapabilities_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":13794
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceCapabilities_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for DeviceCapabilities_v1.__repr__; forwards to the
 * __pyx_pf_..._4__repr__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * function — presumably __Pyx_KwValues_VARARGS ignores its arguments
   * here; confirm against the Cython utility macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCapabilities_v1.__repr__.
 * Builds the f-string "<{__name__}.DeviceCapabilities_v1 object at
 * {hex(id(self))}>" by formatting the module __name__, applying hex(id(self)),
 * and joining the five pieces with __Pyx_PyUnicode_Join. Returns a new
 * unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":13795
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.DeviceCapabilities_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__) via FormatSimple. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<" + name + ".DeviceCapabilities_v1 object at " + hexaddr + ">";
   * the length argument pre-computes the result size (2 one-char literals
   * + the 33-char middle literal + both dynamic parts). */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_DeviceCapabilities_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13794
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceCapabilities_v1 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13797
 *         return f"<{__name__}.DeviceCapabilities_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for DeviceCapabilities_v1.ptr; forwards to the
 * __pyx_pf_...3ptr___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * function — presumably __Pyx_KwValues_VARARGS ignores its arguments
   * here; confirm against the Cython utility macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `DeviceCapabilities_v1.ptr` (property getter from
 * _nvml.pyx:13797-13800): returns the raw `self._ptr` address boxed as a
 * Python int. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13800
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): boxes the pointer via PyLong_FromSsize_t — assumes
   * intptr_t and Py_ssize_t have the same width (true on CPython's
   * supported platforms; Cython generates this cast intentionally). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13800, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13797
 *         return f"<{__name__}.DeviceCapabilities_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13802
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (`cdef`) fast path for _nvml.pyx:13802-13803: returns the raw
 * struct pointer as an intptr_t without creating a Python object. Cannot
 * fail; no GIL-visible side effects. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":13803
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13802
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13805
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython `nb_int` slot wrapper for DeviceCapabilities_v1.__int__:
 * downcasts `self` and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `int(DeviceCapabilities_v1)` (_nvml.pyx:13805-13806):
 * identical semantics to the `ptr` property — the raw `self._ptr` address
 * boxed as a Python int. New reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":13806
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value (intptr_t) into a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13805
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13808
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceCapabilities_v1 other_
 *         if not isinstance(other, DeviceCapabilities_v1):
*/

/* Python wrapper */
/* CPython rich-compare wrapper for DeviceCapabilities_v1.__eq__:
 * downcasts `self` and forwards both operands to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCapabilities_v1.__eq__ (_nvml.pyx:13808-13813):
 * returns False if `other` is not a DeviceCapabilities_v1; otherwise
 * compares the pointed-to nvmlDeviceCapabilities_v1_t structs bytewise
 * with memcmp. Returns a new reference to Py_True/Py_False, or NULL on
 * error. NOTE(review): memcmp on both `_ptr` values assumes neither is
 * NULL — confirm construction invariants in the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":13810
 *     def __eq__(self, other):
 *         cdef DeviceCapabilities_v1 other_
 *         if not isinstance(other, DeviceCapabilities_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Type gate: only same-type instances can compare equal. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13811
 *         cdef DeviceCapabilities_v1 other_
 *         if not isinstance(other, DeviceCapabilities_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCapabilities_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13810
 *     def __eq__(self, other):
 *         cdef DeviceCapabilities_v1 other_
 *         if not isinstance(other, DeviceCapabilities_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":13812
 *         if not isinstance(other, DeviceCapabilities_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCapabilities_v1_t)) == 0)
 * 
 */
  /* Re-check/downcast `other` (generated TypeTest also admits None). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1))))) __PYX_ERR(0, 13812, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":13813
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCapabilities_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Bytewise struct comparison: equal iff all fields (incl. padding) match. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlDeviceCapabilities_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13808
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceCapabilities_v1 other_
 *         if not isinstance(other, DeviceCapabilities_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13815
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCapabilities_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
*/

/* Python wrapper */
/* CPython `mp_ass_subscript` wrapper for DeviceCapabilities_v1.__setitem__:
 * downcasts `self` and forwards key/val to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCapabilities_v1.__setitem__ (_nvml.pyx:13815-13825).
 * Two paths:
 *   - key == 0 and val is a numpy.ndarray: malloc a fresh
 *     nvmlDeviceCapabilities_v1_t, memcpy the array's data into it
 *     (via val.ctypes.data), take ownership (_owned = True, _owner = None)
 *     and mirror the array's writeability into _readonly.
 *   - otherwise: fall back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray path overwrites self._ptr without freeing any
 * previously owned buffer — potential leak if called twice; confirm
 * intended usage in the .pyx source. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":13816
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: check key == 0 first, then the isinstance test. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13816, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 13816, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13817
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 */
    /* Allocate a struct-sized buffer that this object will own. */
    __pyx_v_self->_ptr = ((nvmlDeviceCapabilities_v1_t *)malloc((sizeof(nvmlDeviceCapabilities_v1_t))));

    /* "cuda/bindings/_nvml.pyx":13818
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13819
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating DeviceCapabilities_v1"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13819, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceCapabilit};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13819, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 13819, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13818
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":13820
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (an int address) and copy one struct's worth
     * of bytes from the array's buffer into the freshly-owned pointer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13820, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13820, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13820, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlDeviceCapabilities_v1_t))));

    /* "cuda/bindings/_nvml.pyx":13821
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No external owner keeps the data alive; this object owns the copy. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13822
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13823
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's writeability flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13823, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13823, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 13823, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":13816
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":13825
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-ndarray path: treat the key as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 13825, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":13815
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceCapabilities_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13827
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
/* CPython getter slot for DeviceCapabilities_v1.version: downcasts `self`
 * and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter (_nvml.pyx:13827-13830):
 * reads the `version` field (unsigned int, "the API version number") from
 * the wrapped nvmlDeviceCapabilities_v1_t and boxes it as a Python int.
 * New reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13830
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): dereferences _ptr unchecked — assumes _ptr is non-NULL. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13830, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13827
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13832
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython setter slot for DeviceCapabilities_v1.version: downcasts `self`
 * and forwards the new value. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter (_nvml.pyx:13832-13836):
 * raises ValueError if the instance is read-only, otherwise converts `val`
 * to unsigned int and stores it into self._ptr[0].version.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13834
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  /* Guard: reject mutation of instances backed by a read-only buffer. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13835
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceCapabilities_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13835, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13835, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13834
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13836
 *         if self._readonly:
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert (raising OverflowError/TypeError as needed) then store. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13836, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13832
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13838
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cap_mask(self):
 *         """int: OUT: Bit mask of capabilities."""
*/

/* Python wrapper */
/* CPython getter slot for DeviceCapabilities_v1.cap_mask: downcasts `self`
 * and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cap_mask` property getter (_nvml.pyx:13838-13841):
 * reads the `capMask` field ("OUT: Bit mask of capabilities", unsigned int)
 * from the wrapped struct and boxes it as a Python int.
 * New reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13841
 *     def cap_mask(self):
 *         """int: OUT: Bit mask of capabilities."""
 *         return self._ptr[0].capMask             # <<<<<<<<<<<<<<
 * 
 *     @cap_mask.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): dereferences _ptr unchecked — assumes _ptr is non-NULL. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).capMask); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13841, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13838
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cap_mask(self):
 *         """int: OUT: Bit mask of capabilities."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.cap_mask.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13843
 *         return self._ptr[0].capMask
 * 
 *     @cap_mask.setter             # <<<<<<<<<<<<<<
 *     def cap_mask(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython setter slot for DeviceCapabilities_v1.cap_mask: downcasts `self`
 * and forwards the new value. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `cap_mask` property setter (_nvml.pyx:13843-13847):
 * raises ValueError if the instance is read-only, otherwise converts `val`
 * to unsigned int and stores it into self._ptr[0].capMask. Mirrors the
 * `version` setter above. Returns 0 on success, -1 on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13845
 *     @cap_mask.setter
 *     def cap_mask(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")
 *         self._ptr[0].capMask = val
 */
  /* Guard: reject mutation of instances backed by a read-only buffer. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13846
 *     def cap_mask(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].capMask = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceCapabilities_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13846, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13846, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13845
 *     @cap_mask.setter
 *     def cap_mask(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")
 *         self._ptr[0].capMask = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13847
 *         if self._readonly:
 *             raise ValueError("This DeviceCapabilities_v1 instance is read-only")
 *         self._ptr[0].capMask = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert (raising OverflowError/TypeError as needed) then store. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13847, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).capMask = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13843
 *         return self._ptr[0].capMask
 * 
 *     @cap_mask.setter             # <<<<<<<<<<<<<<
 *     def cap_mask(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.cap_mask.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13849
 *         self._ptr[0].capMask = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given NumPy array.
 */

/* Python wrapper */
/*
 * METH_FASTCALL argument-unpacking wrapper for the static method
 * DeviceCapabilities_v1.from_data(data).  Accepts exactly one argument,
 * positional or keyword ("data"), and forwards it to the implementation
 * function __pyx_pf_..._12from_data.  Returns NULL with an exception set
 * on bad arity / unknown keywords.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_12from_data, "DeviceCapabilities_v1.from_data(data)\n\nCreate an DeviceCapabilities_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `device_capabilities_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and therefore never
     * `< 0`, so a negative (error) return from __Pyx_NumKwargs_FASTCALL was
     * silently ignored with a live exception pending.  The corrected
     * parenthesization matches the form used by __reduce_cython__ below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13849, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 13849, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 13849, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13849, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13849, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop the borrowed-turned-owned argument refs before returning NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of DeviceCapabilities_v1.from_data(data): delegates to the
 * module-level helper __from_data with the dtype name, the dtype object
 * looked up from module globals, and the DeviceCapabilities_v1 type object.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":13856
 *             data (_numpy.ndarray): a single-element array of dtype `device_capabilities_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "device_capabilities_v1_dtype", device_capabilities_v1_dtype, DeviceCapabilities_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up the numpy dtype object `device_capabilities_v1_dtype` from module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_device_capabilities_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_device_capabilities_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13849
 *         self._ptr[0].capMask = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13858
 *         return __from_data(data, "device_capabilities_v1_dtype", device_capabilities_v1_dtype, DeviceCapabilities_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given pointer.
 */

/* Python wrapper */
/*
 * METH_FASTCALL argument-unpacking wrapper for the static method
 * DeviceCapabilities_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks 1-3 positional/keyword arguments, converts `ptr` to intptr_t
 * and `readonly` to a C int, defaults `owner` to None, then forwards to
 * the implementation function __pyx_pf_..._14from_ptr.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_14from_ptr, "DeviceCapabilities_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an DeviceCapabilities_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and therefore never
     * `< 0`, so a negative (error) return from __Pyx_NumKwargs_FASTCALL was
     * silently ignored with a live exception pending.  The corrected
     * parenthesization matches the form used by __reduce_cython__ below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13858, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 13858, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":13859
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an DeviceCapabilities_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 13858, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13858, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13859, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13859, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 13858, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any collected argument refs before returning NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":13858
 *         return __from_data(data, "device_capabilities_v1_dtype", device_capabilities_v1_dtype, DeviceCapabilities_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of DeviceCapabilities_v1.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer, allocates a new DeviceCapabilities_v1 instance,
 * then either deep-copies the pointed-to struct (owner is None: malloc +
 * memcpy, instance owns the memory) or aliases the caller's pointer
 * (owner given: instance keeps a reference to owner, does not own the
 * memory).  Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13867
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13868
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13868, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13868, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13867
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":13869
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13869, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":13870
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13871
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 */
    /* No owner: copy the data so the new instance owns its own buffer. */
    __pyx_v_obj->_ptr = ((nvmlDeviceCapabilities_v1_t *)malloc((sizeof(nvmlDeviceCapabilities_v1_t))));

    /* "cuda/bindings/_nvml.pyx":13872
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCapabilities_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13873
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCapabilities_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved via module globals (may be shadowed), hence
       * the dynamic lookup instead of PyExc_MemoryError. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13873, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceCapabilit};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13873, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 13873, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13872
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCapabilities_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":13874
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCapabilities_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlDeviceCapabilities_v1_t))));

    /* "cuda/bindings/_nvml.pyx":13875
 *                 raise MemoryError("Error allocating DeviceCapabilities_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCapabilities_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13876
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceCapabilities_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13870
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>malloc(sizeof(nvmlDeviceCapabilities_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":13878
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: alias the caller's memory; the owner reference keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlDeviceCapabilities_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13879
 *         else:
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":13880
 *             obj._ptr = <nvmlDeviceCapabilities_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":13881
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":13882
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13858
 *         return __from_data(data, "device_capabilities_v1_dtype", device_capabilities_v1_dtype, DeviceCapabilities_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Drop the local `obj` reference; on success __pyx_r holds its own reference. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * METH_FASTCALL wrapper for DeviceCapabilities_v1.__reduce_cython__(self).
 * Takes no arguments beyond self; rejects positional and keyword arguments
 * before delegating to the implementation function.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_16__reduce_cython__, "DeviceCapabilities_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __reduce_cython__: this extension type cannot be
 * pickled (its _ptr member has no Python representation), so it
 * unconditionally raises TypeError and always returns NULL.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * METH_FASTCALL argument-unpacking wrapper for
 * DeviceCapabilities_v1.__setstate_cython__(self, __pyx_state).
 * Collects the single positional/keyword argument `__pyx_state` and
 * forwards it to the implementation function (which always raises
 * TypeError, since this type is not picklable).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_18__setstate_cython__, "DeviceCapabilities_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1 and therefore never
     * `< 0`, so a negative (error) return from __Pyx_NumKwargs_FASTCALL was
     * silently ignored with a live exception pending.  The corrected
     * parenthesization matches the form used by __reduce_cython__ above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any collected argument refs before returning NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceCapabilities_v1.__setstate_cython__().
 * The wrapped object holds a raw C pointer (_ptr) that cannot round-trip
 * through pickle, so this method unconditionally raises TypeError.
 * Both parameters are deliberately unused (CYTHON_UNUSED); always
 * returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned "cannot be converted ... for pickling"
   * message, then jump to the error exit (the only reachable exit). */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Record this frame in the traceback and propagate the failure. */
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceCapabilities_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13885
 * 
 * 
 * cdef _get_device_addressing_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceAddressingMode_v1_t pod = nvmlDeviceAddressingMode_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing the in-memory
 * layout of nvmlDeviceAddressingMode_v1_t: field names ['version','value'],
 * both formatted as numpy.uint32, byte offsets computed from a local
 * instance via pointer arithmetic, and itemsize = sizeof(struct).
 * Returns a new reference, or NULL with an exception set on failure.
 *
 * Fix vs. original: __pyx_t_1 was declared but never written before being
 * copied into __pyx_v_pod, i.e. an indeterminate-value read (flagged by
 * MSan; technically UB territory in C++).  The declaration is now
 * value-initialized, matching the `nvmlDeviceAddressingMode_v1_t()`
 * default construction in the .pyx source.  The copied bytes were never
 * observed — only field ADDRESSES of pod are used — so behavior of the
 * computed offsets is unchanged. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_addressing_mode_v1_dtype_offsets(void) {
  nvmlDeviceAddressingMode_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlDeviceAddressingMode_v1_t __pyx_t_1 = nvmlDeviceAddressingMode_v1_t();  /* value-initialized (was uninitialized) */
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_device_addressing_mode_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":13886
 * 
 * cdef _get_device_addressing_mode_v1_dtype_offsets():
 *     cdef nvmlDeviceAddressingMode_v1_t pod = nvmlDeviceAddressingMode_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'value'],
 */
  /* pod exists only so &pod.version / &pod.value can be subtracted from
   * &pod below; its contents are never read by value. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":13887
 * cdef _get_device_addressing_mode_v1_dtype_offsets():
 *     cdef nvmlDeviceAddressingMode_v1_t pod = nvmlDeviceAddressingMode_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 */
  /* Look up numpy.dtype (t_5 = the callable). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":13888
 *     cdef nvmlDeviceAddressingMode_v1_t pod = nvmlDeviceAddressingMode_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'value'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* t_4 = the 4-entry spec dict; first entry: 'names' -> ['version', 'value']. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 13888, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_value);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_value);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_value) != (0)) __PYX_ERR(0, 13888, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 13888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13889
 *     return _numpy.dtype({
 *         'names': ['version', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' -> [numpy.uint32, numpy.uint32] (looked up twice; each list
   * slot needs its own reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13889, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13889, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13889, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 13888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":13891
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 *         ],
 */
  /* Byte offset of .version relative to the struct base address. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":13892
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlDeviceAddressingMode_v1_t),
 */
  /* Byte offset of .value relative to the struct base address. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.value)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":13890
 *         'names': ['version', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 */
  /* 'offsets' -> [offset(version), offset(value)]. */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13890, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 13890, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 13888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":13894
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlDeviceAddressingMode_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' -> total struct size, so padding (if any) is preserved. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlDeviceAddressingMode_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 13888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via the vectorcall protocol; if dtype is a
   * bound method, unpack it so `self` rides in the argument array. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13887, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13885
 * 
 * 
 * cdef _get_device_addressing_mode_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceAddressingMode_v1_t pod = nvmlDeviceAddressingMode_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop every live temporary, record the traceback, return 0. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_device_addressing_mode_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13911
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceAddressingMode_v1_t *>calloc(1, sizeof(nvmlDeviceAddressingMode_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for DeviceAddressingMode_v1.__init__.
 * Rejects any positional or keyword arguments (the .pyx __init__ takes
 * only self), then forwards to the argument-free implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unchecked PyTuple_GET_SIZE form is used
   * only when the build assumes size macros are safe. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Any positional argument is an error (expects exactly 0). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* Any keyword argument is likewise rejected. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAddressingMode_v1.__init__:
 * zero-allocates the backing nvmlDeviceAddressingMode_v1_t with calloc,
 * raises MemoryError on allocation failure, and marks this wrapper as the
 * owner of the allocation (_owner=None, _owned=True, _readonly=False) so
 * __dealloc__ will free it.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":13912
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceAddressingMode_v1_t *>calloc(1, sizeof(nvmlDeviceAddressingMode_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceAddressingMode_v1")
 */
  /* calloc gives a zero-initialized struct. */
  __pyx_v_self->_ptr = ((nvmlDeviceAddressingMode_v1_t *)calloc(1, (sizeof(nvmlDeviceAddressingMode_v1_t))));

  /* "cuda/bindings/_nvml.pyx":13913
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceAddressingMode_v1_t *>calloc(1, sizeof(nvmlDeviceAddressingMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":13914
 *         self._ptr = <nvmlDeviceAddressingMode_v1_t *>calloc(1, sizeof(nvmlDeviceAddressingMode_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceAddressingMode_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build MemoryError("Error allocating DeviceAddressingMode_v1") via the
     * vectorcall protocol (unpacking a bound method if needed) and raise. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13914, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceAddressin};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13914, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 13914, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13913
 *     def __init__(self):
 *         self._ptr = <nvmlDeviceAddressingMode_v1_t *>calloc(1, sizeof(nvmlDeviceAddressingMode_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":13915
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* _owner = None: replace whatever reference the slot held. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":13916
 *             raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True: __dealloc__ is responsible for freeing _ptr. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":13917
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":13911
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDeviceAddressingMode_v1_t *>calloc(1, sizeof(nvmlDeviceAddressingMode_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13919
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceAddressingMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper: casts self and forwards to the __dealloc__
 * implementation.  No return value; deallocators cannot fail. */
static void __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
   * this only compiles if __Pyx_KwValues_VARARGS is a macro that does not
   * expand its arguments in this configuration — confirm against the
   * Cython utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  nvmlDeviceAddressingMode_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlDeviceAddressingMode_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":13921
 *     def __dealloc__(self):
 *         cdef nvmlDeviceAddressingMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13922
 *         cdef nvmlDeviceAddressingMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":13923
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":13924
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":13921
 *     def __dealloc__(self):
 *         cdef nvmlDeviceAddressingMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":13919
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDeviceAddressingMode_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":13926
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceAddressingMode_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper: casts self and forwards to the __repr__ implementation.
 * Returns a new unicode reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; this relies
   * on __Pyx_KwValues_VARARGS being a macro that ignores its arguments in
   * this configuration — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAddressingMode_v1.__repr__:
 * builds f"<{__name__}.DeviceAddressingMode_v1 object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * five unicode pieces in one pass.  Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":13927
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.DeviceAddressingMode_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__)  (module global, formatted with an empty spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".DeviceAddressingMode_v1 object at " + hexaddr + ">".
   * The length/max-char arguments pre-size the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_DeviceAddressingMode_v1_object;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 35 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13926
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DeviceAddressingMode_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13929
 *         return f"<{__name__}.DeviceAddressingMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for DeviceAddressingMode_v1.ptr: casts self and
 * forwards to the __get__ implementation.  Returns a new reference. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS not expanding its
   * (undeclared here) arguments in this configuration — see utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property: returns the raw backing-struct
 * address (self._ptr) as a Python int.  New reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13932
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int (Py_ssize_t-sized on this build). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13932, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13929
 *         return f"<{__name__}.DeviceAddressingMode_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13934
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for DeviceAddressingMode_v1._get_ptr():
 * exposes the raw backing-struct pointer as an integer address.
 * Mirrors the Python-visible `ptr` property and __int__, without any
 * Python-object boxing. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":13937
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper: casts self and forwards to the __int__ implementation.
 * Returns a new reference to a Python int, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS not expanding its
   * (undeclared here) arguments in this configuration — see utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAddressingMode_v1.__int__:
 * int(obj) yields the raw backing-struct address, identical to the `ptr`
 * property.  Returns a new reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":13938
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13938, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13937
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13940
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceAddressingMode_v1 other_
 *         if not isinstance(other, DeviceAddressingMode_v1):
*/

/* Python wrapper */
/* __eq__ wrapper: casts self, keeps `other` as a generic object (type is
 * checked inside the implementation), and forwards.  New reference. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): relies on __Pyx_KwValues_VARARGS not expanding its
   * (undeclared here) arguments in this configuration — see utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAddressingMode_v1.__eq__.
 * Semantics (from the .pyx source quoted inline below): return False for any
 * `other` that is not a DeviceAddressingMode_v1; otherwise compare the raw
 * bytes of the two wrapped nvmlDeviceAddressingMode_v1_t structs via memcmp.
 * NOTE(review): `other_ = other` accepts None (Py_None passes the TypeTest
 * short-circuit), in which case other_->_ptr would be read from a None object;
 * callers reach that path only if other is None AND isinstance(None, ...) was
 * true, which cannot happen — the isinstance guard returns False first. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":13942
 *     def __eq__(self, other):
 *         cdef DeviceAddressingMode_v1 other_
 *         if not isinstance(other, DeviceAddressingMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Exact-or-subtype check against the extension type; cheap C-level isinstance. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":13943
 *         cdef DeviceAddressingMode_v1 other_
 *         if not isinstance(other, DeviceAddressingMode_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAddressingMode_v1_t)) == 0)
 */
    /* Different type: not equal (returns False rather than NotImplemented). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":13942
 *     def __eq__(self, other):
 *         cdef DeviceAddressingMode_v1 other_
 *         if not isinstance(other, DeviceAddressingMode_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":13944
 *         if not isinstance(other, DeviceAddressingMode_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAddressingMode_v1_t)) == 0)
 * 
 */
  /* Checked downcast to the typed local (raises TypeError on mismatch; already
   * guaranteed by the isinstance guard above, so the error branch is dead here). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1))))) __PYX_ERR(0, 13944, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":13945
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAddressingMode_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Byte-wise equality of the underlying C structs. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlDeviceAddressingMode_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13945, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13940
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DeviceAddressingMode_v1 other_
 *         if not isinstance(other, DeviceAddressingMode_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13947
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAddressingMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
*/

/* Python wrapper */
/* CPython-facing wrapper for DeviceAddressingMode_v1.__setitem__ (mp_ass_subscript).
 * Casts `self` to the concrete extension struct and forwards key/val unchanged.
 * Returns 0 on success, -1 with an exception set on failure (slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* Unused keyword boilerplate; __setitem__ never receives keywords. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DeviceAddressingMode_v1.__setitem__.
 * Two paths (see the .pyx source quoted inline):
 *   - obj[0] = ndarray: malloc a fresh nvmlDeviceAddressingMode_v1_t, copy
 *     sizeof(struct) bytes from the array's data pointer into it, and mark the
 *     instance as owning that allocation (_owner=None, _owned=True); _readonly
 *     mirrors `not val.flags.writeable`.
 *   - anything else: fall back to setattr(self, key, val).
 * NOTE(review): on the ndarray path the previous self._ptr is overwritten
 * without being freed here — presumably dealloc/ownership handling lives
 * elsewhere in the class; confirm against the full generated type. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":13948
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 13948, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Resolve _numpy.ndarray at call time from the module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 13948, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":13949
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 */
    /* Take a private, heap-allocated copy of the struct. */
    __pyx_v_self->_ptr = ((nvmlDeviceAddressingMode_v1_t *)malloc((sizeof(nvmlDeviceAddressingMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":13950
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAddressingMode_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":13951
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAddressingMode_v1_t))
 *             self._owner = None
 */
      /* Looks up the name "MemoryError" via module globals/builtins, then calls
       * it with the message string (generic call sequence, not a direct
       * PyErr_NoMemory) — this mirrors the `raise MemoryError(...)` source. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13951, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceAddressin};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13951, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 13951, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":13950
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAddressingMode_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":13952
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAddressingMode_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (an int holding the array's base address) and copy
     * exactly one struct's worth of bytes from it. Assumes the array holds at
     * least sizeof(nvmlDeviceAddressingMode_v1_t) contiguous bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13952, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13952, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13952, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlDeviceAddressingMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":13953
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAddressingMode_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No external owner keeps the memory alive — this object owns the copy. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":13954
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDeviceAddressingMode_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":13955
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only-ness from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13955, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13955, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 13955, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":13948
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":13957
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat key as an attribute name. */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 13957, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":13947
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDeviceAddressingMode_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13959
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: API version."""
*/

/* Python wrapper */
/* Getter wrapper for the `version` property: casts self and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Unused keyword boilerplate; property getters take no arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter: returns the struct's
 * `version` field (unsigned int) as a Python int. Assumes self._ptr is a
 * valid non-NULL pointer — NULL is not checked here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13962
 *     def version(self):
 *         """int: API version."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13962, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13959
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: API version."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13964
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for the `version` property: casts self, forwards val.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Unused keyword boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter.
 * Raises ValueError if the instance was created read-only (_readonly set),
 * otherwise converts val to a C unsigned int and stores it in the struct.
 * OverflowError/TypeError from the int conversion propagate to the caller. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13966
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13967
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError("...read-only") via the vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAddressingMode_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13967, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13967, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13966
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13968
 *         if self._readonly:
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int (raises on overflow/wrong type), then store. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13968, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13964
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13970
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value(self):
 *         """int: One of `nvmlDeviceAddressingModeType_t`."""
*/

/* Python wrapper */
/* Getter wrapper for the `value` property: casts self and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Unused keyword boilerplate; property getters take no arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `value` property getter: returns the struct's
 * `value` field (one of nvmlDeviceAddressingModeType_t, stored as unsigned
 * int) as a Python int. Assumes self._ptr is valid and non-NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":13973
 *     def value(self):
 *         """int: One of `nvmlDeviceAddressingModeType_t`."""
 *         return self._ptr[0].value             # <<<<<<<<<<<<<<
 * 
 *     @value.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int into a Python int. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13973, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13970
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value(self):
 *         """int: One of `nvmlDeviceAddressingModeType_t`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.value.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13975
 *         return self._ptr[0].value
 * 
 *     @value.setter             # <<<<<<<<<<<<<<
 *     def value(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for the `value` property: casts self, forwards val.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Unused keyword boilerplate. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `value` property setter.
 * Mirrors the `version` setter: raises ValueError when the instance is
 * read-only, otherwise unboxes val to unsigned int and writes the field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":13977
 *     @value.setter
 *     def value(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")
 *         self._ptr[0].value = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":13978
 *     def value(self, val):
 *         if self._readonly:
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].value = val
 * 
 */
    /* Build and raise ValueError("...read-only") via the vectorcall helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DeviceAddressingMode_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13978, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 13978, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13977
 *     @value.setter
 *     def value(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")
 *         self._ptr[0].value = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":13979
 *         if self._readonly:
 *             raise ValueError("This DeviceAddressingMode_v1 instance is read-only")
 *         self._ptr[0].value = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox to unsigned int (raises on overflow/wrong type), then store. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13979, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).value = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":13975
 *         return self._ptr[0].value
 * 
 *     @value.setter             # <<<<<<<<<<<<<<
 *     def value(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.value.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13981
 *         self._ptr[0].value = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method DeviceAddressingMode_v1.from_data(data).
 * Accepts exactly one argument, positionally or as the keyword `data`, then
 * delegates to the implementation function.
 *
 * FIX(review): the keyword-count error check was written as
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * i.e. the `< 0` comparison sat OUTSIDE the unlikely() macro. Since
 * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only 0 or 1,
 * that condition was always false, so a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL (e.g. a failing PyDict_Size) was silently ignored
 * and parsing continued with an exception pending. The comparison is now
 * inside the macro argument: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_12from_data, "DeviceAddressingMode_v1.from_data(data)\n\nCreate an DeviceAddressingMode_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `device_addressing_mode_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Propagate an error from the keyword-count query (was dead code: the
     * comparison previously sat outside the unlikely() macro). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13981, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13981, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 13981, __pyx_L3_error)
      /* Verify the single required argument was supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 13981, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13981, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13981, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body of the Python staticmethod
 * `DeviceAddressingMode_v1.from_data(data)`.
 * Looks up the module-global dtype object and delegates to the C-level
 * helper __from_data(), which wraps `data` in a DeviceAddressingMode_v1
 * instance.  Returns a new reference on success, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":13988
 *             data (_numpy.ndarray): a single-element array of dtype `device_addressing_mode_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "device_addressing_mode_v1_dtype", device_addressing_mode_v1_dtype, DeviceAddressingMode_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module global `device_addressing_mode_v1_dtype` (new reference
   * in __pyx_t_1); __PYX_ERR jumps to the error path if the lookup fails. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_device_addressing_mode_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13988, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Delegate: __from_data(data, dtype-name string, dtype object, wrapper type). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_device_addressing_mode_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13988, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Transfer ownership of the result to __pyx_r and take the success exit. */
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13981
 *         self._ptr[0].value = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  /* Error path: drop any temporaries still held, record a traceback frame,
   * and return NULL to propagate the exception. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":13990
 *         return __from_data(data, "device_addressing_mode_v1_dtype", device_addressing_mode_v1_dtype, DeviceAddressingMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given pointer.
 */

/* Python wrapper for DeviceAddressingMode_v1.from_ptr(ptr, readonly=False,
 * owner=None): parses positional/keyword arguments, converts them to C
 * types, and forwards to the implementation function (..._14from_ptr).
 * Returns NULL with an exception set on any argument error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_14from_ptr, "DeviceAddressingMode_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an DeviceAddressingMode_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] hold owned references to the parsed ptr/readonly/owner
   * arguments; all slots are released on every exit path below. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which compares the result
     * of __builtin_expect(!!(len), 0) -- always 0 or 1 -- against 0, so the
     * negative-length (error) case was never detected.  The comparison must
     * be inside the macro argument, as in the other wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13990, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: first collect up to 3 positional args
       * (fallthrough switch), then merge keywords on top of them. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 13990, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":13991
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given pointer.
 * 
*/
      /* Apply the `owner=None` default; `ptr` (index 0) is mandatory and is
       * checked by the loop below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 13990, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path: 1..3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 13990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13990, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert to C types.  NOTE(review): PyLong_AsSsize_t is used for an
     * intptr_t target -- this assumes Py_ssize_t and intptr_t have the same
     * width on all supported platforms; confirm against the Cython typedefs. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13991, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13991, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);  /* readonly defaults to False */
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 13990, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: release any collected argument references,
   * record a traceback frame, and propagate NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":13990
 *         return __from_data(data, "device_addressing_mode_v1_dtype", device_addressing_mode_v1_dtype, DeviceAddressingMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body of DeviceAddressingMode_v1.from_ptr(ptr, readonly, owner).
 * Behavior (from the code below):
 *   - raises ValueError if ptr == 0;
 *   - allocates a new DeviceAddressingMode_v1 instance;
 *   - owner is None  -> malloc()s a private nvmlDeviceAddressingMode_v1_t,
 *     copies sizeof(nvmlDeviceAddressingMode_v1_t) bytes from ptr into it,
 *     and marks the instance as owning the allocation (_owned = 1);
 *   - owner given    -> aliases ptr directly and keeps a reference to owner
 *     to keep the memory alive (_owned = 0);
 *   - stores the readonly flag and returns the new instance.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":13999
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)
*/
  /* Guard: reject NULL pointers before doing any allocation. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14000
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via the
     * vectorcall-style fast-call helper. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14000, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14000, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":13999
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":14001
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
*/
  /* Allocate the wrapper instance via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14001, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14002
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14003
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1)
*/
    /* No owner: take a private copy of the struct so the caller's memory
     * need not outlive the wrapper. */
    __pyx_v_obj->_ptr = ((nvmlDeviceAddressingMode_v1_t *)malloc((sizeof(nvmlDeviceAddressingMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14004
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAddressingMode_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14005
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAddressingMode_v1_t))
 *             obj._owner = None
*/
      /* malloc failed: raise MemoryError (looked up as a module global,
       * so it could in principle be shadowed -- standard Cython behavior). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14005, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DeviceAddressin};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14005, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 14005, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14004
 *         if owner is None:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAddressingMode_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":14006
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAddressingMode_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct into the freshly malloc()ed buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlDeviceAddressingMode_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14007
 *                 raise MemoryError("Error allocating DeviceAddressingMode_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAddressingMode_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14008
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDeviceAddressingMode_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>ptr
*/
    /* _owned = True: the wrapper is responsible for free()ing _ptr
     * (presumably in its deallocator -- not visible in this chunk). */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14002
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DeviceAddressingMode_v1 obj = DeviceAddressingMode_v1.__new__(DeviceAddressingMode_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>malloc(sizeof(nvmlDeviceAddressingMode_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":14010
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's memory directly and hold a
     * reference to `owner` to keep it alive for the wrapper's lifetime. */
    __pyx_v_obj->_ptr = ((nvmlDeviceAddressingMode_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14011
 *         else:
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":14012
 *             obj._ptr = <nvmlDeviceAddressingMode_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":14013
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":14014
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return the fully initialized wrapper (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":13990
 *         return __from_data(data, "device_addressing_mode_v1_dtype", device_addressing_mode_v1_dtype, DeviceAddressingMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release temporaries and report traceback; the partially
   * built `obj` (if any) is dropped by the XDECREF at L0 below. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for DeviceAddressingMode_v1.__reduce_cython__(self):
 * rejects any positional or keyword arguments, then forwards to the
 * implementation body, which always raises TypeError (pickling of this
 * type is unsupported because _ptr cannot be converted). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_16__reduce_cython__, "DeviceAddressingMode_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: any positional or
   * keyword argument is a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body of __reduce_cython__: unconditionally raises
 * TypeError to block pickling (the wrapped C pointer has no Python
 * representation).  Always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* raise TypeError(...) -- __PYX_ERR records position info and jumps to
   * the error exit. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for DeviceAddressingMode_v1.__setstate_cython__(self, __pyx_state):
 * parses exactly one required argument (`__pyx_state`, positional or
 * keyword) and forwards to the implementation body, which always raises
 * TypeError (unpickling is unsupported for this type). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_18__setstate_cython__, "DeviceAddressingMode_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] holds an owned reference to the parsed __pyx_state argument;
   * it is released on every exit path below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which compares the result
     * of __builtin_expect(!!(len), 0) -- always 0 or 1 -- against 0, so a
     * negative (error) count from __Pyx_NumKwargs_FASTCALL was never caught.
     * The comparison belongs inside the macro argument, matching the form
     * used by __reduce_cython__'s wrapper in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the optional positional arg, then merge
       * keywords; the loop below enforces that __pyx_state was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: release collected references, record traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body of __setstate_cython__: unconditionally raises
 * TypeError because the wrapped C pointer cannot be restored from a
 * pickled state.  Always returns NULL with the exception set; the
 * __pyx_state argument is intentionally unused. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* raise TypeError(...) -- __PYX_ERR records position info and jumps to
   * the error exit. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DeviceAddressingMode_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14017
 * 
 * 
 * cdef _get_repair_status_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlRepairStatus_v1_t pod = nvmlRepairStatus_v1_t()
 *     return _numpy.dtype({
*/

/* Build and return a numpy.dtype describing the in-memory layout of
 * nvmlRepairStatus_v1_t: field names ('version', 'b_channel_repair_pending',
 * 'b_tpc_repair_pending'), formats (uint32 each), byte offsets computed from
 * a local instance of the struct, and 'itemsize' = sizeof(the struct).
 * Returns a new reference, or NULL with an exception set on failure.
 *
 * Fix vs. generated code: the original declared a temporary
 * `nvmlRepairStatus_v1_t __pyx_t_1;` that was never assigned and copied it
 * into `pod`, reading an indeterminate value (harmless here since only
 * member addresses are used, but still UB).  `pod` is now value-initialized
 * directly and the dead temporary is removed. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_repair_status_v1_dtype_offsets(void) {
  nvmlRepairStatus_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_repair_status_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14018
 * 
 * cdef _get_repair_status_v1_dtype_offsets():
 *     cdef nvmlRepairStatus_v1_t pod = nvmlRepairStatus_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'b_channel_repair_pending', 'b_tpc_repair_pending'],
 */
  /* Value-initialize (zero) instead of copying an uninitialized temporary.
   * Only the addresses of pod's members are used below. */
  __pyx_v_pod = nvmlRepairStatus_v1_t();

  /* "cuda/bindings/_nvml.pyx":14019
 * cdef _get_repair_status_v1_dtype_offsets():
 *     cdef nvmlRepairStatus_v1_t pod = nvmlRepairStatus_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'b_channel_repair_pending', 'b_tpc_repair_pending'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up _numpy.dtype from the module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14020
 *     cdef nvmlRepairStatus_v1_t pod = nvmlRepairStatus_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'b_channel_repair_pending', 'b_tpc_repair_pending'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* dict with 4 keys: 'names', 'formats', 'offsets', 'itemsize'. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 14020, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_channel_repair_pending);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_channel_repair_pending);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_b_channel_repair_pending) != (0)) __PYX_ERR(0, 14020, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_tpc_repair_pending);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_tpc_repair_pending);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_b_tpc_repair_pending) != (0)) __PYX_ERR(0, 14020, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14021
 *     return _numpy.dtype({
 *         'names': ['version', 'b_channel_repair_pending', 'b_tpc_repair_pending'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': three separate lookups of _numpy.uint32 (one per field). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 14021, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14021, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 14021, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 14020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14023
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bChannelRepairPending)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bTpcRepairPending)) - (<intptr_t>&pod),
 */
  /* Each offset is (address of member) - (address of struct), in bytes. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14023, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":14024
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bChannelRepairPending)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bTpcRepairPending)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bChannelRepairPending)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14024, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":14025
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bChannelRepairPending)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bTpcRepairPending)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlRepairStatus_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bTpcRepairPending)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14025, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":14022
 *         'names': ['version', 'b_channel_repair_pending', 'b_tpc_repair_pending'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bChannelRepairPending)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14022, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 14022, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 14022, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 14020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":14027
 *             (<intptr_t>&(pod.bTpcRepairPending)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlRepairStatus_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlRepairStatus_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 14020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall the dtype constructor; __pyx_t_10 is 1 when there is no bound
   * self (the single dict argument starts at callargs+1), 0 when a method
   * object was unpacked into self + function. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14019, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14017
 * 
 * 
 * cdef _get_repair_status_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlRepairStatus_v1_t pod = nvmlRepairStatus_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_repair_status_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14044
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlRepairStatus_v1_t *>calloc(1, sizeof(nvmlRepairStatus_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for RepairStatus_v1.__init__.
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation function.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: any positional argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* Likewise reject any keyword arguments. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.__init__ implementation.
 * Allocates a zeroed nvmlRepairStatus_v1_t via calloc, raising MemoryError
 * on allocation failure, then initializes the wrapper's bookkeeping fields:
 * _owner = None, _owned = True (the buffer is freed in __dealloc__),
 * _readonly = False.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":14045
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlRepairStatus_v1_t *>calloc(1, sizeof(nvmlRepairStatus_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating RepairStatus_v1")
 */
  /* calloc gives a zero-initialized struct. */
  __pyx_v_self->_ptr = ((nvmlRepairStatus_v1_t *)calloc(1, (sizeof(nvmlRepairStatus_v1_t))));

  /* "cuda/bindings/_nvml.pyx":14046
 *     def __init__(self):
 *         self._ptr = <nvmlRepairStatus_v1_t *>calloc(1, sizeof(nvmlRepairStatus_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating RepairStatus_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14047
 *         self._ptr = <nvmlRepairStatus_v1_t *>calloc(1, sizeof(nvmlRepairStatus_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating RepairStatus_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up the (possibly shadowed) name "MemoryError" from module
     * globals/builtins, call it with the error message, and raise. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14047, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    /* __pyx_t_5 selects the vectorcall argument layout: 1 = no bound self,
     * 0 = a method object was unpacked into self + function. */
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_RepairStatus_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14047, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14047, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14046
 *     def __init__(self):
 *         self._ptr = <nvmlRepairStatus_v1_t *>calloc(1, sizeof(nvmlRepairStatus_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating RepairStatus_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":14048
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating RepairStatus_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":14049
 *             raise MemoryError("Error allocating RepairStatus_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":14050
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":14044
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlRepairStatus_v1_t *>calloc(1, sizeof(nvmlRepairStatus_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14052
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlRepairStatus_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for RepairStatus_v1.__dealloc__: delegates directly
 * to the implementation.  NOTE(review): __pyx_args/__pyx_nargs are not
 * declared in this function; this line can only compile if
 * __Pyx_KwValues_VARARGS is a macro that does not expand its arguments —
 * confirm against the Cython utility-code preamble. */
static void __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* RepairStatus_v1.__dealloc__ implementation.
 * Frees the owned nvmlRepairStatus_v1_t buffer, if this wrapper owns one.
 * Equivalent Cython source:
 *     if self._owned and self._ptr != NULL:
 *         ptr = self._ptr
 *         self._ptr = NULL
 *         free(ptr)
 */
static void __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  nvmlRepairStatus_v1_t *__pyx_v_ptr;

  /* Only free when the buffer is owned and non-NULL. */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* Stash the pointer and clear the field before freeing. */
    __pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":14059
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.RepairStatus_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for RepairStatus_v1.__repr__: delegates to the
 * implementation.  NOTE(review): __pyx_args/__pyx_nargs are not declared
 * here; compiles only because __Pyx_KwValues_VARARGS does not expand its
 * arguments — confirm against the Cython preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.__repr__ implementation.
 * Builds the f-string "<{__name__}.RepairStatus_v1 object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * five unicode parts.  Returns a new str reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];   /* the five parts of the f-string */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":14060
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.RepairStatus_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* str(__name__): format the module-global __name__ with an empty spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14060, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14060, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)), then coerce to str. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14060, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14060, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14060, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".RepairStatus_v1 object at " + hex + ">".
   * The length argument pre-sums the constant-part lengths plus the two
   * dynamic parts; max-char is the max over the dynamic parts and ASCII. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_RepairStatus_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14060, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14059
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.RepairStatus_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14062
 *         return f"<{__name__}.RepairStatus_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the RepairStatus_v1.ptr property getter:
 * delegates to the implementation.  NOTE(review): __pyx_args/__pyx_nargs
 * are not declared here; compiles only because __Pyx_KwValues_VARARGS does
 * not expand its arguments — confirm against the Cython preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.ptr property getter implementation.
 * Returns the raw address of self._ptr as a Python int.
 * NOTE(review): converts intptr_t via PyLong_FromSsize_t — assumes intptr_t
 * and Py_ssize_t share a width, which holds on the supported platforms but
 * is worth confirming for new targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14065
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14062
 *         return f"<{__name__}.RepairStatus_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14067
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* RepairStatus_v1._get_ptr: C-level accessor returning the raw address of
 * self._ptr as an integer.  Cannot fail; no Python API involved. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15RepairStatus_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":14070
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for RepairStatus_v1.__int__: delegates to the
 * implementation.  NOTE(review): __pyx_args/__pyx_nargs are not declared
 * here; compiles only because __Pyx_KwValues_VARARGS does not expand its
 * arguments — confirm against the Cython preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.__int__ implementation.
 * int(obj) returns the raw address of self._ptr as a Python int — the same
 * value as the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":14071
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14071, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14070
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14073
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef RepairStatus_v1 other_
 *         if not isinstance(other, RepairStatus_v1):
*/

/* Python wrapper */
/* Python-level wrapper for RepairStatus_v1.__eq__: delegates to the
 * implementation.  NOTE(review): __pyx_args/__pyx_nargs are not declared
 * here; compiles only because __Pyx_KwValues_VARARGS does not expand its
 * arguments — confirm against the Cython preamble. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.__eq__ implementation.
 * Returns False if `other` is not a RepairStatus_v1 instance; otherwise
 * compares the pointed-to structs byte-for-byte with memcmp (content
 * equality, not object identity).  Returns a new bool reference, or NULL
 * with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":14075
 *     def __eq__(self, other):
 *         cdef RepairStatus_v1 other_
 *         if not isinstance(other, RepairStatus_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14076
 *         cdef RepairStatus_v1 other_
 *         if not isinstance(other, RepairStatus_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRepairStatus_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14075
 *     def __eq__(self, other):
 *         cdef RepairStatus_v1 other_
 *         if not isinstance(other, RepairStatus_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":14077
 *         if not isinstance(other, RepairStatus_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRepairStatus_v1_t)) == 0)
 * 
 */
  /* Downcast `other` to the extension type; the None branch of the TypeTest
   * is unreachable here because the isinstance check above already returned
   * False for non-RepairStatus_v1 objects (including None). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1))))) __PYX_ERR(0, 14077, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":14078
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRepairStatus_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlRepairStatus_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14078, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14073
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef RepairStatus_v1 other_
 *         if not isinstance(other, RepairStatus_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14080
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRepairStatus_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
*/

/* Python wrapper */
/* CPython slot wrapper for RepairStatus_v1.__setitem__: casts `self` to the
 * extension-type struct and forwards to the typed implementation below.
 * Returns 0 on success, -1 on error (standard mp_ass_subscript contract). */
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RepairStatus_v1.__setitem__ (see .pyx line 14080).
 * Special case: `obj[0] = ndarray` allocates a fresh nvmlRepairStatus_v1_t,
 * memcpy's the array's raw buffer into it, and marks the copy as owned.
 * Any other key falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":14081
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: test key == 0 first, only then the isinstance check. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14081, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  /* Look up numpy.ndarray at call time from the module globals (`_numpy`). */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14081, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14081, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 14081, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14082
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 */
    /* NOTE(review): a previously owned self._ptr is overwritten here without
     * being freed first — potential leak if __setitem__(0, arr) is called
     * twice; this mirrors the .pyx source, so any fix belongs there. */
    __pyx_v_self->_ptr = ((nvmlRepairStatus_v1_t *)malloc((sizeof(nvmlRepairStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14083
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRepairStatus_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14084
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating RepairStatus_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRepairStatus_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError(...) via the generic vectorcall helper;
       * __pyx_t_6 selects the arg offset for bound-method unpacking. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14084, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_RepairStatus_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14084, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 14084, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14083
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRepairStatus_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14085
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRepairStatus_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy raw bytes from the ndarray buffer (val.ctypes.data as an integer
     * address). Assumes the array holds at least sizeof(nvmlRepairStatus_v1_t)
     * contiguous bytes — the .pyx docstring says a single-element array of
     * repair_status_v1_dtype; not validated here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14085, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlRepairStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14086
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRepairStatus_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The struct data was copied, so no Python object keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14087
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlRepairStatus_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14088
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14088, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14088, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 14088, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":14081
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":14090
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat the key as an attribute name (e.g. obj["version"]). */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 14090, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":14080
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlRepairStatus_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14092
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: API version number."""
*/

/* Python wrapper */
/* Property-getter slot wrapper for RepairStatus_v1.version: casts `self` and
 * forwards to the typed implementation. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.version getter: reads the `version` field of the wrapped
 * nvmlRepairStatus_v1_t and boxes it as a Python int. Assumes self._ptr is
 * non-NULL (guaranteed by the class's construction paths in the .pyx). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14095
 *     def version(self):
 *         """int: API version number."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14095, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14092
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: API version number."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14097
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter slot wrapper for RepairStatus_v1.version: casts `self` and
 * forwards to the typed implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.version setter: raises ValueError when the instance is
 * read-only, otherwise converts `val` to unsigned int (raising on overflow
 * or non-integers) and stores it into the wrapped struct's `version` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14099
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14100
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This RepairStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Instantiate ValueError(msg) via vectorcall; offset 1 = no self slot. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RepairStatus_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14100, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14100, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14099
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14101
 *         if self._readonly:
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14101, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14097
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14103
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_channel_repair_pending(self):
 *         """int: Reference to `unsigned` int."""
*/

/* Python wrapper */
/* Property-getter slot wrapper for RepairStatus_v1.b_channel_repair_pending:
 * casts `self` and forwards. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.b_channel_repair_pending getter: boxes the struct field
 * `bChannelRepairPending` (unsigned int) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14106
 *     def b_channel_repair_pending(self):
 *         """int: Reference to `unsigned` int."""
 *         return self._ptr[0].bChannelRepairPending             # <<<<<<<<<<<<<<
 * 
 *     @b_channel_repair_pending.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bChannelRepairPending); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14103
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_channel_repair_pending(self):
 *         """int: Reference to `unsigned` int."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.b_channel_repair_pending.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14108
 *         return self._ptr[0].bChannelRepairPending
 * 
 *     @b_channel_repair_pending.setter             # <<<<<<<<<<<<<<
 *     def b_channel_repair_pending(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter slot wrapper for RepairStatus_v1.b_channel_repair_pending:
 * casts `self` and forwards. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.b_channel_repair_pending setter: raises ValueError on a
 * read-only instance, otherwise converts `val` to unsigned int and stores
 * it into the struct's `bChannelRepairPending` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14110
 *     @b_channel_repair_pending.setter
 *     def b_channel_repair_pending(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].bChannelRepairPending = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14111
 *     def b_channel_repair_pending(self, val):
 *         if self._readonly:
 *             raise ValueError("This RepairStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bChannelRepairPending = val
 * 
 */
    /* Instantiate ValueError(msg) via vectorcall; offset 1 = no self slot. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RepairStatus_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14111, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14111, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14110
 *     @b_channel_repair_pending.setter
 *     def b_channel_repair_pending(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].bChannelRepairPending = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14112
 *         if self._readonly:
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].bChannelRepairPending = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14112, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bChannelRepairPending = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14108
 *         return self._ptr[0].bChannelRepairPending
 * 
 *     @b_channel_repair_pending.setter             # <<<<<<<<<<<<<<
 *     def b_channel_repair_pending(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.b_channel_repair_pending.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14114
 *         self._ptr[0].bChannelRepairPending = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_tpc_repair_pending(self):
 *         """int: Reference to `unsigned` int."""
*/

/* Python wrapper */
/* Property-getter slot wrapper for RepairStatus_v1.b_tpc_repair_pending:
 * casts `self` and forwards. Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.b_tpc_repair_pending getter: boxes the struct field
 * `bTpcRepairPending` (unsigned int) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14117
 *     def b_tpc_repair_pending(self):
 *         """int: Reference to `unsigned` int."""
 *         return self._ptr[0].bTpcRepairPending             # <<<<<<<<<<<<<<
 * 
 *     @b_tpc_repair_pending.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bTpcRepairPending); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14117, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14114
 *         self._ptr[0].bChannelRepairPending = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_tpc_repair_pending(self):
 *         """int: Reference to `unsigned` int."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.b_tpc_repair_pending.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14119
 *         return self._ptr[0].bTpcRepairPending
 * 
 *     @b_tpc_repair_pending.setter             # <<<<<<<<<<<<<<
 *     def b_tpc_repair_pending(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter slot wrapper for RepairStatus_v1.b_tpc_repair_pending:
 * casts `self` and forwards. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* RepairStatus_v1.b_tpc_repair_pending setter: raises ValueError on a
 * read-only instance, otherwise converts `val` to unsigned int and stores
 * it into the struct's `bTpcRepairPending` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14121
 *     @b_tpc_repair_pending.setter
 *     def b_tpc_repair_pending(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].bTpcRepairPending = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14122
 *     def b_tpc_repair_pending(self, val):
 *         if self._readonly:
 *             raise ValueError("This RepairStatus_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bTpcRepairPending = val
 * 
 */
    /* Instantiate ValueError(msg) via vectorcall; offset 1 = no self slot. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_RepairStatus_v1_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14122, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14122, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14121
 *     @b_tpc_repair_pending.setter
 *     def b_tpc_repair_pending(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].bTpcRepairPending = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14123
 *         if self._readonly:
 *             raise ValueError("This RepairStatus_v1 instance is read-only")
 *         self._ptr[0].bTpcRepairPending = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14123, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bTpcRepairPending = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14119
 *         return self._ptr[0].bTpcRepairPending
 * 
 *     @b_tpc_repair_pending.setter             # <<<<<<<<<<<<<<
 *     def b_tpc_repair_pending(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.b_tpc_repair_pending.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14125
 *         self._ptr[0].bTpcRepairPending = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an RepairStatus_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod RepairStatus_v1.from_data(data):
 * unpacks exactly one positional-or-keyword argument ("data") from either the
 * METH_FASTCALL or tuple calling convention, then delegates to the typed
 * implementation. Returns a new reference or NULL with an exception set.
 *
 * Fix: the keyword-count error check used `unlikely(__pyx_kwds_len) < 0`.
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose value is only
 * 0 or 1, so the `< 0` comparison was always false and a negative (error)
 * return from __Pyx_NumKwargs_FASTCALL was silently ignored. The comparison
 * now happens inside the hint: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_12from_data, "RepairStatus_v1.from_data(data)\n\nCreate an RepairStatus_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `repair_status_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: compare inside unlikely(); unlikely(x) is 0/1 so `unlikely(x) < 0`
     * could never fire and a counting error went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14125, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14125, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 14125, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 14125, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14125, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14125, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Drop any argument references accumulated before the failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RepairStatus_v1.from_data (static method body).
 * Looks up the module-level `repair_status_v1_dtype` global and delegates to
 * the shared __from_data helper, which wraps a single-element NumPy array of
 * that dtype in a new RepairStatus_v1 instance.
 * Returns: new reference on success, NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":14132
 *             data (_numpy.ndarray): a single-element array of dtype `repair_status_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_1 holds the module global `repair_status_v1_dtype` (new ref). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_repair_status_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_repair_status_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14125
 *         self._ptr[0].bTpcRepairPending = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an RepairStatus_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record a traceback frame. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14134
 *         return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RepairStatus_v1 instance wrapping the given pointer.
 */

/* Python wrapper for RepairStatus_v1.from_ptr.
 * Unpacks (ptr, readonly=False, owner=None) from the fastcall / tuple calling
 * convention, converts them to C values, and forwards to the implementation
 * function __pyx_pf_..._14from_ptr.  Returns NULL with an exception set on
 * argument errors. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_14from_ptr, "RepairStatus_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an RepairStatus_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: this check was `if (unlikely(__pyx_kwds_len) < 0)`, which
     * compares unlikely()'s 0/1 result (`!!(x)`) with 0 and therefore can
     * never be true, leaving the error path unreachable.  Parenthesize so the
     * length itself is tested, matching the correctly written check in
     * __reduce_cython__ below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14134, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge
       * keywords via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 14134, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":14135
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an RepairStatus_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Only `ptr` (index 0) is required; raise if it is still unset. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 14134, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14134, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14134, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): conversion uses PyLong_AsSsize_t for an intptr_t target;
     * Py_ssize_t and intptr_t are assumed to have the same width here —
     * true on all platforms this module targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14135, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14135, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 14134, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":14134
 *         return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RepairStatus_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RepairStatus_v1.from_ptr.
 * Wraps the nvmlRepairStatus_v1_t struct at address `ptr` in a new
 * RepairStatus_v1 object.  When `owner` is None, the struct is copied into
 * freshly malloc'd memory owned (and later freed) by the new object; otherwise
 * the object aliases `ptr` directly and holds a reference to `owner` to keep
 * the backing memory alive.  Returns a new reference, or NULL with an
 * exception set (ValueError for a null ptr, MemoryError on allocation
 * failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14143
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)
*/
  /* Reject null pointers up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14144
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14144, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14144, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14143
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":14145
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
*/
  /* Allocate the instance directly via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_RepairStatus_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14145, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14146
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* owner is None: take ownership of a private, malloc'd copy of the
     * struct so the caller's memory is not referenced after this call. */
    /* "cuda/bindings/_nvml.pyx":14147
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating RepairStatus_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlRepairStatus_v1_t *)malloc((sizeof(nvmlRepairStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14148
 *         if owner is None:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRepairStatus_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14149
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating RepairStatus_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRepairStatus_v1_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (it may be shadowed),
       * hence __Pyx_GetModuleGlobalName rather than PyExc_MemoryError. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14149, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_RepairStatus_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14149, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 14149, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14148
 *         if owner is None:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRepairStatus_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":14150
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRepairStatus_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlRepairStatus_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14151
 *                 raise MemoryError("Error allocating RepairStatus_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRepairStatus_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14152
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlRepairStatus_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>ptr
*/
    /* _owned marks that the destructor must free _ptr. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14146
 *             raise ValueError("ptr must not be null (0)")
 *         cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlRepairStatus_v1_t *>malloc(sizeof(nvmlRepairStatus_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":14154
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* owner provided: alias the caller's memory and hold a reference to
   * `owner` so the backing allocation stays alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlRepairStatus_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14155
 *         else:
 *             obj._ptr = <nvmlRepairStatus_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":14156
 *             obj._ptr = <nvmlRepairStatus_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":14157
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":14158
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14134
 *         return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RepairStatus_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release temporaries and record a traceback frame;
   * __pyx_v_obj (if created) is released below at __pyx_L0. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for RepairStatus_v1.__reduce_cython__.
 * Rejects any positional or keyword arguments, then forwards to the
 * implementation (which always raises TypeError: instances holding a raw
 * C pointer cannot be pickled). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_16__reduce_cython__, "RepairStatus_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* The method takes no arguments beyond `self`. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RepairStatus_v1.__reduce_cython__.
 * Unconditionally raises TypeError: the instance wraps a raw C pointer
 * (self._ptr) that cannot be serialized for pickling.  Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for RepairStatus_v1.__setstate_cython__.
 * Unpacks the single required `__pyx_state` argument and forwards to the
 * implementation (which always raises TypeError — unpickling is not
 * supported for pointer-wrapping instances). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_18__setstate_cython__, "RepairStatus_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: this check was `if (unlikely(__pyx_kwds_len) < 0)`, which
     * compares unlikely()'s 0/1 result (`!!(x)`) with 0 and therefore can
     * never be true, leaving the error path unreachable.  Parenthesize so
     * the length itself is tested, matching the correctly written check in
     * __reduce_cython__ above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* `__pyx_state` is required; raise if it is still unset. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of RepairStatus_v1.__setstate_cython__.
 * Unconditionally raises TypeError: the instance wraps a raw C pointer
 * (self._ptr) that cannot be restored from pickled state.  The `__pyx_state`
 * argument is accepted but unused.  Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15RepairStatus_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.RepairStatus_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14161
 * 
 * 
 * cdef _get_pdi_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPdi_v1_t pod = nvmlPdi_v1_t()
 *     return _numpy.dtype({
*/

/* _get_pdi_v1_dtype_offsets (cdef, module-internal):
 * Builds and returns the numpy structured dtype that mirrors nvmlPdi_v1_t:
 *   numpy.dtype({'names': ['version', 'value'],
 *                'formats': [numpy.uint32, numpy.uint64],
 *                'offsets': [...], 'itemsize': sizeof(nvmlPdi_v1_t)})
 * Field offsets are derived from a local nvmlPdi_v1_t instance by pointer
 * arithmetic.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_pdi_v1_dtype_offsets(void) {
  nvmlPdi_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlPdi_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_pdi_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14162
 * 
 * cdef _get_pdi_v1_dtype_offsets():
 *     cdef nvmlPdi_v1_t pod = nvmlPdi_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'value'],
 */
  /* FIX: value-initialize the temporary before copying it into pod.
   * Previously __pyx_t_1 was read while indeterminate (it was never
   * assigned), which is undefined behavior; the pyx source's
   * nvmlPdi_v1_t() requires value-initialization. */
  __pyx_t_1 = nvmlPdi_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":14163
 * cdef _get_pdi_v1_dtype_offsets():
 *     cdef nvmlPdi_v1_t pod = nvmlPdi_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 */
  /* Look up numpy.dtype once; __pyx_t_5 holds the callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14164
 *     cdef nvmlPdi_v1_t pod = nvmlPdi_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'value'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 */
  /* Build the 4-entry spec dict (__pyx_t_4); 'names' entry first. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 14164, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_value);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_value);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_value) != (0)) __PYX_ERR(0, 14164, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14165
 *     return _numpy.dtype({
 *         'names': ['version', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint64],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' entry: [numpy.uint32, numpy.uint64]. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 14165, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14165, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 14164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14167
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 *         ],
 */
  /* Offsets computed as member-address minus struct-address (offsetof
   * equivalent done on the live local). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":14168
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlPdi_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.value)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":14166
 *         'names': ['version', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint64],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14166, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14166, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 14164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":14170
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlPdi_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlPdi_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 14164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall numpy.dtype(spec_dict); if it is a bound method, unpack
   * self/function so FastCall gets the right argument layout. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14163, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14161
 * 
 * 
 * cdef _get_pdi_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlPdi_v1_t pod = nvmlPdi_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_pdi_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14187
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPdi_v1_t *>calloc(1, sizeof(nvmlPdi_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level entry point for Pdi_v1.__init__ (tp_init).  The constructor
 * takes no arguments: any positional argument or keyword raises TypeError.
 * Delegates to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Reject every positional argument and every keyword. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__init__: calloc's a zeroed nvmlPdi_v1_t, raises
 * MemoryError if allocation fails, and marks the instance as the owner of the
 * allocation (_owner=None, _owned=True, _readonly=False).  The matching
 * free() happens in __dealloc__ when _owned is set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":14188
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlPdi_v1_t *>calloc(1, sizeof(nvmlPdi_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Pdi_v1")
 */
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlPdi_v1_t *)calloc(1, (sizeof(nvmlPdi_v1_t))));

  /* "cuda/bindings/_nvml.pyx":14189
 *     def __init__(self):
 *         self._ptr = <nvmlPdi_v1_t *>calloc(1, sizeof(nvmlPdi_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Pdi_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14190
 *         self._ptr = <nvmlPdi_v1_t *>calloc(1, sizeof(nvmlPdi_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Pdi_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating Pdi_v1") via the
     * module-global lookup + vectorcall sequence. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14190, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Pdi_v1};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14190, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14190, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14189
 *     def __init__(self):
 *         self._ptr = <nvmlPdi_v1_t *>calloc(1, sizeof(nvmlPdi_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating Pdi_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":14191
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating Pdi_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap whatever _owner held for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":14192
 *             raise MemoryError("Error allocating Pdi_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":14193
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":14187
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlPdi_v1_t *>calloc(1, sizeof(nvmlPdi_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14195
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlPdi_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-path wrapper for Pdi_v1.__dealloc__: forwards to the typed
 * implementation.  Cannot raise. */
static void __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this function;
   * this compiles only if __Pyx_KwValues_VARARGS is a macro that does not
   * expand its arguments — confirm against the Cython utility preamble. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of Pdi_v1.__dealloc__ (pyx lines 14195-14200):
 * releases the calloc'd nvmlPdi_v1_t when this wrapper owns it.  The
 * pointer field is cleared before free() so no dangling value remains
 * observable on the instance. */
static void __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  /* if self._owned and self._ptr != NULL: */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* ptr = self._ptr; self._ptr = NULL; free(ptr) */
    nvmlPdi_v1_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":14202
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Pdi_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for Pdi_v1.__repr__: forwards to the implementation.
 * Returns a new unicode reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__repr__: evaluates the f-string
 * f"<{__name__}.Pdi_v1 object at {hex(id(self))}>" by formatting the module
 * __name__, computing hex(id(self)), and joining the five pieces into one
 * unicode object.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":14203
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.Pdi_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* __pyx_t_2 = format(__name__, '') */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", __name__, ".Pdi_v1 object at ", hex-id, ">"; the length hint
   * is the sum of the literal widths plus the two dynamic pieces. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_Pdi_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 18 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14202
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.Pdi_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14205
 *         return f"<{__name__}.Pdi_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the Pdi_v1.ptr property: forwards to the
 * implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Pdi_v1.ptr property getter: returns the raw struct
 * pointer address as a Python int.  New reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14208
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14205
 *         return f"<{__name__}.Pdi_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14210
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Pdi_v1._get_ptr (cdef, C-level only; pyx lines 14210-14211):
 * exposes the raw nvmlPdi_v1_t pointer as an integer address.
 * Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_6Pdi_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  return (intptr_t)(__pyx_v_self->_ptr);
}

/* "cuda/bindings/_nvml.pyx":14213
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for Pdi_v1.__int__: forwards to the implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__int__: same value as the `ptr` property — the
 * raw struct pointer address as a Python int.  New reference, or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":14214
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14213
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14216
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Pdi_v1 other_
 *         if not isinstance(other, Pdi_v1):
*/

/* Python wrapper */
/* Richcompare (==) wrapper for Pdi_v1.__eq__: forwards self and other to the
 * implementation.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__eq__: False if `other` is not a Pdi_v1,
 * otherwise byte-wise comparison (memcmp) of the two wrapped structs.
 * NOTE(review): dereferences both _ptr fields without a NULL check —
 * presumably both are always allocated by __init__; confirm for the
 * not-owned construction paths. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":14218
 *     def __eq__(self, other):
 *         cdef Pdi_v1 other_
 *         if not isinstance(other, Pdi_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14219
 *         cdef Pdi_v1 other_
 *         if not isinstance(other, Pdi_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPdi_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14218
 *     def __eq__(self, other):
 *         cdef Pdi_v1 other_
 *         if not isinstance(other, Pdi_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":14220
 *         if not isinstance(other, Pdi_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPdi_v1_t)) == 0)
 * 
 */
  /* Downcast to the typed pointer (type already verified above; the
   * TypeTest also tolerates None per Cython's cast semantics). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1))))) __PYX_ERR(0, 14220, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":14221
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPdi_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlPdi_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14216
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef Pdi_v1 other_
 *         if not isinstance(other, Pdi_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14223
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPdi_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript wrapper for Pdi_v1.__setitem__: forwards self, key and
 * value to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__setitem__(key, val).
 *
 * Special case: `obj[0] = ndarray` replaces the wrapped nvmlPdi_v1_t by
 * malloc'ing fresh storage and memcpy'ing sizeof(nvmlPdi_v1_t) bytes from
 * the array's data pointer (val.ctypes.data); the instance then owns the
 * copy (_owned = True, _owner = None) and inherits read-only-ness from
 * the array's writeable flag. Any other key falls through to
 * setattr(self, key, val), i.e. the named-field property setters.
 *
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * NOTE(review): the malloc path does not free a previously owned _ptr,
 * so repeated `obj[0] = arr` assignments look like they leak the prior
 * allocation — confirm against the .pyx source / dealloc logic. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":14224
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: first compare key == 0, only then do the
   * (more expensive) numpy.ndarray isinstance lookup. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14224, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 14224, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14225
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Pdi_v1")
 */
    /* Allocate owned storage for one nvmlPdi_v1_t (see NOTE above about
     * the prior _ptr not being released first). */
    __pyx_v_self->_ptr = ((nvmlPdi_v1_t *)malloc((sizeof(nvmlPdi_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14226
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPdi_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14227
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Pdi_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPdi_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating Pdi_v1") via the
       * generic fastcall helper (MemoryError is looked up as a module
       * global, hence the optional bound-method unpacking below). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14227, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Pdi_v1};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14227, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 14227, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14226
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPdi_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14228
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPdi_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read val.ctypes.data (the array's base address as a Python int),
     * convert it to intptr_t, and copy one struct's worth of bytes.
     * Assumes the array holds at least sizeof(nvmlPdi_v1_t) contiguous
     * bytes — the from_data path validates that; direct __setitem__ does
     * not re-check here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14228, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14228, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14228, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlPdi_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14229
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPdi_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The data was copied, so no other Python object keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14230
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlPdi_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14231
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag: a read-only array yields
     * a read-only Pdi_v1 (property setters will then raise ValueError). */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14231, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14231, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 14231, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":14224
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":14233
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: delegate any non-(0, ndarray) assignment to normal
     * attribute setting, i.e. the generated field properties. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 14233, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":14223
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlPdi_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14235
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: API version number."""
*/

/* Python wrapper */
/* Python-level getter slot for the Pdi_v1.version property; forwards the
 * cast instance to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Pdi_v1.version getter: boxes the unsigned int `version` field of the
 * wrapped nvmlPdi_v1_t struct as a Python int. Returns a new reference,
 * or NULL with an exception set on conversion failure.
 * NOTE(review): _ptr is dereferenced without a NULL check — an
 * uninitialized instance would crash here; verify the .pyx constructor
 * guarantees _ptr is set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14238
 *     def version(self):
 *         """int: API version number."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14235
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: API version number."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14240
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level setter slot for the Pdi_v1.version property; forwards the
 * cast instance and value to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Pdi_v1.version setter: raises ValueError when the instance was created
 * read-only; otherwise converts `val` to unsigned int (OverflowError /
 * TypeError propagate from the conversion helper) and stores it into the
 * wrapped struct. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14242
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Pdi_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14243
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This Pdi_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Construct ValueError("This Pdi_v1 instance is read-only") via the
     * fastcall helper and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Pdi_v1_instance_is_read_onl};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14243, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14243, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14242
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Pdi_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14244
 *         if self._readonly:
 *             raise ValueError("This Pdi_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14244, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14240
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14246
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value(self):
 *         """int: 64-bit PDI value"""
*/

/* Python wrapper */
/* Python-level getter slot for the Pdi_v1.value property; forwards the
 * cast instance to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5value_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5value_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_5value___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Pdi_v1.value getter: boxes the 64-bit unsigned `value` field of the
 * wrapped nvmlPdi_v1_t as a Python int. Returns a new reference, or NULL
 * with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_5value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14249
 *     def value(self):
 *         """int: 64-bit PDI value"""
 *         return self._ptr[0].value             # <<<<<<<<<<<<<<
 * 
 *     @value.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14246
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value(self):
 *         """int: 64-bit PDI value"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.value.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14251
 *         return self._ptr[0].value
 * 
 *     @value.setter             # <<<<<<<<<<<<<<
 *     def value(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level setter slot for the Pdi_v1.value property; forwards the
 * cast instance and value to the implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_5value_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Pdi_v1.value setter: raises ValueError when the instance was created
 * read-only; otherwise converts `val` to unsigned long long (conversion
 * errors propagate) and stores it into the wrapped struct. Returns 0 on
 * success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_5value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14253
 *     @value.setter
 *     def value(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Pdi_v1 instance is read-only")
 *         self._ptr[0].value = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14254
 *     def value(self, val):
 *         if self._readonly:
 *             raise ValueError("This Pdi_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].value = val
 * 
 */
    /* Construct and raise the read-only ValueError. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_Pdi_v1_instance_is_read_onl};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14254, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14254, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14253
 *     @value.setter
 *     def value(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This Pdi_v1 instance is read-only")
 *         self._ptr[0].value = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14255
 *         if self._readonly:
 *             raise ValueError("This Pdi_v1 instance is read-only")
 *         self._ptr[0].value = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14255, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).value = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14251
 *         return self._ptr[0].value
 * 
 *     @value.setter             # <<<<<<<<<<<<<<
 *     def value(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.value.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14257
 *         self._ptr[0].value = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Pdi_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod Pdi_v1.from_data(data):
 * unpacks exactly one positional-or-keyword argument `data` (fastcall or
 * tuple calling convention depending on CYTHON_METH_FASTCALL) and forwards
 * it to the implementation. Returns NULL with an exception set on bad
 * arguments.
 *
 * FIX(review): the keyword-count error check read
 *   `if (unlikely(__pyx_kwds_len) < 0)`.
 * With the GCC/Clang definition of unlikely(x) as
 * __builtin_expect(!!(x), 0), that expression is `(0 or 1) < 0` — always
 * false — so a negative count from __Pyx_NumKwargs_FASTCALL was never
 * reported. The parenthesis is moved so the comparison is inside the
 * hint: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_12from_data, "Pdi_v1.from_data(data)\n\nCreate an Pdi_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `pdi_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside unlikely() so the error branch is reachable
     * (see FIX(review) note above). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14257, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 14257, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 14257, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14257, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14257, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.from_data(data): looks up the module-level
 * `pdi_v1_dtype` and delegates to the shared __from_data helper together
 * with the Pdi_v1 extension type, producing a Pdi_v1 that wraps the
 * array's storage. Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":14264
 *             data (_numpy.ndarray): a single-element array of dtype `pdi_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "pdi_v1_dtype", pdi_v1_dtype, Pdi_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_pdi_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_pdi_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14257
 *         self._ptr[0].value = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Pdi_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14266
 *         return __from_data(data, "pdi_v1_dtype", pdi_v1_dtype, Pdi_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Pdi_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod
 * Pdi_v1.from_ptr(ptr, readonly=False, owner=None): unpacks up to three
 * positional-or-keyword arguments, converts `ptr` to intptr_t via
 * PyLong_AsSsize_t and `readonly` via truth-testing, defaults `owner` to
 * None, then forwards to the implementation. Returns NULL with an
 * exception set on bad arguments.
 *
 * FIX(review): the keyword-count error check read
 *   `if (unlikely(__pyx_kwds_len) < 0)`.
 * With unlikely(x) defined as __builtin_expect(!!(x), 0) on GCC/Clang,
 * that is `(0 or 1) < 0` — always false — so a negative count from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis is moved
 * so the comparison is inside the hint. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_14from_ptr, "Pdi_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an Pdi_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside unlikely() so the error branch is reachable
     * (see FIX(review) note above). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14266, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 14266, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":14267
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an Pdi_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 14266, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14266, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14266, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* ptr is parsed as Py_ssize_t and stored as intptr_t (same width on
     * supported platforms). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14267, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14267, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 14266, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":14266
 *         return __from_data(data, "pdi_v1_dtype", pdi_v1_dtype, Pdi_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Pdi_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Cython staticmethod `Pdi_v1.from_ptr(ptr, readonly=False, owner=None)`
 * (generated from cuda/bindings/_nvml.pyx:14266).
 *
 * Semantics, as visible in the echoed .pyx source below:
 *   - raises ValueError if `ptr` is 0;
 *   - allocates a new Pdi_v1 wrapper object;
 *   - if `owner` is None: malloc's a private nvmlPdi_v1_t, memcpy's the
 *     pointee of `ptr` into it, and marks the struct as owned (`_owned = True`,
 *     freed elsewhere by the wrapper's lifecycle code, not visible here);
 *   - otherwise: aliases `ptr` directly and keeps a reference to `owner`
 *     so the underlying memory outlives this wrapper (`_owned = False`);
 *   - stores the `readonly` flag and returns the new object.
 *
 * Error paths jump to __pyx_L1_error, which DECREFs all live temporaries and
 * falls through to the common exit that releases the local `obj` reference.
 * NOTE(review): this is machine-generated Cython output; it must stay
 * token-identical to what Cython 3.2.2 emits — do not hand-edit the logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14275
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)
 */
  /* Guard: reject a null pointer before any allocation happens. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14276
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via the
     * vectorcall fast path; PyExc_ValueError is called directly (no method
     * unpacking needed for a builtin exception type). */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14276, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14276, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14275
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":14277
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 */
  /* Pdi_v1.__new__(Pdi_v1): calls tp_new directly, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Pdi_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14277, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14278
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if obj._ptr == NULL:
 */
  /* owner is None -> deep-copy branch: the wrapper takes ownership of a
   * freshly malloc'd copy of *ptr. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14279
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Pdi_v1")
 */
    __pyx_v_obj->_ptr = ((nvmlPdi_v1_t *)malloc((sizeof(nvmlPdi_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14280
 *         if owner is None:
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPdi_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14281
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Pdi_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPdi_v1_t))
 *             obj._owner = None
 */
      /* NOTE: `MemoryError` is looked up as a module global (not PyExc_MemoryError
       * directly) because Cython resolves plain-name lookups through the module
       * namespace; the PyMethod_Check unpack below is a generic calling pattern
       * Cython emits for any looked-up callable. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14281, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_Pdi_v1};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14281, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 14281, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14280
 *         if owner is None:
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPdi_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14282
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPdi_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the caller's struct into the freshly owned buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlPdi_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14283
 *                 raise MemoryError("Error allocating Pdi_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPdi_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14284
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlPdi_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlPdi_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14278
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlPdi_v1_t *>malloc(sizeof(nvmlPdi_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":14286
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlPdi_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* else-branch: alias the caller's memory and pin `owner` to keep it alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlPdi_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14287
 *         else:
 *             obj._ptr = <nvmlPdi_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":14288
 *             obj._ptr = <nvmlPdi_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":14289
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":14290
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* return obj: take a new reference for the return slot; the local `obj`
   * reference is released at the common exit below. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14266
 *         return __from_data(data, "pdi_v1_dtype", pdi_v1_dtype, Pdi_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Pdi_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython-facing wrapper for Pdi_v1.__reduce_cython__ (the auto-generated
 * pickling stub). Validates that the call carries zero positional and zero
 * keyword arguments, then delegates to the implementation function below.
 * The prototype, docstring, and PyMethodDef entry are emitted together so
 * the method can be registered on the Pdi_v1 type. Generated code — keep
 * token-identical to Cython 3.2.2 output. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_16__reduce_cython__, "Pdi_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the non-fastcall ABI, arguments arrive as a tuple whose size must be
   * queried (unsafely via the macro, or safely with an error check). */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__(self) takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__reduce_cython__: unconditionally raises
 * TypeError because the wrapped raw pointer (`self._ptr`) cannot be pickled.
 * This is Cython's standard anti-pickling stub for cdef classes holding
 * C-level state. Generated code — keep token-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* raise TypeError(<message>) — always taken; the function never returns a value. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython-facing wrapper for Pdi_v1.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one required argument (positional or keyword `__pyx_state`)
 * into `values[0]`, then delegates to the implementation. On any argument
 * error it jumps to __pyx_L3_error, releases the collected argument
 * references, records a traceback, and returns NULL. Generated code — keep
 * token-identical to Cython 3.2.2 output. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_18__setstate_cython__, "Pdi_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* Keyword name table: the single accepted keyword is `__pyx_state`. */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals first, then
       * fill the rest from keywords and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Pdi_v1.__setstate_cython__: unconditionally raises
 * TypeError (the state argument is ignored) because the wrapped raw pointer
 * cannot be restored from a pickle. Mirror of __reduce_cython__ above.
 * Generated code — keep token-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Pdi_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* raise TypeError(<message>) — always taken. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Pdi_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14293
 * 
 * 
 * cdef _get_device_power_mizer_modes_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDevicePowerMizerModes_v1_t pod = nvmlDevicePowerMizerModes_v1_t()
 *     return _numpy.dtype({
*/

/* Module-internal (cdef) helper that builds and returns the numpy structured
 * dtype describing nvmlDevicePowerMizerModes_v1_t. The dtype is constructed
 * from a dict of the form
 *   {'names': [...], 'formats': [uint32 x3], 'offsets': [...], 'itemsize': N}
 * where the offsets are computed at runtime from the actual C struct layout
 * (address of each field minus address of the struct), so the dtype always
 * matches the compiler's padding/alignment. Generated code — keep
 * token-identical to Cython 3.2.2 output. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_device_power_mizer_modes_v1_dtype_offsets(void) {
  nvmlDevicePowerMizerModes_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlDevicePowerMizerModes_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_device_power_mizer_modes_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14294
 * 
 * cdef _get_device_power_mizer_modes_v1_dtype_offsets():
 *     cdef nvmlDevicePowerMizerModes_v1_t pod = nvmlDevicePowerMizerModes_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['current_mode', 'mode', 'supported_power_mizer_modes'],
 */
  /* NOTE(review): __pyx_t_1 has no visible assignment before this copy (the
   * C++ value-initialization `nvmlDevicePowerMizerModes_v1_t()` appears to
   * have been elided). Harmless in practice because `pod` is only used for
   * field-address arithmetic below — its contents are never read — but
   * confirm against the Cython generator output. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":14295
 * cdef _get_device_power_mizer_modes_v1_dtype_offsets():
 *     cdef nvmlDevicePowerMizerModes_v1_t pod = nvmlDevicePowerMizerModes_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['current_mode', 'mode', 'supported_power_mizer_modes'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 */
  /* Resolve _numpy.dtype once; the dict argument is assembled below. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14296
 *     cdef nvmlDevicePowerMizerModes_v1_t pod = nvmlDevicePowerMizerModes_v1_t()
 *     return _numpy.dtype({
 *         'names': ['current_mode', 'mode', 'supported_power_mizer_modes'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 */
  /* dict key 'names': the three Python-visible field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_current_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_current_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_current_mode) != (0)) __PYX_ERR(0, 14296, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_mode) != (0)) __PYX_ERR(0, 14296, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_supported_power_mizer_modes);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_supported_power_mizer_modes);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_supported_power_mizer_modes) != (0)) __PYX_ERR(0, 14296, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14297
 *     return _numpy.dtype({
 *         'names': ['current_mode', 'mode', 'supported_power_mizer_modes'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.currentMode)) - (<intptr_t>&pod),
 */
  /* dict key 'formats': numpy.uint32 for each of the three fields (looked up
   * three separate times, as Cython emits one lookup per expression). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 14297, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14297, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 14297, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 14296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14299
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.currentMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.supportedPowerMizerModes)) - (<intptr_t>&pod),
 */
  /* dict key 'offsets': each entry is &field - &struct, i.e. offsetof()
   * computed via pointer arithmetic on the stack instance. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.currentMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14299, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":14300
 *         'offsets': [
 *             (<intptr_t>&(pod.currentMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.supportedPowerMizerModes)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.mode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14300, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":14301
 *             (<intptr_t>&(pod.currentMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.supportedPowerMizerModes)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlDevicePowerMizerModes_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.supportedPowerMizerModes)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14301, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":14298
 *         'names': ['current_mode', 'mode', 'supported_power_mizer_modes'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.currentMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14298, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 14298, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 14298, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 14296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":14303
 *             (<intptr_t>&(pod.supportedPowerMizerModes)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlDevicePowerMizerModes_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* dict key 'itemsize': the full C struct size, padding included, so numpy
   * views of raw NVML buffers have the correct record stride. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlDevicePowerMizerModes_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 14296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via the generic fastcall path. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14295, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14293
 * 
 * 
 * cdef _get_device_power_mizer_modes_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlDevicePowerMizerModes_v1_t pod = nvmlDevicePowerMizerModes_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_device_power_mizer_modes_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14320
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDevicePowerMizerModes_v1_t *>calloc(1, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* CPython tp_init wrapper for DevicePowerMizerModes_v1.__init__(self).
 * Rejects any positional or keyword arguments (the __init__ takes none),
 * then delegates to the implementation function. Returns 0 on success,
 * -1 on error, per the tp_init protocol. Generated code — keep
 * token-identical to Cython 3.2.2 output. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives a tuple; count its entries (safe or unsafe
   * variant depending on build flags). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) accepts no extra positional or keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.__init__: calloc's a zeroed
   nvmlDevicePowerMizerModes_v1_t as the backing struct (raising MemoryError
   on allocation failure) and marks the instance as owning it
   (_owner = None, _owned = True, _readonly = False).  The buffer is released
   in __dealloc__ when _owned is set.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":14321
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlDevicePowerMizerModes_v1_t *>calloc(1, sizeof(nvmlDevicePowerMizerModes_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 */
  /* calloc gives a zero-initialized struct. */
  __pyx_v_self->_ptr = ((nvmlDevicePowerMizerModes_v1_t *)calloc(1, (sizeof(nvmlDevicePowerMizerModes_v1_t))));

  /* "cuda/bindings/_nvml.pyx":14322
 *     def __init__(self):
 *         self._ptr = <nvmlDevicePowerMizerModes_v1_t *>calloc(1, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14323
 *         self._ptr = <nvmlDevicePowerMizerModes_v1_t *>calloc(1, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DevicePowerMizerModes_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError from the module globals and call it with the
       message string, then raise the resulting exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14323, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DevicePowerMize};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14323, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14323, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14322
 *     def __init__(self):
 *         self._ptr = <nvmlDevicePowerMizerModes_v1_t *>calloc(1, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":14324
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None (swap in None with full refcount bookkeeping). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":14325
 *             raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":14326
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":14320
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlDevicePowerMizerModes_v1_t *>calloc(1, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14328
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDevicePowerMizerModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for DevicePowerMizerModes_v1.__dealloc__: casts
   self to the extension-type struct and forwards to the implementation.
   NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this function;
   presumably __Pyx_KwValues_VARARGS expands to something that ignores its
   arguments in this build configuration — confirm against the Cython 3.2.2
   utility-code macros if touching this. */
static void __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__: if this instance owns its backing struct
   (_owned) and the pointer is non-NULL, clear self._ptr first and then
   free() the saved pointer.  Nulling before free avoids a dangling _ptr
   being observed through this object during teardown. */
static void __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  nvmlDevicePowerMizerModes_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlDevicePowerMizerModes_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":14330
 *     def __dealloc__(self):
 *         cdef nvmlDevicePowerMizerModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `self._owned and self._ptr != NULL`. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14331
 *         cdef nvmlDevicePowerMizerModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":14332
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":14333
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14330
 *     def __dealloc__(self):
 *         cdef nvmlDevicePowerMizerModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":14328
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlDevicePowerMizerModes_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":14335
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DevicePowerMizerModes_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr-level wrapper: casts self and forwards to the __repr__ impl.
   Returns a new reference to the repr string, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__: builds the f-string
   f"<{__name__}.DevicePowerMizerModes_v1 object at {hex(id(self))}>"
   by formatting __name__, computing hex(id(self)), and joining the five
   pieces (two interned literals, two formatted values, closing literal)
   with a pre-computed minimum length and max char width. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":14336
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.DevicePowerMizerModes_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".DevicePowerMizerModes_v1 object at " + hex + ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_DevicePowerMizerModes_v1_object;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 36 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14335
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.DevicePowerMizerModes_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14338
 *         return f"<{__name__}.DevicePowerMizerModes_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter-slot wrapper for the `ptr` property: casts self and forwards to
   the property's __get__ impl.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the address held in
   self._ptr as a Python int (via intptr_t -> PyLong). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14341
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14338
 *         return f"<{__name__}.DevicePowerMizerModes_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14343
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level implementation of the cdef method `_get_ptr`
   ("cuda/bindings/_nvml.pyx":14343): returns the address stored in
   self._ptr as an intptr_t.  No Python objects are touched, so no
   refcounting or error handling is needed. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":14344 — return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":14346
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int-slot wrapper for __int__: casts self and forwards to the impl.
   Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__: same value as the `ptr` property — the
   self._ptr address converted to a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":14347
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14346
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14349
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DevicePowerMizerModes_v1 other_
 *         if not isinstance(other, DevicePowerMizerModes_v1):
*/

/* Python wrapper */
/* Wrapper for __eq__(self, other): casts self and forwards both operands
   to the impl.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__: returns False if `other` is not a
   DevicePowerMizerModes_v1, otherwise compares the two backing structs
   byte-for-byte with memcmp. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":14351
 *     def __eq__(self, other):
 *         cdef DevicePowerMizerModes_v1 other_
 *         if not isinstance(other, DevicePowerMizerModes_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14352
 *         cdef DevicePowerMizerModes_v1 other_
 *         if not isinstance(other, DevicePowerMizerModes_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePowerMizerModes_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14351
 *     def __eq__(self, other):
 *         cdef DevicePowerMizerModes_v1 other_
 *         if not isinstance(other, DevicePowerMizerModes_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":14353
 *         if not isinstance(other, DevicePowerMizerModes_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePowerMizerModes_v1_t)) == 0)
 * 
 */
  /* NOTE(review): __Pyx_TypeTest here admits Py_None; the isinstance guard
     above already filtered None out, so this branch cannot see it in
     practice — but the memcmp below would dereference a None object's _ptr
     field if it ever did.  Any fix belongs in the generating .pyx. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1))))) __PYX_ERR(0, 14353, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":14354
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePowerMizerModes_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlDevicePowerMizerModes_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14354, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14349
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef DevicePowerMizerModes_v1 other_
 *         if not isinstance(other, DevicePowerMizerModes_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14356
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePowerMizerModes_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript-level wrapper for __setitem__(self, key, val): casts self
   and forwards all three operands to the impl.  Returns 0 / -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__:
   - obj[0] = <numpy.ndarray>  : malloc a fresh backing struct, copy
     sizeof(nvmlDevicePowerMizerModes_v1_t) bytes from the array's data
     pointer, take ownership, and mirror the array's writeable flag into
     _readonly (inverted).
   - any other key            : fall back to setattr(self, key, val).
   NOTE(review): the malloc below overwrites self->_ptr without freeing the
   buffer calloc'd in __init__ — a leak when this path runs on an
   already-initialized instance.  The fix belongs in the generating .pyx
   source, not in this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":14357
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, _numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14357, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 14357, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14358
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 */
    __pyx_v_self->_ptr = ((nvmlDevicePowerMizerModes_v1_t *)malloc((sizeof(nvmlDevicePowerMizerModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14359
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePowerMizerModes_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14360
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             self._owner = None
 */
      /* Raise MemoryError("Error allocating DevicePowerMizerModes_v1"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14360, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DevicePowerMize};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14360, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 14360, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14359
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePowerMizerModes_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14361
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePowerMizerModes_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read val.ctypes.data (the array's base address as a Python int) and
       copy one struct's worth of bytes from it. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14361, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14361, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14361, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlDevicePowerMizerModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14362
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14363
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14364
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* _readonly mirrors (the negation of) the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14364, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14364, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 14364, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":14357
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":14366
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 14366, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":14356
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlDevicePowerMizerModes_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14368
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current_mode(self):
 *         """int: OUT: the current powermizer mode."""
*/

/* Python wrapper */
/* Getter-slot wrapper for the `current_mode` property: casts self and
   forwards to the property's __get__ impl.  Returns a new reference or
   NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.current_mode.__get__ (pyx line
 * 14371): converts the C `currentMode` field (unsigned int) of the wrapped
 * struct to a Python int.  Returns a new reference, or NULL with an
 * exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14371
 *     def current_mode(self):
 *         """int: OUT: the current powermizer mode."""
 *         return self._ptr[0].currentMode             # <<<<<<<<<<<<<<
 * 
 *     @current_mode.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).currentMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14371, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14368
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current_mode(self):
 *         """int: OUT: the current powermizer mode."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.current_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14373
 *         return self._ptr[0].currentMode
 * 
 *     @current_mode.setter             # <<<<<<<<<<<<<<
 *     def current_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot for DevicePowerMizerModes_v1.current_mode.
 * Casts self to the extension-type struct and forwards the new value to
 * the implementation function; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.current_mode.__set__ (pyx line
 * 14374): raises ValueError if the instance was created read-only,
 * otherwise converts `val` to unsigned int and stores it into the wrapped
 * struct's `currentMode` field.  Returns 0 on success, -1 with an
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14375
 *     @current_mode.setter
 *     def current_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].currentMode = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14376
 *     def current_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].currentMode = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast-call helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DevicePowerMizerModes_v1_in};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14376, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14376, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14375
 *     @current_mode.setter
 *     def current_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].currentMode = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":14377
 *         if self._readonly:
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].currentMode = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14377, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).currentMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14373
 *         return self._ptr[0].currentMode
 * 
 *     @current_mode.setter             # <<<<<<<<<<<<<<
 *     def current_mode(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.current_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14379
 *         self._ptr[0].currentMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: IN: the powermizer mode to set."""
*/

/* Python wrapper */
/* Python-level __get__ slot for DevicePowerMizerModes_v1.mode.
 * Casts self to the extension-type struct and delegates to the
 * implementation function; returns its result directly. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.mode.__get__ (pyx line 14382):
 * converts the C `mode` field (unsigned int) of the wrapped struct to a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14382
 *     def mode(self):
 *         """int: IN: the powermizer mode to set."""
 *         return self._ptr[0].mode             # <<<<<<<<<<<<<<
 * 
 *     @mode.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).mode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14379
 *         self._ptr[0].currentMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: IN: the powermizer mode to set."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14384
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot for DevicePowerMizerModes_v1.mode.
 * Casts self to the extension-type struct and forwards the new value to
 * the implementation function; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.mode.__set__ (pyx line 14385):
 * raises ValueError if the instance was created read-only, otherwise
 * converts `val` to unsigned int and stores it into the wrapped struct's
 * `mode` field.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14386
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].mode = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14387
 *     def mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].mode = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast-call helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DevicePowerMizerModes_v1_in};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14387, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14387, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14386
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].mode = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":14388
 *         if self._readonly:
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].mode = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14388, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).mode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14384
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14390
 *         self._ptr[0].mode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def supported_power_mizer_modes(self):
 *         """int: OUT: Bitmask of supported powermizer modes."""
*/

/* Python wrapper */
/* Python-level __get__ slot for
 * DevicePowerMizerModes_v1.supported_power_mizer_modes.  Casts self to the
 * extension-type struct and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.supported_power_mizer_modes
 * .__get__ (pyx line 14393): converts the C `supportedPowerMizerModes`
 * bitmask field (unsigned int) of the wrapped struct to a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14393
 *     def supported_power_mizer_modes(self):
 *         """int: OUT: Bitmask of supported powermizer modes."""
 *         return self._ptr[0].supportedPowerMizerModes             # <<<<<<<<<<<<<<
 * 
 *     @supported_power_mizer_modes.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).supportedPowerMizerModes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14390
 *         self._ptr[0].mode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def supported_power_mizer_modes(self):
 *         """int: OUT: Bitmask of supported powermizer modes."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.supported_power_mizer_modes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14395
 *         return self._ptr[0].supportedPowerMizerModes
 * 
 *     @supported_power_mizer_modes.setter             # <<<<<<<<<<<<<<
 *     def supported_power_mizer_modes(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot for
 * DevicePowerMizerModes_v1.supported_power_mizer_modes.  Casts self to the
 * extension-type struct and forwards the new value to the implementation
 * function; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.supported_power_mizer_modes
 * .__set__ (pyx line 14396): raises ValueError if the instance was created
 * read-only, otherwise converts `val` to unsigned int and stores it into
 * the wrapped struct's `supportedPowerMizerModes` field.  Returns 0 on
 * success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14397
 *     @supported_power_mizer_modes.setter
 *     def supported_power_mizer_modes(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].supportedPowerMizerModes = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14398
 *     def supported_power_mizer_modes(self, val):
 *         if self._readonly:
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].supportedPowerMizerModes = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast-call helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_DevicePowerMizerModes_v1_in};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14398, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14398, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14397
 *     @supported_power_mizer_modes.setter
 *     def supported_power_mizer_modes(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].supportedPowerMizerModes = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":14399
 *         if self._readonly:
 *             raise ValueError("This DevicePowerMizerModes_v1 instance is read-only")
 *         self._ptr[0].supportedPowerMizerModes = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14399, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).supportedPowerMizerModes = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14395
 *         return self._ptr[0].supportedPowerMizerModes
 * 
 *     @supported_power_mizer_modes.setter             # <<<<<<<<<<<<<<
 *     def supported_power_mizer_modes(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.supported_power_mizer_modes.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14401
 *         self._ptr[0].supportedPowerMizerModes = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method
 * DevicePowerMizerModes_v1.from_data(data): validates that exactly one
 * positional/keyword argument `data` was supplied, then delegates to the
 * implementation function.  Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12from_data, "DevicePowerMizerModes_v1.from_data(data)\n\nCreate an DevicePowerMizerModes_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `device_power_mizer_modes_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The generated form
     * `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of the
     * __builtin_expect-based unlikely() macro against 0, which is always
     * false, so a negative (error) result from __Pyx_NumKwargs_FASTCALL
     * would never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14401, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14401, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 14401, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 14401, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14401, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14401, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method DevicePowerMizerModes_v1.from_data
 * (pyx line 14408): looks up the module-global dtype object
 * `device_power_mizer_modes_v1_dtype` and delegates to the shared
 * __from_data helper along with the DevicePowerMizerModes_v1 type object.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":14408
 *             data (_numpy.ndarray): a single-element array of dtype `device_power_mizer_modes_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "device_power_mizer_modes_v1_dtype", device_power_mizer_modes_v1_dtype, DevicePowerMizerModes_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_device_power_mizer_modes_v1_dtyp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_device_power_mizer_modes_v1_dtyp, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14401
 *         self._ptr[0].supportedPowerMizerModes = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14410
 *         return __from_data(data, "device_power_mizer_modes_v1_dtype", device_power_mizer_modes_v1_dtype, DevicePowerMizerModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method
 * DevicePowerMizerModes_v1.from_ptr(ptr, readonly=False, owner=None):
 * parses 1-3 positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C bool, defaults `owner` to None, then delegates to the
 * implementation function.  Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_14from_ptr, "DevicePowerMizerModes_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an DevicePowerMizerModes_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The generated form
     * `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of the
     * __builtin_expect-based unlikely() macro against 0, which is always
     * false, so a negative (error) result from __Pyx_NumKwargs_FASTCALL
     * would never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14410, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14410, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14410, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14410, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 14410, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":14411
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 14410, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14410, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14410, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14410, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): `ptr` is converted with PyLong_AsSsize_t and cast to
     * intptr_t — assumes Py_ssize_t and intptr_t have the same width on
     * supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14411, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14411, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 14410, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":14410
 *         return __from_data(data, "device_power_mizer_modes_v1_dtype", device_power_mizer_modes_v1_dtype, DevicePowerMizerModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.from_ptr(ptr, readonly=False, owner=None).
 *
 * Wraps a raw nvmlDevicePowerMizerModes_v1_t pointer in a new Python object:
 *   - ptr == 0                -> raises ValueError;
 *   - owner is None           -> the struct is deep-copied into freshly
 *                                malloc'd memory (obj._owned = True, the
 *                                object frees it later elsewhere);
 *   - owner is not None       -> the object aliases the caller's memory and
 *                                holds a reference to `owner` to keep the
 *                                backing storage alive (obj._owned = False).
 *
 * NOTE(review): machine-generated by Cython; the exact ordering of the
 * refcount macros (INCREF/GIVEREF/GOTREF/DECREF) is significant under
 * RefNanny debug builds, so the code is left byte-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;                       /* scratch: boolean test results */
  PyObject *__pyx_t_2 = NULL;          /* scratch: new object / exception instance */
  PyObject *__pyx_t_3 = NULL;          /* scratch: bound-method self for vectorcall */
  size_t __pyx_t_4;                    /* scratch: vectorcall argument offset (0 or 1) */
  PyObject *__pyx_t_5 = NULL;          /* scratch: looked-up callable (MemoryError) */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14419
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14420
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)
 *         if owner is None:
 */
    /* Vectorcall of the ValueError type with one positional arg (the message).
     * __pyx_t_4 == 1 means callargs[0] (the "self" slot) is unused/NULL and
     * the call starts at callargs+1 with PY_VECTORCALL_ARGUMENTS_OFFSET set. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14420, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14420, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14419
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":14421
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 */
  /* Allocate the extension object directly via its tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14421, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14422
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14423
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 */
    /* Owning path: copy the caller's struct into our own allocation. */
    __pyx_v_obj->_ptr = ((nvmlDevicePowerMizerModes_v1_t *)malloc((sizeof(nvmlDevicePowerMizerModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14424
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePowerMizerModes_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14425
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             obj._owner = None
 */
      /* "MemoryError" is resolved by name through the module globals (so a
       * module-level override would be honored) and called via vectorcall. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14425, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_DevicePowerMize};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14425, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 14425, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14424
 *         if owner is None:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePowerMizerModes_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14426
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePowerMizerModes_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlDevicePowerMizerModes_v1_t))));

    /* "cuda/bindings/_nvml.pyx":14427
 *                 raise MemoryError("Error allocating DevicePowerMizerModes_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    /* Replace the slot's previous value (set by tp_new) with None. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14428
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14422
 *             raise ValueError("ptr must not be null (0)")
 *         cdef DevicePowerMizerModes_v1 obj = DevicePowerMizerModes_v1.__new__(DevicePowerMizerModes_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>malloc(sizeof(nvmlDevicePowerMizerModes_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":14430
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Non-owning path: alias the caller's memory and pin `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlDevicePowerMizerModes_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14431
 *         else:
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":14432
 *             obj._ptr = <nvmlDevicePowerMizerModes_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":14433
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":14434
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14410
 *         return __from_data(data, "device_power_mizer_modes_v1_dtype", device_power_mizer_modes_v1_dtype, DevicePowerMizerModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop any live temporaries, record a traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Argument-unpacking wrapper for DevicePowerMizerModes_v1.__reduce_cython__(self).
 * Takes no arguments: any positional or keyword argument is rejected before
 * delegating to the implementation (which unconditionally raises TypeError
 * to block pickling). Machine-generated by Cython; left byte-identical. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_16__reduce_cython__, "DevicePowerMizerModes_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the non-fastcall ABI the positional count must be computed from the
   * args tuple; PyTuple_Size can fail on a non-tuple, hence the < 0 check. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Method accepts exactly zero arguments; reject extras up front. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.__reduce_cython__(self).
 * Unconditionally raises TypeError: the object holds a raw C pointer
 * (self._ptr) that cannot be pickled. Always returns NULL with an
 * exception set. Machine-generated by Cython; left byte-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-unpacking wrapper for DevicePowerMizerModes_v1.__setstate_cython__(self, __pyx_state).
 * Accepts exactly one argument (positionally or as keyword `__pyx_state`),
 * then delegates to the implementation (which unconditionally raises
 * TypeError to block unpickling).
 *
 * FIX(review): the negative-length check on __pyx_kwds_len previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`, which compares the result of the
 * unlikely()/__builtin_expect macro (always 0 or 1) against 0 and is
 * therefore never true — an error return from __Pyx_NumKwargs_FASTCALL
 * would be silently ignored. The comparison now sits inside the macro,
 * matching the pattern used by every other wrapper in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_18__setstate_cython__, "DevicePowerMizerModes_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — merge positionals, then parse
       * keywords and verify every required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of DevicePowerMizerModes_v1.__setstate_cython__(self, __pyx_state).
 * Unconditionally raises TypeError: the object holds a raw C pointer
 * (self._ptr) that cannot be restored from pickled state. Always returns
 * NULL with an exception set. Machine-generated by Cython; left byte-identical. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.DevicePowerMizerModes_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14437
 * 
 * 
 * cdef _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramUniqueUncorrectedErrorEntry_v1_t pod = nvmlEccSramUniqueUncorrectedErrorEntry_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets(void) {
  nvmlEccSramUniqueUncorrectedErrorEntry_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlEccSramUniqueUncorrectedErrorEntry_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14438
 * 
 * cdef _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets():
 *     cdef nvmlEccSramUniqueUncorrectedErrorEntry_v1_t pod = nvmlEccSramUniqueUncorrectedErrorEntry_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['unit', 'location', 'sublocation', 'extlocation', 'address', 'is_parity', 'count'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":14439
 * cdef _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets():
 *     cdef nvmlEccSramUniqueUncorrectedErrorEntry_v1_t pod = nvmlEccSramUniqueUncorrectedErrorEntry_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['unit', 'location', 'sublocation', 'extlocation', 'address', 'is_parity', 'count'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14439, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14439, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14440
 *     cdef nvmlEccSramUniqueUncorrectedErrorEntry_v1_t pod = nvmlEccSramUniqueUncorrectedErrorEntry_v1_t()
 *     return _numpy.dtype({
 *         'names': ['unit', 'location', 'sublocation', 'extlocation', 'address', 'is_parity', 'count'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14440, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14440, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_unit);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_unit);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_unit) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_location);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_location);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_location) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sublocation);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sublocation);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_sublocation) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_extlocation);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_extlocation);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_extlocation) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_address);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_address);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_address) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_parity);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_parity);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_is_parity) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_count) != (0)) __PYX_ERR(0, 14440, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14440, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14441
 *     return _numpy.dtype({
 *         'names': ['unit', 'location', 'sublocation', 'extlocation', 'address', 'is_parity', 'count'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.unit)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 14441, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 14440, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14443
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.unit)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.location)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sublocation)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.unit)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":14444
 *         'offsets': [
 *             (<intptr_t>&(pod.unit)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.location)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sublocation)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.extlocation)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.location)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 14444, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":14445
 *             (<intptr_t>&(pod.unit)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.location)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sublocation)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.extlocation)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.address)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sublocation)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 14445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":14446
 *             (<intptr_t>&(pod.location)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sublocation)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.extlocation)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.address)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isParity)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.extlocation)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 14446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":14447
 *             (<intptr_t>&(pod.sublocation)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.extlocation)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.address)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.address)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":14448
 *             (<intptr_t>&(pod.extlocation)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.address)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isParity)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isParity)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":14449
 *             (<intptr_t>&(pod.address)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isParity)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.count)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":14442
 *         'names': ['unit', 'location', 'sublocation', 'extlocation', 'address', 'is_parity', 'count'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.unit)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.location)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14442, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 14442, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 14440, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":14451
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 14440, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14439, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14437
 * 
 * 
 * cdef _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramUniqueUncorrectedErrorEntry_v1_t pod = nvmlEccSramUniqueUncorrectedErrorEntry_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14473
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/*
 * tp_init entry point for EccSramUniqueUncorrectedErrorEntry_v1.__init__.
 * Unpacks one optional positional-or-keyword argument `size` (defaulting to
 * the cached module-level int 1) from (__pyx_args, __pyx_kwds), then forwards
 * to the __pyx_pf_* implementation.  Returns 0 on success, -1 with a Python
 * exception set on failure.
 *
 * This file is generated by Cython 3.2.2; the only hand edit here is the
 * kwargs-length error check (see the FIX comment below).
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely().  The original
     * `if (unlikely(__pyx_kwds_len) < 0)` evaluates
     * `__builtin_expect(!!(__pyx_kwds_len), 0) < 0`, which is never true, so a
     * negative (error) result from __Pyx_NumKwargs_VARARGS was silently
     * ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14473, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: take up to one positional, then merge
       * keywords; fill the default afterwards if `size` was not supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14473, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 14473, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only fast path: same unpacking without keyword parsing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14473, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 14473, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of EccSramUniqueUncorrectedErrorEntry_v1.__init__
 * (Python signature: __init__(self, size=1)).
 *
 * Steps, mirroring the .pyx source quoted in the banner comments below:
 *   1. arr = _numpy.empty(size, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *   2. self._data = arr.view(_numpy.recarray)
 *   3. assert self._data.itemsize == sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t)
 *      (only when Python assertions are enabled), raising AssertionError with a
 *      formatted message on mismatch.
 *
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Generated by Cython; do not edit by hand.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":14474
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t), \
 */
  /* Look up _numpy.empty and the module-level dtype global, then call
   * empty(size, dtype=...) via the vectorcall keyword builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14474, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14474, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14474, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `empty` resolved to a bound method, split it into (self, function) so
   * the call below can use the vectorcall fast path with self prepended. */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14474, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 14474, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14474, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14475
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) }"
 */
  /* Call arr.view(_numpy.recarray) by name via the fast method-call helper. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14475, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Swap the new recarray view into self._data, releasing the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14476
 *         arr = _numpy.empty(size, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) }"
 * 
 */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14476, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14476, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14476, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 14476, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":14477
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Assertion failed: build the f-string message from four pieces
       * (literal, formatted itemsize, literal, formatted sizeof) and raise
       * AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14477, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14477, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14477, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14477, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 14476, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 14476, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":14473
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14479
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/*
 * tp_repr slot entry for EccSramUniqueUncorrectedErrorEntry_v1: casts self to
 * the extension-type struct and forwards to the __pyx_pf_* implementation.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper; this
 * only compiles because __Pyx_KwValues_VARARGS is presumably a macro that
 * discards its arguments — confirm against the Cython utility code in this
 * file's preamble.  Generated by Cython; do not edit by hand.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of EccSramUniqueUncorrectedErrorEntry_v1.__repr__.
 *
 * If self._data.size > 1 it returns
 *   f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"
 * otherwise
 *   f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1 object at {hex(id(self))}>".
 * Each f-string is assembled by formatting the dynamic pieces, then joining
 * literal and formatted parts with __Pyx_PyUnicode_Join (the numeric third
 * argument is the precomputed total length: literal widths plus the dynamic
 * parts' lengths).  Returns a new unicode reference, or NULL with an
 * exception set on failure.  Generated by Cython; do not edit by hand.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":14480
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate `self._data.size > 1` via rich comparison against the cached
   * int 1, then coerce to a C truth value. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14480, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 14480, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":14481
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1 object at {hex(id(self))}>"
 */
    /* Array branch: format __name__, self._data.size, and str(hex(id(self))),
     * then join 7 parts (4 literals + 3 dynamic). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_EccSramUniqueUncorrectedErrorEn;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 45 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14481, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14480
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":14483
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Scalar branch: same construction with 5 parts (3 literals + 2 dynamic). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_EccSramUniqueUncorrectedErrorEn_2;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 49 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":14479
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14485
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/*
 * Property getter slot for EccSramUniqueUncorrectedErrorEntry_v1.ptr: casts
 * self to the extension-type struct and forwards to the __pyx_pf_*
 * implementation.  NOTE(review): __pyx_args/__pyx_nargs are not declared
 * here; this relies on __Pyx_KwValues_VARARGS being a macro that discards its
 * arguments — confirm against the Cython utility code in this file's
 * preamble.  Generated by Cython; do not edit by hand.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the `ptr` property getter: returns the Python object
 * produced by the attribute chain self._data.ctypes.data (per the .pyx
 * docstring, the pointer address of the underlying numpy buffer as a Python
 * int).  Returns a new reference, or NULL with an exception set on failure.
 * Generated by Cython; do not edit by hand.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14488
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Two chained attribute lookups: self._data.ctypes, then .data. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14485
 *             return f"<{__name__}.EccSramUniqueUncorrectedErrorEntry_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14490
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/*
 * C-level helper for the cdef method _get_ptr: fetches self._data.ctypes.data
 * and converts it to intptr_t via PyLong_AsSsize_t.
 * NOTE(review): this assumes intptr_t and Py_ssize_t have compatible range on
 * supported platforms — confirm against the Cython-generated conversion
 * helpers used elsewhere in this file.
 * On failure it records a traceback frame and returns 0 with the exception
 * still set (0 is also a legal success value; callers presumably check the
 * error state — confirm at call sites).  Generated by Cython; do not edit by
 * hand.
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14491
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Convert the Python int address to a C integer; -1 with a live exception
   * signals conversion failure. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14491, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14490
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14493
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/*
 * nb_int slot entry for EccSramUniqueUncorrectedErrorEntry_v1.__int__: casts
 * self to the extension-type struct and forwards to the __pyx_pf_*
 * implementation.  NOTE(review): __pyx_args/__pyx_nargs are not declared
 * here; this relies on __Pyx_KwValues_VARARGS being a macro that discards its
 * arguments — confirm against the Cython utility code in this file's
 * preamble.  Generated by Cython; do not edit by hand.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of `__int__` (_nvml.pyx:14493).
 * Raises TypeError when self._data.size > 1 (int() is only meaningful for a
 * single-element view; callers wanting an array's address use .ptr instead),
 * otherwise returns `self._data.ctypes.data` — the buffer's base address —
 * as a Python int.  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":14494
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 14494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":14495
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Vectorcall TypeError(msg) and raise it; this branch never falls through. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14495, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14495, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14494
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":14497
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Return self._data.ctypes.data as the result object (a Python int). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14493
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14499
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* Slot-level wrapper for __len__ (sq_length/mp_length): casts self and
 * forwards to the implementation.  Generated boilerplate. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of `__len__` (_nvml.pyx:14499).
 * Returns `self._data.size` coerced to Py_ssize_t via __index__.
 * Returns -1 with an exception set on error (the CPython len-slot
 * convention). */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":14500
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14500, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 14500, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14499
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14502
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Slot-level wrapper for __eq__: casts self and forwards (self, other) to
 * the implementation.  Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of `__eq__` (_nvml.pyx:14502).
 * Short-circuit guard: returns False when `other` is not an
 * EccSramUniqueUncorrectedErrorEntry_v1, or when the two `_data` objects
 * differ in .size or .dtype.  Otherwise returns
 * bool((self._data == other._data).all()) — an elementwise comparison
 * reduced with .all().  Returns NULL with an exception set on error.
 * Note: unlike the usual rich-compare convention this returns False (not
 * NotImplemented) for foreign types — that is the .pyx author's choice. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":14503
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Cache self._data in a local to avoid repeated attribute lookups. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14504
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuit `or` chain: each clause jumps to L4 as soon as one is true. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 14504, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14505
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14504
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":14506
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Elementwise (self_data == other._data), reduced via the result's .all()
   * method, then coerced to an exact Python bool via double negation. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14506, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14506, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14506, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14506, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14502
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, EccSramUniqueUncorrectedErrorEntry_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14508
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def unit(self):
 *         """Union[~_numpy.uint32, int]: the SRAM unit index"""
*/

/* Python wrapper */
/* Property-getter wrapper for `unit`: casts self and forwards to the
 * implementation.  Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of the `unit` property getter (_nvml.pyx:14508).
 * Scalar convenience: when self._data.size == 1, returns
 * int(self._data.unit[0]); otherwise returns the `unit` field array as-is.
 * Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14511
 *     def unit(self):
 *         """Union[~_numpy.uint32, int]: the SRAM unit index"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.unit[0])
 *         return self._data.unit
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14511, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14512
 *         """Union[~_numpy.uint32, int]: the SRAM unit index"""
 *         if self._data.size == 1:
 *             return int(self._data.unit[0])             # <<<<<<<<<<<<<<
 *         return self._data.unit
 * 
 */
    /* self._data.unit[0] unwrapped and coerced to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_unit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14512, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14512, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14512, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14511
 *     def unit(self):
 *         """Union[~_numpy.uint32, int]: the SRAM unit index"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.unit[0])
 *         return self._data.unit
 */
  }

  /* "cuda/bindings/_nvml.pyx":14513
 *         if self._data.size == 1:
 *             return int(self._data.unit[0])
 *         return self._data.unit             # <<<<<<<<<<<<<<
 * 
 *     @unit.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_unit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14508
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def unit(self):
 *         """Union[~_numpy.uint32, int]: the SRAM unit index"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.unit.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14515
 *         return self._data.unit
 * 
 *     @unit.setter             # <<<<<<<<<<<<<<
 *     def unit(self, val):
 *         self._data.unit = val
*/

/* Python wrapper */
/* Property-setter wrapper for `unit`: casts self and forwards (self, val)
 * to the implementation.  Generated boilerplate. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of the `unit` property setter (_nvml.pyx:14515).
 * Delegates to `self._data.unit = val`; returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14517
 *     @unit.setter
 *     def unit(self, val):
 *         self._data.unit = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_unit, __pyx_v_val) < (0)) __PYX_ERR(0, 14517, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14515
 *         return self._data.unit
 * 
 *     @unit.setter             # <<<<<<<<<<<<<<
 *     def unit(self, val):
 *         self._data.unit = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.unit.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14519
 *         self._data.unit = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def location(self):
 *         """Union[~_numpy.uint32, int]: the error location within the SRAM unit"""
*/

/* Python wrapper */
/* Property-getter wrapper for `location`: casts self and forwards to the
 * implementation.  Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of the `location` property getter (_nvml.pyx:14519).
 * Same pattern as the `unit` getter: when self._data.size == 1, returns
 * int(self._data.location[0]); otherwise returns the `location` field array
 * as-is.  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14522
 *     def location(self):
 *         """Union[~_numpy.uint32, int]: the error location within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.location[0])
 *         return self._data.location
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14522, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14522, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14523
 *         """Union[~_numpy.uint32, int]: the error location within the SRAM unit"""
 *         if self._data.size == 1:
 *             return int(self._data.location[0])             # <<<<<<<<<<<<<<
 *         return self._data.location
 * 
 */
    /* self._data.location[0] unwrapped and coerced to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_location); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14523, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14523, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14523, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14522
 *     def location(self):
 *         """Union[~_numpy.uint32, int]: the error location within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.location[0])
 *         return self._data.location
 */
  }

  /* "cuda/bindings/_nvml.pyx":14524
 *         if self._data.size == 1:
 *             return int(self._data.location[0])
 *         return self._data.location             # <<<<<<<<<<<<<<
 * 
 *     @location.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_location); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14524, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14519
 *         self._data.unit = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def location(self):
 *         """Union[~_numpy.uint32, int]: the error location within the SRAM unit"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.location.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14526
 *         return self._data.location
 * 
 *     @location.setter             # <<<<<<<<<<<<<<
 *     def location(self, val):
 *         self._data.location = val
*/

/* Python wrapper */
/* Property-setter wrapper for `location`: casts self and forwards
 * (self, val) to the implementation.  Generated boilerplate. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Generated body of the `location` property setter (_nvml.pyx:14526).
 * Delegates to `self._data.location = val`; returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14528
 *     @location.setter
 *     def location(self, val):
 *         self._data.location = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_location, __pyx_v_val) < (0)) __PYX_ERR(0, 14528, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14526
 *         return self._data.location
 * 
 *     @location.setter             # <<<<<<<<<<<<<<
 *     def location(self, val):
 *         self._data.location = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.location.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14530
 *         self._data.location = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sublocation(self):
 *         """Union[~_numpy.uint32, int]: the error sublocation within the SRAM unit"""
*/

/* Python wrapper */
/* Property-getter wrapper for `sublocation`: casts self and forwards to the
 * implementation.  Generated boilerplate. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for EccSramUniqueUncorrectedErrorEntry_v1.sublocation.
 * Mirrors the .pyx source: if self._data.size == 1 it returns
 * int(self._data.sublocation[0]) (a plain Python int); otherwise it returns
 * the self._data.sublocation attribute object unchanged (presumably an array
 * view — confirm against the .pyx). Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;          /* scratch: attribute lookups / int() result */
  int __pyx_t_2;                        /* truth value of (size == 1); < 0 on error */
  PyObject *__pyx_t_3 = NULL;          /* scratch: element [0] before int() coercion */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14533
 *     def sublocation(self):
 *         """Union[~_numpy.uint32, int]: the error sublocation within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sublocation[0])
 *         return self._data.sublocation
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14533, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14533, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14534
 *         """Union[~_numpy.uint32, int]: the error sublocation within the SRAM unit"""
 *         if self._data.size == 1:
 *             return int(self._data.sublocation[0])             # <<<<<<<<<<<<<<
 *         return self._data.sublocation
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sublocation); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14534, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Index element 0, then coerce to a Python int via int(...). */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14534, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14534, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14533
 *     def sublocation(self):
 *         """Union[~_numpy.uint32, int]: the error sublocation within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.sublocation[0])
 *         return self._data.sublocation
 */
  }

  /* "cuda/bindings/_nvml.pyx":14535
 *         if self._data.size == 1:
 *             return int(self._data.sublocation[0])
 *         return self._data.sublocation             # <<<<<<<<<<<<<<
 * 
 *     @sublocation.setter
 */
  /* size != 1: hand back the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sublocation); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14530
 *         self._data.sublocation = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sublocation(self):
 *         """Union[~_numpy.uint32, int]: the error sublocation within the SRAM unit"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.sublocation.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14537
 *         return self._data.sublocation
 * 
 *     @sublocation.setter             # <<<<<<<<<<<<<<
 *     def sublocation(self, val):
 *         self._data.sublocation = val
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `sublocation` property setter: casts self to
 * the extension-type struct and delegates to the setter implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter body for EccSramUniqueUncorrectedErrorEntry_v1.sublocation:
 * forwards the assignment to self._data.sublocation. Returns 0 on success,
 * -1 after recording a traceback on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14539
 *     @sublocation.setter
 *     def sublocation(self, val):
 *         self._data.sublocation = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sublocation, __pyx_v_val) < (0)) __PYX_ERR(0, 14539, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14537
 *         return self._data.sublocation
 * 
 *     @sublocation.setter             # <<<<<<<<<<<<<<
 *     def sublocation(self, val):
 *         self._data.sublocation = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.sublocation.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14541
 *         self._data.sublocation = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def extlocation(self):
 *         """Union[~_numpy.uint32, int]: the error extlocation within the SRAM unit"""
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `extlocation` property getter: casts self to
 * the extension-type struct and delegates to the getter implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for EccSramUniqueUncorrectedErrorEntry_v1.extlocation.
 * If self._data.size == 1, returns int(self._data.extlocation[0]); otherwise
 * returns the self._data.extlocation attribute object as-is. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;          /* scratch: attribute lookups / int() result */
  int __pyx_t_2;                        /* truth value of (size == 1); < 0 on error */
  PyObject *__pyx_t_3 = NULL;          /* scratch: element [0] before int() coercion */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14544
 *     def extlocation(self):
 *         """Union[~_numpy.uint32, int]: the error extlocation within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.extlocation[0])
 *         return self._data.extlocation
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14544, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14544, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14545
 *         """Union[~_numpy.uint32, int]: the error extlocation within the SRAM unit"""
 *         if self._data.size == 1:
 *             return int(self._data.extlocation[0])             # <<<<<<<<<<<<<<
 *         return self._data.extlocation
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_extlocation); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14545, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Index element 0, then coerce to a Python int via int(...). */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14545, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14545, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14544
 *     def extlocation(self):
 *         """Union[~_numpy.uint32, int]: the error extlocation within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.extlocation[0])
 *         return self._data.extlocation
 */
  }

  /* "cuda/bindings/_nvml.pyx":14546
 *         if self._data.size == 1:
 *             return int(self._data.extlocation[0])
 *         return self._data.extlocation             # <<<<<<<<<<<<<<
 * 
 *     @extlocation.setter
 */
  /* size != 1: hand back the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_extlocation); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14546, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14541
 *         self._data.sublocation = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def extlocation(self):
 *         """Union[~_numpy.uint32, int]: the error extlocation within the SRAM unit"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.extlocation.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14548
 *         return self._data.extlocation
 * 
 *     @extlocation.setter             # <<<<<<<<<<<<<<
 *     def extlocation(self, val):
 *         self._data.extlocation = val
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `extlocation` property setter: casts self to
 * the extension-type struct and delegates to the setter implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter body for EccSramUniqueUncorrectedErrorEntry_v1.extlocation:
 * forwards the assignment to self._data.extlocation. Returns 0 on success,
 * -1 after recording a traceback on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14550
 *     @extlocation.setter
 *     def extlocation(self, val):
 *         self._data.extlocation = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_extlocation, __pyx_v_val) < (0)) __PYX_ERR(0, 14550, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14548
 *         return self._data.extlocation
 * 
 *     @extlocation.setter             # <<<<<<<<<<<<<<
 *     def extlocation(self, val):
 *         self._data.extlocation = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.extlocation.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14552
 *         self._data.extlocation = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def address(self):
 *         """Union[~_numpy.uint32, int]: the error address within the SRAM unit"""
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `address` property getter: casts self to
 * the extension-type struct and delegates to the getter implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for EccSramUniqueUncorrectedErrorEntry_v1.address.
 * If self._data.size == 1, returns int(self._data.address[0]); otherwise
 * returns the self._data.address attribute object as-is. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;          /* scratch: attribute lookups / int() result */
  int __pyx_t_2;                        /* truth value of (size == 1); < 0 on error */
  PyObject *__pyx_t_3 = NULL;          /* scratch: element [0] before int() coercion */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14555
 *     def address(self):
 *         """Union[~_numpy.uint32, int]: the error address within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.address[0])
 *         return self._data.address
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14555, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14556
 *         """Union[~_numpy.uint32, int]: the error address within the SRAM unit"""
 *         if self._data.size == 1:
 *             return int(self._data.address[0])             # <<<<<<<<<<<<<<
 *         return self._data.address
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_address); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14556, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Index element 0, then coerce to a Python int via int(...). */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14556, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14556, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14555
 *     def address(self):
 *         """Union[~_numpy.uint32, int]: the error address within the SRAM unit"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.address[0])
 *         return self._data.address
 */
  }

  /* "cuda/bindings/_nvml.pyx":14557
 *         if self._data.size == 1:
 *             return int(self._data.address[0])
 *         return self._data.address             # <<<<<<<<<<<<<<
 * 
 *     @address.setter
 */
  /* size != 1: hand back the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_address); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14552
 *         self._data.extlocation = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def address(self):
 *         """Union[~_numpy.uint32, int]: the error address within the SRAM unit"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.address.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14559
 *         return self._data.address
 * 
 *     @address.setter             # <<<<<<<<<<<<<<
 *     def address(self, val):
 *         self._data.address = val
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `address` property setter: casts self to
 * the extension-type struct and delegates to the setter implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter body for EccSramUniqueUncorrectedErrorEntry_v1.address:
 * forwards the assignment to self._data.address. Returns 0 on success,
 * -1 after recording a traceback on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14561
 *     @address.setter
 *     def address(self, val):
 *         self._data.address = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_address, __pyx_v_val) < (0)) __PYX_ERR(0, 14561, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14559
 *         return self._data.address
 * 
 *     @address.setter             # <<<<<<<<<<<<<<
 *     def address(self, val):
 *         self._data.address = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.address.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14563
 *         self._data.address = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_parity(self):
 *         """Union[~_numpy.uint32, int]: if the SRAM error is parity or not"""
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `is_parity` property getter: casts self to
 * the extension-type struct and delegates to the getter implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for EccSramUniqueUncorrectedErrorEntry_v1.is_parity.
 * If self._data.size == 1, returns int(self._data.is_parity[0]); otherwise
 * returns the self._data.is_parity attribute object as-is. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;          /* scratch: attribute lookups / int() result */
  int __pyx_t_2;                        /* truth value of (size == 1); < 0 on error */
  PyObject *__pyx_t_3 = NULL;          /* scratch: element [0] before int() coercion */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14566
 *     def is_parity(self):
 *         """Union[~_numpy.uint32, int]: if the SRAM error is parity or not"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.is_parity[0])
 *         return self._data.is_parity
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14566, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14567
 *         """Union[~_numpy.uint32, int]: if the SRAM error is parity or not"""
 *         if self._data.size == 1:
 *             return int(self._data.is_parity[0])             # <<<<<<<<<<<<<<
 *         return self._data.is_parity
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_is_parity); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Index element 0, then coerce to a Python int via int(...). */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14566
 *     def is_parity(self):
 *         """Union[~_numpy.uint32, int]: if the SRAM error is parity or not"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.is_parity[0])
 *         return self._data.is_parity
 */
  }

  /* "cuda/bindings/_nvml.pyx":14568
 *         if self._data.size == 1:
 *             return int(self._data.is_parity[0])
 *         return self._data.is_parity             # <<<<<<<<<<<<<<
 * 
 *     @is_parity.setter
 */
  /* size != 1: hand back the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_is_parity); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14563
 *         self._data.address = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_parity(self):
 *         """Union[~_numpy.uint32, int]: if the SRAM error is parity or not"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.is_parity.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14570
 *         return self._data.is_parity
 * 
 *     @is_parity.setter             # <<<<<<<<<<<<<<
 *     def is_parity(self, val):
 *         self._data.is_parity = val
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `is_parity` property setter: casts self to
 * the extension-type struct and delegates to the setter implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter body for EccSramUniqueUncorrectedErrorEntry_v1.is_parity:
 * forwards the assignment to self._data.is_parity. Returns 0 on success,
 * -1 after recording a traceback on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14572
 *     @is_parity.setter
 *     def is_parity(self, val):
 *         self._data.is_parity = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_is_parity, __pyx_v_val) < (0)) __PYX_ERR(0, 14572, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14570
 *         return self._data.is_parity
 * 
 *     @is_parity.setter             # <<<<<<<<<<<<<<
 *     def is_parity(self, val):
 *         self._data.is_parity = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.is_parity.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14574
 *         self._data.is_parity = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """Union[~_numpy.uint32, int]: the error count at the same SRAM address"""
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `count` property getter: casts self to
 * the extension-type struct and delegates to the getter implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for EccSramUniqueUncorrectedErrorEntry_v1.count.
 * If self._data.size == 1, returns int(self._data.count[0]); otherwise
 * returns the self._data.count attribute object as-is. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;          /* scratch: attribute lookups / int() result */
  int __pyx_t_2;                        /* truth value of (size == 1); < 0 on error */
  PyObject *__pyx_t_3 = NULL;          /* scratch: element [0] before int() coercion */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14577
 *     def count(self):
 *         """Union[~_numpy.uint32, int]: the error count at the same SRAM address"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.count[0])
 *         return self._data.count
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14577, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14578
 *         """Union[~_numpy.uint32, int]: the error count at the same SRAM address"""
 *         if self._data.size == 1:
 *             return int(self._data.count[0])             # <<<<<<<<<<<<<<
 *         return self._data.count
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Index element 0, then coerce to a Python int via int(...). */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14577
 *     def count(self):
 *         """Union[~_numpy.uint32, int]: the error count at the same SRAM address"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.count[0])
 *         return self._data.count
 */
  }

  /* "cuda/bindings/_nvml.pyx":14579
 *         if self._data.size == 1:
 *             return int(self._data.count[0])
 *         return self._data.count             # <<<<<<<<<<<<<<
 * 
 *     @count.setter
 */
  /* size != 1: hand back the attribute object itself (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14574
 *         self._data.is_parity = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """Union[~_numpy.uint32, int]: the error count at the same SRAM address"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14581
 *         return self._data.count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         self._data.count = val
*/

/* Python wrapper */
/* Python wrapper */
/* Descriptor-slot wrapper for the `count` property setter: casts self to
 * the extension-type struct and delegates to the setter implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramUniqueUncorrectedErrorEntry_v1.count.__set__.
 * Executes the Python-level statement `self._data.count = val`, i.e. stores
 * `val` into the `count` field of the wrapped NumPy recarray via a generic
 * attribute set.  Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14583
 *     @count.setter
 *     def count(self, val):
 *         self._data.count = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_count, __pyx_v_val) < (0)) __PYX_ERR(0, 14583, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14581
 *         return self._data.count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         self._data.count = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14585
 *         self._data.count = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python wrapper for EccSramUniqueUncorrectedErrorEntry_v1.__getitem__
 * (mp_subscript slot).  Casts self to the extension-type struct and
 * delegates to the implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;  /* unused: subscript slot takes no keyword arguments */
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);  /* generator boilerplate; value is never read */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramUniqueUncorrectedErrorEntry_v1.__getitem__.
 * Python-level contract (see the inlined .pyx source comments below):
 *   - int key: bounds-check it against self._data.size, normalise a negative
 *     index, and return a new instance wrapping the one-element slice
 *     self._data[key_:key_+1] via the class's from_data() staticmethod;
 *   - any other key: index self._data directly; if the result is a
 *     numpy.recarray whose dtype equals
 *     ecc_sram_unique_uncorrected_error_entry_v1_dtype, re-wrap it via
 *     from_data(), otherwise return the raw result.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":14588
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* `isinstance(key, int)` is compiled to a direct PyLong_Check */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14589
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 14589, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":14590
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14590, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 14590, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":14591
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* short-circuit `or`: valid indices are -size..size-1 inclusive */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14592
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14592, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 14592, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14591
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":14593
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":14594
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      /* normalise negative index into 0..size-1 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":14593
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":14595
 *             if key_ < 0:
 *                 key_ += size
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
    __Pyx_INCREF(__pyx_t_5);
    /* slice self._data[key_:key_+1] with known C bounds (no wraparound/None handling needed) */
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14595, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14595, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14588
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":14596
 *                 key_ += size
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(out)
 */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14596, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":14597
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == ecc_sram_unique_uncorrected_error_entry_v1_dtype:             # <<<<<<<<<<<<<<
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(out)
 *         return out
 */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 14597, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14598
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14598, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14597
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == ecc_sram_unique_uncorrected_error_entry_v1_dtype:             # <<<<<<<<<<<<<<
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":14599
 *         if isinstance(out, _numpy.recarray) and out.dtype == ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 *             return EccSramUniqueUncorrectedErrorEntry_v1.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14585
 *         self._data.count = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  /* error path: drop live temporaries, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14601
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python wrapper for EccSramUniqueUncorrectedErrorEntry_v1.__setitem__
 * (mp_ass_subscript slot, assignment case).  Casts self and delegates to
 * the implementation.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;  /* unused: slot call has no keyword arguments */
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);  /* generator boilerplate; value is never read */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramUniqueUncorrectedErrorEntry_v1.__setitem__.
 * Executes `self._data[key] = val`, delegating item assignment directly to
 * the wrapped NumPy recarray.  Returns 0 on success, -1 with an exception
 * set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":14602
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 14602, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14601
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14604
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the @staticmethod
 * EccSramUniqueUncorrectedErrorEntry_v1.from_data(data).
 * Parses the METH_FASTCALL|METH_KEYWORDS argument vector: exactly one
 * positional-or-keyword argument named "data".  On success forwards to the
 * implementation function; on bad arguments raises TypeError via
 * __Pyx_RaiseArgtupleInvalid / __Pyx_ParseKeywords.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_14from_data, "EccSramUniqueUncorrectedErrorEntry_v1.from_data(data)\n\nCreate an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `ecc_sram_unique_uncorrected_error_entry_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields only 0 or 1, so the old
     * comparison `... < 0` was always false and a negative (error) return
     * from __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison
     * belongs inside the macro argument, matching the
     * `unlikely(__pyx_nargs < 0)` pattern used above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14604, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* mixed positional/keyword call: collect positional args, then merge
       * keywords and verify the single required argument is present */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14604, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 14604, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 14604, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* common fast path: exactly one positional argument, no keywords */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14604, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14604, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* argument-parsing failure: release any collected argument refs */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the @staticmethod
 * EccSramUniqueUncorrectedErrorEntry_v1.from_data(data).
 * Allocates a fresh instance via tp_new (bypassing __init__), validates
 * that `data` is a 1-D numpy.ndarray of dtype
 * ecc_sram_unique_uncorrected_error_entry_v1_dtype, then stores
 * data.view(numpy.recarray) into obj._data and returns the new instance.
 * Raises TypeError for a non-ndarray and ValueError for wrong ndim/dtype.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":14611
 *             data (_numpy.ndarray): a 1D array of dtype `ecc_sram_unique_uncorrected_error_entry_v1_dtype` holding the data.
 *         """
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* direct tp_new call: constructs the object without running __init__ */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14611, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14612
 *         """
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 14612, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":14613
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14613, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14613, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14612
 *         """
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":14614
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14614, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 14614, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":14615
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 *             raise ValueError("data array must be of dtype ecc_sram_unique_uncorrected_error_entry_v1_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14615, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14615, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14614
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":14616
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype ecc_sram_unique_uncorrected_error_entry_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14616, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14616, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14616, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 14616, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":14617
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 *             raise ValueError("data array must be of dtype ecc_sram_unique_uncorrected_error_entry_v1_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_ecc};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14617, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 14617, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14616
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype ecc_sram_unique_uncorrected_error_entry_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":14618
 *         if data.dtype != ecc_sram_unique_uncorrected_error_entry_v1_dtype:
 *             raise ValueError("data array must be of dtype ecc_sram_unique_uncorrected_error_entry_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14618, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* swap the new view into obj->_data, releasing whatever tp_new put there */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14620
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14604
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* error path: drop live temporaries, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14622
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(ptr, size=1, readonly=False).
 * Unpacks up to three arguments (positional and/or keyword), converts them to
 * C types (intptr_t, size_t, bint), applies defaults for the optional ones,
 * and delegates to the implementation __pyx_pf_..._16from_ptr. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_16from_ptr, "EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[i] holds a borrowed/owned reference to the i-th argument
   * (ptr, size, readonly); NULL means "not supplied, use default". */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only the value, not the comparison, so
     * this "< 0" check can never fire; confirm against upstream Cython
     * output whether the parenthesization is intended. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14622, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14622, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14622, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14622, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 14622, __pyx_L3_error)
      /* Verify required arguments (only `ptr`, index 0) were supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 14622, __pyx_L3_error) }
      }
    } else {
      /* Purely positional call: 1..3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14622, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14622, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14622, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert to C types.
     * NOTE(review): `ptr` is converted via PyLong_AsSsize_t — this assumes
     * intptr_t and Py_ssize_t share a representation on this platform;
     * confirm for targets where they differ. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14623, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 14623, __pyx_L3_error)
    } else {
      /* default: size=1 */
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14623, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":14623
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given pointer.
 * 
*/
      /* default: readonly=False */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  /* Wrong number of positional arguments. */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 14622, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: release collected argument references. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the implementation (no self: this is a staticmethod). */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":14622
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramUniqueUncorrectedErrorEntry_v1.from_ptr.
 * Validates that `ptr` is non-null, allocates a new instance, wraps the raw
 * memory [ptr, ptr + sizeof(struct) * size) in a memoryview (read-only or
 * writable depending on `readonly`), builds a NumPy ndarray over that buffer
 * with the struct dtype, and stores its recarray view in obj._data.
 * The caller's memory is NOT copied and must outlive the returned object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14631
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
*/
  /* Reject a null pointer up front — wrapping address 0 would be UB. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14632
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14632, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14632, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14631
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":14633
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
*/
  /* Allocate the wrapper instance via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14633, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14634
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) * size, flag)
*/
  /* Pick the memoryview access flag from `readonly`. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14634, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14634, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14636
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)
*/
  /* The flag went through a Python int (cdef object) and back to C int. */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14636, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":14635
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 obj = EccSramUniqueUncorrectedErrorEntry_v1.__new__(EccSramUniqueUncorrectedErrorEntry_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
*/
  /* Zero-copy view over the caller's memory; no ownership is taken. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14637
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
*/
  /* Vectorcall: numpy.ndarray(size, buffer=buf, dtype=<struct dtype>). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14637, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 14637, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 14637, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14637, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14638
 *             <char*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorEntry_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=ecc_sram_unique_uncorrected_error_entry_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
*/
  /* obj._data = data.view(numpy.recarray) — stored with reference transfer. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14638, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14638, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14638, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14640
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return the wrapper; obj keeps the memoryview alive via _data's buffer. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14622
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14469
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Getter-slot wrapper for the read-only `_data` attribute.  A getter
 * receives only `self` — unlike the METH_FASTCALL wrappers in this file
 * there is no argument vector or keyword dict to unpack.
 * FIX: the previous body declared `__pyx_kwvalues` and assigned it from
 * __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs), but neither `__pyx_args`
 * nor `__pyx_nargs` exists in this signature (undeclared identifiers →
 * compile error).  That bogus keyword bookkeeping is removed. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5_data_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Delegate to the implementation, which returns a new reference. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `_data` attribute getter: returns a new reference
 * to the object stored in self._data (the wrapped NumPy recarray view set
 * up by from_data/from_ptr). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Incref before handing the reference to the caller. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for __reduce_cython__ (pickle
 * support).  The method takes no arguments beyond `self`, so any
 * positional or keyword argument is rejected before delegating to the
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_18__reduce_cython__, "EccSramUniqueUncorrectedErrorEntry_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover the positional count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject any positional or keyword arguments. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__ (pickle support).  Builds the state
 * tuple (self._data, [__dict__ if non-empty]), decides whether the
 * __setstate__-based protocol is required, and returns the standard
 * (reconstructor, args[, state]) reduce tuple with checksum 0xa75e18a.
 * FIX: the `else` branch previously set use_setstate from the truthiness of
 * a cached constant tuple holding the condition's *source text*
 * (always true), so the constructor-state path was unreachable.  It now
 * evaluates `self._data is not None` as the pickle fragment intends. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  /* Append a non-empty instance __dict__ to the state, if present. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = self._data is not None
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = self._data is not None             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* FIX: evaluate `self._data is not None` directly.  The previous code
     * tested the truthiness of a constant 1-tuple containing the
     * condition's source text, which is always true. */
    __pyx_t_2 = (__pyx_v_self->_data != Py_None);
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = self._data is not None
 *     if use_setstate:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, state)
*/
    /* 3-tuple form: state is applied via __setstate__ after construction. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EccSramUniqueUnco); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = self._data is not None
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(self, __pyx_state)
*/
  /*else*/ {
    /* 2-tuple form: state is passed in the reconstructor arguments. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EccSramUniqueUnco); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Fastcall wrapper for
 * EccSramUniqueUncorrectedErrorEntry_v1.__setstate_cython__(self, __pyx_state).
 * Accepts exactly one argument, "__pyx_state" (positional or keyword),
 * then dispatches to the __pyx_pf_..._20__setstate_cython__ implementation
 * below.  All argument references collected in values[] are released on
 * every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_20__setstate_cython__, "EccSramUniqueUncorrectedErrorEntry_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first, then
       * let __Pyx_ParseKeywords fill any remaining slot from kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Common fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: checks that __pyx_state is a tuple
 * (raising TypeError for any other type, and for None because the helper's
 * parameter is declared `not None`), then forwards it to the generated
 * __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state helper.
 * Returns None on success, NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Type gate: exact tuple or None passes the check; None is then rejected
   * separately because the C helper takes a `tuple ... not None` argument. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14643
 * 
 * 
 * cdef _get_gpu_fabric_info_v3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuFabricInfo_v3_t pod = nvmlGpuFabricInfo_v3_t()
 *     return _numpy.dtype({
*/

/* Builds a numpy structured dtype mirroring nvmlGpuFabricInfo_v3_t:
 * field names, numpy scalar formats, byte offsets computed from a local
 * `pod` instance via address arithmetic, and itemsize = sizeof(struct).
 * Returns the new dtype object, or NULL with an exception set.
 *
 * FIX: the original copied `__pyx_t_1` into `pod` while `__pyx_t_1` was
 * still indeterminate (it is declared below with no initializer), which is
 * undefined behavior when the copy reads the uninitialized object.  The pyx
 * source (`pod = nvmlGpuFabricInfo_v3_t()`) value-initializes, so we
 * zero-fill `__pyx_t_1` first.  Observable behavior is unchanged: only the
 * addresses of pod's fields are used below, never their values. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_fabric_info_v3_dtype_offsets(void) {
  nvmlGpuFabricInfo_v3_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpuFabricInfo_v3_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_fabric_info_v3_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14644
 * 
 * cdef _get_gpu_fabric_info_v3_dtype_offsets():
 *     cdef nvmlGpuFabricInfo_v3_t pod = nvmlGpuFabricInfo_v3_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'cluster_uuid', 'status', 'clique_id', 'state', 'health_mask', 'health_summary'],
 */
  /* Zero-fill before the copy: reading an indeterminate __pyx_t_1 is UB,
   * and the pyx source value-initializes pod anyway. */
  memset(&__pyx_t_1, 0, sizeof(__pyx_t_1));
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":14645
 * cdef _get_gpu_fabric_info_v3_dtype_offsets():
 *     cdef nvmlGpuFabricInfo_v3_t pod = nvmlGpuFabricInfo_v3_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'cluster_uuid', 'status', 'clique_id', 'state', 'health_mask', 'health_summary'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.int32, _numpy.uint32, _numpy.uint8, _numpy.uint32, _numpy.uint8],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14645, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14645, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14646
 *     cdef nvmlGpuFabricInfo_v3_t pod = nvmlGpuFabricInfo_v3_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'cluster_uuid', 'status', 'clique_id', 'state', 'health_mask', 'health_summary'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.int32, _numpy.uint32, _numpy.uint8, _numpy.uint32, _numpy.uint8],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14646, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14646, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_cluster_uuid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_cluster_uuid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_cluster_uuid) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_status);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_status);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_status) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_clique_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_clique_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_clique_id) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_state);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_state);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_state) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_health_mask);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_health_mask);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_health_mask) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_health_summary);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_health_summary);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_health_summary) != (0)) __PYX_ERR(0, 14646, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14646, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14647
 *     return _numpy.dtype({
 *         'names': ['version', 'cluster_uuid', 'status', 'clique_id', 'state', 'health_mask', 'health_summary'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.int32, _numpy.uint32, _numpy.uint8, _numpy.uint32, _numpy.uint8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 14647, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 14646, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* Offsets: each entry is (&pod.field - &pod), i.e. offsetof() computed
   * through intptr_t arithmetic on the local instance. */
  /* "cuda/bindings/_nvml.pyx":14649
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.int32, _numpy.uint32, _numpy.uint8, _numpy.uint32, _numpy.uint8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.clusterUuid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":14650
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clusterUuid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cliqueId)) - (<intptr_t>&pod),
 */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.clusterUuid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 14650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":14651
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clusterUuid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.cliqueId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.status)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 14651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":14652
 *             (<intptr_t>&(pod.clusterUuid)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cliqueId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.healthMask)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.cliqueId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 14652, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":14653
 *             (<intptr_t>&(pod.status)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.cliqueId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.healthMask)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.healthSummary)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.state)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14653, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":14654
 *             (<intptr_t>&(pod.cliqueId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.healthMask)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.healthSummary)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.healthMask)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14654, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":14655
 *             (<intptr_t>&(pod.state)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.healthMask)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.healthSummary)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuFabricInfo_v3_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.healthSummary)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14655, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":14648
 *         'names': ['version', 'cluster_uuid', 'status', 'clique_id', 'state', 'health_mask', 'health_summary'],
 *         'formats': [_numpy.uint32, _numpy.uint8, _numpy.int32, _numpy.uint32, _numpy.uint8, _numpy.uint32, _numpy.uint8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clusterUuid)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14648, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 14648, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 14646, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":14657
 *             (<intptr_t>&(pod.healthSummary)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuFabricInfo_v3_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuFabricInfo_v3_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14657, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 14646, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall protocol; if dtype turned out
   * to be a bound method, unpack it so self goes in the argument slot. */
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14645, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14643
 * 
 * 
 * cdef _get_gpu_fabric_info_v3_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuFabricInfo_v3_t pod = nvmlGpuFabricInfo_v3_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_fabric_info_v3_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14674
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuFabricInfo_v3_t *>calloc(1, sizeof(nvmlGpuFabricInfo_v3_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for GpuFabricInfo_v3.__init__(self): the pyx signature
 * takes no arguments, so any positional or keyword argument is rejected
 * with a TypeError before the implementation runs.  Returns 0 on success,
 * -1 on error (tp_init convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Zero-argument signature: reject positionals, then any keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* GpuFabricInfo_v3.__init__ implementation: calloc's one zero-filled
 * nvmlGpuFabricInfo_v3_t (raising MemoryError on allocation failure) and
 * initializes the ownership flags: no external owner, this object owns
 * (and will free) the buffer, and the buffer is writable.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":14675
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpuFabricInfo_v3_t *>calloc(1, sizeof(nvmlGpuFabricInfo_v3_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuFabricInfo_v3")
 */
  __pyx_v_self->_ptr = ((nvmlGpuFabricInfo_v3_t *)calloc(1, (sizeof(nvmlGpuFabricInfo_v3_t))));

  /* "cuda/bindings/_nvml.pyx":14676
 *     def __init__(self):
 *         self._ptr = <nvmlGpuFabricInfo_v3_t *>calloc(1, sizeof(nvmlGpuFabricInfo_v3_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuFabricInfo_v3")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14677
 *         self._ptr = <nvmlGpuFabricInfo_v3_t *>calloc(1, sizeof(nvmlGpuFabricInfo_v3_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuFabricInfo_v3")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(msg) via a vectorcall; the
     * CYTHON_UNPACK_METHODS branch handles the (unlikely) case where the
     * looked-up "MemoryError" global is a bound method. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14677, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuFabricInfo_v};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14677, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14677, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14676
 *     def __init__(self):
 *         self._ptr = <nvmlGpuFabricInfo_v3_t *>calloc(1, sizeof(nvmlGpuFabricInfo_v3_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuFabricInfo_v3")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":14678
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuFabricInfo_v3")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":14679
 *             raise MemoryError("Error allocating GpuFabricInfo_v3")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":14680
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":14674
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuFabricInfo_v3_t *>calloc(1, sizeof(nvmlGpuFabricInfo_v3_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14682
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuFabricInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  nvmlGpuFabricInfo_v3_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGpuFabricInfo_v3_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":14684
 *     def __dealloc__(self):
 *         cdef nvmlGpuFabricInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14685
 *         cdef nvmlGpuFabricInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":14686
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":14687
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14684
 *     def __dealloc__(self):
 *         cdef nvmlGpuFabricInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":14682
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuFabricInfo_v3_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":14689
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuFabricInfo_v3 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5__repr__(PyObject *__pyx_v_self); /*proto*/
/* CPython-level wrapper for GpuFabricInfo_v3.__repr__: casts `self` to the
 * extension-type struct and delegates to the Cython implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation; it returns a new reference (or NULL on error). */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.__repr__.
 * Builds and returns the unicode string
 *   f"<{__name__}.GpuFabricInfo_v3 object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * the five f-string fragments in one pass. Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":14690
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpuFabricInfo_v3 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") — stringified module name. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) — the object's address rendered as "0x...". */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and interpolated fragments: "<", name, ".GpuFabricInfo_v3 object at ", hex, ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpuFabricInfo_v3_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 28 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14689
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuFabricInfo_v3 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14692
 *         return f"<{__name__}.GpuFabricInfo_v3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* CPython-level wrapper for the GpuFabricInfo_v3.ptr property getter:
 * casts `self` and delegates to the Cython implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuFabricInfo_v3.ptr property getter.
 * Returns the address held in self._ptr as a Python int
 * (cast through intptr_t, boxed with PyLong_FromSsize_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14695
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int; a NULL _ptr yields 0. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14692
 *         return f"<{__name__}.GpuFabricInfo_v3 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14697
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level helper (cdef _get_ptr): expose the struct pointer held in
 * self._ptr as an integer address. No Python objects are touched and
 * no error can occur, so this compiles to a plain cast-and-return. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":14700
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7__int__(PyObject *__pyx_v_self); /*proto*/
/* CPython-level wrapper for GpuFabricInfo_v3.__int__: casts `self` and
 * delegates to the Cython implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.__int__.
 * Same contract as the `ptr` property getter: returns the address in
 * self._ptr as a Python int (so int(obj) == obj.ptr). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":14701
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int; a NULL _ptr yields 0. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14700
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14703
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuFabricInfo_v3 other_
 *         if not isinstance(other, GpuFabricInfo_v3):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* CPython-level wrapper for GpuFabricInfo_v3.__eq__: casts `self` and
 * forwards `other` to the Cython implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.__eq__.
 * Returns False when `other` is not a GpuFabricInfo_v3; otherwise compares
 * the two underlying nvmlGpuFabricInfo_v3_t structs byte-for-byte with
 * memcmp and returns the resulting bool.
 * NOTE(review): neither _ptr is checked for NULL before memcmp — comparing
 * an uninitialized instance would dereference NULL. Mirrors the .pyx source;
 * confirm instances always carry a valid _ptr at comparison time. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":14705
 *     def __eq__(self, other):
 *         cdef GpuFabricInfo_v3 other_
 *         if not isinstance(other, GpuFabricInfo_v3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Type gate: anything that is not a GpuFabricInfo_v3 compares unequal. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14706
 *         cdef GpuFabricInfo_v3 other_
 *         if not isinstance(other, GpuFabricInfo_v3):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuFabricInfo_v3_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14705
 *     def __eq__(self, other):
 *         cdef GpuFabricInfo_v3 other_
 *         if not isinstance(other, GpuFabricInfo_v3):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":14707
 *         if not isinstance(other, GpuFabricInfo_v3):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuFabricInfo_v3_t)) == 0)
 * 
 */
  /* Downcast `other` to the extension-type struct (TypeTest re-verifies). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3))))) __PYX_ERR(0, 14707, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":14708
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuFabricInfo_v3_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Byte-wise struct comparison; equal contents => True. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpuFabricInfo_v3_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14703
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuFabricInfo_v3 other_
 *         if not isinstance(other, GpuFabricInfo_v3):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14710
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuFabricInfo_v3_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
/* CPython-level wrapper for GpuFabricInfo_v3.__setitem__: casts `self` and
 * forwards key/val to the Cython implementation. Returns 0 or -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.__setitem__.
 * Two behaviors, selected by (key, val):
 *   - obj[0] = numpy.ndarray: malloc a fresh nvmlGpuFabricInfo_v3_t, memcpy
 *     the array's bytes into it (from val.ctypes.data), take ownership, and
 *     mirror the array's writeable flag into self._readonly.
 *   - anything else: fall back to setattr(self, key, val) so field names can
 *     be assigned via item syntax.
 * NOTE(review): on the ndarray path the previous self._ptr is overwritten
 * without being freed even when self._owned was true — a potential leak if
 * an owning instance is re-assigned. This mirrors the .pyx source; confirm
 * intended usage is one-shot initialization. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":14711
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14711, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 14711, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14712
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 */
    /* Allocate a new struct; see NOTE(review) above re the old pointer. */
    __pyx_v_self->_ptr = ((nvmlGpuFabricInfo_v3_t *)malloc((sizeof(nvmlGpuFabricInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":14713
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuFabricInfo_v3_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14714
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuFabricInfo_v3_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating GpuFabricInfo_v3"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14714, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuFabricInfo_v};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14714, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 14714, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14713
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuFabricInfo_v3_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14715
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuFabricInfo_v3_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(struct) bytes from the ndarray's buffer (val.ctypes.data).
     * NOTE(review): assumes the array holds at least that many bytes — TODO confirm callers. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14715, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14715, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14715, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpuFabricInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":14716
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuFabricInfo_v3_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No foreign owner: this instance now owns the malloc'd copy. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14717
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuFabricInfo_v3_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14718
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's read-only state. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14718, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14718, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 14718, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":14711
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":14720
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback path: treat `key` as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 14720, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":14710
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuFabricInfo_v3_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14722
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: Structure version identifier (set to nvmlGpuFabricInfo_v2)"""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
/* CPython-level wrapper for the GpuFabricInfo_v3.version property getter:
 * casts `self` and delegates to the Cython implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuFabricInfo_v3.version property getter.
 * Reads the `version` field of the underlying struct and boxes it as a
 * Python int. NOTE(review): no NULL check on _ptr — assumes the instance
 * was initialized before field access; mirrors the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14725
 *     def version(self):
 *         """int: Structure version identifier (set to nvmlGpuFabricInfo_v2)"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14722
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: Structure version identifier (set to nvmlGpuFabricInfo_v2)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14727
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
/* CPython-level wrapper for the GpuFabricInfo_v3.version property setter:
 * casts `self`, forwards `val`. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuFabricInfo_v3.version property setter.
 * Raises ValueError when the instance is read-only; otherwise converts
 * `val` to unsigned int and stores it into the struct's `version` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14729
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].version = val
 */
  /* Guard: instances backed by a non-writeable buffer reject mutation. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14730
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14730, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14730, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14729
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14731
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned int (raises OverflowError/TypeError on bad input). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14731, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14727
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14733
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cluster_uuid(self):
 *         """~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_1__get__(PyObject *__pyx_v_self); /*proto*/
/* CPython-level wrapper for the GpuFabricInfo_v3.cluster_uuid property
 * getter: casts `self` and delegates to the Cython implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.cluster_uuid.__get__.
 *
 * Builds a cython.view.array of shape (16,), format "B", with
 * allocate_buffer=False, points its data at the 16-byte clusterUuid
 * field inside the wrapped nvmlGpuFabricInfo_v3 struct, and returns
 * numpy.asarray(arr) — i.e. a zero-copy uint8 view over the struct's
 * own memory.
 *
 * NOTE(review): the returned ndarray aliases self._ptr's storage;
 * presumably the owning object keeps that memory alive — confirm in
 * the .pyx class definition (outside this view). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14736
 *     def cluster_uuid(self):
 *         """~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs."""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].clusterUuid))
 *         return _numpy.asarray(arr)
 */
  /* Call view.array(...) with five keyword arguments via the vectorcall
   * keyword builder; allocate_buffer=False so no backing buffer is
   * allocated (data pointer is patched in below). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 5 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14736, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[3], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 14736, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_3, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 14736, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 14736, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 14736, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_allocate_buffer, Py_False, __pyx_t_5, __pyx_callargs+1, 4) < (0)) __PYX_ERR(0, 14736, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14736, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14737
 *         """~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs."""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].clusterUuid))             # <<<<<<<<<<<<<<
 *         return _numpy.asarray(arr)
 * 
 */
  /* Point the view directly at the struct field — zero-copy. */
  __pyx_v_arr->data = ((char *)(&(__pyx_v_self->_ptr[0]).clusterUuid));

  /* "cuda/bindings/_nvml.pyx":14738
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)
 *         arr.data = <char *>(&(self._ptr[0].clusterUuid))
 *         return _numpy.asarray(arr)             # <<<<<<<<<<<<<<
 * 
 *     @cluster_uuid.setter
 */
  /* Look up numpy.asarray and call it on the view.array; if asarray is a
   * bound method, unpack it to (self, function) so the vectorcall can pass
   * the receiver as the first positional slot. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14738, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14738, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_v_arr)};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14738, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14733
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def cluster_uuid(self):
 *         """~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs."""
 */

  /* function exit code */
  /* Error path: release any live temporaries, record the traceback, and
   * return NULL (CPython error convention for getters). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.cluster_uuid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14740
 *         return _numpy.asarray(arr)
 * 
 *     @cluster_uuid.setter             # <<<<<<<<<<<<<<
 *     def cluster_uuid(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.cluster_uuid.__set__
 * (tp_getset setter slot): casts self and forwards (self, val) to the
 * implementation. Returns 0 on success, -1 on error per the setter ABI. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.cluster_uuid.__set__.
 *
 * Raises ValueError if the instance is read-only; otherwise allocates a
 * 16-byte cython.view.array, copies the caller's value into it via
 * numpy (arr[:] = numpy.asarray(val, dtype=uint8)), and memcpy's the
 * staged bytes into the struct's clusterUuid field.
 *
 * NOTE(review): the memcpy length is sizeof(unsigned char) * len(val),
 * not a fixed 16 — presumably safe because the slice assignment into the
 * 16-element array above would already fail for mismatched lengths;
 * confirm against the memoryview slice-assignment semantics. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_array_obj *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  Py_ssize_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14742
 *     @cluster_uuid.setter
 *     def cluster_uuid(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  /* Guard: reject writes to read-only views of the underlying struct. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14743
 *     def cluster_uuid(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14743, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14743, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14742
 *     @cluster_uuid.setter
 *     def cluster_uuid(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":14744
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")             # <<<<<<<<<<<<<<
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].clusterUuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 */
  /* Staging buffer: unlike the getter this one DOES allocate (no
   * allocate_buffer=False), so the incoming value is converted into a
   * temporary 16-byte array before touching the struct. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned char))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14744, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14744, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[3], __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 14744, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_5, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 14744, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_B, __pyx_t_5, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 14744, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_5, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 14744, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14744, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_arr = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14745
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(&(self._ptr[0].clusterUuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 */
  /* Convert val to a uint8 ndarray, then slice-assign it into the
   * staging array (arr[:] = ...). */
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_3 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
    assert(__pyx_t_5);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_5);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_2, __pyx__function);
    __pyx_t_3 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, __pyx_v_val};
    __pyx_t_4 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14745, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_6, __pyx_t_4, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 14745, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_2, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14745, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (__Pyx_PyObject_SetSlice(((PyObject *)__pyx_v_arr), __pyx_t_1, 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[0], 0, 0, 1) < (0)) __PYX_ERR(0, 14745, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":14746
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c")
 *         arr[:] = _numpy.asarray(val, dtype=_numpy.uint8)
 *         memcpy(<void *>(&(self._ptr[0].clusterUuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Copy the staged bytes into the struct field; length taken from
   * len(val) (see NOTE at top of function). */
  __pyx_t_7 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 14746, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).clusterUuid)), ((void *)__pyx_v_arr->data), ((sizeof(unsigned char)) * __pyx_t_7)));

  /* "cuda/bindings/_nvml.pyx":14740
 *         return _numpy.asarray(arr)
 * 
 *     @cluster_uuid.setter             # <<<<<<<<<<<<<<
 *     def cluster_uuid(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.cluster_uuid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14748
 *         memcpy(<void *>(&(self._ptr[0].clusterUuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def status(self):
 *         """int: Probe Error status, if any. Must be checked only if Probe state returns "complete"."""
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.status.__get__: casts self
 * to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.status.__get__: reads the struct's
 * status field (an nvmlReturn_t), casts it to int, and returns it as a
 * Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14751
 *     def status(self):
 *         """int: Probe Error status, if any. Must be checked only if Probe state returns "complete"."""
 *         return <int>(self._ptr[0].status)             # <<<<<<<<<<<<<<
 * 
 *     @status.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).status)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14748
 *         memcpy(<void *>(&(self._ptr[0].clusterUuid)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def status(self):
 *         """int: Probe Error status, if any. Must be checked only if Probe state returns "complete"."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.status.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14753
 *         return <int>(self._ptr[0].status)
 * 
 *     @status.setter             # <<<<<<<<<<<<<<
 *     def status(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.status.__set__: casts self
 * and forwards (self, val) to the implementation; returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.status.__set__: raises ValueError
 * when the instance is read-only, otherwise converts val to a C int and
 * stores it in the struct's status field cast to nvmlReturn_t. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14755
 *     @status.setter
 *     def status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].status = <nvmlReturn_t><int>val
 */
  /* Guard: reject writes to read-only views of the underlying struct. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14756
 *     def status(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].status = <nvmlReturn_t><int>val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14756, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14756, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14755
 *     @status.setter
 *     def status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].status = <nvmlReturn_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14757
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].status = <nvmlReturn_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C int (may raise OverflowError/TypeError), then store. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14757, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).status = ((nvmlReturn_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":14753
 *         return <int>(self._ptr[0].status)
 * 
 *     @status.setter             # <<<<<<<<<<<<<<
 *     def status(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.status.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14759
 *         self._ptr[0].status = <nvmlReturn_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clique_id(self):
 *         """int: ID of the fabric clique to which this GPU belongs."""
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.clique_id.__get__: casts
 * self to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.clique_id.__get__: returns the
 * struct's cliqueId field (unsigned int) as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14762
 *     def clique_id(self):
 *         """int: ID of the fabric clique to which this GPU belongs."""
 *         return self._ptr[0].cliqueId             # <<<<<<<<<<<<<<
 * 
 *     @clique_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).cliqueId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14762, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14759
 *         self._ptr[0].status = <nvmlReturn_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clique_id(self):
 *         """int: ID of the fabric clique to which this GPU belongs."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.clique_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14764
 *         return self._ptr[0].cliqueId
 * 
 *     @clique_id.setter             # <<<<<<<<<<<<<<
 *     def clique_id(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.clique_id.__set__: casts
 * self and forwards (self, val) to the implementation; returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.clique_id.__set__: raises
 * ValueError when the instance is read-only, otherwise converts val to
 * a C unsigned int and stores it in the struct's cliqueId field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14766
 *     @clique_id.setter
 *     def clique_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].cliqueId = val
 */
  /* Guard: reject writes to read-only views of the underlying struct. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14767
 *     def clique_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].cliqueId = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14767, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14767, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14766
 *     @clique_id.setter
 *     def clique_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].cliqueId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14768
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].cliqueId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to C unsigned int (may raise), then store. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14768, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).cliqueId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14764
 *         return self._ptr[0].cliqueId
 * 
 *     @clique_id.setter             # <<<<<<<<<<<<<<
 *     def clique_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.clique_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14770
 *         self._ptr[0].cliqueId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def state(self):
 *         """int: Current Probe State of GPU registration process. See NVML_GPU_FABRIC_STATE_*."""
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.state.__get__: casts self
 * to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.state.__get__: reads the struct's
 * state field (nvmlGpuFabricState_t), casts it to unsigned char, and
 * returns it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14773
 *     def state(self):
 *         """int: Current Probe State of GPU registration process. See NVML_GPU_FABRIC_STATE_*."""
 *         return <unsigned char>(self._ptr[0].state)             # <<<<<<<<<<<<<<
 * 
 *     @state.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char(((unsigned char)(__pyx_v_self->_ptr[0]).state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14770
 *         self._ptr[0].cliqueId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def state(self):
 *         """int: Current Probe State of GPU registration process. See NVML_GPU_FABRIC_STATE_*."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.state.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14775
 *         return <unsigned char>(self._ptr[0].state)
 * 
 *     @state.setter             # <<<<<<<<<<<<<<
 *     def state(self, val):
 *         if self._readonly:
 */

/* Python wrapper */
/* CPython-facing entry for GpuFabricInfo_v3.state.__set__: casts self
 * and forwards (self, val) to the implementation; returns 0/-1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `state` property setter.  Raises ValueError when
 * the wrapper is read-only; otherwise converts the Python value to
 * unsigned char and stores it into the wrapped struct as an
 * nvmlGpuFabricState_t.  Returns 0 on success, -1 with an exception set
 * on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14777
 *     @state.setter
 *     def state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].state = <nvmlGpuFabricState_t><unsigned char>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14778
 *     def state(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].state = <nvmlGpuFabricState_t><unsigned char>val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14778, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14778, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14777
 *     @state.setter
 *     def state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].state = <nvmlGpuFabricState_t><unsigned char>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14779
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].state = <nvmlGpuFabricState_t><unsigned char>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned char)-1 is the error sentinel; PyErr_Occurred() disambiguates
   * it from a legitimate value of 255. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 14779, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).state = ((nvmlGpuFabricState_t)((unsigned char)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":14775
 *         return <unsigned char>(self._ptr[0].state)
 * 
 *     @state.setter             # <<<<<<<<<<<<<<
 *     def state(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.state.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14781
 *         self._ptr[0].state = <nvmlGpuFabricState_t><unsigned char>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def health_mask(self):
 *         """int: GPU Fabric health Status Mask. See NVML_GPU_FABRIC_HEALTH_MASK_*."""
*/

/* Python wrapper */
/* Python-level wrapper for the `health_mask` property getter: casts `self`
 * to the extension-type struct and delegates to the implementation below.
 * NOTE(review): __Pyx_KwValues_VARARGS is a non-evaluating macro here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `health_mask` property getter: reads the
 * unsigned-int healthMask field from the wrapped struct and returns it as
 * a new Python int, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14784
 *     def health_mask(self):
 *         """int: GPU Fabric health Status Mask. See NVML_GPU_FABRIC_HEALTH_MASK_*."""
 *         return self._ptr[0].healthMask             # <<<<<<<<<<<<<<
 * 
 *     @health_mask.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).healthMask); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14784, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14781
 *         self._ptr[0].state = <nvmlGpuFabricState_t><unsigned char>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def health_mask(self):
 *         """int: GPU Fabric health Status Mask. See NVML_GPU_FABRIC_HEALTH_MASK_*."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.health_mask.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14786
 *         return self._ptr[0].healthMask
 * 
 *     @health_mask.setter             # <<<<<<<<<<<<<<
 *     def health_mask(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `health_mask` property setter: casts `self`
 * and forwards to the typed implementation; 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `health_mask` property setter.  Raises ValueError
 * when the wrapper is read-only; otherwise converts the Python value to
 * unsigned int and stores it in the wrapped struct's healthMask field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14788
 *     @health_mask.setter
 *     def health_mask(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].healthMask = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14789
 *     def health_mask(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].healthMask = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14789, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14789, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14788
 *     @health_mask.setter
 *     def health_mask(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].healthMask = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14790
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].healthMask = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is the error sentinel; PyErr_Occurred() disambiguates
   * it from a legitimate value of UINT_MAX. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14790, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).healthMask = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14786
 *         return self._ptr[0].healthMask
 * 
 *     @health_mask.setter             # <<<<<<<<<<<<<<
 *     def health_mask(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.health_mask.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14792
 *         self._ptr[0].healthMask = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def health_summary(self):
 *         """int: GPU Fabric health summary. See NVML_GPU_FABRIC_HEALTH_SUMMARY_*."""
*/

/* Python wrapper */
/* Python-level wrapper for the `health_summary` property getter: casts
 * `self` to the extension-type struct and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `health_summary` property getter: reads the
 * unsigned-char healthSummary field from the wrapped struct and returns it
 * as a new Python int, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14795
 *     def health_summary(self):
 *         """int: GPU Fabric health summary. See NVML_GPU_FABRIC_HEALTH_SUMMARY_*."""
 *         return self._ptr[0].healthSummary             # <<<<<<<<<<<<<<
 * 
 *     @health_summary.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).healthSummary); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14792
 *         self._ptr[0].healthMask = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def health_summary(self):
 *         """int: GPU Fabric health summary. See NVML_GPU_FABRIC_HEALTH_SUMMARY_*."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.health_summary.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14797
 *         return self._ptr[0].healthSummary
 * 
 *     @health_summary.setter             # <<<<<<<<<<<<<<
 *     def health_summary(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `health_summary` property setter: casts
 * `self` and forwards to the typed implementation; 0 on success, -1 on
 * error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `health_summary` property setter.  Raises
 * ValueError when the wrapper is read-only; otherwise converts the Python
 * value to unsigned char and stores it in the wrapped struct's
 * healthSummary field.  Returns 0 on success, -1 with an exception set on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14799
 *     @health_summary.setter
 *     def health_summary(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].healthSummary = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14800
 *     def health_summary(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].healthSummary = val
 * 
 */
    /* Vectorcall ValueError with the interned message string, then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuFabricInfo_v3_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14800, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14800, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14799
 *     @health_summary.setter
 *     def health_summary(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].healthSummary = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14801
 *         if self._readonly:
 *             raise ValueError("This GpuFabricInfo_v3 instance is read-only")
 *         self._ptr[0].healthSummary = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned char)-1 is the error sentinel; PyErr_Occurred() disambiguates
   * it from a legitimate value of 255. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 14801, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).healthSummary = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14797
 *         return self._ptr[0].healthSummary
 * 
 *     @health_summary.setter             # <<<<<<<<<<<<<<
 *     def health_summary(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.health_summary.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14803
 *         self._ptr[0].healthSummary = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method GpuFabricInfo_v3.from_data(data).
 * Accepts exactly one argument (positional or keyword `data`), then forwards
 * to the typed implementation.  Returns a new reference, or NULL with an
 * exception set.
 *
 * FIX(review): the keyword-length error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With the GCC/Clang definition of
 * unlikely() as __builtin_expect(!!(x), 0), the macro result is 0 or 1, so
 * `< 0` could never be true and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored.  The parenthesis now wraps
 * the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12from_data, "GpuFabricInfo_v3.from_data(data)\n\nCreate an GpuFabricInfo_v3 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpu_fabric_info_v3_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved so a negative length is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14803, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14803, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 14803, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 14803, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14803, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14803, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop the references captured in values[] before leaving. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.from_data(data): looks up the
 * module-global `gpu_fabric_info_v3_dtype` and delegates to the shared
 * __from_data helper with the GpuFabricInfo_v3 type object.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":14810
 *             data (_numpy.ndarray): a single-element array of dtype `gpu_fabric_info_v3_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpu_fabric_info_v3_dtype", gpu_fabric_info_v3_dtype, GpuFabricInfo_v3)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_fabric_info_v3_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14810, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpu_fabric_info_v3_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14810, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14803
 *         self._ptr[0].healthSummary = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14812
 *         return __from_data(data, "gpu_fabric_info_v3_dtype", gpu_fabric_info_v3_dtype, GpuFabricInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * GpuFabricInfo_v3.from_ptr(ptr, readonly=False, owner=None): unpacks
 * 1-3 positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C bool, then forwards to the typed implementation.
 * Returns a new reference, or NULL with an exception set.
 *
 * FIX(review): the keyword-length error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With the GCC/Clang definition of
 * unlikely() as __builtin_expect(!!(x), 0), the macro result is 0 or 1, so
 * `< 0` could never be true and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored.  The parenthesis now wraps
 * the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14from_ptr, "GpuFabricInfo_v3.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpuFabricInfo_v3 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved so a negative length is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14812, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 14812, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":14813
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpuFabricInfo_v3 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 14812, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14812, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed Python objects to the C argument types. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14813, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14813, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 14812, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop the references captured in values[] before leaving. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":14812
 *         return __from_data(data, "gpu_fabric_info_v3_dtype", gpu_fabric_info_v3_dtype, GpuFabricInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `GpuFabricInfo_v3.from_ptr(ptr, readonly=False, owner=None)`
 * (generated by Cython from cuda/bindings/_nvml.pyx:14812 — edit the .pyx, not
 * this file).  Behavior shown by the code below:
 *   - ptr == 0 raises ValueError("ptr must not be null (0)").
 *   - owner is None: malloc's a private nvmlGpuFabricInfo_v3_t, memcpy's the
 *     caller's struct into it, sets _owner = None and _owned = True (the new
 *     object owns and will free the copy).
 *   - otherwise: aliases the caller's pointer directly, stores a reference to
 *     `owner` to keep the backing memory alive, _owned = False.
 *   - _readonly is set from `readonly` in both branches.
 * Returns a new reference to the constructed GpuFabricInfo_v3, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14821
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14822
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)
 *         if owner is None:
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14822, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14822, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14821
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)
*/
  }

  /* "cuda/bindings/_nvml.pyx":14823
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
*/
  /* Direct tp_new call bypasses __init__ (the wrapper fields are set below). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14823, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14824
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14825
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)
 *         if owner is None:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
*/
    __pyx_v_obj->_ptr = ((nvmlGpuFabricInfo_v3_t *)malloc((sizeof(nvmlGpuFabricInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":14826
 *         if owner is None:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuFabricInfo_v3_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14827
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuFabricInfo_v3_t))
 *             obj._owner = None
*/
      /* `MemoryError` is looked up as a module global (it may be shadowed),
       * then called; a bound-method unpack is attempted first when enabled. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14827, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuFabricInfo_v};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14827, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 14827, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14826
 *         if owner is None:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuFabricInfo_v3_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":14828
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuFabricInfo_v3_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpuFabricInfo_v3_t))));

    /* "cuda/bindings/_nvml.pyx":14829
 *                 raise MemoryError("Error allocating GpuFabricInfo_v3")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuFabricInfo_v3_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    /* Refcount-safe attribute swap: incref new value, decref old, then store. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14830
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuFabricInfo_v3_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14824
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuFabricInfo_v3 obj = GpuFabricInfo_v3.__new__(GpuFabricInfo_v3)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>malloc(sizeof(nvmlGpuFabricInfo_v3_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":14832
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlGpuFabricInfo_v3_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14833
 *         else:
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":14834
 *             obj._ptr = <nvmlGpuFabricInfo_v3_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":14835
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":14836
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14812
 *         return __from_data(data, "gpu_fabric_info_v3_dtype", gpu_fabric_info_v3_dtype, GpuFabricInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release temporaries, record a traceback frame, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Argument-parsing wrapper for GpuFabricInfo_v3.__reduce_cython__ (generated):
 * rejects any positional or keyword arguments, then forwards to the
 * implementation function. Compiled for either the METH_FASTCALL vectorcall
 * convention or the classic tuple/dict convention, selected at compile time. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_16__reduce_cython__, "GpuFabricInfo_v3.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments besides self: reject both
   * positionals and keywords with a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.__reduce_cython__ (generated pickling
 * stub): unconditionally raises TypeError, since the wrapped raw pointer
 * (_ptr) cannot be meaningfully serialized. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-parsing wrapper for GpuFabricInfo_v3.__setstate_cython__
 * (generated): accepts exactly one argument, `__pyx_state`, positionally or by
 * keyword, then forwards to the implementation. On any parse failure it cleans
 * up collected argument references and returns NULL with a traceback frame. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_18__setstate_cython__, "GpuFabricInfo_v3.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* Accepted keyword names, in declaration order, NULL-terminated. */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keywords present — collect positionals, then merge keywords
       * and verify every required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Parse failure: drop any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuFabricInfo_v3.__setstate_cython__ (generated pickling
 * stub): unconditionally raises TypeError — the wrapped raw pointer (_ptr)
 * cannot be restored from pickled state. The `__pyx_state` argument is
 * intentionally unused. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuFabricInfo_v3.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14839
 * 
 * 
 * cdef _get_nvlink_firmware_version_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t()
 *     return _numpy.dtype({
*/

/* Implementation of the module-internal helper
 * `_get_nvlink_firmware_version_dtype_offsets()` (generated from
 * cuda/bindings/_nvml.pyx:14839). Builds and returns a numpy structured
 * dtype mirroring the C layout of nvmlNvlinkFirmwareVersion_t:
 * names/formats for (ucode_type:u8, major:u32, minor:u32, sub_minor:u32),
 * field offsets computed from the addresses of a local `pod` instance, and
 * itemsize = sizeof(nvmlNvlinkFirmwareVersion_t). Returns a new reference,
 * or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_firmware_version_dtype_offsets(void) {
  nvmlNvlinkFirmwareVersion_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlNvlinkFirmwareVersion_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_nvlink_firmware_version_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14840
 * 
 * cdef _get_nvlink_firmware_version_dtype_offsets():
 *     cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['ucode_type', 'major', 'minor', 'sub_minor'],
*/
  /* NOTE(review): __pyx_t_1 appears never assigned before this copy. Only the
   * *addresses* of pod's fields are used below (offsetof-style arithmetic),
   * so the indeterminate byte values are never read — but confirm against the
   * Cython codegen for default-constructed C structs. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":14841
 * cdef _get_nvlink_firmware_version_dtype_offsets():
 *     cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['ucode_type', 'major', 'minor', 'sub_minor'],
 *         'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
*/
  /* Look up `_numpy.dtype` once; called at the end with the dict built below. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14841, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14841, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14842
 *     cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t()
 *     return _numpy.dtype({
 *         'names': ['ucode_type', 'major', 'minor', 'sub_minor'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
*/
  /* dtype spec dict: 'names' -> list of the four field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14842, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ucode_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ucode_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_ucode_type) != (0)) __PYX_ERR(0, 14842, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_major);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_major);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_major) != (0)) __PYX_ERR(0, 14842, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_minor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_minor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_minor) != (0)) __PYX_ERR(0, 14842, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sub_minor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sub_minor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_sub_minor) != (0)) __PYX_ERR(0, 14842, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14842, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14843
 *     return _numpy.dtype({
 *         'names': ['ucode_type', 'major', 'minor', 'sub_minor'],
 *         'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.ucodeType)) - (<intptr_t>&pod),
*/
  /* 'formats' -> [uint8, uint32, uint32, uint32], matching the C field types. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14843, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 14843, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14843, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 14843, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 14843, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 14842, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14845
 *         'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.ucodeType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.major)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.minor)) - (<intptr_t>&pod),
*/
  /* Field offsets: address-of-field minus address-of-struct for each member. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ucodeType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":14846
 *         'offsets': [
 *             (<intptr_t>&(pod.ucodeType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.major)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.minor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.subMinor)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.major)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":14847
 *             (<intptr_t>&(pod.ucodeType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.major)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.minor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.subMinor)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.minor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 14847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":14848
 *             (<intptr_t>&(pod.major)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.minor)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.subMinor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkFirmwareVersion_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.subMinor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14848, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":14844
 *         'names': ['ucode_type', 'major', 'minor', 'sub_minor'],
 *         'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ucodeType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.major)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14844, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14844, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 14844, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 14844, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 14844, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 14842, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":14850
 *             (<intptr_t>&(pod.subMinor)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkFirmwareVersion_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  /* 'itemsize' -> full struct size, so the dtype includes any tail padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlNvlinkFirmwareVersion_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14850, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 14842, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec_dict) via the vectorcall fast path. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14841, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14839
 * 
 * 
 * cdef _get_nvlink_firmware_version_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_nvlink_firmware_version_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14867
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkFirmwareVersion_t *>calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-visible wrapper for NvlinkFirmwareVersion.__init__.
 * Rejects any positional or keyword arguments (the .pyx __init__ takes
 * only self), then dispatches to the implementation function
 * __pyx_pf_..._NvlinkFirmwareVersion___init__.  Returns 0 on success,
 * -1 with a Python exception set on failure.
 * NOTE(review): Cython-generated code — do not hand-edit; regenerate
 * from cuda/bindings/_nvml.pyx instead. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  /* PyTuple_Size may fail (returns < 0); propagate as -1 with exception set. */
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  /* Any keyword argument is likewise an error. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkFirmwareVersion.__init__ (from _nvml.pyx:14867).
 * Zero-allocates one nvmlNvlinkFirmwareVersion_t via calloc, raising
 * MemoryError on allocation failure, then initializes the bookkeeping
 * fields: _owner = None, _owned = True (dealloc will free the buffer),
 * _readonly = False.  Returns 0 on success, -1 on error.
 * NOTE(review): Cython-generated code — regenerate from the .pyx rather
 * than editing here. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":14868
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkFirmwareVersion_t *>calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkFirmwareVersion")
 */
  /* calloc (not malloc) so the POD struct starts zero-filled. */
  __pyx_v_self->_ptr = ((nvmlNvlinkFirmwareVersion_t *)calloc(1, (sizeof(nvmlNvlinkFirmwareVersion_t))));

  /* "cuda/bindings/_nvml.pyx":14869
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkFirmwareVersion_t *>calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14870
 *         self._ptr = <nvmlNvlinkFirmwareVersion_t *>calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkFirmwareVersion")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, call it with the message string, and raise the
     * resulting instance.  The CYTHON_UNPACK_METHODS branch unpacks a bound
     * method into (self, function) for the vectorcall below. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14870, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkFirmwareV};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14870, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14870, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14869
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkFirmwareVersion_t *>calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":14871
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute-store sequence: incref the new value before
   * decref'ing the old one so the slot never holds a dead reference. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":14872
 *             raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":14873
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":14867
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkFirmwareVersion_t *>calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14875
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkFirmwareVersion_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-visible wrapper for NvlinkFirmwareVersion.__dealloc__; forwards
 * directly to the implementation.  Deallocation cannot raise, so no
 * return value.
 * NOTE(review): the __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs) call
 * names identifiers not declared in this function — presumably the macro
 * discards its arguments in this build configuration; confirm against the
 * __Pyx_KwValues_VARARGS definition earlier in the file before touching. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of NvlinkFirmwareVersion.__dealloc__ (_nvml.pyx:14875).
 * Frees the wrapped nvmlNvlinkFirmwareVersion_t only when this object
 * owns it (_owned) and the pointer is non-NULL.  The pointer is copied to
 * a local and nulled on the object before free() — defends against any
 * re-entrant access seeing a dangling _ptr. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  nvmlNvlinkFirmwareVersion_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlNvlinkFirmwareVersion_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":14877
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkFirmwareVersion_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14878
 *         cdef nvmlNvlinkFirmwareVersion_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":14879
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":14880
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14877
 *     def __dealloc__(self):
 *         cdef nvmlNvlinkFirmwareVersion_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":14875
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkFirmwareVersion_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":14882
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-visible wrapper for NvlinkFirmwareVersion.__repr__; forwards to
 * the implementation and returns its new reference (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkFirmwareVersion.__repr__ (_nvml.pyx:14882).
 * Builds the f-string "<{__name__}.NvlinkFirmwareVersion object at
 * {hex(id(self))}>" by formatting the module __name__, computing
 * hex(id(self)), and joining five unicode pieces with
 * __Pyx_PyUnicode_Join (length and max-char precomputed for a single
 * allocation).  Returns a new unicode reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":14883
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) formatted with empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + module name + ".NvlinkFirmwareVersion object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_NvlinkFirmwareVersion_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14883, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14882
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14885
 *         return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-visible wrapper for the NvlinkFirmwareVersion.ptr property
 * getter; forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (_nvml.pyx:14885):
 * returns the raw _ptr address as a Python int (intptr_t widened via
 * PyLong_FromSsize_t).  Returns NULL only if int creation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14888
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14885
 *         return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14890
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) helper `_get_ptr` (_nvml.pyx:14890): returns _ptr as
 * an intptr_t with no Python-object overhead — the fast-path twin of
 * the `ptr` property for internal callers. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":14891
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14890
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14893
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-visible wrapper for NvlinkFirmwareVersion.__int__; forwards to
 * the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__ (_nvml.pyx:14893): int(obj) yields the raw
 * _ptr address, identical in value to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":14894
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14893
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14896
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareVersion other_
 *         if not isinstance(other, NvlinkFirmwareVersion):
*/

/* Python wrapper */
/* Python-visible wrapper for NvlinkFirmwareVersion.__eq__; forwards both
 * operands to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__ (_nvml.pyx:14896): returns False for any
 * non-NvlinkFirmwareVersion operand, otherwise compares the two wrapped
 * structs bytewise with memcmp.
 * NOTE(review): memcmp dereferences both _ptr values without a NULL
 * check — presumably both are always allocated by this point (set in
 * __init__/__setitem__); confirm no code path leaves _ptr NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":14898
 *     def __eq__(self, other):
 *         cdef NvlinkFirmwareVersion other_
 *         if not isinstance(other, NvlinkFirmwareVersion):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":14899
 *         cdef NvlinkFirmwareVersion other_
 *         if not isinstance(other, NvlinkFirmwareVersion):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":14898
 *     def __eq__(self, other):
 *         cdef NvlinkFirmwareVersion other_
 *         if not isinstance(other, NvlinkFirmwareVersion):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":14900
 *         if not isinstance(other, NvlinkFirmwareVersion):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0)
 * 
 */
  /* Typed re-check (allows None) before downcasting to the cdef class. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion))))) __PYX_ERR(0, 14900, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":14901
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlNvlinkFirmwareVersion_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14901, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14896
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareVersion other_
 *         if not isinstance(other, NvlinkFirmwareVersion):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14903
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
*/

/* Python wrapper */
/* Python-visible wrapper for NvlinkFirmwareVersion.__setitem__; forwards
 * (self, key, val) to the implementation.  Returns 0/-1 per the
 * mp_ass_subscript convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__ (_nvml.pyx:14903).  Two behaviors:
 *   obj[0] = ndarray  -> malloc a fresh nvmlNvlinkFirmwareVersion_t,
 *                        memcpy the array's data (via val.ctypes.data)
 *                        into it, mark owned, and set _readonly from
 *                        `not val.flags.writeable`;
 *   anything else     -> fall back to setattr(self, key, val).
 * NOTE(review): the ndarray path overwrites self._ptr without freeing any
 * previously owned buffer — looks like a leak when called on an already
 * initialized object; if so the fix belongs in _nvml.pyx (and the code
 * generator), not in this generated file.
 * NOTE(review): presumably callers guarantee the ndarray holds at least
 * sizeof(nvmlNvlinkFirmwareVersion_t) bytes — the memcpy does not check. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":14904
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: test key == 0 first, then isinstance(val, ndarray). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 14904, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14904, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14904, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 14904, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14905
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 */
    __pyx_v_self->_ptr = ((nvmlNvlinkFirmwareVersion_t *)malloc((sizeof(nvmlNvlinkFirmwareVersion_t))));

    /* "cuda/bindings/_nvml.pyx":14906
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *                 memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14907
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t))
 *             self._owner = None
 */
      /* Same raise-MemoryError sequence as __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14907, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkFirmwareV};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14907, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 14907, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14906
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":14908
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int;
     * convert to intptr_t and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14908, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14908, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14908, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlNvlinkFirmwareVersion_t))));

    /* "cuda/bindings/_nvml.pyx":14909
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14910
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14911
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writability onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14911, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14911, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 14911, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":14904
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":14913
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 14913, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":14903
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14915
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ucode_type(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `ucode_type` property getter.
 * Casts the generic PyObject* self to the concrete extension-type struct
 * and delegates to the typed implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ucode_type` getter: reads the `ucodeType` field
 * (unsigned char) out of the wrapped nvmlNvlinkFirmwareVersion_t struct at
 * self->_ptr[0] and boxes it as a Python int.  Returns NULL with an
 * exception set if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14918
 *     def ucode_type(self):
 *         """int: """
 *         return self._ptr[0].ucodeType             # <<<<<<<<<<<<<<
 * 
 *     @ucode_type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw C unsigned char field as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).ucodeType); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14918, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14915
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ucode_type(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.ucode_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14920
 *         return self._ptr[0].ucodeType
 * 
 *     @ucode_type.setter             # <<<<<<<<<<<<<<
 *     def ucode_type(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `ucode_type` property setter.
 * Casts self to the concrete extension-type struct and forwards the new
 * value to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ucode_type` setter: raises ValueError if the
 * instance was created read-only, otherwise converts `val` to unsigned char
 * and stores it into the wrapped struct's `ucodeType` field.
 * Returns 0 on success, -1 (with exception set) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14922
 *     @ucode_type.setter
 *     def ucode_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].ucodeType = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14923
 *     def ucode_type(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].ucodeType = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkFirmwareVersion_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14923, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14923, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14922
 *     @ucode_type.setter
 *     def ucode_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].ucodeType = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14924
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].ucodeType = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned char; OverflowError/TypeError propagate on failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 14924, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).ucodeType = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14920
 *         return self._ptr[0].ucodeType
 * 
 *     @ucode_type.setter             # <<<<<<<<<<<<<<
 *     def ucode_type(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.ucode_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14926
 *         self._ptr[0].ucodeType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def major(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `major` property getter.
 * Casts self to the concrete extension-type struct and delegates to the
 * typed implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `major` getter: reads the `major` field
 * (unsigned int) out of the wrapped nvmlNvlinkFirmwareVersion_t struct at
 * self->_ptr[0] and boxes it as a Python int.  Returns NULL with an
 * exception set if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14929
 *     def major(self):
 *         """int: """
 *         return self._ptr[0].major             # <<<<<<<<<<<<<<
 * 
 *     @major.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw C unsigned int field as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).major); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14929, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14926
 *         self._ptr[0].ucodeType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def major(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.major.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14931
 *         return self._ptr[0].major
 * 
 *     @major.setter             # <<<<<<<<<<<<<<
 *     def major(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `major` property setter.
 * Casts self to the concrete extension-type struct and forwards the new
 * value to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `major` setter: raises ValueError if the instance
 * was created read-only, otherwise converts `val` to unsigned int and
 * stores it into the wrapped struct's `major` field.
 * Returns 0 on success, -1 (with exception set) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14933
 *     @major.setter
 *     def major(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].major = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14934
 *     def major(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].major = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkFirmwareVersion_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14934, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14934, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14933
 *     @major.setter
 *     def major(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].major = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14935
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].major = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int; OverflowError/TypeError propagate on failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14935, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).major = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14931
 *         return self._ptr[0].major
 * 
 *     @major.setter             # <<<<<<<<<<<<<<
 *     def major(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.major.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14937
 *         self._ptr[0].major = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def minor(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `minor` property getter.
 * Casts self to the concrete extension-type struct and delegates to the
 * typed implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `minor` getter: reads the `minor` field
 * (unsigned int) out of the wrapped nvmlNvlinkFirmwareVersion_t struct at
 * self->_ptr[0] and boxes it as a Python int.  Returns NULL with an
 * exception set if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14940
 *     def minor(self):
 *         """int: """
 *         return self._ptr[0].minor             # <<<<<<<<<<<<<<
 * 
 *     @minor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw C unsigned int field as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).minor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14940, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14937
 *         self._ptr[0].major = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def minor(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.minor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14942
 *         return self._ptr[0].minor
 * 
 *     @minor.setter             # <<<<<<<<<<<<<<
 *     def minor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `minor` property setter.
 * Casts self to the concrete extension-type struct and forwards the new
 * value to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `minor` setter: raises ValueError if the instance
 * was created read-only, otherwise converts `val` to unsigned int and
 * stores it into the wrapped struct's `minor` field.
 * Returns 0 on success, -1 (with exception set) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14944
 *     @minor.setter
 *     def minor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].minor = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14945
 *     def minor(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].minor = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkFirmwareVersion_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14945, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14945, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14944
 *     @minor.setter
 *     def minor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].minor = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14946
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].minor = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox to unsigned int; OverflowError/TypeError propagate on failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14946, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).minor = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14942
 *         return self._ptr[0].minor
 * 
 *     @minor.setter             # <<<<<<<<<<<<<<
 *     def minor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.minor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14948
 *         self._ptr[0].minor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sub_minor(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `sub_minor` property getter.
 * Casts self to the concrete extension-type struct and delegates to the
 * typed implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `sub_minor` getter: reads the `subMinor` field
 * (unsigned int) out of the wrapped nvmlNvlinkFirmwareVersion_t struct at
 * self->_ptr[0] and boxes it as a Python int.  Returns NULL with an
 * exception set if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":14951
 *     def sub_minor(self):
 *         """int: """
 *         return self._ptr[0].subMinor             # <<<<<<<<<<<<<<
 * 
 *     @sub_minor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the raw C unsigned int field as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).subMinor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14948
 *         self._ptr[0].minor = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sub_minor(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.sub_minor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14953
 *         return self._ptr[0].subMinor
 * 
 *     @sub_minor.setter             # <<<<<<<<<<<<<<
 *     def sub_minor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `sub_minor` property setter.
 * Casts self to the concrete extension-type struct and forwards the new
 * value to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `sub_minor` setter: raises ValueError if the
 * instance was created read-only, otherwise converts `val` to unsigned int
 * and stores it into the wrapped struct's `subMinor` field.
 * Returns 0 on success, -1 (with exception set) on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":14955
 *     @sub_minor.setter
 *     def sub_minor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].subMinor = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":14956
 *     def sub_minor(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].subMinor = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkFirmwareVersion_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14956, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 14956, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14955
 *     @sub_minor.setter
 *     def sub_minor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].subMinor = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":14957
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareVersion instance is read-only")
 *         self._ptr[0].subMinor = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Unbox to unsigned int; OverflowError/TypeError propagate on failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14957, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).subMinor = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":14953
 *         return self._ptr[0].subMinor
 * 
 *     @sub_minor.setter             # <<<<<<<<<<<<<<
 *     def sub_minor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.sub_minor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14959
 *         self._ptr[0].subMinor = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the `from_data` staticmethod: unpacks the
 * single positional-or-keyword argument `data` (fastcall or tuple calling
 * convention, depending on CYTHON_METH_FASTCALL) and forwards it to the
 * implementation function.  Returns the new object, or NULL with an
 * exception set on bad arguments.
 *
 * Fix: the keyword-count error check originally read
 * `if (unlikely(__pyx_kwds_len) < 0)`.  Since unlikely(x) expands to
 * __builtin_expect(!!(x), 0), that compares a 0/1 value against 0 and is
 * always false, so a negative count from __Pyx_NumKwargs_FASTCALL could
 * never reach the error path.  The parenthesis is moved so the whole
 * comparison is inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_12from_data, "NvlinkFirmwareVersion.from_data(data)\n\nCreate an NvlinkFirmwareVersion instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `nvlink_firmware_version_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected check: the comparison must be inside unlikely(), otherwise
     * the condition can never be true (see function header). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14959, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14959, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 14959, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 14959, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14959, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14959, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `from_data` staticmethod: looks up the module-level
 * `nvlink_firmware_version_dtype` global and delegates to the shared
 * __from_data helper, passing the data array, the dtype name, the dtype
 * object, and the NvlinkFirmwareVersion extension type.  Returns the new
 * instance, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":14966
 *             data (_numpy.ndarray): a single-element array of dtype `nvlink_firmware_version_dtype` holding the data.
 *         """
 *         return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_version_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_version_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14959
 *         self._ptr[0].subMinor = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14968
 *         return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given pointer.
*/

/* Python wrapper for NvlinkFirmwareVersion.from_ptr(ptr, readonly=False, owner=None):
   parses fastcall/vararg positional and keyword arguments, applies defaults
   (readonly=False, owner=None), converts them to C types and delegates to the
   implementation function __pyx_pf_..._14from_ptr. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_14from_ptr, "NvlinkFirmwareVersion.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an NvlinkFirmwareVersion instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the negative-length check must be inside unlikely().  The previous
       form `unlikely(__pyx_kwds_len) < 0` compared __builtin_expect's 0/1
       result against 0, which is always false, so an error return from
       __Pyx_NumKwargs_FASTCALL was silently ignored.  This now matches the
       correctly-parenthesized check used by the sibling wrappers below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 14968, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14968, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14968, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14968, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 14968, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":14969
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an NvlinkFirmwareVersion instance wrapping the given pointer.
 * 
*/
      /* apply default owner=None, then verify the one required argument (ptr) */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 14968, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14968, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14968, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14968, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* convert to C types; readonly defaults to False when not supplied */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 14969, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14969, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 14968, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":14968
 *         return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkFirmwareVersion.from_ptr.  Wraps an existing
   nvmlNvlinkFirmwareVersion_t located at `ptr`.  Ownership semantics:
   - owner is None: the struct is deep-copied into freshly malloc'ed memory
     owned by the new object (_owned = True, _owner = None);
   - otherwise: the pointer is borrowed as-is and `owner` is stored on the
     object to keep the backing memory alive (_owned = False).
   Raises ValueError for a null pointer and MemoryError on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":14977
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)
*/
  /* reject null pointers up front */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":14978
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14978, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 14978, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":14977
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)
*/
  }

  /* "cuda/bindings/_nvml.pyx":14979
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
*/
  /* allocate the extension object via tp_new, bypassing __init__ */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14979, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":14980
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":14981
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
*/
    /* no owner: take a private copy of the struct so its lifetime is ours */
    __pyx_v_obj->_ptr = ((nvmlNvlinkFirmwareVersion_t *)malloc((sizeof(nvmlNvlinkFirmwareVersion_t))));

    /* "cuda/bindings/_nvml.pyx":14982
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareVersion_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":14983
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareVersion_t))
 *             obj._owner = None
*/
      /* NOTE: MemoryError is looked up as a module/builtin global, not the C
         PyExc_MemoryError directly — presumably so a module-level override is
         honored (Cython's standard lookup path for names used in .pyx code) */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14983, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkFirmwareV};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14983, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 14983, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":14982
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareVersion_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":14984
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareVersion_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* copy the caller's struct contents into our freshly allocated buffer */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlNvlinkFirmwareVersion_t))));

    /* "cuda/bindings/_nvml.pyx":14985
 *                 raise MemoryError("Error allocating NvlinkFirmwareVersion")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareVersion_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":14986
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareVersion_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":14980
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>malloc(sizeof(nvmlNvlinkFirmwareVersion_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":14988
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* owner provided: borrow the pointer and keep a reference to owner alive */
    __pyx_v_obj->_ptr = ((nvmlNvlinkFirmwareVersion_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":14989
 *         else:
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":14990
 *             obj._ptr = <nvmlNvlinkFirmwareVersion_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":14991
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":14992
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14968
 *         return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* drop the local reference; __pyx_r holds its own INCREF'd reference */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for NvlinkFirmwareVersion.__reduce_cython__: validates that no
   positional or keyword arguments were passed and delegates to the
   implementation below, which always raises TypeError (pickling unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_16__reduce_cython__, "NvlinkFirmwareVersion.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments at all */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __reduce_cython__ implementation: unconditionally raises TypeError because
   the wrapped C pointer (self._ptr) cannot be serialized for pickling. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* raise TypeError with a cached unicode message; this path never returns */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for NvlinkFirmwareVersion.__setstate_cython__(__pyx_state):
   parses the single required argument and delegates to the implementation,
   which always raises TypeError (unpickling unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_18__setstate_cython__, "NvlinkFirmwareVersion.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the negative-length check must be inside unlikely().  The previous
       form `unlikely(__pyx_kwds_len) < 0` compared __builtin_expect's 0/1
       result against 0 (always false), so an error return from
       __Pyx_NumKwargs_FASTCALL was never detected.  This matches the
       correctly-parenthesized check used by __reduce_cython__ above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setstate_cython__ implementation: unconditionally raises TypeError because
   the wrapped C pointer (self._ptr) cannot be restored from pickled state.
   The __pyx_state argument is ignored. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* raise TypeError with a cached unicode message; this path never returns */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareVersion.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":14995
 * 
 * 
 * cdef _get_excluded_device_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlExcludedDeviceInfo_t pod = nvmlExcludedDeviceInfo_t()
 *     return _numpy.dtype({
*/

/* Build a NumPy structured dtype describing nvmlExcludedDeviceInfo_t:
   field names ('pci_info', 'uuid'), per-field formats, byte offsets computed
   from a stack instance via pointer arithmetic, and the exact C itemsize.
   Returns a new reference to the numpy.dtype object, or NULL with an
   exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_excluded_device_info_dtype_offsets(void) {
  nvmlExcludedDeviceInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlExcludedDeviceInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_excluded_device_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":14996
 * 
 * cdef _get_excluded_device_info_dtype_offsets():
 *     cdef nvmlExcludedDeviceInfo_t pod = nvmlExcludedDeviceInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['pci_info', 'uuid'],
*/
  /* FIX: value-initialize the temporary before copying it into `pod`; the
     assignment previously read __pyx_t_1 while it was still uninitialized
     (undefined behavior), even though `pod` is only used for address
     arithmetic below.  This restores the C++ value-initialization implied by
     the pyx expression `nvmlExcludedDeviceInfo_t()`. */
  __pyx_t_1 = nvmlExcludedDeviceInfo_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":14997
 * cdef _get_excluded_device_info_dtype_offsets():
 *     cdef nvmlExcludedDeviceInfo_t pod = nvmlExcludedDeviceInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['pci_info', 'uuid'],
 *         'formats': [pci_info_dtype, _numpy.int8],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14997, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14997, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14998
 *     cdef nvmlExcludedDeviceInfo_t pod = nvmlExcludedDeviceInfo_t()
 *     return _numpy.dtype({
 *         'names': ['pci_info', 'uuid'],             # <<<<<<<<<<<<<<
 *         'formats': [pci_info_dtype, _numpy.int8],
 *         'offsets': [
*/
  /* assemble the 4-key dtype spec dict: names, formats, offsets, itemsize */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pci_info);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pci_info);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_pci_info) != (0)) __PYX_ERR(0, 14998, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_uuid);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_uuid);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_uuid) != (0)) __PYX_ERR(0, 14998, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 14998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":14999
 *     return _numpy.dtype({
 *         'names': ['pci_info', 'uuid'],
 *         'formats': [pci_info_dtype, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.pciInfo)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_pci_info_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 14999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14999, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14999, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 14999, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_7) < (0)) __PYX_ERR(0, 14998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15001
 *         'formats': [pci_info_dtype, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.pciInfo)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.uuid)) - (<intptr_t>&pod),
 *         ],
*/
  /* field offsets as member-address minus struct-base-address */
  __pyx_t_7 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pciInfo)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);

  /* "cuda/bindings/_nvml.pyx":15002
 *         'offsets': [
 *             (<intptr_t>&(pod.pciInfo)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.uuid)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlExcludedDeviceInfo_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.uuid)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15000
 *         'names': ['pci_info', 'uuid'],
 *         'formats': [pci_info_dtype, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pciInfo)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.uuid)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15000, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15000, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 15000, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_6) < (0)) __PYX_ERR(0, 14998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15004
 *             (<intptr_t>&(pod.uuid)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlExcludedDeviceInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(nvmlExcludedDeviceInfo_t))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15004, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6) < (0)) __PYX_ERR(0, 14998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* vectorcall numpy.dtype(spec_dict) */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14997, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":14995
 * 
 * 
 * cdef _get_excluded_device_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlExcludedDeviceInfo_t pod = nvmlExcludedDeviceInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_excluded_device_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15021
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlExcludedDeviceInfo_t *>calloc(1, sizeof(nvmlExcludedDeviceInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for ExcludedDeviceInfo.__init__ (tp_init slot).
 * The .pyx __init__ takes only `self`, so this wrapper's sole job is to
 * reject any positional or keyword arguments before delegating to the
 * C-level implementation.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments; the unchecked PyTuple_GET_SIZE form is used
   * only when the build assumes tuple sizes are safe to read directly. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments: any positional... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ...or keyword argument raises TypeError. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of ExcludedDeviceInfo.__init__ from the .pyx:
 *   - calloc a zero-initialized nvmlExcludedDeviceInfo_t and store it in
 *     self._ptr (raising MemoryError on allocation failure),
 *   - initialize self._owner = None, self._owned = True,
 *     self._readonly = False (i.e. this object owns and may free the buffer).
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":15022
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlExcludedDeviceInfo_t *>calloc(1, sizeof(nvmlExcludedDeviceInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ExcludedDeviceInfo")
 */
  /* calloc (not malloc) so the POD struct starts zeroed. */
  __pyx_v_self->_ptr = ((nvmlExcludedDeviceInfo_t *)calloc(1, (sizeof(nvmlExcludedDeviceInfo_t))));

  /* "cuda/bindings/_nvml.pyx":15023
 *     def __init__(self):
 *         self._ptr = <nvmlExcludedDeviceInfo_t *>calloc(1, sizeof(nvmlExcludedDeviceInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ExcludedDeviceInfo")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15024
 *         self._ptr = <nvmlExcludedDeviceInfo_t *>calloc(1, sizeof(nvmlExcludedDeviceInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ExcludedDeviceInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in the module globals (falls back to builtins)
     * and call it with the message string via the generic vectorcall
     * sequence below; the PyMethod_Check branch is the standard generated
     * bound-method unpacking and is not normally taken for a type object. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15024, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ExcludedDeviceI};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15024, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15024, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15023
 *     def __init__(self):
 *         self._ptr = <nvmlExcludedDeviceInfo_t *>calloc(1, sizeof(nvmlExcludedDeviceInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ExcludedDeviceInfo")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":15025
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ExcludedDeviceInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: incref the new value before dropping the old one. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":15026
 *             raise MemoryError("Error allocating ExcludedDeviceInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":15027
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":15021
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlExcludedDeviceInfo_t *>calloc(1, sizeof(nvmlExcludedDeviceInfo_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15029
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlExcludedDeviceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for ExcludedDeviceInfo.__dealloc__ (called from the
 * type's tp_dealloc path).  Takes no arguments besides self and simply
 * forwards to the C-level implementation.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this
 * function; this only compiles if __Pyx_KwValues_VARARGS is a macro that
 * discards its arguments in this configuration — confirm against the
 * Cython 3.2 utility code. */
static void __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* C-level implementation of ExcludedDeviceInfo.__dealloc__ (.pyx:15029).
 * Frees the wrapped nvmlExcludedDeviceInfo_t buffer, but only when this
 * object owns it (_owned is true) and the pointer is non-NULL.  The _ptr
 * field is cleared *before* free() so the object never holds a dangling
 * pointer, even transiently. */
static void __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  /* if self._owned and self._ptr != NULL: */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* Detach the buffer from the object, then release it. */
    nvmlExcludedDeviceInfo_t *doomed = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(doomed);
  }
}

/* "cuda/bindings/_nvml.pyx":15036
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ExcludedDeviceInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ExcludedDeviceInfo.__repr__ (tp_repr slot).
 * Forwards to the C-level implementation; returns a new unicode reference,
 * or NULL with an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not in scope here; compiles
 * only if __Pyx_KwValues_VARARGS discards its arguments — confirm. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of ExcludedDeviceInfo.__repr__: builds the f-string
 *   f"<{__name__}.ExcludedDeviceInfo object at {hex(id(self))}>"
 * by formatting __name__ and hex(id(self)) and joining five unicode pieces
 * with a precomputed length/max-char hint.  Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":15037
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ExcludedDeviceInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") — the {__name__} replacement field. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) — the {hex(id(self))} replacement field. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal fragments and the two formatted values; the length
   * argument is a hint (literal lengths + dynamic lengths), and the last
   * argument is the max code point needed for the result's unicode kind. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ExcludedDeviceInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15036
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ExcludedDeviceInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15039
 *         return f"<{__name__}.ExcludedDeviceInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the `ptr` property getter.  Forwards to the
 * C-level implementation; returns a new int reference or NULL.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not in scope here; compiles
 * only if __Pyx_KwValues_VARARGS discards its arguments — confirm. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of the `ptr` property getter: returns the raw
 * address of the wrapped nvmlExcludedDeviceInfo_t as a Python int.
 * Conversion goes through PyLong_FromSsize_t, which presumably assumes
 * intptr_t and Py_ssize_t share a width on supported platforms — TODO
 * confirm for any target where that does not hold. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15042
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15042, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15039
 *         return f"<{__name__}.ExcludedDeviceInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15044
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level implementation of the cdef method ExcludedDeviceInfo._get_ptr
 * (.pyx:15044): returns the wrapped struct's address as an intptr_t.
 * Pure accessor — no Python objects, no error paths. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  /* return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":15047
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ExcludedDeviceInfo.__int__ (nb_int slot).
 * Forwards to the C-level implementation; returns a new int reference
 * or NULL with an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not in scope here; compiles
 * only if __Pyx_KwValues_VARARGS discards its arguments — confirm. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of ExcludedDeviceInfo.__int__: identical behavior
 * to the `ptr` property getter — the wrapped struct's address as a Python
 * int (so int(obj) == obj.ptr). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":15048
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15048, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15047
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15050
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ExcludedDeviceInfo other_
 *         if not isinstance(other, ExcludedDeviceInfo):
*/

/* Python wrapper */
/* Python-level wrapper for ExcludedDeviceInfo.__eq__.  Forwards (self,
 * other) to the C-level implementation; returns a new bool reference
 * or NULL with an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not in scope here; compiles
 * only if __Pyx_KwValues_VARARGS discards its arguments — confirm. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of ExcludedDeviceInfo.__eq__:
 *   - returns False if `other` is not an ExcludedDeviceInfo,
 *   - otherwise compares the two wrapped structs bytewise with memcmp.
 * NOTE(review): memcmp over the whole struct also compares any padding
 * bytes; __init__ zeroes them via calloc, but buffers attached through
 * other paths may not be zeroed — confirm this is acceptable for equality. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":15052
 *     def __eq__(self, other):
 *         cdef ExcludedDeviceInfo other_
 *         if not isinstance(other, ExcludedDeviceInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15053
 *         cdef ExcludedDeviceInfo other_
 *         if not isinstance(other, ExcludedDeviceInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlExcludedDeviceInfo_t)) == 0)
 */
    /* Not comparable: returns False rather than NotImplemented. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15052
 *     def __eq__(self, other):
 *         cdef ExcludedDeviceInfo other_
 *         if not isinstance(other, ExcludedDeviceInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":15054
 *         if not isinstance(other, ExcludedDeviceInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlExcludedDeviceInfo_t)) == 0)
 * 
 */
  /* Typed reassignment: the __Pyx_TypeTest repeats the type check to raise
   * TypeError on mismatch (unreachable here after the isinstance guard). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo))))) __PYX_ERR(0, 15054, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":15055
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlExcludedDeviceInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlExcludedDeviceInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15050
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ExcludedDeviceInfo other_
 *         if not isinstance(other, ExcludedDeviceInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15057
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlExcludedDeviceInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
*/

/* Python wrapper */
/* Python-level wrapper for ExcludedDeviceInfo.__setitem__ (mp_ass_subscript
 * slot, assignment case).  Forwards (self, key, val) to the C-level
 * implementation; returns 0 on success, -1 with an exception set.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not in scope here; compiles
 * only if __Pyx_KwValues_VARARGS discards its arguments — confirm. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of ExcludedDeviceInfo.__setitem__:
 *   obj[0] = ndarray  -> malloc a fresh struct, memcpy the array's buffer
 *                        into it, take ownership, mirror the array's
 *                        writeable flag into _readonly;
 *   anything else     -> setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set.
 * NOTE(review): the ndarray branch overwrites self._ptr without freeing a
 * previously owned buffer (e.g. the one calloc'd in __init__) — this looks
 * like a memory leak inherited from the .pyx source; fix there and
 * regenerate rather than patching this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":15058
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: test key == 0 first, then the ndarray check. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15058, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 15058, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15059
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 */
    __pyx_v_self->_ptr = ((nvmlExcludedDeviceInfo_t *)malloc((sizeof(nvmlExcludedDeviceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":15060
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlExcludedDeviceInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15061
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlExcludedDeviceInfo_t))
 *             self._owner = None
 */
      /* Look up and call MemoryError("...") — same generated call pattern
       * as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15061, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ExcludedDeviceI};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15061, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 15061, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15060
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlExcludedDeviceInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":15062
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlExcludedDeviceInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read the array's buffer address from val.ctypes.data (a Python int)
     * and copy sizeof(struct) bytes from it.  Presumably the caller
     * guarantees the array is contiguous and at least that large — TODO
     * confirm against the callers in the generated module. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15062, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15062, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15062, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlExcludedDeviceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":15063
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlExcludedDeviceInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":15064
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlExcludedDeviceInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":15065
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag: read-only array => read-only
     * wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15065, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15065, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 15065, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":15058
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":15067
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 15067, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":15057
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlExcludedDeviceInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15069
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_info(self):
 *         """PciInfo: """
*/

/* Python wrapper */
/* Getter wrapper for ExcludedDeviceInfo.pci_info: adapts the generic CPython
 * descriptor __get__ slot signature (PyObject *self) to the typed
 * implementation function below.  Generated by Cython; contains no logic of
 * its own beyond the downcast and RefNanny bookkeeping. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ExcludedDeviceInfo.pci_info getter (.pyx line 15072):
 *   return PciInfo.from_ptr(<intptr_t>&(self._ptr[0].pciInfo), self._readonly, self)
 * Builds a zero-copy PciInfo view over the embedded nvmlPciInfo_t field,
 * passing `self` as the owner argument so the backing allocation outlives the
 * returned view.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15072
 *     def pci_info(self):
 *         """PciInfo: """
 *         return PciInfo.from_ptr(<intptr_t>&(self._ptr[0].pciInfo), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @pci_info.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the address of the embedded pciInfo member as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).pciInfo))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    /* Vectorcall PciInfo.from_ptr(addr, readonly, owner=self). */
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15072, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15069
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pci_info(self):
 *         """PciInfo: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.pci_info.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15074
 *         return PciInfo.from_ptr(<intptr_t>&(self._ptr[0].pciInfo), self._readonly, self)
 * 
 *     @pci_info.setter             # <<<<<<<<<<<<<<
 *     def pci_info(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for ExcludedDeviceInfo.pci_info: adapts the generic CPython
 * descriptor __set__ slot signature to the typed implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ExcludedDeviceInfo.pci_info setter (.pyx line 15074):
 * raises ValueError when the instance is read-only, type-checks `val` as a
 * PciInfo, then memcpy()s one nvmlPciInfo_t from val's backing pointer into
 * the embedded pciInfo field.  Returns 0 on success, -1 on error.
 * NOTE(review): the generated type test also lets `val` be None (standard
 * Cython semantics for `cdef PciInfo val_ = val` without `not None`); a None
 * value would be dereferenced via its vtab below — presumably callers never
 * pass None; confirm against the .pyx contract. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15076
 *     @pci_info.setter
 *     def pci_info(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef PciInfo val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15077
 *     def pci_info(self, val):
 *         if self._readonly:
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef PciInfo val_ = val
 *         memcpy(<void *>&(self._ptr[0].pciInfo), <void *>(val_._get_ptr()), sizeof(nvmlPciInfo_t) * 1)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ExcludedDeviceInfo_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15077, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15077, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15076
 *     @pci_info.setter
 *     def pci_info(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef PciInfo val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":15078
 *         if self._readonly:
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef PciInfo val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].pciInfo), <void *>(val_._get_ptr()), sizeof(nvmlPciInfo_t) * 1)
 * 
 */
  /* Runtime type check: `val` must be a PciInfo (or None — see NOTE above). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo))))) __PYX_ERR(0, 15078, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15079
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef PciInfo val_ = val
 *         memcpy(<void *>&(self._ptr[0].pciInfo), <void *>(val_._get_ptr()), sizeof(nvmlPciInfo_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fetch val_'s raw data pointer via its vtable, then copy the struct in. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 15079, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).pciInfo)), ((void *)__pyx_t_4), ((sizeof(nvmlPciInfo_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":15074
 *         return PciInfo.from_ptr(<intptr_t>&(self._ptr[0].pciInfo), self._readonly, self)
 * 
 *     @pci_info.setter             # <<<<<<<<<<<<<<
 *     def pci_info(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.pci_info.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15081
 *         memcpy(<void *>&(self._ptr[0].pciInfo), <void *>(val_._get_ptr()), sizeof(nvmlPciInfo_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def uuid(self):
 *         """~_numpy.int8: (array of length 80)."""
*/

/* Python wrapper */
/* Getter wrapper for ExcludedDeviceInfo.uuid: adapts the generic CPython
 * descriptor __get__ slot signature to the typed implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ExcludedDeviceInfo.uuid getter (.pyx line 15084):
 * decodes the NUL-terminated char array `self._ptr[0].uuid` into a new
 * Python str via PyUnicode_FromString.  Returns a new reference, or NULL
 * with an exception set (e.g. if the bytes are not valid UTF-8). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15084
 *     def uuid(self):
 *         """~_numpy.int8: (array of length 80)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].uuid)             # <<<<<<<<<<<<<<
 * 
 *     @uuid.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).uuid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15084, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15081
 *         memcpy(<void *>&(self._ptr[0].pciInfo), <void *>(val_._get_ptr()), sizeof(nvmlPciInfo_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def uuid(self):
 *         """~_numpy.int8: (array of length 80)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.uuid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15086
 *         return cpython.PyUnicode_FromString(self._ptr[0].uuid)
 * 
 *     @uuid.setter             # <<<<<<<<<<<<<<
 *     def uuid(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for ExcludedDeviceInfo.uuid: adapts the generic CPython
 * descriptor __set__ slot signature to the typed implementation below.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ExcludedDeviceInfo.uuid setter (.pyx line 15086):
 * raises ValueError when the instance is read-only, calls val.encode() to get
 * a bytes object, rejects encoded strings of 80+ bytes (the field is an
 * 80-char array, so max usable length is 79 plus the NUL), then copies into
 * self._ptr[0].uuid.
 * NOTE(review): the memcpy below always copies a fixed 80 bytes from the
 * bytes object's buffer even when len(buf) < 79, so it can read past the end
 * of the bytes allocation — this mirrors the .pyx source
 * (`memcpy(..., <void *>ptr, 80)`); confirm upstream whether a
 * length-bounded copy was intended. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15088
 *     @uuid.setter
 *     def uuid(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15089
 *     def uuid(self, val):
 *         if self._readonly:
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ExcludedDeviceInfo_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15089, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15089, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15088
 *     @uuid.setter
 *     def uuid(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":15090
 *         if self._readonly:
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field uuid, max length is 79")
 */
  /* buf = val.encode(); the result must be exactly bytes (or None). */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15090, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 15090, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15091
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field uuid, max length is 79")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 15091, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 15091, __pyx_L1_error)
  /* Reject encoded strings that would not fit (79 chars + NUL). */
  __pyx_t_5 = (__pyx_t_4 >= 80);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":15092
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field uuid, max length is 79")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].uuid), <void *>ptr, 80)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_uuid_m};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15092, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15092, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15091
 *             raise ValueError("This ExcludedDeviceInfo instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 80:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field uuid, max length is 79")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":15093
 *         if len(buf) >= 80:
 *             raise ValueError("String too long for field uuid, max length is 79")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].uuid), <void *>ptr, 80)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 15093, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 15093, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":15094
 *             raise ValueError("String too long for field uuid, max length is 79")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].uuid), <void *>ptr, 80)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Fixed-size copy into the 80-byte field — see NOTE(review) above. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).uuid), ((void *)__pyx_v_ptr), 80));

  /* "cuda/bindings/_nvml.pyx":15086
 *         return cpython.PyUnicode_FromString(self._ptr[0].uuid)
 * 
 *     @uuid.setter             # <<<<<<<<<<<<<<
 *     def uuid(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.uuid.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15096
 *         memcpy(<void *>(self._ptr[0].uuid), <void *>ptr, 80)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ExcludedDeviceInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method ExcludedDeviceInfo.from_data(data):
 * unpacks exactly one positional-or-keyword argument `data` from the
 * METH_FASTCALL calling convention and delegates to the implementation.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_12from_data, "ExcludedDeviceInfo.from_data(data)\n\nCreate an ExcludedDeviceInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `excluded_device_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() yields 0/1, so the
     * comparison was always false and the error check was dead code; the
     * negative-length (error) result must be tested inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15096, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15096, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 15096, __pyx_L3_error)
      /* Ensure the single required argument was supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 15096, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15096, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15096, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method ExcludedDeviceInfo.from_data (.pyx
 * line 15103): looks up the module global `excluded_device_info_dtype` and
 * delegates to the module-level helper __from_data(data, name, dtype, cls)
 * with ExcludedDeviceInfo as the target class.  Returns a new reference, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":15103
 *             data (_numpy.ndarray): a single-element array of dtype `excluded_device_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "excluded_device_info_dtype", excluded_device_info_dtype, ExcludedDeviceInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from the module globals at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_excluded_device_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15103, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_excluded_device_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15103, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15096
 *         memcpy(<void *>(self._ptr[0].uuid), <void *>ptr, 80)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ExcludedDeviceInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15105
 *         return __from_data(data, "excluded_device_info_dtype", excluded_device_info_dtype, ExcludedDeviceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ExcludedDeviceInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * ExcludedDeviceInfo.from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 * unpacks 1..3 positional/keyword arguments from the METH_FASTCALL calling
 * convention, converts `ptr` to intptr_t and `readonly` to a C bool, fills
 * in defaults, and delegates to the implementation.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_14from_ptr, "ExcludedDeviceInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ExcludedDeviceInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() yields 0/1, so the
     * comparison was always false and the error check was dead code; the
     * negative-length (error) result must be tested inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15105, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15105, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15105, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15105, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 15105, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":15106
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ExcludedDeviceInfo instance wrapping the given pointer.
 * 
 */
      /* Default for `owner` is None; `readonly` default is handled below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Ensure the single required argument `ptr` was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 15105, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path: 1..3 positional arguments, no keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15105, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15105, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15105, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert `ptr` (Python int) to intptr_t; -1 with an exception pending
     * signals conversion failure. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15106, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15106, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 15105, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":15105
 *         return __from_data(data, "excluded_device_info_dtype", excluded_device_info_dtype, ExcludedDeviceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ExcludedDeviceInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the Python staticmethod
 * ExcludedDeviceInfo.from_ptr(ptr, readonly=False, owner=None)
 * from cuda/bindings/_nvml.pyx:15105.  Machine-generated by Cython 3.2.2;
 * fix bugs in the .pyx source and regenerate rather than editing here.
 *
 * Behavior shown by the generated code below:
 *   - raises ValueError when ptr == 0;
 *   - allocates a fresh ExcludedDeviceInfo via its tp_new slot;
 *   - owner is None:  malloc()s a private nvmlExcludedDeviceInfo_t, raises
 *     MemoryError if the allocation fails, memcpy()s the struct pointed to
 *     by `ptr`, and marks the wrapper as owning the copy (_owned = 1,
 *     _owner = None);
 *   - owner is not None: aliases `ptr` directly and keeps a reference to
 *     `owner` so the backing memory stays alive (_owned = 0);
 *   - records `readonly` on the wrapper and returns it.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15114
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)
 */
  /* Guard: a null pointer cannot be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15115
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)
 *         if owner is None:
 */
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15115, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15115, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15114
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *             cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15116
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 */
  /* ExcludedDeviceInfo.__new__(ExcludedDeviceInfo): calls tp_new directly,
   * bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15116, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15117
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if obj._ptr == NULL:
 */
  /* owner is None => deep-copy the struct; otherwise alias it (below). */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15118
 *         cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 */
    __pyx_v_obj->_ptr = ((nvmlExcludedDeviceInfo_t *)malloc((sizeof(nvmlExcludedDeviceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":15119
 *         if owner is None:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlExcludedDeviceInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15120
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlExcludedDeviceInfo_t))
 *             obj._owner = None
 */
      /* NOTE(review): "MemoryError" is looked up as a module global here (the
       * builtin is reachable through that lookup), then called with the
       * message and raised.  This is standard Cython codegen. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15120, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ExcludedDeviceI};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15120, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 15120, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15119
 *         if owner is None:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlExcludedDeviceInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":15121
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlExcludedDeviceInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep copy: caller's struct is duplicated into the owned buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlExcludedDeviceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":15122
 *                 raise MemoryError("Error allocating ExcludedDeviceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlExcludedDeviceInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":15123
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlExcludedDeviceInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":15117
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ExcludedDeviceInfo obj = ExcludedDeviceInfo.__new__(ExcludedDeviceInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>malloc(sizeof(nvmlExcludedDeviceInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":15125
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Aliasing branch: wrapper points at the caller's memory and pins
     * `owner` to keep it alive; wrapper does not free the struct. */
    __pyx_v_obj->_ptr = ((nvmlExcludedDeviceInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":15126
 *         else:
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":15127
 *             obj._ptr = <nvmlExcludedDeviceInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":15128
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":15129
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15105
 *         return __from_data(data, "excluded_device_info_dtype", excluded_device_info_dtype, ExcludedDeviceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ExcludedDeviceInfo instance wrapping the given pointer.
 */

  /* function exit code: error path decrefs all temporaries and records a
   * traceback; both paths drop the local `obj` reference (the return value
   * holds its own reference taken above). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_16__reduce_cython__, "ExcludedDeviceInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ExcludedDeviceInfo.__reduce_cython__(self).
 * ExcludedDeviceInfo holds a raw C pointer (_ptr) and therefore cannot be
 * pickled; this method unconditionally raises TypeError.  Generated from
 * Cython's standard pickling-disabled "tree fragment". */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Always raises; control never reaches a normal return. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for ExcludedDeviceInfo.__setstate_cython__(self, __pyx_state):
 * a fastcall entry point that unpacks exactly one argument (`__pyx_state`,
 * accepted positionally or by keyword) and forwards to the implementation.
 * Machine-generated by Cython 3.2.2. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_18__setstate_cython__, "ExcludedDeviceInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the tuple-args (non-fastcall) build, derive nargs from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): generated as `unlikely(__pyx_kwds_len) < 0`; since
     * unlikely(x) yields x, this is equivalent to `__pyx_kwds_len < 0`. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: take up to one positional arg, then let the
       * keyword parser fill the rest and detect duplicates/unknowns. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any argument refs already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code: release argument refs held in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ExcludedDeviceInfo.__setstate_cython__(self, __pyx_state).
 * Pickling is disabled for this pointer-holding type: the state argument is
 * ignored and TypeError is raised unconditionally.  Generated from Cython's
 * standard pickling-disabled "tree fragment". */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Always raises; control never reaches a normal return. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ExcludedDeviceInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15132
 * 
 * 
 * cdef _get_process_detail_list_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessDetailList_v1_t pod = nvmlProcessDetailList_v1_t()
 *     return _numpy.dtype({
*/

/* Implementation of the module-private cdef function
 * _get_process_detail_list_v1_dtype_offsets() from _nvml.pyx:15132.
 * Builds and returns a numpy structured dtype describing
 * nvmlProcessDetailList_v1_t: field names, numpy formats, the byte offset of
 * each field (computed from the address of a stack instance `pod`), and the
 * struct's total size as 'itemsize'.  Machine-generated by Cython 3.2.2. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_process_detail_list_v1_dtype_offsets(void) {
  nvmlProcessDetailList_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlProcessDetailList_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_process_detail_list_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":15133
 * 
 * cdef _get_process_detail_list_v1_dtype_offsets():
 *     cdef nvmlProcessDetailList_v1_t pod = nvmlProcessDetailList_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'mode', 'num_proc_array_entries', 'proc_array'],
 */
  /* NOTE(review): __pyx_t_1 is default-initialized only (this TU is compiled
   * as C++, and the type appears to be POD), so `pod` holds indeterminate
   * bytes here.  Harmless for this function: only the *addresses* of pod's
   * fields are taken below to compute offsets; its contents are never read. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":15134
 * cdef _get_process_detail_list_v1_dtype_offsets():
 *     cdef nvmlProcessDetailList_v1_t pod = nvmlProcessDetailList_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'mode', 'num_proc_array_entries', 'proc_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp],
 */
  /* Look up _numpy.dtype (the callable) once; the dict argument is built
   * below and passed in a single vectorcall at the end. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":15135
 *     cdef nvmlProcessDetailList_v1_t pod = nvmlProcessDetailList_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'mode', 'num_proc_array_entries', 'proc_array'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 */
  /* dict['names'] = list of the four Python-facing field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 15135, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_mode) != (0)) __PYX_ERR(0, 15135, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_num_proc_array_entries);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_num_proc_array_entries);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_num_proc_array_entries) != (0)) __PYX_ERR(0, 15135, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_proc_array);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_proc_array);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_proc_array) != (0)) __PYX_ERR(0, 15135, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 15135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15136
 *     return _numpy.dtype({
 *         'names': ['version', 'mode', 'num_proc_array_entries', 'proc_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* dict['formats'] = [uint32, uint32, uint32, intp]; each attribute is
   * looked up freshly on the _numpy module global. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15136, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 15136, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 15136, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 15136, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 15135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15138
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.numProcArrayEntries)) - (<intptr_t>&pod),
 */
  /* Field offsets computed as (address of field) - (address of struct);
   * equivalent to offsetof() for each member. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15138, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":15139
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.numProcArrayEntries)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.procArray)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.mode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":15140
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.numProcArrayEntries)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.procArray)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.numProcArrayEntries)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":15141
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.numProcArrayEntries)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.procArray)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlProcessDetailList_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.procArray)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15141, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15137
 *         'names': ['version', 'mode', 'num_proc_array_entries', 'proc_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.mode)) - (<intptr_t>&pod),
 */
  /* dict['offsets'] = list of the four offsets computed above. */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 15137, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 15137, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 15137, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 15137, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 15135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15143
 *             (<intptr_t>&(pod.procArray)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlProcessDetailList_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* dict['itemsize'] = sizeof(struct), so the dtype spans the whole record
   * including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessDetailList_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 15135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Finally call _numpy.dtype(spec_dict) via vectorcall and return it. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15134, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15132
 * 
 * 
 * cdef _get_process_detail_list_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessDetailList_v1_t pod = nvmlProcessDetailList_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code: error path releases every temporary and records a
   * traceback before returning NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_process_detail_list_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15161
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlProcessDetailList_v1_t *>calloc(1, sizeof(nvmlProcessDetailList_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Python slot wrapper for ProcessDetailList_v1.__init__.
 * Rejects any positional or keyword arguments (the .pyx __init__ takes only
 * self), then forwards to the implementation function. Returns 0 on success,
 * -1 with a Python exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe PyTuple_GET_SIZE path skips error
   * checking when the build config guarantees a valid tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments: raise TypeError otherwise. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__init__:
 * calloc-allocates (zero-initialized) one nvmlProcessDetailList_v1_t, raises
 * MemoryError on allocation failure, then initializes the bookkeeping fields:
 * _owner = None, _owned = True (so __dealloc__ frees the pointer),
 * _readonly = False, _refs = {}.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":15162
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlProcessDetailList_v1_t *>calloc(1, sizeof(nvmlProcessDetailList_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ProcessDetailList_v1")
 */
  /* calloc zero-fills the struct, so all NVML fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlProcessDetailList_v1_t *)calloc(1, (sizeof(nvmlProcessDetailList_v1_t))));

  /* "cuda/bindings/_nvml.pyx":15163
 *     def __init__(self):
 *         self._ptr = <nvmlProcessDetailList_v1_t *>calloc(1, sizeof(nvmlProcessDetailList_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ProcessDetailList_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15164
 *         self._ptr = <nvmlProcessDetailList_v1_t *>calloc(1, sizeof(nvmlProcessDetailList_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ProcessDetailList_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* MemoryError is looked up by name (module global falling back to
     * builtins, per Python semantics), then called with the message and
     * raised via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15164, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ProcessDetailLi};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15164, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15164, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15163
 *     def __init__(self):
 *         self._ptr = <nvmlProcessDetailList_v1_t *>calloc(1, sizeof(nvmlProcessDetailList_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ProcessDetailList_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":15165
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ProcessDetailList_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* No external owner: this instance owns its allocation. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":15166
 *             raise MemoryError("Error allocating ProcessDetailList_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":15167
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":15168
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  /* _refs holds Python objects kept alive on behalf of the struct. */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15161
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlProcessDetailList_v1_t *>calloc(1, sizeof(nvmlProcessDetailList_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15170
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlProcessDetailList_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
/* Python slot wrapper for ProcessDetailList_v1.__dealloc__: forwards to the
 * implementation after RefNanny setup. */
static void __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * this compiles only because __Pyx_KwValues_VARARGS is a macro that
   * discards its arguments — presumably defined as NULL in the Cython
   * utility code; verify there if this ever fails to build. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of ProcessDetailList_v1.__dealloc__:
 * frees the underlying struct only when this instance owns it (_owned) and
 * the pointer is non-NULL. The pointer is copied to a local and nulled on
 * the instance before free(), so a stale self->_ptr can never be observed
 * pointing at freed memory. Borrowed pointers (_owned == 0) are untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  nvmlProcessDetailList_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlProcessDetailList_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":15172
 *     def __dealloc__(self):
 *         cdef nvmlProcessDetailList_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: the NULL check only runs when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15173
 *         cdef nvmlProcessDetailList_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":15174
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":15175
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":15172
 *     def __dealloc__(self):
 *         cdef nvmlProcessDetailList_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":15170
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlProcessDetailList_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":15177
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ProcessDetailList_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
/* Python slot wrapper for ProcessDetailList_v1.__repr__: forwards to the
 * implementation; returns a new unicode reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are consumed (and discarded) by the
   * __Pyx_KwValues_VARARGS macro — they are not parameters here. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__repr__:
 * builds f"<{__name__}.ProcessDetailList_v1 object at {hex(id(self))}>" by
 * formatting the module's __name__, calling hex(id(self)), and joining the
 * five pieces with __Pyx_PyUnicode_Join using a precomputed length/max-char
 * estimate. Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":15178
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ProcessDetailList_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) via format-with-empty-spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".ProcessDetailList_v1 object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ProcessDetailList_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 32 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15177
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ProcessDetailList_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15180
 *         return f"<{__name__}.ProcessDetailList_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python getter wrapper for the `ptr` property: forwards to the
 * implementation; returns a new int reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are discarded by the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter:
 * returns the address of the underlying nvmlProcessDetailList_v1_t as a
 * Python int (may be 0 if the pointer was never allocated / was freed). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15183
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): the intptr_t address is boxed via PyLong_FromSsize_t —
   * assumes intptr_t and Py_ssize_t have the same width on supported
   * platforms (true on common ABIs; confirm for any new target). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15183, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15180
 *         return f"<{__name__}.ProcessDetailList_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15185
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef-level helper `_get_ptr` (C-only, no Python call overhead):
 * returns the address of the wrapped nvmlProcessDetailList_v1_t as an
 * intptr_t. Mirrors the Python-visible `ptr` property and `__int__`.
 * (Cython source: cuda/bindings/_nvml.pyx:15185-15186,
 *  `return <intptr_t>(self._ptr)`.) */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_20ProcessDetailList_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  /* Single-expression body: no temporaries or exit labels needed. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":15188
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
/* Python slot wrapper for ProcessDetailList_v1.__int__: forwards to the
 * implementation; returns a new int reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are discarded by the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__int__:
 * int(obj) yields the struct pointer's address, same value as the `ptr`
 * property. Returns a new int reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":15189
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the address; see the `ptr` getter for the intptr_t/Py_ssize_t
   * width assumption. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15188
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15191
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ProcessDetailList_v1 other_
 *         if not isinstance(other, ProcessDetailList_v1):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
/* Python slot wrapper for ProcessDetailList_v1.__eq__: forwards both
 * operands to the implementation; returns a new bool reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are discarded by the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__eq__:
 * returns False when `other` is not a ProcessDetailList_v1 (subclasses
 * included via type check); otherwise compares the raw struct contents of
 * both instances byte-for-byte with memcmp. Returns a new bool reference,
 * or NULL with an exception set.
 * NOTE(review): memcmp dereferences both _ptr values; if either is NULL
 * (possible after __dealloc__ ordering quirks or a failed __setitem__),
 * this is undefined behavior — the generated code assumes __init__ always
 * allocated. Verify against the .pyx template if this becomes reachable. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":15193
 *     def __eq__(self, other):
 *         cdef ProcessDetailList_v1 other_
 *         if not isinstance(other, ProcessDetailList_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15194
 *         cdef ProcessDetailList_v1 other_
 *         if not isinstance(other, ProcessDetailList_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessDetailList_v1_t)) == 0)
 */
    /* Not comparable: return False rather than NotImplemented (matches the
     * .pyx source). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15193
 *     def __eq__(self, other):
 *         cdef ProcessDetailList_v1 other_
 *         if not isinstance(other, ProcessDetailList_v1):             # <<<<<<<<<<<<<<
 *             return False
 *             other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":15195
 *         if not isinstance(other, ProcessDetailList_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessDetailList_v1_t)) == 0)
 * 
 */
  /* Typed re-check and cast of `other` to the extension type. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1))))) __PYX_ERR(0, 15195, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":15196
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessDetailList_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlProcessDetailList_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15191
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ProcessDetailList_v1 other_
 *         if not isinstance(other, ProcessDetailList_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15198
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessDetailList_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
/* Python slot wrapper for ProcessDetailList_v1.__setitem__: forwards key
 * and value to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are discarded by the macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__setitem__:
 * - obj[0] = ndarray: allocates a fresh nvmlProcessDetailList_v1_t, copies
 *   sizeof(struct) bytes from the array's data pointer into it, and marks
 *   the copy as owned; _readonly mirrors (not val.flags.writeable).
 * - any other key: falls back to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 *
 * FIX: the original generated code overwrote self->_ptr with the new
 * malloc() result without releasing the previously owned allocation
 * (calloc'd in __init__, or from an earlier obj[0] = ... assignment),
 * leaking one struct per assignment. We now free the old pointer first,
 * but only when this instance owns it (_owned) — borrowed pointers are
 * left untouched.
 *
 * NOTE(review): the memcpy assumes val's buffer holds at least
 * sizeof(nvmlProcessDetailList_v1_t) bytes — presumably guaranteed by the
 * dtype built in _get_process_detail_list_v1_dtype_offsets; verify at the
 * callers. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":15199
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only look up _numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15199, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 15199, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15200
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 */
    /* FIX: release the previously owned allocation before replacing it,
     * otherwise it leaks (see function header). */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlProcessDetailList_v1_t *)malloc((sizeof(nvmlProcessDetailList_v1_t))));

    /* "cuda/bindings/_nvml.pyx":15201
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessDetailList_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15202
 *             self._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessDetailList_v1_t))
 *             self._owner = None
 */
      /* Look up MemoryError by name and raise it with the message. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15202, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ProcessDetailLi};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15202, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 15202, __pyx_L1_error)
    }

    /* "cuda/bindings/_nvml.pyx":15203
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessDetailList_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy the struct bytes from val.ctypes.data (an int address). */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15203, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15203, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15203, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlProcessDetailList_v1_t))));

    /* "cuda/bindings/_nvml.pyx":15204
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessDetailList_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-contained: no external owner, instance owns it. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":15205 — self._owned = True */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":15206
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15206, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15206, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 15206, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":15208
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-zero / non-ndarray case: delegate to attribute assignment. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 15208, __pyx_L1_error)
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15210
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: Struct version, MUST be nvmlProcessDetailList_v1."""
*/

/* Python wrapper */
/* CPython-level getter slot for the `ProcessDetailList_v1.version` property.
 * Casts the generic PyObject* self to the extension-type struct and delegates
 * to the typed implementation function below. Returns a new reference, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused for a __get__ slot; __Pyx_KwValues_VARARGS is
     presumably a macro that does not evaluate __pyx_args/__pyx_nargs here
     (neither identifier is declared in this function) — defined elsewhere. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.version.__get__` (_nvml.pyx:15210-15213):
 * boxes the C `unsigned int` field self._ptr[0].version into a Python int.
 * Returns a new reference on success, NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15213
 *     def version(self):
 *         """int: Struct version, MUST be nvmlProcessDetailList_v1."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the C unsigned int to a Python int; jumps to the error label on
     allocation failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15210
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: Struct version, MUST be nvmlProcessDetailList_v1."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15215
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter slot for the `ProcessDetailList_v1.version` property.
 * Casts self to the extension-type struct and delegates to the typed
 * implementation. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused for a __set__ slot (macro expansion; see module prelude). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.version.__set__` (_nvml.pyx:15215-15219):
 * raises ValueError if the instance is read-only, otherwise converts `val`
 * to a C `unsigned int` and stores it in self._ptr[0].version.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15217
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15218
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError("This ProcessDetailList_v1 instance is
       read-only") via the vectorcall fast path; this always exits through
       the error label. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ProcessDetailList_v1_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15218, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15218, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15217
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":15219
 *         if self._readonly:
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python int; (unsigned int)-1 doubles as the error sentinel, so
     PyErr_Occurred() disambiguates a genuine UINT_MAX value from failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15219, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":15215
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15221
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: Process mode(Compute/Graphics/MPSCompute)"""
*/

/* Python wrapper */
/* CPython-level getter slot for the `ProcessDetailList_v1.mode` property.
 * Casts self to the extension-type struct and delegates to the typed
 * implementation. Returns a new reference, or NULL on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused for a __get__ slot (macro expansion; see module prelude). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.mode.__get__` (_nvml.pyx:15221-15224):
 * boxes the C `unsigned int` field self._ptr[0].mode into a Python int.
 * Returns a new reference on success, NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15224
 *     def mode(self):
 *         """int: Process mode(Compute/Graphics/MPSCompute)"""
 *         return self._ptr[0].mode             # <<<<<<<<<<<<<<
 * 
 *     @mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the C unsigned int to a Python int; jumps to the error label on
     allocation failure. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).mode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15221
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mode(self):
 *         """int: Process mode(Compute/Graphics/MPSCompute)"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15226
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter slot for the `ProcessDetailList_v1.mode` property.
 * Casts self to the extension-type struct and delegates to the typed
 * implementation. Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused for a __set__ slot (macro expansion; see module prelude). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.mode.__set__` (_nvml.pyx:15226-15230):
 * raises ValueError if the instance is read-only, otherwise converts `val`
 * to a C `unsigned int` and stores it in self._ptr[0].mode.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15228
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         self._ptr[0].mode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15229
 *     def mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].mode = val
 * 
 */
    /* Build and raise ValueError("This ProcessDetailList_v1 instance is
       read-only") via the vectorcall fast path; this always exits through
       the error label. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ProcessDetailList_v1_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15229, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15229, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15228
 *     @mode.setter
 *     def mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         self._ptr[0].mode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":15230
 *         if self._readonly:
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         self._ptr[0].mode = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Unbox the Python int; (unsigned int)-1 doubles as the error sentinel, so
     PyErr_Occurred() disambiguates a genuine UINT_MAX value from failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15230, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).mode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":15226
 *         return self._ptr[0].mode
 * 
 *     @mode.setter             # <<<<<<<<<<<<<<
 *     def mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15232
 *         self._ptr[0].mode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def proc_array(self):
 *         """int: Process array."""
*/

/* Python wrapper */
/* CPython-level getter slot for the `ProcessDetailList_v1.proc_array`
 * property. Casts self to the extension-type struct and delegates to the
 * typed implementation. Returns a new reference, or NULL on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused for a __get__ slot (macro expansion; see module prelude). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.proc_array.__get__`
 * (_nvml.pyx:15232-15237): returns an empty list when procArray is NULL or
 * numProcArrayEntries is 0; otherwise returns
 * ProcessDetail_v1.from_ptr(<intptr_t>procArray, numProcArrayEntries),
 * i.e. a view over the embedded C array without copying.
 * Returns a new reference on success, NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15235
 *     def proc_array(self):
 *         """int: Process array."""
 *         if self._ptr[0].procArray == NULL or self._ptr[0].numProcArrayEntries == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return ProcessDetail_v1.from_ptr(<intptr_t>(self._ptr[0].procArray), self._ptr[0].numProcArrayEntries)
 */
  /* Short-circuit `or`: the entry-count test is skipped when the pointer is
     already NULL. */
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).procArray == NULL);
  if (!__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).numProcArrayEntries == 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15236
 *         """int: Process array."""
 *         if self._ptr[0].procArray == NULL or self._ptr[0].numProcArrayEntries == 0:
 *             return []             # <<<<<<<<<<<<<<
 *         return ProcessDetail_v1.from_ptr(<intptr_t>(self._ptr[0].procArray), self._ptr[0].numProcArrayEntries)
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15236, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15235
 *     def proc_array(self):
 *         """int: Process array."""
 *         if self._ptr[0].procArray == NULL or self._ptr[0].numProcArrayEntries == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return ProcessDetail_v1.from_ptr(<intptr_t>(self._ptr[0].procArray), self._ptr[0].numProcArrayEntries)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15237
 *         if self._ptr[0].procArray == NULL or self._ptr[0].numProcArrayEntries == 0:
 *             return []
 *         return ProcessDetail_v1.from_ptr(<intptr_t>(self._ptr[0].procArray), self._ptr[0].numProcArrayEntries)             # <<<<<<<<<<<<<<
 * 
 *     @proc_array.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Call ProcessDetail_v1.from_ptr(ptr, count): the pointer is boxed as a
     Python int (via intptr_t) and the count as a Python int. */
  __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  __Pyx_INCREF(__pyx_t_4);
  __pyx_t_5 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).procArray)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).numProcArrayEntries); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 0;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_5, __pyx_t_6};
    __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15237, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
  }
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15232
 *         self._ptr[0].mode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def proc_array(self):
 *         """int: Process array."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.proc_array.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15239
 *         return ProcessDetail_v1.from_ptr(<intptr_t>(self._ptr[0].procArray), self._ptr[0].numProcArrayEntries)
 * 
 *     @proc_array.setter             # <<<<<<<<<<<<<<
 *     def proc_array(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level setter slot for the `ProcessDetailList_v1.proc_array`
 * property. Casts self to the extension-type struct and delegates to the
 * typed implementation. Returns 0 on success, -1 with an exception set on
 * error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused for a __set__ slot (macro expansion; see module prelude). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.proc_array.__set__`
 * (_nvml.pyx:15239-15246): raises ValueError if the instance is read-only,
 * type-checks `val` as a ProcessDetail_v1 (None also passes the check),
 * stores its underlying pointer into self._ptr[0].procArray and its length
 * into numProcArrayEntries, and keeps `val` alive by stashing it in the
 * self._refs dict under "proc_array" (the struct holds only a raw pointer).
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15241
 *     @proc_array.setter
 *     def proc_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         cdef ProcessDetail_v1 arr = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15242
 *     def proc_array(self, val):
 *         if self._readonly:
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef ProcessDetail_v1 arr = val
 *         self._ptr[0].procArray = <nvmlProcessDetail_v1_t*><intptr_t>(arr._get_ptr())
 */
    /* Build and raise ValueError("This ProcessDetailList_v1 instance is
       read-only") via the vectorcall fast path; this always exits through
       the error label. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ProcessDetailList_v1_instan};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15242, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15242, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15241
 *     @proc_array.setter
 *     def proc_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         cdef ProcessDetail_v1 arr = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":15243
 *         if self._readonly:
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         cdef ProcessDetail_v1 arr = val             # <<<<<<<<<<<<<<
 *         self._ptr[0].procArray = <nvmlProcessDetail_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].numProcArrayEntries = len(arr)
 */
  /* Type-check: val must be a ProcessDetail_v1 instance (TypeError otherwise).
     Note None passes __Pyx_TypeTest's None check here; a None val would fail
     later at the _get_ptr vtab call. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1))))) __PYX_ERR(0, 15243, __pyx_L1_error)
  __pyx_v_arr = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15244
 *             raise ValueError("This ProcessDetailList_v1 instance is read-only")
 *         cdef ProcessDetail_v1 arr = val
 *         self._ptr[0].procArray = <nvmlProcessDetail_v1_t*><intptr_t>(arr._get_ptr())             # <<<<<<<<<<<<<<
 *         self._ptr[0].numProcArrayEntries = len(arr)
 *         self._refs["proc_array"] = arr
 */
  /* Fetch the raw data pointer through the cdef-class vtable and store it in
     the wrapped C struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v_arr->__pyx_vtab)->_get_ptr(__pyx_v_arr); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 15244, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).procArray = ((nvmlProcessDetail_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":15245
 *         cdef ProcessDetail_v1 arr = val
 *         self._ptr[0].procArray = <nvmlProcessDetail_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].numProcArrayEntries = len(arr)             # <<<<<<<<<<<<<<
 *         self._refs["proc_array"] = arr
 * 
 */
  __pyx_t_5 = PyObject_Length(((PyObject *)__pyx_v_arr)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 15245, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).numProcArrayEntries = __pyx_t_5;

  /* "cuda/bindings/_nvml.pyx":15246
 *         self._ptr[0].procArray = <nvmlProcessDetail_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].numProcArrayEntries = len(arr)
 *         self._refs["proc_array"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Keep `arr` alive for as long as the struct points into it. */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 15246, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_proc_array, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 15246, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15239
 *         return ProcessDetail_v1.from_ptr(<intptr_t>(self._ptr[0].procArray), self._ptr[0].numProcArrayEntries)
 * 
 *     @proc_array.setter             # <<<<<<<<<<<<<<
 *     def proc_array(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.proc_array.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15248
 *         self._refs["proc_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessDetailList_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL entry point for the static method
 * `ProcessDetailList_v1.from_data(data)`: unpacks the single positional or
 * keyword argument `data`, then delegates to the pure implementation.
 * Returns a new reference, or NULL with an exception set on failure.
 *
 * FIX (review): the keyword-length error check below originally read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which is always false, because unlikely(x) expands to
 * __builtin_expect(!!(x), 0) and therefore yields only 0 or 1 — the
 * negative-return error path of __Pyx_NumKwargs_FASTCALL was unreachable.
 * The parenthesis has been moved so the whole comparison sits inside
 * unlikely(), restoring the intended error propagation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_12from_data, "ProcessDetailList_v1.from_data(data)\n\nCreate an ProcessDetailList_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `process_detail_list_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected parenthesization: the `< 0` must be inside unlikely() for the
       error branch to be reachable (see header note). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15248, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: bind any positional args first, then
         let __Pyx_ParseKeywords fill the remainder. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15248, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 15248, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 15248, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15248, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15248, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `ProcessDetailList_v1.from_data(data)` (_nvml.pyx:15248-15255):
 * looks up the module-global `process_detail_list_v1_dtype` and forwards to
 * the shared helper __from_data(data, name, dtype, cls) to build a
 * ProcessDetailList_v1 wrapping the given NumPy array.
 * Returns a new reference on success, NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":15255
 *             data (_numpy.ndarray): a single-element array of dtype `process_detail_list_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "process_detail_list_v1_dtype", process_detail_list_v1_dtype, ProcessDetailList_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from module globals at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_process_detail_list_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_process_detail_list_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15248
 *         self._refs["proc_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessDetailList_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15257
 *         return __from_data(data, "process_detail_list_v1_dtype", process_detail_list_v1_dtype, ProcessDetailList_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessDetailList_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python wrapper for ProcessDetailList_v1.from_ptr (Cython-generated).
   Unpacks (ptr, readonly=False, owner=None) from fastcall/vararg calling
   conventions, converts them to C values, and dispatches to the
   implementation function. Returns NULL with an exception set on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_14from_ptr, "ProcessDetailList_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ProcessDetailList_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the `< 0` comparison must be inside unlikely(); `unlikely(x)` expands to
       __builtin_expect(!!(x), 0) which is 0 or 1, so `unlikely(len) < 0` was always
       false and the kwargs-length error path was unreachable (cf. the correct form
       used by the other generated wrappers in this file). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15257, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 15257, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":15258
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ProcessDetailList_v1 instance wrapping the given pointer.
 * 
*/
      /* Default owner=None; then verify the one required positional arg (ptr) was supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 15257, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15257, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15257, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python-level arguments to C values (ptr address, readonly flag). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15258, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15258, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 15257, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":15257
 *         return __from_data(data, "process_detail_list_v1_dtype", process_detail_list_v1_dtype, ProcessDetailList_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessDetailList_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.from_ptr (Cython-generated).
   Rejects a null pointer; when owner is None the struct at `ptr` is copied
   into freshly malloc'ed storage owned by the new object (_owned = true),
   otherwise the address is wrapped directly and `owner` is retained to keep
   the underlying memory alive (_owned = false). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15266
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15267
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15267, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15267, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15266
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":15268
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
*/
  /* Allocate the wrapper object without running __init__ (tp_new only). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetailList_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15268, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15269
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15270
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
*/
    /* No owner: take a private copy of the struct so its lifetime is ours. */
    __pyx_v_obj->_ptr = ((nvmlProcessDetailList_v1_t *)malloc((sizeof(nvmlProcessDetailList_v1_t))));

    /* "cuda/bindings/_nvml.pyx":15271
 *         if owner is None:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessDetailList_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15272
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessDetailList_v1_t))
 *             obj._owner = None
*/
      /* MemoryError is resolved by name (module/builtin lookup), then called. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15272, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ProcessDetailLi};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15272, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 15272, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15271
 *         if owner is None:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessDetailList_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":15273
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessDetailList_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlProcessDetailList_v1_t))));

    /* "cuda/bindings/_nvml.pyx":15274
 *                 raise MemoryError("Error allocating ProcessDetailList_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessDetailList_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":15275
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessDetailList_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":15269
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessDetailList_v1 obj = ProcessDetailList_v1.__new__(ProcessDetailList_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>malloc(sizeof(nvmlProcessDetailList_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":15277
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Owner supplied: wrap the address directly; owner keeps the memory alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlProcessDetailList_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":15278
 *         else:
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":15279
 *             obj._ptr = <nvmlProcessDetailList_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":15280
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":15281
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15281, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15282
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15257
 *         return __from_data(data, "process_detail_list_v1_dtype", process_detail_list_v1_dtype, ProcessDetailList_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessDetailList_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for ProcessDetailList_v1.__reduce_cython__ (Cython-generated).
   Rejects any positional or keyword arguments, then delegates to the
   implementation (which always raises TypeError — pickling is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_16__reduce_cython__, "ProcessDetailList_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__reduce_cython__: unconditionally
   raises TypeError because the wrapped C pointer cannot be pickled. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for ProcessDetailList_v1.__setstate_cython__ (Cython-generated).
   Unpacks the single required __pyx_state argument and delegates to the
   implementation (which always raises TypeError — unpickling is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_18__setstate_cython__, "ProcessDetailList_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the `< 0` comparison must be inside unlikely(); `unlikely(x)` expands to
       __builtin_expect(!!(x), 0) which is 0 or 1, so `unlikely(len) < 0` was always
       false and the kwargs-length error path was unreachable (cf. the correct form
       used by the other generated wrappers in this file). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify the one required argument (__pyx_state) was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessDetailList_v1.__setstate_cython__: unconditionally
   raises TypeError because the wrapped C pointer cannot be unpickled. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessDetailList_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15285
 * 
 * 
 * cdef _get_bridge_chip_hierarchy_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlBridgeChipHierarchy_t pod = nvmlBridgeChipHierarchy_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype describing nvmlBridgeChipHierarchy_t:
   field names/formats plus byte offsets computed from a stack instance's
   member addresses, and the struct's total size as itemsize. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_bridge_chip_hierarchy_dtype_offsets(void) {
  nvmlBridgeChipHierarchy_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlBridgeChipHierarchy_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_bridge_chip_hierarchy_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":15286
 * 
 * cdef _get_bridge_chip_hierarchy_dtype_offsets():
 *     cdef nvmlBridgeChipHierarchy_t pod = nvmlBridgeChipHierarchy_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['bridge_count', 'bridge_chip_info'],
*/
  /* FIX: value-initialize the temporary before copying it into pod. The
     generator's `__pyx_t_1 = nvmlBridgeChipHierarchy_t();` line was missing,
     so an indeterminate object was copied (undefined behavior in C++, even
     though only member addresses are used below). */
  __pyx_t_1 = nvmlBridgeChipHierarchy_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":15287
 * cdef _get_bridge_chip_hierarchy_dtype_offsets():
 *     cdef nvmlBridgeChipHierarchy_t pod = nvmlBridgeChipHierarchy_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['bridge_count', 'bridge_chip_info'],
 *         'formats': [_numpy.uint8, bridge_chip_info_dtype],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15287, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15287, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":15288
 *     cdef nvmlBridgeChipHierarchy_t pod = nvmlBridgeChipHierarchy_t()
 *     return _numpy.dtype({
 *         'names': ['bridge_count', 'bridge_chip_info'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint8, bridge_chip_info_dtype],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bridge_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bridge_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_bridge_count) != (0)) __PYX_ERR(0, 15288, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_bridge_chip_info);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_bridge_chip_info);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_bridge_chip_info) != (0)) __PYX_ERR(0, 15288, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 15288, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15289
 *     return _numpy.dtype({
 *         'names': ['bridge_count', 'bridge_chip_info'],
 *         'formats': [_numpy.uint8, bridge_chip_info_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.bridgeCount)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_bridge_chip_info_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = PyList_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15289, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15289, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_8) < (0)) __PYX_ERR(0, 15288, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;

  /* "cuda/bindings/_nvml.pyx":15291
 *         'formats': [_numpy.uint8, bridge_chip_info_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.bridgeCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bridgeChipInfo)) - (<intptr_t>&pod),
 *         ],
*/
  /* Field offsets are derived from member addresses relative to the struct base. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bridgeCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15292
 *         'offsets': [
 *             (<intptr_t>&(pod.bridgeCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bridgeChipInfo)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlBridgeChipHierarchy_t),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bridgeChipInfo)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":15290
 *         'names': ['bridge_count', 'bridge_chip_info'],
 *         'formats': [_numpy.uint8, bridge_chip_info_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bridgeCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.bridgeChipInfo)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15290, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 15290, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15290, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 15288, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15294
 *             (<intptr_t>&(pod.bridgeChipInfo)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlBridgeChipHierarchy_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlBridgeChipHierarchy_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 15288, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict) via vectorcall (unpacking bound method if any). */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15287, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15285
 * 
 * 
 * cdef _get_bridge_chip_hierarchy_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlBridgeChipHierarchy_t pod = nvmlBridgeChipHierarchy_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_bridge_chip_hierarchy_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15311
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlBridgeChipHierarchy_t *>calloc(1, sizeof(nvmlBridgeChipHierarchy_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for BridgeChipHierarchy.__init__.
 * The underlying __init__ takes no arguments beyond self, so this wrapper
 * only validates that the call carries zero positional and zero keyword
 * arguments before dispatching to the implementation function. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the PyTuple_Size branch also reports errors. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Any positional argument is an error: __init__(self) takes none. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* Likewise reject any keyword arguments. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipHierarchy.__init__.
 * Allocates a zero-filled nvmlBridgeChipHierarchy_t on the C heap, raises
 * MemoryError on allocation failure, and initializes the ownership flags
 * (_owner = None, _owned = True, _readonly = False). Returns 0 on success,
 * -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":15312
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlBridgeChipHierarchy_t *>calloc(1, sizeof(nvmlBridgeChipHierarchy_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating BridgeChipHierarchy")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlBridgeChipHierarchy_t *)calloc(1, (sizeof(nvmlBridgeChipHierarchy_t))));

  /* "cuda/bindings/_nvml.pyx":15313
 *     def __init__(self):
 *         self._ptr = <nvmlBridgeChipHierarchy_t *>calloc(1, sizeof(nvmlBridgeChipHierarchy_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating BridgeChipHierarchy")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15314
 *         self._ptr = <nvmlBridgeChipHierarchy_t *>calloc(1, sizeof(nvmlBridgeChipHierarchy_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating BridgeChipHierarchy")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError from module globals and call it with the
     * message string, then raise the resulting exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15314, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_BridgeChipHiera};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15314, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15314, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15313
 *     def __init__(self):
 *         self._ptr = <nvmlBridgeChipHierarchy_t *>calloc(1, sizeof(nvmlBridgeChipHierarchy_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating BridgeChipHierarchy")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":15315
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating BridgeChipHierarchy")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the _owner attribute to None with proper refcount bookkeeping. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":15316
 *             raise MemoryError("Error allocating BridgeChipHierarchy")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":15317
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":15311
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlBridgeChipHierarchy_t *>calloc(1, sizeof(nvmlBridgeChipHierarchy_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15319
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlBridgeChipHierarchy_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for BridgeChipHierarchy.__dealloc__ (tp_dealloc helper).
 * Fix: the original assigned
 *     __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
 * but this wrapper's only parameter is __pyx_v_self — __pyx_args and
 * __pyx_nargs are not declared in this scope, which is a compile error
 * wherever the macro expands its arguments (e.g. CYTHON_METH_FASTCALL).
 * The kwvalues setup is dead code for an argument-less slot wrapper,
 * so it is removed along with the unused declaration. */
static void __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3__dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of BridgeChipHierarchy.__dealloc__.
 * If this object owns its nvmlBridgeChipHierarchy_t buffer, release it.
 * The pointer field is cleared *before* free() so the object never holds
 * a dangling reference, mirroring the Cython source:
 *     if self._owned and self._ptr != NULL:
 *         ptr = self._ptr
 *         self._ptr = NULL
 *         free(ptr)
 * No Python error handling is needed: nothing here can raise. */
static void __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlBridgeChipHierarchy_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":15326
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.BridgeChipHierarchy object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for BridgeChipHierarchy.__repr__ (tp_repr slot).
 * Fix: removed `__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args,
 * __pyx_nargs);` — this unary slot wrapper receives only __pyx_v_self, so
 * __pyx_args/__pyx_nargs are undeclared identifiers here (compile error
 * under CYTHON_METH_FASTCALL macro expansion). The kwvalues setup is dead
 * code for a no-argument wrapper and is dropped with its declaration. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_5__repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipHierarchy.__repr__.
 * Builds the f-string "<{__name__}.BridgeChipHierarchy object at {hex(id(self))}>"
 * by formatting __name__, calling hex(id(self)), and joining the five pieces
 * (two literal fragments from the module string table plus the two computed
 * parts) with __Pyx_PyUnicode_Join. Returns a new unicode object, or NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":15327
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.BridgeChipHierarchy object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ as a str (empty format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to unicode for joining. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<" + name + ".BridgeChipHierarchy object at " + hexid + ">";
   * the length/max-char hints let the join pre-size the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_BridgeChipHierarchy_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 31 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15326
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.BridgeChipHierarchy object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15329
 *         return f"<{__name__}.BridgeChipHierarchy object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper for the BridgeChipHierarchy.ptr property getter.
 * Fix: removed `__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args,
 * __pyx_nargs);` — a getter wrapper receives only __pyx_v_self, so
 * __pyx_args/__pyx_nargs are undeclared here (compile error under
 * CYTHON_METH_FASTCALL macro expansion). The dead kwvalues setup and its
 * unused declaration are dropped. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3ptr_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the BridgeChipHierarchy.ptr property getter.
 * Returns the wrapped struct's address, <intptr_t>self._ptr, as a Python
 * int. NULL with an exception set only if the int conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15332
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t is converted via Py_ssize_t (same width on supported platforms). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15329
 *         return f"<{__name__}.BridgeChipHierarchy object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15334
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19BridgeChipHierarchy__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":15335
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15334
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15337
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper for BridgeChipHierarchy.__int__ (nb_int slot).
 * Fix: removed `__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args,
 * __pyx_nargs);` — this unary slot wrapper has only __pyx_v_self, so
 * __pyx_args/__pyx_nargs are undeclared identifiers in this scope
 * (compile error under CYTHON_METH_FASTCALL macro expansion). The dead
 * kwvalues setup and its unused declaration are dropped. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_7__int__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipHierarchy.__int__.
 * Same contract as the `ptr` property getter: returns <intptr_t>self._ptr
 * as a Python int, or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":15338
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15337
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15340
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef BridgeChipHierarchy other_
 *         if not isinstance(other, BridgeChipHierarchy):
*/

/* Python wrapper */
/* Python wrapper for BridgeChipHierarchy.__eq__ (binaryfunc slot).
 * Fix: removed `__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args,
 * __pyx_nargs);` — this wrapper's parameters are (self, other); there is
 * no __pyx_args/__pyx_nargs in scope, so the line was a compile error
 * under CYTHON_METH_FASTCALL macro expansion. The dead kwvalues setup
 * and its unused declaration are dropped. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipHierarchy.__eq__.
 * Returns False if `other` is not a BridgeChipHierarchy; otherwise compares
 * the two wrapped structs bytewise with memcmp over
 * sizeof(nvmlBridgeChipHierarchy_t) and returns the resulting bool.
 * NOTE(review): the memcmp dereferences both _ptr values without a NULL
 * check — assumes both objects hold live buffers; confirm against callers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":15342
 *     def __eq__(self, other):
 *         cdef BridgeChipHierarchy other_
 *         if not isinstance(other, BridgeChipHierarchy):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15343
 *         cdef BridgeChipHierarchy other_
 *         if not isinstance(other, BridgeChipHierarchy):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBridgeChipHierarchy_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15342
 *     def __eq__(self, other):
 *         cdef BridgeChipHierarchy other_
 *         if not isinstance(other, BridgeChipHierarchy):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":15344
 *         if not isinstance(other, BridgeChipHierarchy):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBridgeChipHierarchy_t)) == 0)
 * 
 */
  /* Typed cast of `other` into the cdef local (None would also pass the
   * TypeTest, but the isinstance guard above already excluded it). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy))))) __PYX_ERR(0, 15344, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":15345
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBridgeChipHierarchy_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlBridgeChipHierarchy_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15340
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef BridgeChipHierarchy other_
 *         if not isinstance(other, BridgeChipHierarchy):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15347
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBridgeChipHierarchy_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
*/

/* Python wrapper */
/* Python wrapper for BridgeChipHierarchy.__setitem__ (mp_ass_subscript).
 * Fix: removed `__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args,
 * __pyx_nargs);` — this slot wrapper's parameters are (self, key, val);
 * __pyx_args/__pyx_nargs are not declared in this scope, so the line was
 * a compile error under CYTHON_METH_FASTCALL macro expansion. The dead
 * kwvalues setup and its unused declaration are dropped. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipHierarchy.__setitem__.
 * If key == 0 and val is a numpy.ndarray: allocates a fresh
 * nvmlBridgeChipHierarchy_t, copies sizeof(struct) bytes from the array's
 * data pointer (val.ctypes.data), and resets ownership flags
 * (_owner = None, _owned = True, _readonly = not val.flags.writeable).
 * Otherwise falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the numpy branch overwrites self._ptr without freeing a
 * previously owned buffer — potential leak if called twice; confirm intent
 * in the .pyx source. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":15348
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only test isinstance when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15348, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 15348, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15349
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 */
    __pyx_v_self->_ptr = ((nvmlBridgeChipHierarchy_t *)malloc((sizeof(nvmlBridgeChipHierarchy_t))));

    /* "cuda/bindings/_nvml.pyx":15350
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBridgeChipHierarchy_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15351
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBridgeChipHierarchy_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating BridgeChipHierarchy"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15351, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_BridgeChipHiera};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15351, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 15351, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15350
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBridgeChipHierarchy_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":15352
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBridgeChipHierarchy_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (array base address as a Python int) and copy
     * the struct's worth of bytes from it into the new buffer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15352, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15352, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15352, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlBridgeChipHierarchy_t))));

    /* "cuda/bindings/_nvml.pyx":15353
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBridgeChipHierarchy_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":15354
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlBridgeChipHierarchy_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":15355
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability into the _readonly flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15355, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15355, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 15355, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":15348
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":15357
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 15357, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":15347
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlBridgeChipHierarchy_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15359
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bridge_chip_info(self):
 *         """BridgeChipInfo: """
*/

/* Python wrapper */
/* Cython-generated __get__ descriptor wrapper for BridgeChipHierarchy.bridge_chip_info.
   Do not edit by hand: regenerate from cuda/bindings/_nvml.pyx.
   Casts the raw PyObject* self to the extension-type struct and delegates
   to the impl function; no Python-level errors can occur in this shim. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function, so
     __Pyx_KwValues_VARARGS is presumably a macro that discards its arguments
     (a __get__ slot takes no args/kwargs) — confirm against Cython's utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the bridge_chip_info property getter (pyx line 15362):
   return BridgeChipInfo.from_ptr(<intptr_t>&(self._ptr[0].bridgeChipInfo), 128, self._readonly)
   It wraps the address of the embedded bridgeChipInfo member of the underlying
   nvmlBridgeChipHierarchy_t struct; the 128 matches the fixed array length
   enforced by the corresponding setter below.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15362
 *     def bridge_chip_info(self):
 *         """BridgeChipInfo: """
 *         return BridgeChipInfo.from_ptr(<intptr_t>&(self._ptr[0].bridgeChipInfo), 128, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @bridge_chip_info.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the BridgeChipInfo extension type and box the member address as a Python int. */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).bridgeChipInfo))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    /* Vectorcall of BridgeChipInfo.from_ptr(addr, 128, readonly). */
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_128, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15362, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15359
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bridge_chip_info(self):
 *         """BridgeChipInfo: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.bridge_chip_info.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15364
 *         return BridgeChipInfo.from_ptr(<intptr_t>&(self._ptr[0].bridgeChipInfo), 128, self._readonly)
 * 
 *     @bridge_chip_info.setter             # <<<<<<<<<<<<<<
 *     def bridge_chip_info(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated __set__ descriptor wrapper for BridgeChipHierarchy.bridge_chip_info.
   Casts self to the extension-type struct and forwards (self, val) to the impl;
   returns 0 on success, -1 on error (standard descriptor protocol).
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the bridge_chip_info property setter (pyx lines 15365-15371):
   1. raise ValueError if the instance is read-only;
   2. type-check val as a BridgeChipInfo (None also passes the generated check);
   3. raise ValueError unless len(val) == 128;
   4. memcpy 128 * sizeof(nvmlBridgeChipInfo_t) bytes from val's pointer into
      self._ptr[0].bridgeChipInfo.
   Returns 0 on success, -1 with an exception set on failure.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15366
 *     @bridge_chip_info.setter
 *     def bridge_chip_info(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         cdef BridgeChipInfo val_ = val
*/
  /* Guard: reject writes on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15367
 *     def bridge_chip_info(self, val):
 *         if self._readonly:
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef BridgeChipInfo val_ = val
 *         if len(val) != 128:
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_BridgeChipHierarchy_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15367, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15367, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15366
 *     @bridge_chip_info.setter
 *     def bridge_chip_info(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         cdef BridgeChipInfo val_ = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":15368
 *         if self._readonly:
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         cdef BridgeChipInfo val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 128:
 *             raise ValueError(f"Expected length 128 for field bridge_chip_info, got {len(val)}")
*/
  /* Typed assignment: TypeError (via __Pyx_TypeTest) unless val is BridgeChipInfo or None. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo))))) __PYX_ERR(0, 15368, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15369
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         cdef BridgeChipInfo val_ = val
 *         if len(val) != 128:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 128 for field bridge_chip_info, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].bridgeChipInfo), <void *>(val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128)
*/
  /* Length guard: the destination array holds exactly 128 (0x80) elements. */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 15369, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 0x80);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":15370
 *         cdef BridgeChipInfo val_ = val
 *         if len(val) != 128:
 *             raise ValueError(f"Expected length 128 for field bridge_chip_info, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].bridgeChipInfo), <void *>(val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128)
 * 
*/
    /* Build the f-string message by formatting len(val) and concatenating. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 15370, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15370, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_128_for_field_br, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15370, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15370, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15370, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15369
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         cdef BridgeChipInfo val_ = val
 *         if len(val) != 128:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 128 for field bridge_chip_info, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].bridgeChipInfo), <void *>(val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128)
*/
  }

  /* "cuda/bindings/_nvml.pyx":15371
 *         if len(val) != 128:
 *             raise ValueError(f"Expected length 128 for field bridge_chip_info, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].bridgeChipInfo), <void *>(val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* NOTE(review): val_ may be None here (the type test above allows it); the
     vtable call through __pyx_v_val_->__pyx_vtab would then dereference NULL.
     Presumably from_ptr never yields None — confirm in the .pyx source. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 15371, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).bridgeChipInfo)), ((void *)__pyx_t_8), ((sizeof(nvmlBridgeChipInfo_t)) * 0x80)));

  /* "cuda/bindings/_nvml.pyx":15364
 *         return BridgeChipInfo.from_ptr(<intptr_t>&(self._ptr[0].bridgeChipInfo), 128, self._readonly)
 * 
 *     @bridge_chip_info.setter             # <<<<<<<<<<<<<<
 *     def bridge_chip_info(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.bridge_chip_info.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15373
 *         memcpy(<void *>&(self._ptr[0].bridgeChipInfo), <void *>(val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bridge_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated __get__ descriptor wrapper for BridgeChipHierarchy.bridge_count.
   Casts self to the extension-type struct and delegates to the impl.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the bridge_count property getter (pyx line 15376):
   returns self._ptr[0].bridgeCount (an unsigned char in the C struct)
   boxed as a Python int.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15376
 *     def bridge_count(self):
 *         """int: """
 *         return self._ptr[0].bridgeCount             # <<<<<<<<<<<<<<
 * 
 *     @bridge_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).bridgeCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15376, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15373
 *         memcpy(<void *>&(self._ptr[0].bridgeChipInfo), <void *>(val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def bridge_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.bridge_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15378
 *         return self._ptr[0].bridgeCount
 * 
 *     @bridge_count.setter             # <<<<<<<<<<<<<<
 *     def bridge_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated __set__ descriptor wrapper for BridgeChipHierarchy.bridge_count.
   Forwards (self, val) to the impl; returns 0 on success, -1 on error.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the bridge_count property setter (pyx lines 15380-15382):
   raises ValueError if the instance is read-only, otherwise converts val to
   unsigned char (OverflowError/TypeError from the conversion propagate) and
   stores it into self._ptr[0].bridgeCount.
   Returns 0 on success, -1 with an exception set on failure.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":15380
 *     @bridge_count.setter
 *     def bridge_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         self._ptr[0].bridgeCount = val
*/
  /* Guard: reject writes on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":15381
 *     def bridge_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bridgeCount = val
 * 
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_BridgeChipHierarchy_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15381, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 15381, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15380
 *     @bridge_count.setter
 *     def bridge_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         self._ptr[0].bridgeCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":15382
 *         if self._readonly:
 *             raise ValueError("This BridgeChipHierarchy instance is read-only")
 *         self._ptr[0].bridgeCount = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* (unsigned char)-1 is also a legal result, so PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 15382, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bridgeCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":15378
 *         return self._ptr[0].bridgeCount
 * 
 *     @bridge_count.setter             # <<<<<<<<<<<<<<
 *     def bridge_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.bridge_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15384
 *         self._ptr[0].bridgeCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BridgeChipHierarchy instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Cython-generated argument-parsing wrapper for the static method
   BridgeChipHierarchy.from_data(data): accepts exactly one argument,
   positionally or by the keyword "data", then delegates to the impl.
   On a bad call it raises TypeError via __Pyx_RaiseArgtupleInvalid.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12from_data, "BridgeChipHierarchy.from_data(data)\n\nCreate an BridgeChipHierarchy instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `bridge_chip_hierarchy_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15384, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then match keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15384, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 15384, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 15384, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15384, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15384, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of BridgeChipHierarchy.from_data(data) (pyx line 15391):
   looks up the module-global `bridge_chip_hierarchy_dtype` and delegates to
   the shared helper __from_data(data, "bridge_chip_hierarchy_dtype",
   bridge_chip_hierarchy_dtype, BridgeChipHierarchy).
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":15391
 *             data (_numpy.ndarray): a single-element array of dtype `bridge_chip_hierarchy_dtype` holding the data.
 *         """
 *         return __from_data(data, "bridge_chip_hierarchy_dtype", bridge_chip_hierarchy_dtype, BridgeChipHierarchy)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_bridge_chip_hierarchy_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15391, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_bridge_chip_hierarchy_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15391, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15384
 *         self._ptr[0].bridgeCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BridgeChipHierarchy instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15393
 *         return __from_data(data, "bridge_chip_hierarchy_dtype", bridge_chip_hierarchy_dtype, BridgeChipHierarchy)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BridgeChipHierarchy instance wrapping the given pointer.
*/

/* Python wrapper */
/* Cython-generated argument-parsing wrapper for the static method
   BridgeChipHierarchy.from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
   accepts 1-3 arguments, positionally or by keyword ("ptr", "readonly", "owner"),
   converts ptr to intptr_t and readonly to a C int via truth testing, defaults
   owner to None, then delegates to the impl.
   Generated code — edit cuda/bindings/_nvml.pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_14from_ptr, "BridgeChipHierarchy.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an BridgeChipHierarchy instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15393, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals (fallthrough switch),
         then resolve keywords against __pyx_pyargnames. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 15393, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":15394
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an BridgeChipHierarchy instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 15393, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call path: 1-3 positional arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15393, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert parsed values to C types; readonly defaults to 0 (False) when absent. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15394, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15394, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 15393, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":15393
 *         return __from_data(data, "bridge_chip_hierarchy_dtype", bridge_chip_hierarchy_dtype, BridgeChipHierarchy)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BridgeChipHierarchy instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): machine-generated by Cython 3.2.2 from cuda/bindings/_nvml.pyx
 * (BridgeChipHierarchy.from_ptr, pyx lines 15393-15417). Do not hand-edit;
 * change the .pyx source and regenerate instead.
 *
 * Implementation of the @staticmethod
 *     from_ptr(intptr_t ptr, bint readonly=False, object owner=None)
 * Wraps a raw nvmlBridgeChipHierarchy_t pointer in a Python object:
 *   - ptr == 0              -> raises ValueError;
 *   - owner is None         -> the struct is deep-copied (memcpy) into freshly
 *                              malloc'd storage the new object owns
 *                              (obj._owned = True, freed by the deallocator);
 *   - owner is not None     -> the pointer is borrowed as-is and `owner` is
 *                              kept referenced so the memory stays alive
 *                              (obj._owned = False).
 * Returns a new reference, or NULL with an exception set (ValueError on a
 * null ptr, MemoryError on allocation failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15402
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)
 */
  /* Guard: a null pointer cannot be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15403
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall helper. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15403, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15403, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15402
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15404
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 */
  /* Allocate the instance via tp_new directly, skipping __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipHierarchy(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15404, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15405
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15406
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)
 *         if owner is None:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 */
    /* Owning path: copy the caller's struct into memory this object owns. */
    __pyx_v_obj->_ptr = ((nvmlBridgeChipHierarchy_t *)malloc((sizeof(nvmlBridgeChipHierarchy_t))));

    /* "cuda/bindings/_nvml.pyx":15407
 *         if owner is None:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBridgeChipHierarchy_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15408
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBridgeChipHierarchy_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a (possibly shadowed) global name, then
       * called; CYTHON_UNPACK_METHODS unwraps a bound method if present. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15408, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_BridgeChipHiera};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15408, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 15408, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15407
 *         if owner is None:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBridgeChipHierarchy_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":15409
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBridgeChipHierarchy_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlBridgeChipHierarchy_t))));

    /* "cuda/bindings/_nvml.pyx":15410
 *                 raise MemoryError("Error allocating BridgeChipHierarchy")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBridgeChipHierarchy_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":15411
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlBridgeChipHierarchy_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":15405
 *             raise ValueError("ptr must not be null (0)")
 *         cdef BridgeChipHierarchy obj = BridgeChipHierarchy.__new__(BridgeChipHierarchy)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>malloc(sizeof(nvmlBridgeChipHierarchy_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":15413
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Borrowing path: alias the pointer and hold a reference to `owner`. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlBridgeChipHierarchy_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":15414
 *         else:
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":15415
 *             obj._ptr = <nvmlBridgeChipHierarchy_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":15416
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":15417
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15393
 *         return __from_data(data, "bridge_chip_hierarchy_dtype", bridge_chip_hierarchy_dtype, BridgeChipHierarchy)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BridgeChipHierarchy instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release temporaries and record a synthetic traceback frame. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* NOTE(review): machine-generated by Cython 3.2.2 -- do not hand-edit.
 * METH_FASTCALL|METH_KEYWORDS wrapper for
 * BridgeChipHierarchy.__reduce_cython__(self). Validates that no positional
 * or keyword arguments were passed, then forwards to the
 * _16__reduce_cython__ implementation (which always raises TypeError:
 * this type wraps a raw C pointer and is deliberately unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16__reduce_cython__, "BridgeChipHierarchy.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Recover the positional-argument count when not built with METH_FASTCALL. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ accepts no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): machine-generated by Cython 3.2.2 -- do not hand-edit.
 * Body of BridgeChipHierarchy.__reduce_cython__: unconditionally raises
 * TypeError ("self._ptr cannot be converted to a Python object for
 * pickling"), making instances unpicklable. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* NOTE(review): machine-generated by Cython 3.2.2 -- do not hand-edit.
 * METH_FASTCALL|METH_KEYWORDS wrapper for
 * BridgeChipHierarchy.__setstate_cython__(self, __pyx_state). Unpacks the
 * single required argument (positionally or as keyword "__pyx_state") and
 * forwards to the _18__setstate_cython__ implementation, which always
 * raises TypeError (the type is deliberately unpicklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_18__setstate_cython__, "BridgeChipHierarchy.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): as parenthesized, this evaluates (!!len) < 0, which can
     * never be true, so the negative-length check is inert. Presumably the
     * generator intended (__pyx_kwds_len < 0); harmless here, but worth
     * confirming against upstream Cython rather than patching by hand. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Any slot still empty after keyword parsing means a required
       * argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): machine-generated by Cython 3.2.2 -- do not hand-edit.
 * Body of BridgeChipHierarchy.__setstate_cython__: ignores __pyx_state and
 * unconditionally raises TypeError, mirroring __reduce_cython__ so that
 * both halves of the pickle protocol refuse to serialize the raw-pointer
 * wrapper. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.BridgeChipHierarchy.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15420
 * 
 * 
 * cdef _get_sample_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlSample_t pod = nvmlSample_t()
 *     return _numpy.dtype({
*/

/* NOTE(review): machine-generated by Cython 3.2.2 from cuda/bindings/_nvml.pyx
 * (_get_sample_dtype_offsets, pyx lines 15420-15430). Do not hand-edit.
 *
 * Builds and returns a numpy structured dtype mirroring the C layout of
 * nvmlSample_t:
 *   names:    ['time_stamp', 'sample_value']
 *   formats:  [numpy.uint64, value_dtype]   (value_dtype is a module global)
 *   offsets:  computed from the addresses of a local nvmlSample_t's fields
 *             relative to the struct's own address (offsetof-style math)
 *   itemsize: sizeof(nvmlSample_t)
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_sample_dtype_offsets(void) {
  nvmlSample_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlSample_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_sample_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":15421
 * 
 * cdef _get_sample_dtype_offsets():
 *     cdef nvmlSample_t pod = nvmlSample_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'sample_value'],
 */
  /* NOTE(review): __pyx_t_1 has no visible prior assignment here, so pod's
   * field VALUES are indeterminate after this copy. Only the field ADDRESSES
   * of pod are used below (offset arithmetic), never its contents, so this
   * appears benign -- but confirm against freshly regenerated output. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":15422
 * cdef _get_sample_dtype_offsets():
 *     cdef nvmlSample_t pod = nvmlSample_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['time_stamp', 'sample_value'],
 *         'formats': [_numpy.uint64, value_dtype],
 */
  /* Look up _numpy.dtype once; the spec dict is built next. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":15423
 *     cdef nvmlSample_t pod = nvmlSample_t()
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'sample_value'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, value_dtype],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15423, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15423, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_time_stamp) != (0)) __PYX_ERR(0, 15423, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sample_value);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sample_value);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_sample_value) != (0)) __PYX_ERR(0, 15423, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 15423, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15424
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'sample_value'],
 *         'formats': [_numpy.uint64, value_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15424, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15424, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15424, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = PyList_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15424, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15424, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15424, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_8) < (0)) __PYX_ERR(0, 15423, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;

  /* "cuda/bindings/_nvml.pyx":15426
 *         'formats': [_numpy.uint64, value_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sampleValue)) - (<intptr_t>&pod),
 *         ],
 */
  /* Field offsets computed as address differences within the local struct. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15426, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15427
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sampleValue)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlSample_t),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sampleValue)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":15425
 *         'names': ['time_stamp', 'sample_value'],
 *         'formats': [_numpy.uint64, value_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sampleValue)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 15425, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15425, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 15423, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15429
 *             (<intptr_t>&(pod.sampleValue)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlSample_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlSample_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15429, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 15423, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec_dict); CYTHON_UNPACK_METHODS unwraps a bound
   * method callee if present before the vectorcall. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15422, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15420
 * 
 * 
 * cdef _get_sample_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlSample_t pod = nvmlSample_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_sample_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15451
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=sample_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* NOTE(review): machine-generated by Cython 3.2.2 -- do not hand-edit.
 * tp_init-style (VARARGS) wrapper for Sample.__init__(self, size=1).
 * Unpacks the optional `size` argument (positional or keyword), defaulting
 * to the cached int 1, then forwards to the ___init__ implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): as parenthesized, this evaluates (!!len) < 0, which can
     * never be true, so the negative-length check is inert. Presumably the
     * generator intended (__pyx_kwds_len < 0); harmless in practice, but
     * worth confirming against upstream Cython rather than patching here. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15451, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: take at most one positional, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15451, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 15451, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only path; `size` defaults to the interned int 1. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15451, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 15451, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of `Sample.__init__(self, size=1)`.
 * Allocates a NumPy array of `size` elements with dtype `sample_dtype`,
 * stores a recarray view of it in self->_data, and (when Python assertions
 * are enabled) verifies that the array itemsize equals sizeof(nvmlSample_t).
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE: generated code — do not hand-edit logic; regenerate from the .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":15452
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=sample_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlSample_t), \
 */
  /* Look up `_numpy.empty` and the module-level `sample_dtype`. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_sample_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  /* If `empty` is a bound method, unpack it so the underlying function can be
   * vectorcalled with its `self` placed in the argument array (t_5 becomes 0). */
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  /* Vectorcall `empty(size, dtype=sample_dtype)` via a keyword builder. */
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15452, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 15452, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15452, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15453
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=sample_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlSample_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlSample_t) }"
 */
  /* Call `arr.view(_numpy.recarray)` via a fast method call. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15453, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Store the recarray view into self->_data, releasing the previous value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15454
 *         arr = _numpy.empty(size, dtype=sample_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlSample_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlSample_t) }"
 * 
 */
  /* `assert` body: only evaluated when Python assertions are enabled
   * (i.e. the interpreter is not running with -O). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15454, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlSample_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15454, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15454, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 15454, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":15455
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlSample_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlSample_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string assertion message by joining literal fragments
       * with the formatted itemsize and sizeof values, then raise
       * AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15455, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15455, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlSample_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15455, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15455, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 15454, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 15454, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":15451
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=sample_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  /* Error path: release any live temporaries and record the traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15457
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlSample_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* Thin tp_repr slot wrapper for Sample.__repr__: casts `self` to the
 * Sample struct and delegates to the implementation function below.
 * The kwvalues variable is generated unconditionally and unused here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of `Sample.__repr__`.
 * If self._data.size > 1, returns
 *   f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"
 * otherwise returns
 *   f"<{__name__}.Sample object at {hex(id(self))}>".
 * Both branches build the f-string with __Pyx_PyUnicode_Join over literal
 * key-string fragments and the formatted sub-expressions.
 * NOTE: generated code — do not hand-edit logic; regenerate from the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":15458
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate `self._data.size > 1` via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15458, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15458, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 15458, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":15459
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.Sample object at {hex(id(self))}>"
 */
    /* Array branch: format __name__, self._data.size and hex(id(self)),
     * then join with the literal fragments into the final repr string. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_Sample_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 14 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15459, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15458
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15461
 *             return f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.Sample object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar branch: same construction without the size component. */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_Sample_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 18 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":15457
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlSample_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.Sample_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  /* Error path: release live temporaries, record traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15463
 *             return f"<{__name__}.Sample object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter slot wrapper for `Sample.ptr`: casts `self` and delegates
 * to the implementation below. kwvalues is generated but unused. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `Sample.ptr` property getter: returns
 * `self._data.ctypes.data` (the NumPy-reported data address) as a
 * Python object, or NULL with an exception set on attribute failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15466
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Chained attribute lookup: self._data -> .ctypes -> .data. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15466, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15466, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15463
 *             return f"<{__name__}.Sample object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15468
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level (`cdef`) helper `Sample._get_ptr`: fetches `self._data.ctypes.data`
 * and converts it to intptr_t. On error returns 0 with a Python exception set
 * (callers must check PyErr_Occurred, since 0 is also a valid address value).
 * NOTE(review): the conversion goes through PyLong_AsSsize_t, so it assumes
 * intptr_t and Py_ssize_t have compatible range on the target platform —
 * true on mainstream platforms, but not guaranteed by the C standard. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_6Sample__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15469
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15469, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15469, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* -1 doubles as the error sentinel; PyErr_Occurred disambiguates. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15469, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15468
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15471
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* nb_int slot wrapper for `Sample.__int__`: casts `self` and delegates to
 * the implementation below. kwvalues is generated but unused. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of `Sample.__int__`.
 * Raises TypeError when self._data.size > 1 (int() of an array is refused;
 * the message directs callers to .ptr), otherwise returns
 * self._data.ctypes.data — the data address as a Python int.
 * NOTE: generated code — do not hand-edit logic; regenerate from the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":15472
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate `self._data.size > 1` via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15472, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 15472, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":15473
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Construct TypeError(message) via fastcall and raise it. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15473, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15473, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15472
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":15475
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single-element case: return the data address, as in the `ptr` getter. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15471
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15477
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* sq_length/mp_length slot wrapper for `Sample.__len__`: casts `self` and
 * delegates to the implementation below. kwvalues is generated but unused. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_6Sample_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_6Sample_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `Sample.__len__`: returns self._data.size converted to
 * Py_ssize_t, or -1 with an exception set on failure (the slot convention). */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_6Sample_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":15478
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  /* Convert the `size` attribute through the index protocol; -1 with an
   * exception pending signals conversion failure. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15478, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15478, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15477
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15480
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Rich-comparison (Py_EQ) wrapper for `Sample.__eq__`: casts `self` and
 * forwards `other` to the implementation. kwvalues is generated but unused. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Sample.__eq__ (pyx line 15480): returns False unless
 * `other` is a Sample with matching `_data.size` and `_data.dtype`; otherwise
 * returns bool((self._data == other._data).all()). Returns NULL on error.
 * Generated by Cython — edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":15481
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Snapshot self._data into a local owned reference. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15482
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuiting `or` chain: each clause jumps to __pyx_L4_bool_binop_done
   * as soon as a truthy operand is found. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15482, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15483
 *         cdef object self_data = self._data
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15482
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":15484
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15484, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15484, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  /* Vectorcall of (self_data == other._data).all() with no arguments. */
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15484, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Double negation coerces truthiness to a canonical Py_True/Py_False. */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15484, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15484, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15480
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, Sample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15486
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
*/

/* Python wrapper */
/* CPython getter slot for the `time_stamp` property: forwards to the generated
 * __get__ implementation. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_10time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_10time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_10time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `time_stamp` property getter (pyx line 15486):
 * if self._data.size == 1, returns int(self._data.time_stamp[0]) (a scalar);
 * otherwise returns the self._data.time_stamp array unchanged.
 * Returns NULL on error. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15489
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15489, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15489, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15490
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_stamp
 * 
 */
    /* Single-element case: index [0] then coerce to a Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15490, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15490, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15490, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15489
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
 */
  }

  /* "cuda/bindings/_nvml.pyx":15491
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp             # <<<<<<<<<<<<<<
 * 
 *     @time_stamp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15486
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15493
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

/* Python wrapper */
/* CPython setter slot for the `time_stamp` property: forwards to the generated
 * __set__ implementation; returns 0 on success, -1 on error.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_10time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `time_stamp` property setter (pyx line 15493):
 * performs `self._data.time_stamp = val`. Returns 0 on success, -1 on error.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15495
 *     @time_stamp.setter
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp, __pyx_v_val) < (0)) __PYX_ERR(0, 15495, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15493
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15497
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sample_value(self):
 *         """value_dtype: """
*/

/* Python wrapper */
/* CPython getter slot for the `sample_value` property: forwards to the
 * generated __get__ implementation. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_12sample_value_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_12sample_value_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12sample_value___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `sample_value` property getter (pyx line 15497):
 * returns self._data.sample_value unmodified (no scalar special-case, unlike
 * time_stamp's getter). Returns NULL on error.
 * Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_12sample_value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15500
 *     def sample_value(self):
 *         """value_dtype: """
 *         return self._data.sample_value             # <<<<<<<<<<<<<<
 * 
 *     @sample_value.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sample_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15500, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15497
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sample_value(self):
 *         """value_dtype: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.sample_value.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15502
 *         return self._data.sample_value
 * 
 *     @sample_value.setter             # <<<<<<<<<<<<<<
 *     def sample_value(self, val):
 *         self._data.sample_value = val
*/

/* Python wrapper */
/* CPython setter slot for the `sample_value` property: forwards to the
 * generated __set__ implementation; returns 0 on success, -1 on error.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_12sample_value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_12sample_value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12sample_value_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `sample_value` property setter (pyx line 15502):
 * performs `self._data.sample_value = val`. Returns 0 on success, -1 on error.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12sample_value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15504
 *     @sample_value.setter
 *     def sample_value(self, val):
 *         self._data.sample_value = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sample_value, __pyx_v_val) < (0)) __PYX_ERR(0, 15504, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15502
 *         return self._data.sample_value
 * 
 *     @sample_value.setter             # <<<<<<<<<<<<<<
 *     def sample_value(self, val):
 *         self._data.sample_value = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.sample_value.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15506
 *         self._data.sample_value = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* CPython mp_subscript wrapper for Sample.__getitem__: forwards to the
 * generated implementation. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Sample.__getitem__ (pyx line 15506).
 * Integer key: bounds-check against self._data.size, normalize negative
 * indices, and return Sample.from_data(self._data[key:key+1]) — a length-1
 * Sample view rather than a bare element. Non-integer key (e.g. slice or
 * field name): index self._data directly; if the result is a numpy.recarray
 * with dtype == sample_dtype, re-wrap it via Sample.from_data, otherwise
 * return it as-is. Raises IndexError for out-of-range integer keys.
 * Returns NULL on error. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":15509
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15510
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15510, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":15511
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15511, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15511, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":15512
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Valid range is -size <= key_ < size (Python negative-index semantics). */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15513
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15513, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 15513, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15512
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":15514
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return Sample.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":15515
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return Sample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":15514
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return Sample.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":15516
 *             if key_ < 0:
 *                 key_ += size
 *             return Sample.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype:
 */
    /* Slice [key_:key_+1] keeps a 1-element array so from_data gets an array. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15516, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15516, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15509
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":15517
 *                 key_ += size
 *             return Sample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype:
 *             return Sample.from_data(out)
 */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":15518
 *             return Sample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype:             # <<<<<<<<<<<<<<
 *             return Sample.from_data(out)
 *         return out
 */
  /* Short-circuiting `and`: dtype comparison only runs for recarray results. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_sample_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15518, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15519
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype:
 *             return Sample.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15519, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15518
 *             return Sample.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype:             # <<<<<<<<<<<<<<
 *             return Sample.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":15520
 *         if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype:
 *             return Sample.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15506
 *         self._data.sample_value = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15522
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* CPython mp_ass_subscript wrapper for Sample.__setitem__: forwards to the
 * generated implementation; returns 0 on success, -1 on error.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_6Sample_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Sample.__setitem__ (pyx line 15522): delegates directly to
 * `self._data[key] = val` with no key normalization or bounds checks of its
 * own (unlike __getitem__). Returns 0 on success, -1 on error.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_6Sample_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15523
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 15523, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15522
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15525
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Sample instance wrapping the given NumPy array.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Sample_14from_data, "Sample.from_data(data)\n\nCreate an Sample instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `sample_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Sample_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_14from_data};
/* Cython-generated argument-unpacking wrapper for the static method
 * `Sample.from_data(data)` (pyx line 15525).  Parses exactly one
 * positional-or-keyword argument named "data" from either the fastcall
 * vector or a legacy args tuple, then dispatches to the pf implementation.
 * Returns a new reference, or NULL with an exception set.
 *
 * FIX: the keyword-count error check read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * -- `unlikely(x)` evaluates to 0/1, so the comparison was always false and
 * a negative (error) return from __Pyx_NumKwargs_FASTCALL was silently
 * ignored.  Parenthesization corrected to test the count itself. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  /* Non-fastcall builds must derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: test the count, not the truthiness of unlikely(count). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15525, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then merge
       * keywords and verify every required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15525, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 15525, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 15525, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15525, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15525, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of the static method
 * `Sample.from_data(data)` (pyx lines 15525-15541).  Allocates a Sample via
 * tp_new (bypassing __init__), validates that `data` is a 1-D numpy.ndarray
 * of the module-level `sample_dtype`, stores `data.view(numpy.recarray)` in
 * obj._data, and returns the new Sample.  Raises TypeError for a
 * non-ndarray, ValueError for wrong ndim or dtype.  Error paths flow
 * through the goto-based cleanup at __pyx_L1_error.
 * Generated code -- edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":15532
 *             data (_numpy.ndarray): a 1D array of dtype `sample_dtype` holding the data.
 *         """
 *         cdef Sample obj = Sample.__new__(Sample)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Allocate an empty Sample without running __init__. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Sample(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15532, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15533
 *         """
 *         cdef Sample obj = Sample.__new__(Sample)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* Validation 1: data must be an instance of numpy.ndarray. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15533, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15533, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 15533, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15534
 *         cdef Sample obj = Sample.__new__(Sample)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15534, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15534, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15533
 *         """
 *         cdef Sample obj = Sample.__new__(Sample)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15535
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != sample_dtype:
 */
  /* Validation 2: the array must be one-dimensional. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15535, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15536
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != sample_dtype:
 *             raise ValueError("data array must be of dtype sample_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15536, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15536, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15535
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != sample_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15537
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != sample_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype sample_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Validation 3: dtype must equal the module-level `sample_dtype`
   * (rich-compared with Py_NE, so dtype's own __ne__ semantics apply). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_sample_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15537, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15537, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15538
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != sample_dtype:
 *             raise ValueError("data array must be of dtype sample_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_samp};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15538, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 15538, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15537
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != sample_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype sample_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15539
 *         if data.dtype != sample_dtype:
 *             raise ValueError("data array must be of dtype sample_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Store a recarray view of the caller's array (shares the same buffer). */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15539, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15539, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15539, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15541
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15525
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Sample instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15543
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an Sample instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Sample_16from_ptr, "Sample.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an Sample instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Sample_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_16from_ptr};
/* Cython-generated argument-unpacking wrapper for the static method
 * `Sample.from_ptr(intptr_t ptr, size_t size=1, bint readonly=False)`
 * (pyx line 15543).  Unpacks 1-3 positional/keyword arguments, converts
 * them to C types (intptr_t / size_t / bint), applies defaults size=1 and
 * readonly=False, and dispatches to the pf implementation.  Returns a new
 * reference, or NULL with an exception set.
 *
 * FIX: the keyword-count error check read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * -- `unlikely(x)` evaluates to 0/1, so the comparison was always false and
 * a negative (error) return from __Pyx_NumKwargs_FASTCALL was silently
 * ignored.  Parenthesization corrected to test the count itself. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  /* Non-fastcall builds must derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: test the count, not the truthiness of unlikely(count). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15543, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then merge
       * keywords and verify the one required argument ("ptr") is present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15543, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15543, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15543, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 15543, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 15543, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1 to 3 arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15543, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15543, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15543, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert Python values to C types; defaults applied when absent.
     * NOTE(review): PyLong_AsSsize_t is used to convert `ptr` declared as
     * intptr_t -- equivalent only where Py_ssize_t and intptr_t have the
     * same width (true on mainstream platforms); confirm against the
     * Cython-emitted converter for intptr_t. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15544, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15544, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15544, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":15544
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an Sample instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 15543, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":15543
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an Sample instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of the static method
 * `Sample.from_ptr(ptr, size=1, readonly=False)` (pyx lines 15543-15561).
 * Rejects a null pointer, allocates a Sample via tp_new (bypassing
 * __init__), wraps the raw memory at `ptr` (sizeof(nvmlSample_t) * size
 * bytes) in a memoryview with PyBUF_READ or PyBUF_WRITE per `readonly`,
 * builds a numpy.ndarray of `sample_dtype` over that buffer, stores its
 * recarray view in obj._data, and returns the new Sample.
 * The caller is responsible for keeping the memory at `ptr` alive for the
 * lifetime of the returned object (the memoryview does not own it).
 * Generated code -- edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15552
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Sample obj = Sample.__new__(Sample)
 */
  /* Guard: refuse to wrap a null pointer. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15553
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef Sample obj = Sample.__new__(Sample)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15553, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15553, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15552
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Sample obj = Sample.__new__(Sample)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15554
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Sample obj = Sample.__new__(Sample)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate an empty Sample without running __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_Sample(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15554, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15555
 *             raise ValueError("ptr must not be null (0)")
 *         cdef Sample obj = Sample.__new__(Sample)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlSample_t) * size, flag)
 */
  /* Choose the memoryview access mode from `readonly` (boxed as a Python
   * int here because `flag` is an untyped cdef variable in the .pyx). */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15555, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15555, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15557
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlSample_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=sample_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15557, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15556
 *         cdef Sample obj = Sample.__new__(Sample)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlSample_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=sample_dtype)
 */
  /* Non-owning memoryview over the caller-provided memory. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlSample_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15558
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlSample_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=sample_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* Call numpy.ndarray(size, buffer=buf, dtype=sample_dtype) via the
   * vectorcall keyword-builder protocol. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_sample_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15558, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 15558, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 15558, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15558, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15559
 *             <char*>ptr, sizeof(nvmlSample_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=sample_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Store the recarray view of the freshly built array in obj._data. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15559, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15561
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15543
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an Sample instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15447
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Cython-generated getter wrapper for the readonly attribute
 * `Sample._data` (pyx line 15447).  Casts self to the Sample struct and
 * delegates to the pf getter, which returns a new reference to _data.
 *
 * FIX: the original declared `__pyx_kwvalues` and assigned it with
 * `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)`, but `__pyx_args` and
 * `__pyx_nargs` are not parameters of a getter (signature is just
 * `(PyObject *self)`): that is a compile error under any macro definition
 * that expands its arguments, and dead code otherwise.  Cython does not
 * emit keyword handling for property getters; the bogus declaration and
 * assignment are removed. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_5_data_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated getter body for the readonly attribute `Sample._data`:
 * returns a new (incref'd) reference to self->_data.  Cannot fail.
 * Generated code -- edit the .pyx source, not this file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Sample_18__reduce_cython__, "Sample.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Sample_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_18__reduce_cython__};
/* Cython-generated wrapper for `Sample.__reduce_cython__(self)` (pickle
 * support, tree-fragment source).  Rejects any positional or keyword
 * arguments, then delegates to the pf implementation.  Returns a new
 * reference, or NULL with an exception set.
 * Generated code -- edit the .pyx source, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds must derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `Sample.__reduce_cython__` (pickle protocol support).
 * Builds the state tuple `(self._data,)`, appends the instance `__dict__`
 * when present and non-empty, and returns either
 *   (__pyx_unpickle_Sample, (type(self), 0xa75e18a, None), state)  -- setstate form
 * or
 *   (__pyx_unpickle_Sample, (type(self), 0xa75e18a, state))        -- direct form.
 * 0xa75e18a (== 175497610) is the layout checksum validated on unpickling. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  /* state = (self._data,): one-element tuple holding the _data attribute. */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  /* _dict = getattr(self, '__dict__', None): None when the type has no dict. */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  /* Short-circuit `and`: test identity against None first, then truthiness. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    /* state += (_dict,): append the instance dict to the state tuple. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* NOTE(review): Cython constant-folded the expression into the cached
     * tuple ('self._data is not None',), whose truth value is always true,
     * so use_setstate ends up 1 on this branch as well. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, state)
*/
    /* Setstate form: 3-tuple (callable, (type, checksum, None), state). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Sample); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_Sample__set_state(self, __pyx_state)
*/
  /*else*/ {
    /* Direct form: 2-tuple (callable, (type, checksum, state)). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Sample); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_Sample__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Forward declaration: FASTCALL/tuple-args signature selected by
 * CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6Sample_20__setstate_cython__, "Sample.__setstate_cython__(self, __pyx_state)");
/* Method-table entry registering the wrapper as `__setstate_cython__`. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_6Sample_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_20__setstate_cython__};
/* Wrapper for `Sample.__setstate_cython__(self, __pyx_state)`: unpacks the
 * single argument (positional or keyword `__pyx_state`) and delegates to the
 * implementation function.  Returns NULL with an exception set on bad
 * arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_6Sample_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the tuple-args (non-FASTCALL) build, compute nargs from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`, which compares the
     * 0/1 result of unlikely() and can never be true, silently ignoring an
     * error return (-1) from __Pyx_NumKwargs_FASTCALL.  The comparison
     * belongs inside unlikely(), matching the __reduce_cython__ wrapper
     * above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: take at most one positional, then fill the rest from
       * keywords and verify that `__pyx_state` was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failed: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6Sample_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `Sample.__setstate_cython__(self, __pyx_state)`: validates that
 * the state is a tuple (rejecting None and any other type with TypeError),
 * restores the instance via __pyx_unpickle_Sample__set_state, and returns
 * None. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6Sample_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_Sample__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
*/
  /* Type-check: the helper's parameter is a tuple declared `not None`, so
   * reject non-tuples and None explicitly before calling it. */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  /* Restore instance state; the helper's return value is discarded. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_Sample__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_Sample__set_state(self, __pyx_state)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.Sample.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15564
 * 
 * 
 * cdef _get_vgpu_instance_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuInstanceUtilizationInfo_v1_t pod = nvmlVgpuInstanceUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_instance_utilization_info_v1_dtype_offsets(void) {
  nvmlVgpuInstanceUtilizationInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuInstanceUtilizationInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  size_t __pyx_t_15;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_instance_utilization_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":15565
 * 
 * cdef _get_vgpu_instance_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuInstanceUtilizationInfo_v1_t pod = nvmlVgpuInstanceUtilizationInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'vgpu_instance', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":15566
 * cdef _get_vgpu_instance_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuInstanceUtilizationInfo_v1_t pod = nvmlVgpuInstanceUtilizationInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['time_stamp', 'vgpu_instance', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.uint64, _numpy.uint32, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":15567
 *     cdef nvmlVgpuInstanceUtilizationInfo_v1_t pod = nvmlVgpuInstanceUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'vgpu_instance', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint64, _numpy.uint32, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_time_stamp) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_vgpu_instance) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sm_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_sm_util) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_mem_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_mem_util) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_enc_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_enc_util) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_dec_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_dec_util) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_jpg_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_jpg_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_jpg_util) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_ofa_util);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_ofa_util);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_ofa_util) != (0)) __PYX_ERR(0, 15567, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 15567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15568
 *     return _numpy.dtype({
 *         'names': ['time_stamp', 'vgpu_instance', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.uint64, _numpy.uint32, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_GetModuleGlobalName(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_GetModuleGlobalName(__pyx_t_13, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_14 = PyList_New(8); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 15568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 5, __pyx_t_11) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 6, __pyx_t_12) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_14, 7, __pyx_t_13) != (0)) __PYX_ERR(0, 15568, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_14) < (0)) __PYX_ERR(0, 15567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;

  /* "cuda/bindings/_nvml.pyx":15570
 *         'formats': [_numpy.uint64, _numpy.uint32, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 15570, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":15571
 *         'offsets': [
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuInstance)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 15571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":15572
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.smUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 15572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":15573
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.memUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 15573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":15574
 *             (<intptr_t>&(pod.smUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.encUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":15575
 *             (<intptr_t>&(pod.memUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.decUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15575, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":15576
 *             (<intptr_t>&(pod.encUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.jpgUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":15577
 *             (<intptr_t>&(pod.decUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.jpgUtil)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.ofaUtil)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15569
 *         'names': ['time_stamp', 'vgpu_instance', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'],
 *         'formats': [_numpy.uint64, _numpy.uint32, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype, value_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstance)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_14) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_13) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_6) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_8) != (0)) __PYX_ERR(0, 15569, __pyx_L1_error);
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 15567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15579
 *             (<intptr_t>&(pod.ofaUtil)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 15567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_15 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_15 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_15, (2-__pyx_t_15) | (__pyx_t_15*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15566, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15564
 * 
 * 
 * cdef _get_vgpu_instance_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuInstanceUtilizationInfo_v1_t pod = nvmlVgpuInstanceUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_instance_utilization_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15601
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=vgpu_instance_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/* Python wrapper for VgpuInstanceUtilizationInfo_v1.__init__(self, size=1).
 * Parses the positional/keyword arguments (a single optional argument "size",
 * defaulting to the interned int 1) into values[], then forwards to the
 * implementation function. Returns 0 on success, -1 with an exception set on
 * failure. All entries of values[] are owned references released on exit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(). The previous form
     * `unlikely(__pyx_kwds_len) < 0` normalizes the operand to 0/1 first
     * (unlikely(x) expands via !!(x)), so it could never be negative and an
     * error return from __Pyx_NumKwargs_VARARGS was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15601, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15601, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 15601, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* Positional-only fast path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15601, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 15601, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__init__(self, size=1):
 * allocates `size` records with numpy.empty(size, dtype=
 * vgpu_instance_utilization_info_v1_dtype), stores a recarray view on
 * self._data, and asserts the dtype itemsize equals
 * sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t).
 * Returns 0 on success, -1 with an exception set on error.
 * Temporaries __pyx_t_* hold owned references released on both paths. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":15602
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=vgpu_instance_utilization_info_v1_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t), \
 */
  /* Look up _numpy.empty and the module-level dtype global, then call
   * empty(size, dtype=...) through the vectorcall builder (one keyword). */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15602, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15602, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_vgpu_instance_utilization_info_v); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15602, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If `empty` resolved to a bound method, split it into (self, function)
   * so the underlying function can be vectorcalled with self prepended
   * (__pyx_t_5 becomes the argument offset: 0 = self slot used). */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15602, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 15602, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15602, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15603
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=vgpu_instance_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) }"
 */
  /* Call arr.view(_numpy.recarray) via FastCallMethod and store the result
   * on self._data (old value dropped after the new one is referenced). */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15603, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15603, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15603, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15604
 *         arr = _numpy.empty(size, dtype=vgpu_instance_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) }"
 * 
 */
  /* The assert is compiled out entirely under CYTHON_WITHOUT_ASSERTIONS and
   * skipped at runtime when the interpreter disables assertions (python -O),
   * as gated by __pyx_assertions_enabled(). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15604, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 15604, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":15605
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the AssertionError message f-string: join four pieces
       * (literal, formatted itemsize, literal, formatted sizeof) with the
       * total length and max character value precomputed for the join. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15605, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15605, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15605, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15605, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 15604, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 15604, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":15601
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=vgpu_instance_utilization_info_v1_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15607
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* tp_repr slot trampoline for VgpuInstanceUtilizationInfo_v1.__repr__:
 * down-casts the generic PyObject* self to the extension-type struct and
 * delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3__repr__(PyObject *__pyx_v_self) {
  PyObject *result = 0;
  CYTHON_UNUSED PyObject *const *kw_values;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope, so
   * __Pyx_KwValues_VARARGS must macro-expand without using its arguments;
   * the assignment is kept only for generator-template uniformity. */
  kw_values = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  result = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return result;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__repr__: returns
 * "<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{n} object at 0x...>"
 * when self._data.size > 1, otherwise
 * "<{__name__}.VgpuInstanceUtilizationInfo_v1 object at 0x...>".
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":15608
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate `self._data.size > 1` as a Python rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15608, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 15608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":15609
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1 object at {hex(id(self))}>"
 */
    /* Array repr: format __name__, self._data.size, and hex(id(self)),
     * then join 7 pieces (literals interleaved with the formatted values)
     * with precomputed total length and max character value. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_VgpuInstanceUtilizationInfo_v1;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 38 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15609, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15608
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15611
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar repr: same construction with 5 pieces (no size segment). */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15611, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15611, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15611, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15611, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15611, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_VgpuInstanceUtilizationInfo_v1_2;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 42 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15611, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":15607
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15613
 *             return f"<{__name__}.VgpuInstanceUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter slot trampoline for VgpuInstanceUtilizationInfo_v1.ptr:
 * down-casts self and delegates to the __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  PyObject *result = 0;
  CYTHON_UNUSED PyObject *const *kw_values;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope, so
   * __Pyx_KwValues_VARARGS must macro-expand without using its arguments;
   * the assignment is kept only for generator-template uniformity. */
  kw_values = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  result = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return result;
}

/* Getter for the `ptr` property ("cuda/bindings/_nvml.pyx":15616):
 * returns self._data.ctypes.data — the backing numpy array's address as a
 * Python int. New reference on success, NULL with an exception on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *result = NULL;
  PyObject *ctypes_obj = NULL;
  PyObject *addr_obj = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);

  /* self._data.ctypes */
  ctypes_obj = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes);
  if (unlikely(!ctypes_obj)) __PYX_ERR(0, 15616, fail)
  __Pyx_GOTREF(ctypes_obj);
  /* ... .data */
  addr_obj = __Pyx_PyObject_GetAttrStr(ctypes_obj, __pyx_mstate_global->__pyx_n_u_data);
  if (unlikely(!addr_obj)) __PYX_ERR(0, 15616, fail)
  __Pyx_GOTREF(addr_obj);
  __Pyx_DECREF(ctypes_obj); ctypes_obj = 0;
  /* Transfer ownership of the address object to the return slot. */
  result = addr_obj;
  addr_obj = 0;
  goto done;

  fail:;
  __Pyx_XDECREF(ctypes_obj);
  __Pyx_XDECREF(addr_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  result = NULL;
  done:;
  __Pyx_XGIVEREF(result);
  __Pyx_RefNannyFinishContext();
  return result;
}

/* "cuda/bindings/_nvml.pyx":15618
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15619
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15619, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15618
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15621
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* nb_int slot trampoline for VgpuInstanceUtilizationInfo_v1.__int__:
 * down-casts self and delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5__int__(PyObject *__pyx_v_self) {
  PyObject *result = 0;
  CYTHON_UNUSED PyObject *const *kw_values;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope, so
   * __Pyx_KwValues_VARARGS must macro-expand without using its arguments;
   * the assignment is kept only for generator-template uniformity. */
  kw_values = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  result = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return result;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__int__: raises TypeError
 * when self._data.size > 1 (int() is only meaningful for a single element),
 * otherwise returns self._data.ctypes.data (the array's base address) as a
 * Python int. New reference on success, NULL with an exception on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":15622
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate `self._data.size > 1` as a Python rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15622, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 15622, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":15623
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Construct the TypeError instance by vectorcalling the exception type
     * with the interned message string (offset trick: args+1, count 1). */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15623, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15623, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15622
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":15625
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single element: return self._data.ctypes.data unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15621
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15627
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* sq_length/mp_length slot trampoline for
 * VgpuInstanceUtilizationInfo_v1.__len__: down-casts self and delegates to
 * the implementation, propagating its Py_ssize_t result (-1 on error). */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7__len__(PyObject *__pyx_v_self) {
  Py_ssize_t result;
  CYTHON_UNUSED PyObject *const *kw_values;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this scope, so
   * __Pyx_KwValues_VARARGS must macro-expand without using its arguments;
   * the assignment is kept only for generator-template uniformity. */
  kw_values = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  result = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return result;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__len__
 * ("cuda/bindings/_nvml.pyx":15628): returns self._data.size converted to
 * Py_ssize_t, or -1 with an exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  Py_ssize_t result = -1;
  PyObject *size_obj = NULL;
  Py_ssize_t length;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__", 0);

  /* self._data.size */
  size_obj = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size);
  if (unlikely(!size_obj)) __PYX_ERR(0, 15628, fail)
  __Pyx_GOTREF(size_obj);
  /* Convert the size object via __index__; -1 with a pending exception is
   * the conversion-failure sentinel. */
  length = __Pyx_PyIndex_AsSsize_t(size_obj);
  if (unlikely((length == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15628, fail)
  __Pyx_DECREF(size_obj); size_obj = 0;
  result = length;
  goto done;

  fail:;
  __Pyx_XDECREF(size_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  result = -1;
  done:;
  __Pyx_RefNannyFinishContext();
  return result;
}

/* "cuda/bindings/_nvml.pyx":15630
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* Cython-generated slot wrapper for VgpuInstanceUtilizationInfo_v1.__eq__:
 * casts the PyObject* self to the concrete struct type and forwards to the
 * implementation below. NOTE(review): __pyx_args/__pyx_nargs are not declared
 * in this scope; __Pyx_KwValues_VARARGS presumably expands without touching
 * its arguments in this build configuration — generated code, do not edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__ (see _nvml.pyx:15630-15634):
 * returns False unless `other` is a VgpuInstanceUtilizationInfo_v1 with
 * matching _data.size and _data.dtype; otherwise returns
 * bool((self._data == other._data).all()). Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":15631
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
*/
  /* self_data keeps its own strong reference to self._data */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15632
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  /* Short-circuit `or` chain: each true term jumps to __pyx_L4_bool_binop_done. */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* self_data.size != other._data.size */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* self_data.dtype != other._data.dtype */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15632, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15633
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15632
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
*/
  }

  /* "cuda/bindings/_nvml.pyx":15634
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* Elementwise (self_data == other._data), then a vectorcall of its
   * .all() method, then truth-test and wrap as a Python bool. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15634, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15634, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15634, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15634, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* !!x normalizes the truth value to 0/1 for __Pyx_PyBool_FromLong */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15634, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15630
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, VgpuInstanceUtilizationInfo_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

  /* function exit code */
  /* Error path: drop all live temporaries, record traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15636
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
*/

/* Python wrapper */
/* Cython-generated property getter wrapper for time_stamp: casts self and
 * forwards to the implementation. Generated code — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for time_stamp (_nvml.pyx:15638-15641): if self._data.size == 1,
 * returns int(self._data.time_stamp[0]) (scalar convenience); otherwise
 * returns the self._data.time_stamp attribute unchanged. NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15639
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15639, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Optimized `size == 1` comparison against the cached int constant 1 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15639, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15640
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.time_stamp
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15640, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* [0] then int(...) to return a plain Python int for the scalar case */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15640, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15640, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15639
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp
*/
  }

  /* "cuda/bindings/_nvml.pyx":15641
 *         if self._data.size == 1:
 *             return int(self._data.time_stamp[0])
 *         return self._data.time_stamp             # <<<<<<<<<<<<<<
 * 
 *     @time_stamp.setter
*/
  /* Non-scalar case: return the attribute object itself */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15636
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def time_stamp(self):
 *         """Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15643
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

/* Python wrapper */
/* Cython-generated property setter wrapper for time_stamp: casts self and
 * forwards `val` to the implementation. Generated code — do not edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for time_stamp (_nvml.pyx:15645): performs
 * self._data.time_stamp = val. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15645
 *     @time_stamp.setter
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_time_stamp, __pyx_v_val) < (0)) __PYX_ERR(0, 15645, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15643
 *         return self._data.time_stamp
 * 
 *     @time_stamp.setter             # <<<<<<<<<<<<<<
 *     def time_stamp(self, val):
 *         self._data.time_stamp = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15647
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
*/

/* Python wrapper */
/* Cython-generated property getter wrapper for vgpu_instance: casts self and
 * forwards to the implementation. Generated code — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for vgpu_instance (_nvml.pyx:15649-15652): same scalar-vs-array
 * pattern as time_stamp — int(self._data.vgpu_instance[0]) when
 * self._data.size == 1, else the attribute itself. NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15650
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15650, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15651
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])             # <<<<<<<<<<<<<<
 *         return self._data.vgpu_instance
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15651, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* [0] then int(...) for the scalar convenience return */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15651, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15651, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15650
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance
*/
  }

  /* "cuda/bindings/_nvml.pyx":15652
 *         if self._data.size == 1:
 *             return int(self._data.vgpu_instance[0])
 *         return self._data.vgpu_instance             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_instance.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15652, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15647
 *         self._data.time_stamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self):
 *         """Union[~_numpy.uint32, int]: vGPU Instance"""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.vgpu_instance.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15654
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
*/

/* Python wrapper */
/* Cython-generated property setter wrapper for vgpu_instance. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for vgpu_instance (_nvml.pyx:15656): performs
 * self._data.vgpu_instance = val. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15656
 *     @vgpu_instance.setter
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_vgpu_instance, __pyx_v_val) < (0)) __PYX_ERR(0, 15656, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15654
 *         return self._data.vgpu_instance
 * 
 *     @vgpu_instance.setter             # <<<<<<<<<<<<<<
 *     def vgpu_instance(self, val):
 *         self._data.vgpu_instance = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.vgpu_instance.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15658
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """value_dtype: SM (3D/Compute) Util Value."""
*/

/* Python wrapper */
/* Cython-generated property getter wrapper for sm_util. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for sm_util (_nvml.pyx:15661): returns self._data.sm_util directly
 * (no scalar unwrapping, unlike time_stamp/vgpu_instance). NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15661
 *     def sm_util(self):
 *         """value_dtype: SM (3D/Compute) Util Value."""
 *         return self._data.sm_util             # <<<<<<<<<<<<<<
 * 
 *     @sm_util.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15658
 *         self._data.vgpu_instance = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sm_util(self):
 *         """value_dtype: SM (3D/Compute) Util Value."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.sm_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15663
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
*/

/* Python wrapper */
/* Cython-generated property setter wrapper for sm_util. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for sm_util (_nvml.pyx:15665): performs self._data.sm_util = val.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15665
 *     @sm_util.setter
 *     def sm_util(self, val):
 *         self._data.sm_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_sm_util, __pyx_v_val) < (0)) __PYX_ERR(0, 15665, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15663
 *         return self._data.sm_util
 * 
 *     @sm_util.setter             # <<<<<<<<<<<<<<
 *     def sm_util(self, val):
 *         self._data.sm_util = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.sm_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}
}

/* "cuda/bindings/_nvml.pyx":15667
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """value_dtype: Frame Buffer Memory Util Value."""
*/

/* Python wrapper */
/* Cython-generated property getter wrapper for mem_util. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for mem_util (_nvml.pyx:15670): returns self._data.mem_util
 * directly. NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15670
 *     def mem_util(self):
 *         """value_dtype: Frame Buffer Memory Util Value."""
 *         return self._data.mem_util             # <<<<<<<<<<<<<<
 * 
 *     @mem_util.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15667
 *         self._data.sm_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def mem_util(self):
 *         """value_dtype: Frame Buffer Memory Util Value."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.mem_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15672
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
*/

/* Python wrapper */
/* Cython-generated property setter wrapper for mem_util. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for mem_util (_nvml.pyx:15674): performs self._data.mem_util = val.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15674
 *     @mem_util.setter
 *     def mem_util(self, val):
 *         self._data.mem_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_mem_util, __pyx_v_val) < (0)) __PYX_ERR(0, 15674, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15672
 *         return self._data.mem_util
 * 
 *     @mem_util.setter             # <<<<<<<<<<<<<<
 *     def mem_util(self, val):
 *         self._data.mem_util = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.mem_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}
}

/* "cuda/bindings/_nvml.pyx":15676
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """value_dtype: Encoder Util Value."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `enc_util` property getter: returns
 * self._data.enc_util, i.e. the attribute looked up on the wrapped
 * NumPy record (self->_data).  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15679
 *     def enc_util(self):
 *         """value_dtype: Encoder Util Value."""
 *         return self._data.enc_util             # <<<<<<<<<<<<<<
 * 
 *     @enc_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15679, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15676
 *         self._data.mem_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enc_util(self):
 *         """value_dtype: Encoder Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.enc_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15681
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
*/

/* Python wrapper */
/* CPython setter-slot wrapper for `enc_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `enc_util` property setter: forwards the
 * assignment onto the `enc_util` attribute of the wrapped NumPy record
 * (self->_data).  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15683
 *     @enc_util.setter
 *     def enc_util(self, val):
 *         self._data.enc_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_enc_util, __pyx_v_val) < (0)) __PYX_ERR(0, 15683, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15681
 *         return self._data.enc_util
 * 
 *     @enc_util.setter             # <<<<<<<<<<<<<<
 *     def enc_util(self, val):
 *         self._data.enc_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.enc_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15685
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """value_dtype: Decoder Util Value."""
*/

/* Python wrapper */
/* CPython getter-slot wrapper for `dec_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `dec_util` property getter: returns
 * self._data.dec_util from the wrapped NumPy record.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15688
 *     def dec_util(self):
 *         """value_dtype: Decoder Util Value."""
 *         return self._data.dec_util             # <<<<<<<<<<<<<<
 * 
 *     @dec_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15685
 *         self._data.enc_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def dec_util(self):
 *         """value_dtype: Decoder Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.dec_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
}

/* "cuda/bindings/_nvml.pyx":15690
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
*/

/* Python wrapper */
/* CPython setter-slot wrapper for `dec_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `dec_util` property setter: forwards the
 * assignment onto the `dec_util` attribute of the wrapped NumPy record
 * (self->_data).  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15692
 *     @dec_util.setter
 *     def dec_util(self, val):
 *         self._data.dec_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_dec_util, __pyx_v_val) < (0)) __PYX_ERR(0, 15692, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15690
 *         return self._data.dec_util
 * 
 *     @dec_util.setter             # <<<<<<<<<<<<<<
 *     def dec_util(self, val):
 *         self._data.dec_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.dec_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15694
 *         self._data.dec_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpg_util(self):
 *         """value_dtype: Jpeg Util Value."""
*/

/* Python wrapper */
/* CPython getter-slot wrapper for `jpg_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `jpg_util` property getter: returns
 * self._data.jpg_util from the wrapped NumPy record.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15697
 *     def jpg_util(self):
 *         """value_dtype: Jpeg Util Value."""
 *         return self._data.jpg_util             # <<<<<<<<<<<<<<
 * 
 *     @jpg_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15697, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15694
 *         self._data.dec_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def jpg_util(self):
 *         """value_dtype: Jpeg Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.jpg_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15699
 *         return self._data.jpg_util
 * 
 *     @jpg_util.setter             # <<<<<<<<<<<<<<
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val
*/

/* Python wrapper */
/* CPython setter-slot wrapper for `jpg_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `jpg_util` property setter: forwards the
 * assignment onto the `jpg_util` attribute of the wrapped NumPy record
 * (self->_data).  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15701
 *     @jpg_util.setter
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_jpg_util, __pyx_v_val) < (0)) __PYX_ERR(0, 15701, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15699
 *         return self._data.jpg_util
 * 
 *     @jpg_util.setter             # <<<<<<<<<<<<<<
 *     def jpg_util(self, val):
 *         self._data.jpg_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.jpg_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15703
 *         self._data.jpg_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_util(self):
 *         """value_dtype: Ofa Util Value."""
*/

/* Python wrapper */
/* CPython getter-slot wrapper for `ofa_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ofa_util` property getter: returns
 * self._data.ofa_util from the wrapped NumPy record.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15706
 *     def ofa_util(self):
 *         """value_dtype: Ofa Util Value."""
 *         return self._data.ofa_util             # <<<<<<<<<<<<<<
 * 
 *     @ofa_util.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15706, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15703
 *         self._data.jpg_util = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ofa_util(self):
 *         """value_dtype: Ofa Util Value."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.ofa_util.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15708
 *         return self._data.ofa_util
 * 
 *     @ofa_util.setter             # <<<<<<<<<<<<<<
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val
*/

/* Python wrapper */
/* CPython setter-slot wrapper for `ofa_util`: casts `self` to the
 * extension-type struct and forwards to the typed implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ofa_util` property setter: forwards the
 * assignment onto the `ofa_util` attribute of the wrapped NumPy record
 * (self->_data).  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15710
 *     @ofa_util.setter
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ofa_util, __pyx_v_val) < (0)) __PYX_ERR(0, 15710, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15708
 *         return self._data.ofa_util
 * 
 *     @ofa_util.setter             # <<<<<<<<<<<<<<
 *     def ofa_util(self, val):
 *         self._data.ofa_util = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.ofa_util.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15712
 *         self._data.ofa_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* CPython mp_subscript slot wrapper for `__getitem__`: casts `self` to
 * the extension-type struct and forwards to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__getitem__.
 *
 * Two paths, mirroring the .pyx source (lines 15712-15726):
 *   - integer key: bounds-check against self._data.size, normalize a
 *     negative index, then return a length-1 slice of the underlying
 *     record array wrapped via VgpuInstanceUtilizationInfo_v1.from_data;
 *   - any other key (slice, mask, ...): index self._data directly and,
 *     when the result is a numpy.recarray with the matching dtype, wrap
 *     it via from_data; otherwise return it unchanged.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":15715
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  /* Fast path: exact/derived Python int key. */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15716
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15716, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":15717
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15717, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":15718
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    /* Bounds check accepting negative indices down to -size. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15719
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15719, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 15719, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15718
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":15720
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 */
    /* Normalize a negative index to its positive equivalent. */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":15721
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":15720
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":15722
 *             if key_ < 0:
 *                 key_ += size
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_info_v1_dtype:
 */
    /* Take a length-1 slice (a view, not a copy) and wrap it in the
     * extension type via the from_data static method. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15722, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15722, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15715
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":15723
 *                 key_ += size
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_info_v1_dtype:
 *             return VgpuInstanceUtilizationInfo_v1.from_data(out)
 */
  /* Generic path: let NumPy handle slices, masks, fancy indexing, etc. */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":15724
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             return VgpuInstanceUtilizationInfo_v1.from_data(out)
 *         return out
 */
  /* Only re-wrap results that are still recarrays of this struct's dtype;
   * scalar field reads and differently-typed results fall through. */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_vgpu_instance_utilization_info_v); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15725
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_info_v1_dtype:
 *             return VgpuInstanceUtilizationInfo_v1.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15725, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15724
 *             return VgpuInstanceUtilizationInfo_v1.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             return VgpuInstanceUtilizationInfo_v1.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":15726
 *         if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_info_v1_dtype:
 *             return VgpuInstanceUtilizationInfo_v1.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15712
 *         self._data.ofa_util = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15728
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* CPython mp_ass_subscript slot wrapper for `__setitem__`: casts `self`
 * to the extension-type struct and forwards to the typed implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__setitem__:
 * delegates `self._data[key] = val` to the wrapped NumPy record array.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15729
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 15729, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15728
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15731
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given NumPy array.
 */

/* Declarations for the static method VgpuInstanceUtilizationInfo_v1.from_data:
 * forward prototype of the Python-level wrapper, its docstring, and the PyMethodDef
 * entry used to register it on the extension type. */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_14from_data, "VgpuInstanceUtilizationInfo_v1.from_data(data)\n\nCreate an VgpuInstanceUtilizationInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `vgpu_instance_utilization_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_14from_data};
/* Python wrapper for the static method VgpuInstanceUtilizationInfo_v1.from_data(data).
 * Unpacks the single positional-or-keyword argument `data` under either the FASTCALL
 * or the tuple/dict calling convention (selected by CYTHON_METH_FASTCALL), then
 * forwards it to the implementation function. Returns a new reference, or NULL with
 * an exception set when argument unpacking fails. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(). `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), which normalizes x to 0/1 before the `< 0` test,
     * so the original `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15731, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15731, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 15731, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 15731, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument and no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15731, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15731, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.from_data(data):
 * validates that `data` is a 1-D NumPy ndarray whose dtype equals the module-level
 * `vgpu_instance_utilization_info_v1_dtype`, allocates a new instance via tp_new,
 * and stores `data.view(numpy.recarray)` in its `_data` field. Returns a new
 * reference to the instance, or NULL with TypeError/ValueError set on bad input.
 * The refcount/error-label flow is generated code; do not reorder statements. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":15738
 *             data (_numpy.ndarray): a 1D array of dtype `vgpu_instance_utilization_info_v1_dtype` holding the data.
 *         """
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Allocate the instance directly via the type's tp_new (bypasses __init__). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15738, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15739
 *         """
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* Validation 1: `data` must be a numpy.ndarray (looked up from module globals). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 15739, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15740
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15740, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15740, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15739
 *         """
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15741
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:
 */
  /* Validation 2: the array must be one-dimensional. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15741, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15741, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15742
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype vgpu_instance_utilization_info_v1_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15742, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15742, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15741
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15743
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype vgpu_instance_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* Validation 3: the dtype must match the expected structured dtype (rich compare
   * with Py_NE, since dtype comparison is an arbitrary Python-level operation). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15743, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_instance_utilization_info_v); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15743, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15743, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15743, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15744
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype vgpu_instance_utilization_info_v1_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_vgpu_3};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15744, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 15744, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15743
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype vgpu_instance_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15745
 *         if data.dtype != vgpu_instance_utilization_info_v1_dtype:
 *             raise ValueError("data array must be of dtype vgpu_instance_utilization_info_v1_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Call data.view(numpy.recarray) and store the result in obj->_data,
   * releasing whatever _data previously held. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15745, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15747
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15731
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15749
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given pointer.
 */

/* Declarations for the static method VgpuInstanceUtilizationInfo_v1.from_ptr:
 * forward prototype of the Python-level wrapper, its docstring, and the PyMethodDef
 * entry used to register it on the extension type. */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_16from_ptr, "VgpuInstanceUtilizationInfo_v1.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an VgpuInstanceUtilizationInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_16from_ptr};
/* Python wrapper for the static method
 * VgpuInstanceUtilizationInfo_v1.from_ptr(ptr, size=1, readonly=False).
 * Unpacks 1-3 positional-or-keyword arguments, converts them to C types
 * (intptr_t / size_t / bint), applies defaults, and forwards to the
 * implementation function. Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(). `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0), which normalizes x to 0/1 before the `< 0` test,
     * so the original `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15749, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args (fallthrough switch), merge keywords,
       * then verify the one required argument (`ptr`) was provided. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 15749, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 15749, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1 to 3 arguments, no keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15749, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): converting `ptr` with PyLong_AsSsize_t assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on every supported platform --
     * true for common LP64/LLP64 targets, but worth confirming. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15750, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15750, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15750, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":15750
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 15749, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":15749
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.from_ptr(ptr, size, readonly):
 * rejects a null pointer, allocates a new instance via tp_new, wraps the raw memory
 * at `ptr` (size * sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) bytes) in a
 * memoryview with PyBUF_READ or PyBUF_WRITE, builds a NumPy ndarray over that
 * buffer with the module's structured dtype, and stores its recarray view in _data.
 * Returns a new reference, or NULL with an exception set.
 * NOTE(review): the caller owns the memory behind `ptr`; it must stay alive for the
 * lifetime of the returned object -- presumably documented at the Python level. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15758
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 */
  /* Guard: refuse to wrap a NULL pointer. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15759
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15759, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15759, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15758
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15760
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate the instance directly via the type's tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15760, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15761
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) * size, flag)
 */
  /* `flag` is a Python int (PyBUF_READ or PyBUF_WRITE) because the Cython source
   * declared it as an untyped `cdef flag`; it is converted back to C int below. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15761, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15761, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15763
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_instance_utilization_info_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15763, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15762
 *         cdef VgpuInstanceUtilizationInfo_v1 obj = VgpuInstanceUtilizationInfo_v1.__new__(VgpuInstanceUtilizationInfo_v1)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_instance_utilization_info_v1_dtype)
 */
  /* The memoryview does NOT copy: it aliases size structs' worth of raw memory. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15762, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15764
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_instance_utilization_info_v1_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* Vectorcall of numpy.ndarray(size, buffer=buf, dtype=...) with the two keyword
   * arguments assembled via the kwarg builder. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_vgpu_instance_utilization_info_v); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15764, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 15764, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 15764, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15764, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15765
 *             <char*>ptr, sizeof(nvmlVgpuInstanceUtilizationInfo_v1_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_instance_utilization_info_v1_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Store data.view(numpy.recarray) into obj->_data, releasing the old value. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15765, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15767
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15749
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15597
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
 */

/* Python wrapper (getset getter) for the readonly `_data` attribute:
 * casts self to the extension-type struct and delegates to the getter body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): this getter signature has no __pyx_args/__pyx_nargs parameters, so
   * __Pyx_KwValues_VARARGS presumably expands to NULL and ignores its arguments --
   * confirm against the macro definition earlier in this generated file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `_data` getter: returns the object stored in the
 * instance's `_data` slot, handing the caller a new (incref'd) reference. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* New reference for the caller; the slot keeps its own reference. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper for VgpuInstanceUtilizationInfo_v1.__reduce_cython__:
 * the method takes only `self`, so any positional or keyword argument is
 * rejected before forwarding to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_18__reduce_cython__, "VgpuInstanceUtilizationInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Without the fastcall ABI, recover the positional count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero positional and zero keyword arguments are accepted. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__reduce_cython__.
 * Builds the pickle reduction for this extension type:
 *   state = (self._data,), optionally extended with self.__dict__ when
 *   the instance dict exists and is non-empty, and returns either
 *     (unpickle_fn, (type(self), 0xa75e18a, None), state)  -- __setstate__ form
 *   or
 *     (unpickle_fn, (type(self), 0xa75e18a, state))        -- direct form
 * where 0xa75e18a (175497610) is the Cython-generated layout checksum. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 */
  /* state = (self._data,): pack the single readonly attribute. */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
  /* Short-circuit `and`: only run the truthiness test when _dict is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
 */
    /* Append the instance dict to the state tuple. */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
 */
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 */
  /*else*/ {
    /* NOTE(review): __pyx_tuple[2] presumably holds the constant tuple
     * ('self._data is not None',) from the fragment above; a non-empty
     * tuple is truthy, so use_setstate would be 1 on this path too —
     * confirm against the module's tuple-constant table. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 */
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 */
    /* Three-element reduction: state is delivered separately so the
     * unpickler applies it via __setstate__. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuInstanceUtili); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 */
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(self, __pyx_state)
 */
  /*else*/ {
    /* Two-element reduction: state is embedded in the constructor args. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuInstanceUtili); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(self, __pyx_state)
*/

/* Python wrapper for VgpuInstanceUtilizationInfo_v1.__setstate_cython__.
 * Unpacks exactly one argument, `__pyx_state` (positional or keyword),
 * then forwards to the implementation function.
 *
 * Fix: the keyword-count error check previously read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With the common definition unlikely(x) == __builtin_expect(!!(x), 0)
 * the macro yields 0/1, so `< 0` was always false and a negative
 * (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored.
 * The comparison now sits inside unlikely(), matching the pattern used
 * elsewhere in this file (e.g. the __reduce_cython__ wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_20__setstate_cython__, "VgpuInstanceUtilizationInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Without the fastcall ABI, recover the positional count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely() so a negative error
     * return from __Pyx_NumKwargs_FASTCALL is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then let
       * ParseKeywords fill the remainder and reject unknown names. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstanceUtilizationInfo_v1.__setstate_cython__:
 * validates that __pyx_state is a tuple (explicitly rejecting None, per
 * the `not None` declaration on the generated __set_state helper) and
 * applies it to `self`; returns None. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Type check admits tuple or None here; the None case is then rejected
   * below with a dedicated error message. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15770
 * 
 * 
 * cdef _get_field_value_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFieldValue_t pod = nvmlFieldValue_t()
 *     return _numpy.dtype({
*/

static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_field_value_dtype_offsets(void) {
  nvmlFieldValue_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlFieldValue_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_field_value_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":15771
 * 
 * cdef _get_field_value_dtype_offsets():
 *     cdef nvmlFieldValue_t pod = nvmlFieldValue_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['field_id', 'scope_id', 'timestamp', 'latency_usec', 'value_type', 'nvml_return', 'value'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":15772
 * cdef _get_field_value_dtype_offsets():
 *     cdef nvmlFieldValue_t pod = nvmlFieldValue_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['field_id', 'scope_id', 'timestamp', 'latency_usec', 'value_type', 'nvml_return', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int64, _numpy.int64, _numpy.int32, _numpy.int32, value_dtype],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15772, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15772, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":15773
 *     cdef nvmlFieldValue_t pod = nvmlFieldValue_t()
 *     return _numpy.dtype({
 *         'names': ['field_id', 'scope_id', 'timestamp', 'latency_usec', 'value_type', 'nvml_return', 'value'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int64, _numpy.int64, _numpy.int32, _numpy.int32, value_dtype],
 *         'offsets': [
*/
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_field_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_field_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_field_id) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scope_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scope_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_scope_id) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_timestamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_timestamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_timestamp) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_latency_usec);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_latency_usec);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_latency_usec) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_value_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_value_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_value_type) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_nvml_return);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_nvml_return);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_nvml_return) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_value);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_value);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_value) != (0)) __PYX_ERR(0, 15773, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 15773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15774
 *     return _numpy.dtype({
 *         'names': ['field_id', 'scope_id', 'timestamp', 'latency_usec', 'value_type', 'nvml_return', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int64, _numpy.int64, _numpy.int32, _numpy.int32, value_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.fieldId)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int64); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_value_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_13 = PyList_New(7); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 15774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 6, __pyx_t_6) != (0)) __PYX_ERR(0, 15774, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_13) < (0)) __PYX_ERR(0, 15773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;

  /* "cuda/bindings/_nvml.pyx":15776
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int64, _numpy.int64, _numpy.int32, _numpy.int32, value_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.fieldId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.scopeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.fieldId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 15776, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":15777
 *         'offsets': [
 *             (<intptr_t>&(pod.fieldId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.scopeId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.latencyUsec)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.scopeId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15777, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":15778
 *             (<intptr_t>&(pod.fieldId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.scopeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.latencyUsec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.valueType)) - (<intptr_t>&pod),
*/
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.timestamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 15778, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":15779
 *             (<intptr_t>&(pod.scopeId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.latencyUsec)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.valueType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.nvmlReturn)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.latencyUsec)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 15779, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":15780
 *             (<intptr_t>&(pod.timestamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.latencyUsec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.valueType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.nvmlReturn)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.valueType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15780, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":15781
 *             (<intptr_t>&(pod.latencyUsec)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.valueType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.nvmlReturn)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.nvmlReturn)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15781, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":15782
 *             (<intptr_t>&(pod.valueType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.nvmlReturn)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlFieldValue_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.value)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15782, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15775
 *         'names': ['field_id', 'scope_id', 'timestamp', 'latency_usec', 'value_type', 'nvml_return', 'value'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int64, _numpy.int64, _numpy.int32, _numpy.int32, value_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.fieldId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.scopeId)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15775, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_13) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_11) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 15775, __pyx_L1_error);
  __pyx_t_13 = 0;
  __pyx_t_6 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 15773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15784
 *             (<intptr_t>&(pod.value)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlFieldValue_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlFieldValue_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15784, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 15773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15772, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15770
 * 
 * 
 * cdef _get_field_value_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlFieldValue_t pod = nvmlFieldValue_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_field_value_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15806
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=field_value_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/*
 * Cython-generated CPython wrapper for FieldValue.__init__(self, size=1).
 *
 * Unpacks the positional tuple / keyword dict into values[0] ("size"),
 * substituting the interned default integer 1 when the argument is
 * omitted, then forwards to the implementation function
 * __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue___init__.
 * Returns 0 on success, -1 with a Python exception set on failure
 * (bad argument count/keywords, or a failure inside the impl).
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-safe-size branch checks for error. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15806, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15806, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 15806, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* No keywords: positional-only fast path with the same default. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15806, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 15806, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of FieldValue.__init__(self, size=1), generated from
 * cuda/bindings/_nvml.pyx:15806-15810.  Equivalent Python:
 *
 *     arr = _numpy.empty(size, dtype=field_value_dtype)
 *     self._data = arr.view(_numpy.recarray)
 *     assert self._data.itemsize == sizeof(nvmlFieldValue_t), ...
 *
 * i.e. it backs the FieldValue object with a numpy recarray whose record
 * layout mirrors the C struct nvmlFieldValue_t, and asserts (unless
 * assertions are disabled) that the dtype itemsize matches sizeof of the
 * struct.  Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":15807
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=field_value_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlFieldValue_t), \
 */
  /* Look up _numpy.empty and the module-level field_value_dtype, then
     vectorcall numpy.empty(size, dtype=field_value_dtype). */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_field_value_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If the callable is a bound method, unpack self+function to avoid a
     temporary args tuple. */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15807, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 15807, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15807, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15808
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=field_value_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlFieldValue_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFieldValue_t) }"
 */
  /* Call arr.view(numpy.recarray) and store the result on self._data,
     dropping the reference previously held by the attribute. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15808, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15808, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15808, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15809
 *         arr = _numpy.empty(size, dtype=field_value_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlFieldValue_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFieldValue_t) }"
 * 
 */
  /* Sanity check (skipped under python -O / CYTHON_WITHOUT_ASSERTIONS):
     the recarray itemsize must equal sizeof(nvmlFieldValue_t). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15809, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlFieldValue_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15809, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15809, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 15809, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":15810
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlFieldValue_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFieldValue_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string assertion message and raise AssertionError. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15810, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15810, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlFieldValue_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15810, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15810, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 15809, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 15809, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":15806
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=field_value_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15812
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFieldValue_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3__repr__(PyObject *__pyx_v_self); /*proto*/
/*
 * Cython-generated wrapper for FieldValue.__repr__: casts self to the
 * extension-type struct and forwards to the implementation.
 * NOTE(review): __pyx_args/__pyx_nargs are not declared here; this only
 * compiles because __Pyx_KwValues_VARARGS presumably expands without
 * evaluating its arguments in this configuration — confirm against the
 * Cython 3.2.2 utility-code macros.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of FieldValue.__repr__, generated from
 * cuda/bindings/_nvml.pyx:15812-15816.  Equivalent Python:
 *
 *     if self._data.size > 1:
 *         return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"
 *     else:
 *         return f"<{__name__}.FieldValue object at {hex(id(self))}>"
 *
 * Both branches build the f-string via __Pyx_PyUnicode_Join over a stack
 * array of literal and formatted fragments.  Returns a new unicode
 * reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":15813
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  /* Evaluate self._data.size > 1 via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15813, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 15813, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":15814
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.FieldValue object at {hex(id(self))}>"
 */
    /* Array branch: format __name__, _data.size and hex(id(self)),
       then join the 7 fragments. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_FieldValue_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 18 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15813
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15816
 *             return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.FieldValue object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar branch: same formatting without the size fragment. */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_FieldValue_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 22 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":15812
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlFieldValue_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.FieldValue_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15818
 *             return f"<{__name__}.FieldValue object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
/*
 * Cython-generated getter wrapper for the `ptr` property: forwards to
 * the implementation with self cast to the extension-type struct.
 * NOTE(review): as in the other no-arg wrappers, __pyx_args/__pyx_nargs
 * are not in scope; __Pyx_KwValues_VARARGS presumably does not evaluate
 * its arguments here — confirm against Cython's utility code.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the FieldValue.ptr property getter (pyx:15818-15821):
 * returns self._data.ctypes.data — the numpy array's buffer address as a
 * Python int.  Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15821
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Chained attribute lookup: self._data.ctypes, then .data. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15818
 *             return f"<{__name__}.FieldValue object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15823
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/*
 * C-level `cdef intptr_t _get_ptr(self)` (pyx:15823-15824): fetches
 * self._data.ctypes.data and converts the Python int to an intptr_t via
 * PyLong_AsSsize_t.  On error it returns 0 after recording a traceback;
 * callers are expected to distinguish the error via PyErr_Occurred()
 * (cdef functions have no separate error return channel here).
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_10FieldValue__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15824
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15824, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15824, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* -1 is the conversion-error sentinel; PyErr_Occurred() disambiguates
     a genuine -1 address from a failed conversion. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15824, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15823
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15826
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5__int__(PyObject *__pyx_v_self); /*proto*/
/*
 * Cython-generated wrapper for FieldValue.__int__: forwards to the
 * implementation with self cast to the extension-type struct.
 * NOTE(review): __pyx_args/__pyx_nargs are not in scope; relies on
 * __Pyx_KwValues_VARARGS not evaluating its arguments — confirm against
 * Cython's utility code.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of FieldValue.__int__ (pyx:15826-15830).  Equivalent
 * Python:
 *
 *     if self._data.size > 1:
 *         raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                         "To get the pointer address of an array, use .ptr")
 *     return self._data.ctypes.data
 *
 * i.e. a single-element FieldValue converts to its buffer address;
 * multi-element arrays raise TypeError.  Returns a new reference or NULL
 * with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":15827
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  /* Evaluate self._data.size > 1 via rich comparison. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15827, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 15827, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":15828
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Instantiate TypeError with the interned message and raise it. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15828, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15828, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15827
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":15830
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single element: return the buffer address (self._data.ctypes.data). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15830, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15830, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15826
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15832
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper for FieldValue.__len__ (sq_length slot): casts the
 * generic PyObject* self to the FieldValue extension-type struct and
 * delegates to the __pyx_pf_ implementation below. Generated by Cython. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FieldValue.__len__: returns self._data.size as a
 * Py_ssize_t, or -1 with a Python exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":15833
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  /* Look up the Python attribute self._data.size ... */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15833, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* ... and convert it to Py_ssize_t via the index protocol; a result of
   * -1 with a pending exception signals conversion failure. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15833, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15832
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  /* Error path: release the temporary, record the traceback, and return -1
   * so the caller sees the raised exception. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15835
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper for FieldValue.__eq__: casts self to the extension-type
 * struct and forwards (self, other) to the implementation. Generated by
 * Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FieldValue.__eq__: returns Py_False when `other` is
 * not a FieldValue or when the two _data objects differ in size or dtype;
 * otherwise returns bool((self._data == other._data).all()). Returns NULL
 * with an exception set on failure. The short-circuit `or` chain from the
 * .pyx source is compiled into the __pyx_L4_bool_binop_done goto ladder. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":15836
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Cache self._data in a local with its own strong reference. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15837
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Condition 1: not isinstance(other, FieldValue). */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 2: self_data.size != other._data.size. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Condition 3: self_data.dtype != other._data.dtype. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15837, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15838
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    /* Any mismatch: return a new reference to False. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15837
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":15839
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Element-wise compare (self_data == other._data), then call .all() on
   * the comparison result via a vectorcall method lookup, and collapse the
   * outcome to a Python bool. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15839, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15839, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15839, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Double negation (!!) normalizes the truth value before boxing it. */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15839, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15839, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15835
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, FieldValue)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  /* Error path: drop all temporaries, add traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15841
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def field_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper for the FieldValue.field_id property getter: casts self
 * and delegates to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8field_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8field_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8field_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the field_id getter: when self._data.size == 1 it
 * returns int(self._data.field_id[0]); otherwise it returns the
 * self._data.field_id attribute object unchanged. NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8field_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15844
 *     def field_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.field_id[0])
 *         return self._data.field_id
 */
  /* Test self._data.size == 1 using the optimized int-vs-object compare. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15844, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15844, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15845
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.field_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.field_id
 * 
 */
    /* Single element: fetch field_id, index [0], coerce to Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_field_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15845, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15845, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15845, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15844
 *     def field_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.field_id[0])
 *         return self._data.field_id
 */
  }

  /* "cuda/bindings/_nvml.pyx":15846
 *         if self._data.size == 1:
 *             return int(self._data.field_id[0])
 *         return self._data.field_id             # <<<<<<<<<<<<<<
 * 
 *     @field_id.setter
 */
  /* Multi-element: return the field_id attribute object as-is. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_field_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15841
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def field_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.field_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15848
 *         return self._data.field_id
 * 
 *     @field_id.setter             # <<<<<<<<<<<<<<
 *     def field_id(self, val):
 *         self._data.field_id = val
*/

/* Python wrapper for the FieldValue.field_id property setter: casts self
 * and forwards the new value to the implementation. Generated by Cython. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8field_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8field_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8field_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the field_id setter: performs the attribute store
 * self._data.field_id = val. Returns 0 on success, -1 with an exception
 * set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8field_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15850
 *     @field_id.setter
 *     def field_id(self, val):
 *         self._data.field_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_field_id, __pyx_v_val) < (0)) __PYX_ERR(0, 15850, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15848
 *         return self._data.field_id
 * 
 *     @field_id.setter             # <<<<<<<<<<<<<<
 *     def field_id(self, val):
 *         self._data.field_id = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.field_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15852
 *         self._data.field_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scope_id(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper for the FieldValue.scope_id property getter: casts self
 * and delegates to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8scope_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the scope_id getter: when self._data.size == 1 it
 * returns int(self._data.scope_id[0]); otherwise it returns the
 * self._data.scope_id attribute object unchanged. NULL on error.
 * Structurally identical to the field_id getter above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8scope_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15855
 *     def scope_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.scope_id[0])
 *         return self._data.scope_id
 */
  /* Test self._data.size == 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15855, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15855, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15856
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.scope_id[0])             # <<<<<<<<<<<<<<
 *         return self._data.scope_id
 * 
 */
    /* Single element: fetch scope_id, index [0], coerce to Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_scope_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15856, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15856, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15856, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15855
 *     def scope_id(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.scope_id[0])
 *         return self._data.scope_id
 */
  }

  /* "cuda/bindings/_nvml.pyx":15857
 *         if self._data.size == 1:
 *             return int(self._data.scope_id[0])
 *         return self._data.scope_id             # <<<<<<<<<<<<<<
 * 
 *     @scope_id.setter
 */
  /* Multi-element: return the scope_id attribute object as-is. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_scope_id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15857, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15852
 *         self._data.field_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scope_id(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.scope_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15859
 *         return self._data.scope_id
 * 
 *     @scope_id.setter             # <<<<<<<<<<<<<<
 *     def scope_id(self, val):
 *         self._data.scope_id = val
*/

/* Python wrapper for the FieldValue.scope_id property setter: casts self
 * and forwards the new value to the implementation. Generated by Cython. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the scope_id setter: performs the attribute store
 * self._data.scope_id = val. Returns 0 on success, -1 with an exception
 * set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15861
 *     @scope_id.setter
 *     def scope_id(self, val):
 *         self._data.scope_id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_scope_id, __pyx_v_val) < (0)) __PYX_ERR(0, 15861, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15859
 *         return self._data.scope_id
 * 
 *     @scope_id.setter             # <<<<<<<<<<<<<<
 *     def scope_id(self, val):
 *         self._data.scope_id = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.scope_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15863
 *         self._data.scope_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timestamp(self):
 *         """Union[~_numpy.int64, int]: """
*/

/* Python wrapper for the FieldValue.timestamp property getter: casts self
 * and delegates to the implementation. Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_9timestamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the timestamp getter: when self._data.size == 1 it
 * returns int(self._data.timestamp[0]); otherwise it returns the
 * self._data.timestamp attribute object unchanged. NULL on error.
 * Structurally identical to the field_id/scope_id getters above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_9timestamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15866
 *     def timestamp(self):
 *         """Union[~_numpy.int64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.timestamp[0])
 *         return self._data.timestamp
 */
  /* Test self._data.size == 1. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15866, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15866, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15867
 *         """Union[~_numpy.int64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.timestamp[0])             # <<<<<<<<<<<<<<
 *         return self._data.timestamp
 * 
 */
    /* Single element: fetch timestamp, index [0], coerce to Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_timestamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15867, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15866
 *     def timestamp(self):
 *         """Union[~_numpy.int64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.timestamp[0])
 *         return self._data.timestamp
 */
  }

  /* "cuda/bindings/_nvml.pyx":15868
 *         if self._data.size == 1:
 *             return int(self._data.timestamp[0])
 *         return self._data.timestamp             # <<<<<<<<<<<<<<
 * 
 *     @timestamp.setter
 */
  /* Multi-element: return the timestamp attribute object as-is. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_timestamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15868, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15863
 *         self._data.scope_id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def timestamp(self):
 *         """Union[~_numpy.int64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.timestamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15870
 *         return self._data.timestamp
 * 
 *     @timestamp.setter             # <<<<<<<<<<<<<<
 *     def timestamp(self, val):
 *         self._data.timestamp = val
*/

/* Python wrapper for the FieldValue.timestamp property setter: casts self
 * and forwards the new value to the implementation. Generated by Cython. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the timestamp setter: performs the attribute store
 * self._data.timestamp = val. Returns 0 on success, -1 with an exception
 * set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15872
 *     @timestamp.setter
 *     def timestamp(self, val):
 *         self._data.timestamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_timestamp, __pyx_v_val) < (0)) __PYX_ERR(0, 15872, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15870
 *         return self._data.timestamp
 * 
 *     @timestamp.setter             # <<<<<<<<<<<<<<
 *     def timestamp(self, val):
 *         self._data.timestamp = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.timestamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15874
 *         self._data.timestamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def latency_usec(self):
 *         """Union[~_numpy.int64, int]: """
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.latency_usec.__get__`: casts the generic
 * PyObject* `self` to the extension-type struct and delegates to the
 * _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.latency_usec.__get__` (_nvml.pyx:15875-15879):
 * if self._data.size == 1, returns int(self._data.latency_usec[0]) as a
 * Python int; otherwise returns the self._data.latency_usec attribute
 * unchanged.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15877
 *     def latency_usec(self):
 *         """Union[~_numpy.int64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.latency_usec[0])
 *         return self._data.latency_usec
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15877, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15877, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15878
 *         """Union[~_numpy.int64, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.latency_usec[0])             # <<<<<<<<<<<<<<
 *         return self._data.latency_usec
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_latency_usec); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15878, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15878, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15878, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15877
 *     def latency_usec(self):
 *         """Union[~_numpy.int64, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.latency_usec[0])
 *         return self._data.latency_usec
 */
  }

  /* "cuda/bindings/_nvml.pyx":15879
 *         if self._data.size == 1:
 *             return int(self._data.latency_usec[0])
 *         return self._data.latency_usec             # <<<<<<<<<<<<<<
 * 
 *     @latency_usec.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_latency_usec); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15879, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15874
 *         self._data.timestamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def latency_usec(self):
 *         """Union[~_numpy.int64, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.latency_usec.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15881
 *         return self._data.latency_usec
 * 
 *     @latency_usec.setter             # <<<<<<<<<<<<<<
 *     def latency_usec(self, val):
 *         self._data.latency_usec = val
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.latency_usec.__set__`: casts `self` and
 * forwards `val` to the _pf_ implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.latency_usec.__set__` (_nvml.pyx:15882-15883):
 * stores `val` into the "latency_usec" attribute of self._data.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15883
 *     @latency_usec.setter
 *     def latency_usec(self, val):
 *         self._data.latency_usec = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_latency_usec, __pyx_v_val) < (0)) __PYX_ERR(0, 15883, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15881
 *         return self._data.latency_usec
 * 
 *     @latency_usec.setter             # <<<<<<<<<<<<<<
 *     def latency_usec(self, val):
 *         self._data.latency_usec = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.latency_usec.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15885
 *         self._data.latency_usec = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value_type(self):
 *         """Union[~_numpy.int32, int]: """
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.value_type.__get__`: casts `self` and
 * delegates to the _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_10value_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_10value_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10value_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.value_type.__get__` (_nvml.pyx:15886-15890):
 * if self._data.size == 1, returns int(self._data.value_type[0]) as a
 * Python int; otherwise returns the self._data.value_type attribute
 * unchanged.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10value_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15888
 *     def value_type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.value_type[0])
 *         return self._data.value_type
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15888, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15889
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.value_type[0])             # <<<<<<<<<<<<<<
 *         return self._data.value_type
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_value_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15889, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15889, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15889, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15888
 *     def value_type(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.value_type[0])
 *         return self._data.value_type
 */
  }

  /* "cuda/bindings/_nvml.pyx":15890
 *         if self._data.size == 1:
 *             return int(self._data.value_type[0])
 *         return self._data.value_type             # <<<<<<<<<<<<<<
 * 
 *     @value_type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_value_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15885
 *         self._data.latency_usec = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value_type(self):
 *         """Union[~_numpy.int32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.value_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15892
 *         return self._data.value_type
 * 
 *     @value_type.setter             # <<<<<<<<<<<<<<
 *     def value_type(self, val):
 *         self._data.value_type = val
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.value_type.__set__`: casts `self` and
 * forwards `val` to the _pf_ implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_10value_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_10value_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10value_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.value_type.__set__` (_nvml.pyx:15893-15894):
 * stores `val` into the "value_type" attribute of self._data.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10value_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15894
 *     @value_type.setter
 *     def value_type(self, val):
 *         self._data.value_type = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_value_type, __pyx_v_val) < (0)) __PYX_ERR(0, 15894, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15892
 *         return self._data.value_type
 * 
 *     @value_type.setter             # <<<<<<<<<<<<<<
 *     def value_type(self, val):
 *         self._data.value_type = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.value_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15896
 *         self._data.value_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def nvml_return(self):
 *         """Union[~_numpy.int32, int]: """
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.nvml_return.__get__`: casts `self` and
 * delegates to the _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.nvml_return.__get__` (_nvml.pyx:15897-15901):
 * if self._data.size == 1, returns int(self._data.nvml_return[0]) as a
 * Python int; otherwise returns the self._data.nvml_return attribute
 * unchanged.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15899
 *     def nvml_return(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.nvml_return[0])
 *         return self._data.nvml_return
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15899, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 15899, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":15900
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.nvml_return[0])             # <<<<<<<<<<<<<<
 *         return self._data.nvml_return
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_nvml_return); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15900, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15900, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15900, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15899
 *     def nvml_return(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.nvml_return[0])
 *         return self._data.nvml_return
 */
  }

  /* "cuda/bindings/_nvml.pyx":15901
 *         if self._data.size == 1:
 *             return int(self._data.nvml_return[0])
 *         return self._data.nvml_return             # <<<<<<<<<<<<<<
 * 
 *     @nvml_return.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_nvml_return); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15901, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15896
 *         self._data.value_type = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def nvml_return(self):
 *         """Union[~_numpy.int32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.nvml_return.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15903
 *         return self._data.nvml_return
 * 
 *     @nvml_return.setter             # <<<<<<<<<<<<<<
 *     def nvml_return(self, val):
 *         self._data.nvml_return = val
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.nvml_return.__set__`: casts `self` and
 * forwards `val` to the _pf_ implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.nvml_return.__set__` (_nvml.pyx:15904-15905):
 * stores `val` into the "nvml_return" attribute of self._data.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15905
 *     @nvml_return.setter
 *     def nvml_return(self, val):
 *         self._data.nvml_return = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_nvml_return, __pyx_v_val) < (0)) __PYX_ERR(0, 15905, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15903
 *         return self._data.nvml_return
 * 
 *     @nvml_return.setter             # <<<<<<<<<<<<<<
 *     def nvml_return(self, val):
 *         self._data.nvml_return = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.nvml_return.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15907
 *         self._data.nvml_return = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value(self):
 *         """value_dtype: """
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.value.__get__`: casts `self` and delegates
 * to the _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5value_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5value_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5value___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.value.__get__` (_nvml.pyx:15908-15910):
 * returns the self._data.value attribute unchanged (no size==1 scalar
 * unwrapping, unlike the other getters above).  Returns NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5value___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":15910
 *     def value(self):
 *         """value_dtype: """
 *         return self._data.value             # <<<<<<<<<<<<<<
 * 
 *     @value.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15907
 *         self._data.nvml_return = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def value(self):
 *         """value_dtype: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.value.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15912
 *         return self._data.value
 * 
 *     @value.setter             # <<<<<<<<<<<<<<
 *     def value(self, val):
 *         self._data.value = val
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.value.__set__`: casts `self` and forwards
 * `val` to the _pf_ implementation below. */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5value_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5value_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.value.__set__` (_nvml.pyx:15913-15914):
 * stores `val` into the "value" attribute of self._data.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5value_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15914
 *     @value.setter
 *     def value(self, val):
 *         self._data.value = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_value, __pyx_v_val) < (0)) __PYX_ERR(0, 15914, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15912
 *         return self._data.value
 * 
 *     @value.setter             # <<<<<<<<<<<<<<
 *     def value(self, val):
 *         self._data.value = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.value.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15916
 *         self._data.value = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Slot wrapper for `FieldValue.__getitem__`: casts `self` and forwards
 * `key` to the _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `FieldValue.__getitem__` (_nvml.pyx:15916-15930).
 * Two paths:
 *  - `key` is a Python int: bounds-check it against self._data.size
 *    (raising IndexError when key >= size or key <= -(size+1)), normalize
 *    a negative index by adding size, then return
 *    FieldValue.from_data(self._data[key_:key_+1]) — a one-element slice
 *    wrapped back into a FieldValue.
 *  - any other key (slice, fancy index, field name, ...): evaluate
 *    out = self._data[key]; if `out` is a numpy.recarray whose dtype equals
 *    the module-level `field_value_dtype`, wrap it via
 *    FieldValue.from_data(out), otherwise return `out` unchanged.
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":15919
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15920
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 */
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15920, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":15921
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15921, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15921, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":15922
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":15923
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
 */
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15923, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 15923, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":15922
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 */
    }

    /* "cuda/bindings/_nvml.pyx":15924
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return FieldValue.from_data(self._data[key_:key_+1])
 */
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":15925
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return FieldValue.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 */
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":15924
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return FieldValue.from_data(self._data[key_:key_+1])
 */
    }

    /* "cuda/bindings/_nvml.pyx":15926
 *             if key_ < 0:
 *                 key_ += size
 *             return FieldValue.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == field_value_dtype:
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
    __Pyx_INCREF(__pyx_t_5);
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15926, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15926, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15919
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
 */
  }

  /* "cuda/bindings/_nvml.pyx":15927
 *                 key_ += size
 *             return FieldValue.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == field_value_dtype:
 *             return FieldValue.from_data(out)
 */
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":15928
 *             return FieldValue.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == field_value_dtype:             # <<<<<<<<<<<<<<
 *             return FieldValue.from_data(out)
 *         return out
 */
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_field_value_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15928, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":15929
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == field_value_dtype:
 *             return FieldValue.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15929, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":15928
 *             return FieldValue.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == field_value_dtype:             # <<<<<<<<<<<<<<
 *             return FieldValue.from_data(out)
 *         return out
 */
  }

  /* "cuda/bindings/_nvml.pyx":15930
 *         if isinstance(out, _numpy.recarray) and out.dtype == field_value_dtype:
 *             return FieldValue.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15916
 *         self._data.value = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15932
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python-level wrapper for FieldValue.__setitem__(key, val): casts `self` to the
 * extension-type struct and forwards to the implementation function below.
 * Returns the implementation's result (0 on success, -1 with an exception set). */
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * __Pyx_KwValues_VARARGS presumably expands without evaluating its arguments
   * (result is unused here) -- confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FieldValue.__setitem__: performs `self._data[key] = val`,
 * delegating the assignment to the wrapped NumPy recarray via the generic
 * PyObject_SetItem protocol.  Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  /* Traceback bookkeeping filled in by __PYX_ERR on failure. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":15933
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 15933, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15932
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15935
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FieldValue instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Forward declaration, docstring and method-table entry for the static method
 * FieldValue.from_data(data); the wrapper definition follows immediately below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_14from_data, "FieldValue.from_data(data)\n\nCreate an FieldValue instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `field_value_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_14from_data};
/* Argument-unpacking wrapper for the static method FieldValue.from_data(data).
 * Accepts both the METH_FASTCALL and classic tuple/dict calling conventions,
 * parses the single positional-or-keyword argument "data", and forwards it to
 * the implementation function.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* one owned slot per declared parameter */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; derive the positional count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` normalizes the operand to 0/1 via
     * __builtin_expect(!!(x), 0) under GCC/Clang, so the error branch for a
     * failed keyword-count query could never fire on those compilers. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15935, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: take positionals first, then let
       * __Pyx_ParseKeywords fill the remaining slots (rejecting duplicates). */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15935, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 15935, __pyx_L3_error)
      /* Any slot still empty means a required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 15935, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15935, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15935, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method FieldValue.from_data(data):
 * validates that `data` is a 1-D NumPy ndarray whose dtype equals the module's
 * `field_value_dtype`, stores a recarray view of it in a freshly allocated
 * FieldValue instance, and returns that instance (new reference).
 * Returns NULL with TypeError/ValueError set on validation failure.
 * The __pyx_t_* locals are scratch temporaries; all are released on the
 * __pyx_L1_error path. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":15942
 *             data (_numpy.ndarray): a 1D array of dtype `field_value_dtype` holding the data.
 *         """
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Allocate the instance directly via tp_new, bypassing __init__. */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_FieldValue(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15942, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":15943
 *         """
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 15943, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15944
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15944, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15944, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15943
 *         """
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15945
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != field_value_dtype:
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15945, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15945, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15946
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != field_value_dtype:
 *             raise ValueError("data array must be of dtype field_value_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15946, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15946, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15945
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != field_value_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":15947
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != field_value_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype field_value_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* dtype comparison goes through rich comparison (numpy dtype __ne__). */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15947, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_field_value_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15947, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15947, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 15947, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":15948
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != field_value_dtype:
 *             raise ValueError("data array must be of dtype field_value_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_fiel};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15948, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 15948, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15947
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != field_value_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype field_value_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15949
 *         if data.dtype != field_value_dtype:
 *             raise ValueError("data array must be of dtype field_value_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Call data.view(numpy.recarray) and store the result in obj->_data,
   * releasing whatever _data previously held. */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15949, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15951
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15935
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FieldValue instance wrapping the given NumPy array.
 */

  /* function exit code */
  /* Error path: release all live temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15953
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FieldValue instance wrapping the given pointer.
*/

/* Python wrapper */
/* Forward declaration, docstring and method-table entry for the static method
 * FieldValue.from_ptr(ptr, size=1, readonly=False); the wrapper definition
 * follows immediately below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_16from_ptr, "FieldValue.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an FieldValue instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_16from_ptr};
/* Argument-unpacking wrapper for the static method
 * FieldValue.from_ptr(ptr, size=1, readonly=False).  Accepts both FASTCALL and
 * tuple/dict calling conventions, converts the three arguments to their C
 * types (intptr_t, size_t, bint), applies defaults for the optional ones, and
 * forwards to the implementation.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};  /* one owned slot per declared parameter */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; derive the positional count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` normalizes the operand to 0/1 via
     * __builtin_expect(!!(x), 0) under GCC/Clang, so the error branch for a
     * failed keyword-count query could never fire on those compilers. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 15953, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals (falling through
       * from highest to lowest count), then parse the keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15953, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15953, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15953, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 15953, __pyx_L3_error)
      /* Only `ptr` (index 0) is required; report it if still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 15953, __pyx_L3_error) }
      }
    } else {
      /* Positional-only call with 1..3 arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15953, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15953, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15953, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t -- presumably
     * relies on intptr_t and Py_ssize_t sharing a representation on supported
     * platforms; confirm against Cython's type mapping. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 15954, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 15954, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15954, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":15954
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an FieldValue instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 15953, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":15953
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FieldValue instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method FieldValue.from_ptr(ptr, size, readonly):
 * wraps `size` nvmlFieldValue_t structs living at raw address `ptr` in a NumPy
 * recarray (via a memoryview over the foreign memory) and stores it in a new
 * FieldValue instance.  Raises ValueError for a NULL pointer.  Returns a new
 * reference, or NULL with an exception set.
 * NOTE(review): the caller must keep the memory at `ptr` alive and sized to at
 * least size * sizeof(nvmlFieldValue_t) for the lifetime of the returned
 * object -- nothing here takes ownership of it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":15962
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":15963
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15963, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 15963, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":15962
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 */
  }

  /* "cuda/bindings/_nvml.pyx":15964
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate the instance directly via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_FieldValue(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15964, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15965
 *             raise ValueError("ptr must not be null (0)")
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlFieldValue_t) * size, flag)
 */
  /* `flag` is held as a Python int (untyped cdef variable in the pyx source)
   * and converted back to C int just before use below. */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15965, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15965, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15967
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlFieldValue_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=field_value_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15967, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":15966
 *         cdef FieldValue obj = FieldValue.__new__(FieldValue)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlFieldValue_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=field_value_dtype)
 */
  /* Zero-copy view over the caller's memory; the memoryview does NOT own it. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlFieldValue_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15968
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlFieldValue_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=field_value_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  /* Vectorcall numpy.ndarray(size, buffer=buf, dtype=field_value_dtype). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_field_value_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15968, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 15968, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 15968, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15968, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15969
 *             <char*>ptr, sizeof(nvmlFieldValue_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=field_value_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  /* Call data.view(numpy.recarray) and store the result in obj->_data,
   * releasing whatever _data previously held. */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 15969, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15969, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15969, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":15971
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15953
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FieldValue instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release all live temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15802
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Python-level wrapper for the readonly FieldValue._data property getter:
 * casts the generic PyObject* self to the extension-type struct and
 * delegates to the implementation function. Getters take no arguments, so
 * no argument parsing is needed here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
   * __Pyx_KwValues_VARARGS is presumably a macro that ignores its arguments
   * in this configuration -- confirm against its definition earlier in the
   * generated file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the readonly `_data` attribute getter declared at
 * _nvml.pyx:15802. Returns self._data as a new reference owned by the
 * caller (the attribute itself keeps its own reference). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  /* INCREF before exposing: the caller receives ownership of this reference. */
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Python-level wrapper for FieldValue.__reduce_cython__ (pickle support).
 * Validates that no positional or keyword arguments were passed, then
 * delegates to the implementation. Supports both the fastcall (vectorcall)
 * and classic tuple calling conventions via CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_18__reduce_cython__, "FieldValue.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the classic (non-fastcall) convention, derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FieldValue.__reduce_cython__ (pickle protocol).
 * Builds state = (self._data,), appends self.__dict__ when it exists and
 * is non-empty, and returns a reconstruction tuple for the module-level
 * __pyx_unpickle_FieldValue helper. The constant 0xa75e18a is a layout
 * checksum validated on unpickle to detect incompatible class versions.
 * When use_setstate is true the 3-tuple form
 * (__pyx_unpickle_FieldValue, (type(self), 0xa75e18a, None), state) is
 * returned so state is applied via __setstate__; otherwise the 2-tuple
 * form carries state directly in the constructor arguments. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  /* Short-circuit `and`: only evaluate truthiness when _dict is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* NOTE(review): Cython pre-folded the `self._data is not None` check
     * into a cached constant tuple (__pyx_tuple[2]) whose truthiness is
     * evaluated here -- confirm against the tuple's construction in the
     * module-init code before relying on this branch's exact semantics. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, state)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FieldValue); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    /* Inner args tuple: (type(self), 0xa75e18a, None). */
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    /* Outer 3-tuple: (callable, args, state) -- state handled by __setstate__. */
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_FieldValue__set_state(self, __pyx_state)
*/
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FieldValue); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    /* Inner args tuple: (type(self), 0xa75e18a, state) -- state passed directly. */
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_FieldValue__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Python-level wrapper for FieldValue.__setstate_cython__(self, __pyx_state):
 * unpacks exactly one argument (positional or keyword `__pyx_state`) and
 * forwards to the implementation function. Supports both the fastcall and
 * classic tuple calling conventions via CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_20__setstate_cython__, "FieldValue.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_20__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the classic (non-fastcall) convention, derive nargs from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must sit inside unlikely(). With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only
     * 0 or 1, so the original `unlikely(__pyx_kwds_len) < 0` could never be
     * true and a negative (error) return from __Pyx_NumKwargs_FASTCALL was
     * silently ignored, continuing with a live exception set. This matches
     * the correct form used by the other wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      /* Any slot still empty after keyword merging means a required
       * argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of FieldValue.__setstate_cython__: validates that
 * __pyx_state is a (non-None) tuple and applies it via the module-level
 * __pyx_unpickle_FieldValue__set_state helper. Returns None on success. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10FieldValue_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_FieldValue__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
*/
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Runtime type check: the helper's parameter is declared `tuple ... not None`,
   * so reject non-tuples and then reject None explicitly. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FieldValue__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_FieldValue__set_state(self, __pyx_state)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.FieldValue.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":15974
 * 
 * 
 * cdef _get_gpu_thermal_settings_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuThermalSettings_t pod = nvmlGpuThermalSettings_t()
 *     return _numpy.dtype({
*/

/* Implementation of the module-private helper
 * _get_gpu_thermal_settings_dtype_offsets() (_nvml.pyx:15974).
 * Builds a numpy structured dtype mirroring the C layout of
 * nvmlGpuThermalSettings_t, using a stack instance solely for
 * address arithmetic: each field offset is computed as
 * (intptr_t)&pod.field - (intptr_t)&pod, and 'itemsize' is
 * sizeof(nvmlGpuThermalSettings_t), so the dtype exactly matches the
 * compiler's struct layout (including padding). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_thermal_settings_dtype_offsets(void) {
  nvmlGpuThermalSettings_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpuThermalSettings_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_thermal_settings_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":15975
 * 
 * cdef _get_gpu_thermal_settings_dtype_offsets():
 *     cdef nvmlGpuThermalSettings_t pod = nvmlGpuThermalSettings_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['count', 'sensor'],
*/
  /* BUG FIX: __pyx_t_1 was previously copied into pod without ever being
   * assigned, i.e. a read of an indeterminate value. Only pod's *address*
   * is used below, so this was benign in practice, but value-initializing
   * the temporary (as `nvmlGpuThermalSettings_t()` in the .pyx source
   * requests) removes the undefined read. */
  __pyx_t_1 = nvmlGpuThermalSettings_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":15976
 * cdef _get_gpu_thermal_settings_dtype_offsets():
 *     cdef nvmlGpuThermalSettings_t pod = nvmlGpuThermalSettings_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['count', 'sensor'],
 *         'formats': [_numpy.uint32, _py_anon_pod0_dtype],
*/
  __Pyx_XDECREF(__pyx_r);
  /* Look up numpy.dtype from the module globals. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15976, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15976, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":15977
 *     cdef nvmlGpuThermalSettings_t pod = nvmlGpuThermalSettings_t()
 *     return _numpy.dtype({
 *         'names': ['count', 'sensor'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _py_anon_pod0_dtype],
 *         'offsets': [
*/
  /* Build the dict-form dtype spec: names / formats / offsets / itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_count) != (0)) __PYX_ERR(0, 15977, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sensor);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sensor);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_sensor) != (0)) __PYX_ERR(0, 15977, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 15977, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":15978
 *     return _numpy.dtype({
 *         'names': ['count', 'sensor'],
 *         'formats': [_numpy.uint32, _py_anon_pod0_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_py_anon_pod0_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = PyList_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 15978, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15978, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_8) < (0)) __PYX_ERR(0, 15977, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;

  /* "cuda/bindings/_nvml.pyx":15980
 *         'formats': [_numpy.uint32, _py_anon_pod0_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sensor)) - (<intptr_t>&pod),
 *         ],
*/
  /* Field offsets derived from pointer arithmetic on the stack instance
   * (equivalent to offsetof on the struct's members). */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.count)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 15980, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":15981
 *         'offsets': [
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sensor)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuThermalSettings_t),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sensor)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":15979
 *         'names': ['count', 'sensor'],
 *         'formats': [_numpy.uint32, _py_anon_pod0_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sensor)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 15979, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15979, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 15977, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":15983
 *             (<intptr_t>&(pod.sensor)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuThermalSettings_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuThermalSettings_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 15983, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 15977, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec), unpacking a bound method if one was looked up. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15976, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":15974
 * 
 * 
 * cdef _get_gpu_thermal_settings_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuThermalSettings_t pod = nvmlGpuThermalSettings_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_thermal_settings_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16000
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuThermalSettings_t *>calloc(1, sizeof(nvmlGpuThermalSettings_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for GpuThermalSettings.__init__ (tp_init slot,
 * classic tuple/dict convention): rejects any positional or keyword
 * arguments, then delegates to the implementation. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__init__ (_nvml.pyx:16000):
 * zero-allocates one nvmlGpuThermalSettings_t on the C heap (raising
 * MemoryError on failure) and initializes the ownership flags:
 * _owner = None (no other object keeps the memory alive), _owned = True
 * (dealloc must free it), _readonly = False. Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16001
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpuThermalSettings_t *>calloc(1, sizeof(nvmlGpuThermalSettings_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuThermalSettings")
*/
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlGpuThermalSettings_t *)calloc(1, (sizeof(nvmlGpuThermalSettings_t))));

  /* "cuda/bindings/_nvml.pyx":16002
 *     def __init__(self):
 *         self._ptr = <nvmlGpuThermalSettings_t *>calloc(1, sizeof(nvmlGpuThermalSettings_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuThermalSettings")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16003
 *         self._ptr = <nvmlGpuThermalSettings_t *>calloc(1, sizeof(nvmlGpuThermalSettings_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuThermalSettings")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating GpuThermalSettings").
     * MemoryError is looked up via the module globals (it could be
     * shadowed), then called with the cached message string. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16003, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuThermalSetti};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16003, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16003, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16002
 *     def __init__(self):
 *         self._ptr = <nvmlGpuThermalSettings_t *>calloc(1, sizeof(nvmlGpuThermalSettings_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuThermalSettings")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":16004
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuThermalSettings")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Replace the previous _owner reference with None (drop the old one). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16005
 *             raise MemoryError("Error allocating GpuThermalSettings")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16006
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16000
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuThermalSettings_t *>calloc(1, sizeof(nvmlGpuThermalSettings_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16008
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuThermalSettings_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for GpuThermalSettings.__dealloc__: downcasts the
 * generic PyObject* to the extension-type struct and delegates to the impl
 * function below. Cannot fail (returns void). */
static void __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of GpuThermalSettings.__dealloc__: frees the wrapped
 * nvmlGpuThermalSettings_t buffer, but only when this instance owns it
 * (_owned) and the pointer is non-NULL. _ptr is nulled BEFORE free() so a
 * re-entrant or repeated teardown cannot double-free. */
static void __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  nvmlGpuThermalSettings_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGpuThermalSettings_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16010
 *     def __dealloc__(self):
 *         cdef nvmlGpuThermalSettings_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* short-circuit `and`: fall through to the pointer test only when _owned */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16011
 *         cdef nvmlGpuThermalSettings_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16012
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* clear the member first so the object never exposes a dangling pointer */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16013
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16010
 *     def __dealloc__(self):
 *         cdef nvmlGpuThermalSettings_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":16008
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuThermalSettings_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16015
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuThermalSettings object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr-level wrapper for GpuThermalSettings.__repr__: downcasts self and
 * forwards to the impl; returns a new unicode reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__repr__: materializes the f-string
 * f"<{__name__}.GpuThermalSettings object at {hex(id(self))}>" by formatting
 * the module __name__ global and hex(id(self)), then joining five unicode
 * fragments with __Pyx_PyUnicode_Join. Returns a new reference, NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16016
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpuThermalSettings object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = format(__name__, "") */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))) */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* join: "<" + __name__ + ".GpuThermalSettings object at " + hex + ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpuThermalSettings_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16015
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuThermalSettings object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16018
 *         return f"<{__name__}.GpuThermalSettings object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for GpuThermalSettings.ptr: downcasts self and
 * forwards to the __get__ impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuThermalSettings.ptr property getter: boxes the
 * raw _ptr address as a Python int (via intptr_t -> PyLong). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16021
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16021, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16018
 *         return f"<{__name__}.GpuThermalSettings object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16023
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18GpuThermalSettings__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":16024
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16023
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16026
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int-level wrapper for GpuThermalSettings.__int__: downcasts self and
 * forwards to the impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__int__: int(obj) yields the raw
 * _ptr address, same value as the .ptr property getter above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16027
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16027, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16026
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16029
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuThermalSettings other_
 *         if not isinstance(other, GpuThermalSettings):
*/

/* Python wrapper */
/* Rich-comparison wrapper for GpuThermalSettings.__eq__: downcasts self,
 * passes `other` through untyped, and forwards to the impl below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__eq__: False for non-GpuThermalSettings
 * operands; otherwise byte-wise equality of the two wrapped structs via
 * memcmp over sizeof(nvmlGpuThermalSettings_t).
 * NOTE(review): both _ptr values are dereferenced by memcmp without a NULL
 * check — comparing an instance whose _ptr is NULL would be undefined; this
 * mirrors the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16031
 *     def __eq__(self, other):
 *         cdef GpuThermalSettings other_
 *         if not isinstance(other, GpuThermalSettings):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16032
 *         cdef GpuThermalSettings other_
 *         if not isinstance(other, GpuThermalSettings):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuThermalSettings_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16031
 *     def __eq__(self, other):
 *         cdef GpuThermalSettings other_
 *         if not isinstance(other, GpuThermalSettings):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":16033
 *         if not isinstance(other, GpuThermalSettings):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuThermalSettings_t)) == 0)
 * 
 */
  /* typed assignment: TypeTest allows None, so other_ may legally be None here */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings))))) __PYX_ERR(0, 16033, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16034
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuThermalSettings_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpuThermalSettings_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16034, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16029
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuThermalSettings other_
 *         if not isinstance(other, GpuThermalSettings):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16036
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuThermalSettings_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
*/

/* Python wrapper */
/* mp_ass_subscript-level wrapper for GpuThermalSettings.__setitem__:
 * downcasts self and forwards key/val to the impl; returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__setitem__:
 *   obj[0] = ndarray  -> malloc a fresh struct, memcpy the array's buffer
 *                        (via val.ctypes.data) into it, take ownership, and
 *                        mirror the array's writeable flag into _readonly.
 *   anything else     -> setattr(self, key, val), i.e. delegate to the
 *                        type's property setters.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray path overwrites self->_ptr without freeing a
 * previously owned allocation — matches the generated .pyx logic; any leak
 * fix belongs in the Cython source, not this generated file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16037
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if self._ptr == NULL:
 */
  /* short-circuit: key == 0, then isinstance(val, numpy.ndarray) looked up dynamically */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16037, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16037, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16037, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16038
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 */
    __pyx_v_self->_ptr = ((nvmlGpuThermalSettings_t *)malloc((sizeof(nvmlGpuThermalSettings_t))));

    /* "cuda/bindings/_nvml.pyx":16039
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuThermalSettings_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16040
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuThermalSettings")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuThermalSettings_t))
 *             self._owner = None
 */
      /* vectorcall MemoryError("Error allocating GpuThermalSettings") and raise it */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16040, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuThermalSetti};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16040, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16040, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16039
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuThermalSettings_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16041
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuThermalSettings_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* copy sizeof(nvmlGpuThermalSettings_t) bytes from the ndarray's data pointer */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16041, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16041, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16041, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpuThermalSettings_t))));

    /* "cuda/bindings/_nvml.pyx":16042
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuThermalSettings_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16043
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuThermalSettings_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16044
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16044, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16044, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16044, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16037
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16046
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16046, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16036
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuThermalSettings_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16048
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sensor(self):
 *         """_py_anon_pod0: """
*/

/* Python wrapper */
/* Property-getter wrapper for GpuThermalSettings.sensor: downcasts self and
 * forwards to the __get__ impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuThermalSettings.sensor property getter: wraps the
 * embedded `sensor` field as a _py_anon_pod0 view via
 * _py_anon_pod0.from_ptr(<address of self._ptr[0].sensor>, 3, self._readonly).
 * The returned view aliases this object's buffer (no copy); the literal 3 is
 * the element count baked in by the generator. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16051
 *     def sensor(self):
 *         """_py_anon_pod0: """
 *         return _py_anon_pod0.from_ptr(<intptr_t>&(self._ptr[0].sensor), 3, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @sensor.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0);
  __Pyx_INCREF(__pyx_t_2);
  /* NOTE: dereferences self->_ptr without a NULL check, as in the .pyx source */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).sensor))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16051, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16051, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_3, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16051, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16048
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sensor(self):
 *         """_py_anon_pod0: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.sensor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16053
 *         return _py_anon_pod0.from_ptr(<intptr_t>&(self._ptr[0].sensor), 3, self._readonly)
 * 
 *     @sensor.setter             # <<<<<<<<<<<<<<
 *     def sensor(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter wrapper for GpuThermalSettings.sensor: downcasts self,
 * forwards the incoming value to the __set__ impl; returns 0 or -1. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `GpuThermalSettings.sensor.__set__`
 * (generated from cuda/bindings/_nvml.pyx lines 16054-16060).
 *
 * Behavior, as shown by the code below:
 *   1. raises ValueError if self._readonly is set;
 *   2. casts `val` to the _py_anon_pod0 extension type (the type test also
 *      admits None; a None would then fail at the PyObject_Length call below
 *      rather than here -- NOTE(review): presumably callers never pass None);
 *   3. raises ValueError unless len(val) == 3;
 *   4. memcpy()s sizeof(_anon_pod0) * 3 bytes from val_'s backing pointer
 *      (obtained via the vtable call _get_ptr) into self._ptr[0].sensor.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16055
 *     @sensor.setter
 *     def sensor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         cdef _py_anon_pod0 val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16056
 *     def sensor(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuThermalSettings instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod0 val_ = val
 *         if len(val) != 3:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuThermalSettings_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16056, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16056, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16055
 *     @sensor.setter
 *     def sensor(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         cdef _py_anon_pod0 val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16057
 *         if self._readonly:
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         cdef _py_anon_pod0 val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 3:
 *             raise ValueError(f"Expected length 3 for field sensor, got {len(val)}")
 */
  /* Type-check: `val` must be a _py_anon_pod0 instance (or None, see header
   * note); raises TypeError via __Pyx_TypeTest otherwise. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0))))) __PYX_ERR(0, 16057, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16058
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         cdef _py_anon_pod0 val_ = val
 *         if len(val) != 3:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 3 for field sensor, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].sensor), <void *>(val_._get_ptr()), sizeof(_anon_pod0) * 3)
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16058, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 3);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":16059
 *         cdef _py_anon_pod0 val_ = val
 *         if len(val) != 3:
 *             raise ValueError(f"Expected length 3 for field sensor, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].sensor), <void *>(val_._get_ptr()), sizeof(_anon_pod0) * 3)
 * 
 */
    /* f-string: len(val) is re-evaluated, formatted as a decimal and
     * concatenated onto the static message prefix before raising. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16059, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16059, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_3_for_field_sens, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16059, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16059, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16059, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16058
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         cdef _py_anon_pod0 val_ = val
 *         if len(val) != 3:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 3 for field sensor, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].sensor), <void *>(val_._get_ptr()), sizeof(_anon_pod0) * 3)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16060
 *         if len(val) != 3:
 *             raise ValueError(f"Expected length 3 for field sensor, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].sensor), <void *>(val_._get_ptr()), sizeof(_anon_pod0) * 3)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Raw byte copy of the 3-element sensor array into the wrapped struct. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod0 *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16060, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).sensor)), ((void *)__pyx_t_8), ((sizeof(_anon_pod0)) * 3)));

  /* "cuda/bindings/_nvml.pyx":16053
 *         return _py_anon_pod0.from_ptr(<intptr_t>&(self._ptr[0].sensor), 3, self._readonly)
 * 
 *     @sensor.setter             # <<<<<<<<<<<<<<
 *     def sensor(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.sensor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16062
 *         memcpy(<void *>&(self._ptr[0].sensor), <void *>(val_._get_ptr()), sizeof(_anon_pod0) * 3)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `count` property getter of GpuThermalSettings.
 * Downcasts self and delegates to the implementation; returns a new reference
 * to the value, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `GpuThermalSettings.count.__get__`
 * (generated from cuda/bindings/_nvml.pyx lines 16063-16065).
 * Reads the unsigned-int `count` field of the wrapped struct and converts it
 * to a Python int. Returns a new reference, or NULL on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16065
 *     def count(self):
 *         """int: """
 *         return self._ptr[0].count             # <<<<<<<<<<<<<<
 * 
 *     @count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16062
 *         memcpy(<void *>&(self._ptr[0].sensor), <void *>(val_._get_ptr()), sizeof(_anon_pod0) * 3)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16067
 *         return self._ptr[0].count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `count` property setter of GpuThermalSettings.
 * Downcasts self and delegates to the implementation function below.
 * Returns 0 on success, -1 (with a Python exception set) on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `GpuThermalSettings.count.__set__`
 * (generated from cuda/bindings/_nvml.pyx lines 16068-16071).
 * Raises ValueError when self._readonly is set; otherwise converts `val` to
 * an unsigned int (OverflowError/TypeError propagate from the conversion)
 * and stores it into self._ptr[0].count. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16069
 *     @count.setter
 *     def count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         self._ptr[0].count = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16070
 *     def count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuThermalSettings instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].count = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuThermalSettings_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16070, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16070, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16069
 *     @count.setter
 *     def count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         self._ptr[0].count = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16071
 *         if self._readonly:
 *             raise ValueError("This GpuThermalSettings instance is read-only")
 *         self._ptr[0].count = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16071, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).count = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16067
 *         return self._ptr[0].count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16073
 *         self._ptr[0].count = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuThermalSettings instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method `GpuThermalSettings.from_data(data)`.
 * Unpacks exactly one positional-or-keyword argument (`data`) from the
 * METH_FASTCALL calling convention, then delegates to the implementation.
 * Returns a new reference, or NULL with a Python exception set.
 *
 * FIX(review): the keyword-length guard was generated as
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which is dead code: unlikely(x) expands to __builtin_expect(!!(x), 0),
 * whose value is 0 or 1 and therefore never < 0. The comparison belongs
 * inside the hint: `unlikely(__pyx_kwds_len < 0)`, so a negative count from
 * __Pyx_NumKwargs_FASTCALL is actually reported. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_12from_data, "GpuThermalSettings.from_data(data)\n\nCreate an GpuThermalSettings instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpu_thermal_settings_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16073, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16073, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16073, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16073, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16073, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16073, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method `GpuThermalSettings.from_data(data)`
 * (generated from cuda/bindings/_nvml.pyx lines 16073-16080).
 * Looks up the module-global `gpu_thermal_settings_dtype` and forwards to the
 * module helper __from_data(data, "gpu_thermal_settings_dtype", dtype, cls).
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16080
 *             data (_numpy.ndarray): a single-element array of dtype `gpu_thermal_settings_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpu_thermal_settings_dtype", gpu_thermal_settings_dtype, GpuThermalSettings)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_thermal_settings_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16080, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpu_thermal_settings_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16080, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16073
 *         self._ptr[0].count = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuThermalSettings instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16082
 *         return __from_data(data, "gpu_thermal_settings_dtype", gpu_thermal_settings_dtype, GpuThermalSettings)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuThermalSettings instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level wrapper for the static method
 * `GpuThermalSettings.from_ptr(ptr, readonly=False, owner=None)`.
 * Unpacks 1-3 positional-or-keyword arguments from the METH_FASTCALL calling
 * convention: `ptr` is converted via PyLong_AsSsize_t, `readonly` via truth
 * testing (default 0), `owner` defaults to None. Delegates to the
 * implementation; returns a new reference or NULL with an exception set.
 *
 * FIX(review): the keyword-length guard was generated as
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which is dead code: unlikely(x) expands to __builtin_expect(!!(x), 0),
 * whose value is 0 or 1 and therefore never < 0. The comparison belongs
 * inside the hint: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_14from_ptr, "GpuThermalSettings.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpuThermalSettings instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16082, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16082, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":16083
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpuThermalSettings instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16082, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16082, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16082, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16083, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16083, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16082, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":16082
 *         return __from_data(data, "gpu_thermal_settings_dtype", gpu_thermal_settings_dtype, GpuThermalSettings)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuThermalSettings instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated C body for the Python static method
 * GpuThermalSettings.from_ptr(ptr, readonly=False, owner=None)
 * (cuda/bindings/_nvml.pyx:16082).
 *
 * Wraps an existing nvmlGpuThermalSettings_t pointer in a fresh
 * GpuThermalSettings instance:
 *   - ptr == 0          -> raises ValueError("ptr must not be null (0)");
 *   - owner is None     -> malloc()s a private nvmlGpuThermalSettings_t,
 *                          memcpy()s the pointed-to struct into it, sets
 *                          _owner = None and _owned = 1 (the matching free
 *                          presumably lives in the type's dealloc, which is
 *                          outside this chunk -- confirm);
 *   - owner is not None -> stores ptr directly, records `owner` (a Python
 *                          object expected to keep the memory alive) and
 *                          sets _owned = 0.
 * In both cases _readonly is set from `readonly`.
 * Returns a new reference to the created object, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16091
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)
 */
  /* Reject NULL pointers up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16092
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)
 *         if owner is None:
 */
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16092, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16092, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16091
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16093
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 */
  /* Allocate the instance via tp_new directly, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuThermalSettings(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16093, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16094
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if obj._ptr == NULL:
 */
  /* owner is None: take an owned, heap-allocated copy of the struct. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16095
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)
 *         if owner is None:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 */
    __pyx_v_obj->_ptr = ((nvmlGpuThermalSettings_t *)malloc((sizeof(nvmlGpuThermalSettings_t))));

    /* "cuda/bindings/_nvml.pyx":16096
 *         if owner is None:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuThermalSettings_t))
 */
    /* malloc failure -> raise MemoryError (looked up as a module global,
     * then called through the vectorcall fast path). */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16097
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuThermalSettings")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuThermalSettings_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16097, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuThermalSetti};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16097, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16097, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16096
 *         if owner is None:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuThermalSettings_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16098
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuThermalSettings_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the caller's struct into the owned buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpuThermalSettings_t))));

    /* "cuda/bindings/_nvml.pyx":16099
 *                 raise MemoryError("Error allocating GpuThermalSettings")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuThermalSettings_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    /* Safe attribute replace: INCREF the new value before DECREFing the
     * old one held in obj->_owner. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16100
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuThermalSettings_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16094
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuThermalSettings obj = GpuThermalSettings.__new__(GpuThermalSettings)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuThermalSettings_t *>malloc(sizeof(nvmlGpuThermalSettings_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16102
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* owner given: alias the caller's memory; `owner` keeps it alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlGpuThermalSettings_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16103
 *         else:
 *             obj._ptr = <nvmlGpuThermalSettings_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    /* Same INCREF-before-DECREF replace pattern as above. */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16104
 *             obj._ptr = <nvmlGpuThermalSettings_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16105
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16106
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Return a new reference to obj. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16082
 *         return __from_data(data, "gpu_thermal_settings_dtype", gpu_thermal_settings_dtype, GpuThermalSettings)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuThermalSettings instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Common exit: drop the local `obj` reference (the returned reference,
   * if any, was INCREF'd separately above). */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython entry point for GpuThermalSettings.__reduce_cython__.
 * Validates that no positional or keyword arguments were passed, then
 * delegates to the generated implementation function. Signature switches
 * between METH_FASTCALL (vectorcall) and tuple-based calling conventions
 * depending on CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_16__reduce_cython__, "GpuThermalSettings.__reduce_cython__(self)");
/* Method-table entry registering the wrapper under "__reduce_cython__". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* In the non-fastcall convention the arg count must be read from the
   * args tuple; PyTuple_Size can fail on a non-tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject any
   * positionals and any keywords with a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__reduce_cython__: the object
 * wraps a raw C pointer (_ptr), so pickling is unsupported -- this
 * unconditionally raises TypeError and never returns a value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Raise TypeError with the interned message; always jumps to the
   * error path below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for GpuThermalSettings.__setstate_cython__.
 * Parses exactly one argument (__pyx_state), accepted positionally or by
 * keyword, then delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_18__setstate_cython__, "GpuThermalSettings.__setstate_cython__(self, __pyx_state)");
/* Method-table entry registering the wrapper under "__setstate_cython__". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0] holds a borrowed/owned ref to the parsed __pyx_state
   * argument (ownership depends on CYTHON_ASSUME_SAFE_MACROS; all slots
   * are XDECREF'd on every exit path below). */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    /* Keyword path: fold positionals into values[], then parse keywords
     * and verify all required arguments ended up filled. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: release any parsed values and bail out. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuThermalSettings.__setstate_cython__: unpickling
 * is unsupported for this pointer-backed type -- unconditionally raises
 * TypeError; the __pyx_state argument is ignored. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18GpuThermalSettings_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message; always jumps to the
   * error path below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuThermalSettings.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16109
 * 
 * 
 * cdef _get_clk_mon_status_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlClkMonStatus_t pod = nvmlClkMonStatus_t()
 *     return _numpy.dtype({
*/

/* Cython-generated body of the module-private helper
 * _get_clk_mon_status_dtype_offsets() (cuda/bindings/_nvml.pyx:16109).
 *
 * Builds and returns a numpy structured dtype describing the in-memory
 * layout of nvmlClkMonStatus_t, with:
 *   names:    ['b_global_status', 'clk_mon_list_size', 'clk_mon_list']
 *   formats:  [numpy.uint32, numpy.uint32, clk_mon_fault_info_dtype]
 *   offsets:  computed from field-address arithmetic on a stack instance
 *             `pod` ((intptr_t)&pod.field - (intptr_t)&pod)
 *   itemsize: sizeof(nvmlClkMonStatus_t)
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_clk_mon_status_dtype_offsets(void) {
  nvmlClkMonStatus_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlClkMonStatus_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_clk_mon_status_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":16110
 * 
 * cdef _get_clk_mon_status_dtype_offsets():
 *     cdef nvmlClkMonStatus_t pod = nvmlClkMonStatus_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['b_global_status', 'clk_mon_list_size', 'clk_mon_list'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy; within
   * this chunk it appears indeterminate (Cython's codegen for the C++
   * value-initialization of `pod`). Harmless for the logic below, which
   * only takes the addresses of pod's fields and never reads their
   * values -- but confirm against the generator output. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":16111
 * cdef _get_clk_mon_status_dtype_offsets():
 *     cdef nvmlClkMonStatus_t pod = nvmlClkMonStatus_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['b_global_status', 'clk_mon_list_size', 'clk_mon_list'],
 *         'formats': [_numpy.uint32, _numpy.uint32, clk_mon_fault_info_dtype],
 */
  /* Look up _numpy.dtype (module global -> attribute). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16111, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16111, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":16112
 *     cdef nvmlClkMonStatus_t pod = nvmlClkMonStatus_t()
 *     return _numpy.dtype({
 *         'names': ['b_global_status', 'clk_mon_list_size', 'clk_mon_list'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, clk_mon_fault_info_dtype],
 *         'offsets': [
 */
  /* Build the spec dict; first the 'names' list of field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16112, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16112, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_b_global_status);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_b_global_status);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_b_global_status) != (0)) __PYX_ERR(0, 16112, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_clk_mon_list_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_clk_mon_list_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_clk_mon_list_size) != (0)) __PYX_ERR(0, 16112, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_clk_mon_list);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_clk_mon_list);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_clk_mon_list) != (0)) __PYX_ERR(0, 16112, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 16112, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16113
 *     return _numpy.dtype({
 *         'names': ['b_global_status', 'clk_mon_list_size', 'clk_mon_list'],
 *         'formats': [_numpy.uint32, _numpy.uint32, clk_mon_fault_info_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.bGlobalStatus)) - (<intptr_t>&pod),
 */
  /* 'formats' list: numpy.uint32 twice, then the module-level
   * clk_mon_fault_info_dtype global for the nested struct field. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_clk_mon_fault_info_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = PyList_New(3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 16113, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 16113, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 16113, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_9) < (0)) __PYX_ERR(0, 16112, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":16115
 *         'formats': [_numpy.uint32, _numpy.uint32, clk_mon_fault_info_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.bGlobalStatus)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.clkMonListSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkMonList)) - (<intptr_t>&pod),
 */
  /* Field offsets via address differences against &pod (equivalent in
   * effect to offsetof for each member). */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.bGlobalStatus)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":16116
 *         'offsets': [
 *             (<intptr_t>&(pod.bGlobalStatus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkMonListSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.clkMonList)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.clkMonListSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":16117
 *             (<intptr_t>&(pod.bGlobalStatus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkMonListSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkMonList)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlClkMonStatus_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.clkMonList)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16117, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":16114
 *         'names': ['b_global_status', 'clk_mon_list_size', 'clk_mon_list'],
 *         'formats': [_numpy.uint32, _numpy.uint32, clk_mon_fault_info_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.bGlobalStatus)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.clkMonListSize)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list from the three ints above. */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16114, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 16114, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 16114, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 16114, __pyx_L1_error);
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 16112, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":16119
 *             (<intptr_t>&(pod.clkMonList)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlClkMonStatus_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' = full struct size, so trailing padding is preserved. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlClkMonStatus_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16119, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 16112, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec) via the vectorcall fast path (unpacking a
   * bound method into self + function when possible). */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16111, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16109
 * 
 * 
 * cdef _get_clk_mon_status_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlClkMonStatus_t pod = nvmlClkMonStatus_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release all temporaries, record traceback, return 0. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_clk_mon_status_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16136
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlClkMonStatus_t *>calloc(1, sizeof(nvmlClkMonStatus_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init entry point for ClkMonStatus.__init__ (tuple/dict calling
 * convention, as required by the tp_init slot). Rejects any positional
 * or keyword arguments, then delegates to the generated implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: any positionals or
   * keywords raise TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__init__ (see quoted .pyx at line 16136).
 * Allocates a zero-initialized nvmlClkMonStatus_t on the C heap, marks the
 * instance as the owner of that buffer (_owned = True, _owner = None,
 * _readonly = False), and raises MemoryError if allocation fails.
 * The matching free happens in __dealloc__ below. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16137
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlClkMonStatus_t *>calloc(1, sizeof(nvmlClkMonStatus_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ClkMonStatus")
 */
  /* calloc (not malloc) so the struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlClkMonStatus_t *)calloc(1, (sizeof(nvmlClkMonStatus_t))));

  /* "cuda/bindings/_nvml.pyx":16138
 *     def __init__(self):
 *         self._ptr = <nvmlClkMonStatus_t *>calloc(1, sizeof(nvmlClkMonStatus_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ClkMonStatus")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16139
 *         self._ptr = <nvmlClkMonStatus_t *>calloc(1, sizeof(nvmlClkMonStatus_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ClkMonStatus")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating ClkMonStatus") via the
     * generic fastcall machinery (MemoryError is looked up as a module-level
     * name, hence the method-unpacking boilerplate). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16139, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ClkMonStatus};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16139, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16139, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16138
 *     def __init__(self):
 *         self._ptr = <nvmlClkMonStatus_t *>calloc(1, sizeof(nvmlClkMonStatus_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ClkMonStatus")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":16140
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ClkMonStatus")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython refcount swap: incref the new value (None) before
   * decref'ing the old _owner slot. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16141
 *             raise MemoryError("Error allocating ClkMonStatus")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16142
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16136
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlClkMonStatus_t *>calloc(1, sizeof(nvmlClkMonStatus_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16144
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlClkMonStatus_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonStatus.__dealloc__: casts self to the
 * extension-type struct and delegates. NOTE(review): __pyx_args/__pyx_nargs
 * are not parameters here; this compiles only because
 * __Pyx_KwValues_VARARGS is a macro that discards its arguments — TODO
 * confirm against the Cython 3.2 utility-code definition. */
static void __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of ClkMonStatus.__dealloc__ (quoted .pyx line 16144).
 * Frees the wrapped nvmlClkMonStatus_t buffer only when this instance owns
 * it (_owned) and it is non-NULL. The pointer is copied to a local and the
 * member nulled *before* free(), so a re-entrant look at self never sees a
 * dangling _ptr and a double free is impossible. */
static void __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  nvmlClkMonStatus_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlClkMonStatus_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16146
 *     def __dealloc__(self):
 *         cdef nvmlClkMonStatus_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16147
 *         cdef nvmlClkMonStatus_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16148
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16149
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16146
 *     def __dealloc__(self):
 *         cdef nvmlClkMonStatus_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":16144
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlClkMonStatus_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16151
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ClkMonStatus object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonStatus.__repr__: delegates to the
 * implementation after casting self to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__repr__ (quoted .pyx line 16151):
 * builds f"<{__name__}.ClkMonStatus object at {hex(id(self))}>" by
 * formatting the module __name__ and hex(id(self)), then joining five
 * unicode fragments with a precomputed total length and max char value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16152
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ClkMonStatus object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__} -> str-formatted module name */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))} -> id(self), then hex(), then str() */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal and formatted fragments: "<", name, ".ClkMonStatus object at ", hex, ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ClkMonStatus_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16151
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ClkMonStatus object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16154
 *         return f"<{__name__}.ClkMonStatus object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the ClkMonStatus.ptr property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ClkMonStatus.ptr property (quoted .pyx line 16154):
 * returns the raw address of the wrapped nvmlClkMonStatus_t as a Python int.
 * The intptr_t value is boxed with PyLong_FromSsize_t, which is valid where
 * intptr_t and Py_ssize_t have the same width — true on the supported
 * platforms this module targets. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16157
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16154
 *         return f"<{__name__}.ClkMonStatus object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16159
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for ClkMonStatus (cuda/bindings/_nvml.pyx:16159-16160,
 * `cdef intptr_t _get_ptr(self)`): returns the address of the wrapped
 * nvmlClkMonStatus_t as an intptr_t. Same contract as the Python-level
 * `ptr` property, but with no Python object creation or error paths, so
 * the generated goto/label scaffolding is unnecessary and a direct return
 * expresses it exactly. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_12ClkMonStatus__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":16162
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonStatus.__int__. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__int__ (quoted .pyx line 16162):
 * int(obj) yields the raw pointer address, identical in value to the
 * `ptr` property getter above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16163
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16162
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16165
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ClkMonStatus other_
 *         if not isinstance(other, ClkMonStatus):
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonStatus.__eq__: forwards self and other to
 * the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__eq__ (quoted .pyx line 16165):
 * returns False for non-ClkMonStatus operands, otherwise compares the two
 * underlying nvmlClkMonStatus_t structs bytewise with memcmp.
 * NOTE(review): if either object's _ptr were NULL (e.g. via a from_ptr
 * path), memcmp on NULL would be undefined behavior — cannot tell from this
 * chunk whether that state is reachable; verify against the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16167
 *     def __eq__(self, other):
 *         cdef ClkMonStatus other_
 *         if not isinstance(other, ClkMonStatus):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16168
 *         cdef ClkMonStatus other_
 *         if not isinstance(other, ClkMonStatus):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClkMonStatus_t)) == 0)
 */
    /* Non-ClkMonStatus operand: return False (not NotImplemented). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16167
 *     def __eq__(self, other):
 *         cdef ClkMonStatus other_
 *         if not isinstance(other, ClkMonStatus):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":16169
 *         if not isinstance(other, ClkMonStatus):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClkMonStatus_t)) == 0)
 * 
 */
  /* Typed assignment: the runtime type check is redundant after the
   * isinstance guard above but is emitted unconditionally by Cython. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus))))) __PYX_ERR(0, 16169, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16170
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClkMonStatus_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlClkMonStatus_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16165
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ClkMonStatus other_
 *         if not isinstance(other, ClkMonStatus):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16172
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClkMonStatus_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
*/

/* Python wrapper */
/* Python-level wrapper for ClkMonStatus.__setitem__: forwards self, key and
 * val to the implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__setitem__ (quoted .pyx line 16172).
 * obj[0] = ndarray  -> allocate a fresh nvmlClkMonStatus_t, memcpy the
 *                      ndarray's buffer into it, take ownership.
 * anything else     -> setattr(self, key, val).
 *
 * FIX(review): the original generated code overwrote self->_ptr with a new
 * malloc() without releasing the buffer the instance already owned (the one
 * calloc()'d in __init__ with _owned = True), leaking one
 * nvmlClkMonStatus_t per assignment. We now free the old owned buffer first,
 * under the same guard __dealloc__ uses. This file is generated from
 * cuda/bindings/_nvml.pyx — the upstream fix belongs in that template. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16173
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: key == 0 first, then isinstance(val, numpy.ndarray). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16173, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16173, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16174
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ClkMonStatus")
 */
    /* FIX: release the previously owned buffer before replacing it;
     * otherwise the calloc()'d buffer from __init__ is leaked. Clearing
     * _ptr before malloc also guarantees no dangling pointer survives a
     * failed allocation (the MemoryError path below). */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlClkMonStatus_t *)malloc((sizeof(nvmlClkMonStatus_t))));

    /* "cuda/bindings/_nvml.pyx":16175
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClkMonStatus_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16176
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ClkMonStatus")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClkMonStatus_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16176, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ClkMonStatus};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16176, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16176, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16175
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClkMonStatus_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16177
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClkMonStatus_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy the ndarray's buffer (address from val.ctypes.data) into the new
     * struct. NOTE(review): assumes the ndarray buffer holds at least
     * sizeof(nvmlClkMonStatus_t) bytes — enforced by the caller/.pyx layer,
     * not here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16177, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16177, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16177, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlClkMonStatus_t))));

    /* "cuda/bindings/_nvml.pyx":16178
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClkMonStatus_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16179
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlClkMonStatus_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16180
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source ndarray's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16180, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16180, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16180, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16173
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16182
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16182, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16172
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlClkMonStatus_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16184
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_mon_list(self):
 *         """ClkMonFaultInfo: """
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.clk_mon_list` getter.
 * Builds `ClkMonFaultInfo.from_ptr(<intptr_t>&self._ptr[0].clkMonList, 32, self._readonly)`
 * via the vectorcall fast path. Returns a new reference, or NULL with an
 * exception set on failure. The returned view aliases self's memory
 * (no copy is made here). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16187
 *     def clk_mon_list(self):
 *         """ClkMonFaultInfo: """
 *         return ClkMonFaultInfo.from_ptr(<intptr_t>&(self._ptr[0].clkMonList), 32, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @clk_mon_list.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = the ClkMonFaultInfo type object (receiver of the `from_ptr` method call) */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  __Pyx_INCREF(__pyx_t_2);
  /* t_3 = Python int holding the address of the embedded clkMonList array */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).clkMonList))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16187, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16187, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    /* vectorcall: (type, ptr, 32, readonly) -> ClkMonFaultInfo.from_ptr(ptr, 32, readonly) */
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_32, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16187, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16184
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_mon_list(self):
 *         """ClkMonFaultInfo: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.clk_mon_list.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16189
 *         return ClkMonFaultInfo.from_ptr(<intptr_t>&(self._ptr[0].clkMonList), 32, self._readonly)
 * 
 *     @clk_mon_list.setter             # <<<<<<<<<<<<<<
 *     def clk_mon_list(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `ClkMonStatus.clk_mon_list` property setter.
 * Casts self to the extension-type struct and delegates to the
 * implementation; returns 0 on success, -1 on error (standard __set__
 * slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.clk_mon_list` setter.
 * Steps (mirrors the .pyx source shown in the echo comments):
 *   1. Raise ValueError if this instance is read-only.
 *   2. Type-check val as ClkMonFaultInfo (None also passes the TypeTest;
 *      a None val would then fail at the len() call below).
 *   3. Require len(val) == 32, else raise ValueError with the actual length.
 *   4. memcpy 32 nvmlClkMonFaultInfo_t elements from val's underlying
 *      buffer into self._ptr[0].clkMonList.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16191
 *     @clk_mon_list.setter
 *     def clk_mon_list(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         cdef ClkMonFaultInfo val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16192
 *     def clk_mon_list(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClkMonStatus instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef ClkMonFaultInfo val_ = val
 *         if len(val) != 32:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClkMonStatus_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16192, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16192, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16191
 *     @clk_mon_list.setter
 *     def clk_mon_list(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         cdef ClkMonFaultInfo val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16193
 *         if self._readonly:
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         cdef ClkMonFaultInfo val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 32:
 *             raise ValueError(f"Expected length 32 for field clk_mon_list, got {len(val)}")
 */
  /* cdef typed assignment: TypeError unless val is ClkMonFaultInfo or None */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo))))) __PYX_ERR(0, 16193, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16194
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         cdef ClkMonFaultInfo val_ = val
 *         if len(val) != 32:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 32 for field clk_mon_list, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].clkMonList), <void *>(val_._get_ptr()), sizeof(nvmlClkMonFaultInfo_t) * 32)
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16194, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 32);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":16195
 *         cdef ClkMonFaultInfo val_ = val
 *         if len(val) != 32:
 *             raise ValueError(f"Expected length 32 for field clk_mon_list, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].clkMonList), <void *>(val_._get_ptr()), sizeof(nvmlClkMonFaultInfo_t) * 32)
 * 
 */
    /* Build the f-string: static prefix + formatted len(val); len() is
     * re-evaluated here, matching the .pyx source. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16195, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16195, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_32_for_field_clk, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16195, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16195, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16195, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16194
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         cdef ClkMonFaultInfo val_ = val
 *         if len(val) != 32:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 32 for field clk_mon_list, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].clkMonList), <void *>(val_._get_ptr()), sizeof(nvmlClkMonFaultInfo_t) * 32)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16196
 *         if len(val) != 32:
 *             raise ValueError(f"Expected length 32 for field clk_mon_list, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].clkMonList), <void *>(val_._get_ptr()), sizeof(nvmlClkMonFaultInfo_t) * 32)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* vtable call to val_._get_ptr(), then a raw copy of all 32 elements */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16196, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).clkMonList)), ((void *)__pyx_t_8), ((sizeof(nvmlClkMonFaultInfo_t)) * 32)));

  /* "cuda/bindings/_nvml.pyx":16189
 *         return ClkMonFaultInfo.from_ptr(<intptr_t>&(self._ptr[0].clkMonList), 32, self._readonly)
 * 
 *     @clk_mon_list.setter             # <<<<<<<<<<<<<<
 *     def clk_mon_list(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.clk_mon_list.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16198
 *         memcpy(<void *>&(self._ptr[0].clkMonList), <void *>(val_._get_ptr()), sizeof(nvmlClkMonFaultInfo_t) * 32)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_global_status(self):
 *         """int: """
*/

/* Python wrapper for the `ClkMonStatus.b_global_status` property getter:
 * casts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.b_global_status` getter: returns
 * self._ptr[0].bGlobalStatus converted to a Python int (the field is
 * converted via the unsigned-int helper). Returns a new reference, or
 * NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16201
 *     def b_global_status(self):
 *         """int: """
 *         return self._ptr[0].bGlobalStatus             # <<<<<<<<<<<<<<
 * 
 *     @b_global_status.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).bGlobalStatus); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16198
 *         memcpy(<void *>&(self._ptr[0].clkMonList), <void *>(val_._get_ptr()), sizeof(nvmlClkMonFaultInfo_t) * 32)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def b_global_status(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.b_global_status.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16203
 *         return self._ptr[0].bGlobalStatus
 * 
 *     @b_global_status.setter             # <<<<<<<<<<<<<<
 *     def b_global_status(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `ClkMonStatus.b_global_status` property setter:
 * casts self, delegates, and returns 0 on success / -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.b_global_status` setter:
 * raises ValueError if the instance is read-only, otherwise converts
 * val to unsigned int (OverflowError/TypeError propagate from the
 * conversion helper) and stores it into self._ptr[0].bGlobalStatus.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16205
 *     @b_global_status.setter
 *     def b_global_status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         self._ptr[0].bGlobalStatus = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16206
 *     def b_global_status(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClkMonStatus instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].bGlobalStatus = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClkMonStatus_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16206, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16206, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16205
 *     @b_global_status.setter
 *     def b_global_status(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         self._ptr[0].bGlobalStatus = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16207
 *         if self._readonly:
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         self._ptr[0].bGlobalStatus = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16207, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).bGlobalStatus = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16203
 *         return self._ptr[0].bGlobalStatus
 * 
 *     @b_global_status.setter             # <<<<<<<<<<<<<<
 *     def b_global_status(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.b_global_status.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16209
 *         self._ptr[0].bGlobalStatus = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_mon_list_size(self):
 *         """int: """
*/

/* Python wrapper for the `ClkMonStatus.clk_mon_list_size` property getter:
 * casts self and delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.clk_mon_list_size` getter: returns
 * self._ptr[0].clkMonListSize as a Python int. New reference, or NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16212
 *     def clk_mon_list_size(self):
 *         """int: """
 *         return self._ptr[0].clkMonListSize             # <<<<<<<<<<<<<<
 * 
 *     @clk_mon_list_size.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).clkMonListSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16209
 *         self._ptr[0].bGlobalStatus = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def clk_mon_list_size(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.clk_mon_list_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16214
 *         return self._ptr[0].clkMonListSize
 * 
 *     @clk_mon_list_size.setter             # <<<<<<<<<<<<<<
 *     def clk_mon_list_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `ClkMonStatus.clk_mon_list_size` property setter:
 * casts self, delegates, and returns 0 on success / -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.clk_mon_list_size` setter:
 * raises ValueError if the instance is read-only, otherwise converts
 * val to unsigned int and stores it into self._ptr[0].clkMonListSize.
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE(review): no range check ties this value to the fixed 32-slot
 * clkMonList array — presumably the NVML API constrains it; not
 * verifiable from this file. */
static int __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16216
 *     @clk_mon_list_size.setter
 *     def clk_mon_list_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         self._ptr[0].clkMonListSize = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16217
 *     def clk_mon_list_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This ClkMonStatus instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].clkMonListSize = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ClkMonStatus_instance_is_re};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16217, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16217, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16216
 *     @clk_mon_list_size.setter
 *     def clk_mon_list_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         self._ptr[0].clkMonListSize = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16218
 *         if self._readonly:
 *             raise ValueError("This ClkMonStatus instance is read-only")
 *         self._ptr[0].clkMonListSize = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16218, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).clkMonListSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16214
 *         return self._ptr[0].clkMonListSize
 * 
 *     @clk_mon_list_size.setter             # <<<<<<<<<<<<<<
 *     def clk_mon_list_size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.clk_mon_list_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16220
 *         self._ptr[0].clkMonListSize = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClkMonStatus instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method `ClkMonStatus.from_data(data)`.
 * Unpacks exactly one argument (`data`, positional or keyword) from the
 * METH_FASTCALL calling convention and delegates to the implementation.
 * Returns a new reference, or NULL with an exception set.
 *
 * Fix: the keyword-count error check previously read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * With `unlikely(x)` defined as `__builtin_expect(!!(x), 0)`, `!!(x)` is
 * always 0 or 1, so the `< 0` comparison could never be true and an error
 * (-1 with an exception set) from __Pyx_NumKwargs_FASTCALL was silently
 * ignored. The parenthesis is moved so the whole comparison is inside
 * unlikely(): `if (unlikely(__pyx_kwds_len < 0))`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_12from_data, "ClkMonStatus.from_data(data)\n\nCreate an ClkMonStatus instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `clk_mon_status_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Bug fix: comparison moved inside unlikely() so a -1 error return is
     * actually detected (previously `unlikely(len) < 0` was always false). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16220, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args, then merge keywords and
       * verify the single required argument `data` was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16220, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16220, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16220, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16220, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16220, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `ClkMonStatus.from_data(data)`: looks up the module
 * global `clk_mon_status_dtype` and forwards to the shared helper
 * __from_data(data, "clk_mon_status_dtype", clk_mon_status_dtype,
 * ClkMonStatus). Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16227
 *             data (_numpy.ndarray): a single-element array of dtype `clk_mon_status_dtype` holding the data.
 *         """
 *         return __from_data(data, "clk_mon_status_dtype", clk_mon_status_dtype, ClkMonStatus)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_clk_mon_status_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_clk_mon_status_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16220
 *         self._ptr[0].clkMonListSize = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClkMonStatus instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16229
 *         return __from_data(data, "clk_mon_status_dtype", clk_mon_status_dtype, ClkMonStatus)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ClkMonStatus instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_14from_ptr, "ClkMonStatus.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ClkMonStatus instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_14from_ptr};
/* Python-callable wrapper for ClkMonStatus.from_ptr(ptr, readonly=False,
 * owner=None).  Unpacks positional/keyword arguments into C values
 * (intptr_t, int, PyObject*) and dispatches to the implementation
 * function.  Generated by Cython; hand-edited only to fix the misplaced
 * parenthesis in the keyword-count error check (see FIX below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1, so the old
     * comparison was always false and a negative (error) keyword count
     * from __Pyx_NumKwargs_FASTCALL was silently ignored.  The closing
     * parenthesis belongs around the whole comparison. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16229, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect the positional values first,
       * then let __Pyx_ParseKeywords fill in the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16229, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16229, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16229, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16229, __pyx_L3_error)
      /* Default for `owner` is None; `readonly`'s default (False) is
       * applied during conversion below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* `ptr` (values[0]) is the single required argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16229, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16229, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16229, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16229, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): PyLong_AsSsize_t is used to convert to intptr_t; this
     * assumes Py_ssize_t and intptr_t share a representation, which holds
     * on all platforms CPython supports. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16230, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16230, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16229, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references collected so
   * far before reporting the traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.from_ptr.  Rejects a null pointer; when
 * `owner` is None it malloc's a fresh nvmlClkMonStatus_t, memcpy's the
 * pointed-to struct into it, and marks the object as owning (freed by the
 * type's dealloc — not visible in this chunk); otherwise it borrows `ptr`
 * directly and keeps a reference to `owner` to hold the memory alive.
 * Generated by Cython — refcount and cleanup ordering is load-bearing;
 * do not hand-edit logic without regenerating. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16238
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)
 */
  /* Guard: a null address can never be wrapped or copied from. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16239
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16239, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16239, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16238
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16240
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 */
  /* Allocate the wrapper instance via tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonStatus(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16240, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16241
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16242
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)
 *         if owner is None:
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ClkMonStatus")
 */
    /* No owner: copy the data so the object has its own allocation. */
    __pyx_v_obj->_ptr = ((nvmlClkMonStatus_t *)malloc((sizeof(nvmlClkMonStatus_t))));

    /* "cuda/bindings/_nvml.pyx":16243
 *         if owner is None:
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClkMonStatus_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16244
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ClkMonStatus")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClkMonStatus_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * hence the GetModuleGlobalName + possible bound-method unpacking
       * below rather than a direct PyExc_MemoryError call. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16244, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ClkMonStatus};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16244, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16244, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16243
 *         if owner is None:
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClkMonStatus_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16245
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClkMonStatus_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* NOTE(review): assumes the caller's `ptr` really addresses a valid
     * nvmlClkMonStatus_t of full size — an inherently unsafe contract of
     * this API, guarded only by the null check above. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlClkMonStatus_t))));

    /* "cuda/bindings/_nvml.pyx":16246
 *                 raise MemoryError("Error allocating ClkMonStatus")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClkMonStatus_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16247
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlClkMonStatus_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlClkMonStatus_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16241
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ClkMonStatus obj = ClkMonStatus.__new__(ClkMonStatus)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlClkMonStatus_t *>malloc(sizeof(nvmlClkMonStatus_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16249
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlClkMonStatus_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner provided: borrow the pointer; the reference to `owner` kept on
   * the object is what keeps the underlying memory alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlClkMonStatus_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16250
 *         else:
 *             obj._ptr = <nvmlClkMonStatus_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16251
 *             obj._ptr = <nvmlClkMonStatus_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16252
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16253
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16229
 *         return __from_data(data, "clk_mon_status_dtype", clk_mon_status_dtype, ClkMonStatus)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ClkMonStatus instance wrapping the given pointer.
 */

  /* function exit code */
  /* On error, dropping the reference to `obj` (below, at L0) triggers the
   * type's dealloc, which is responsible for releasing _ptr if owned. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_16__reduce_cython__, "ClkMonStatus.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_16__reduce_cython__};
/* Python-callable wrapper for ClkMonStatus.__reduce_cython__.  Accepts no
 * arguments beyond self; rejects positionals and keywords, then forwards
 * to the implementation (which always raises TypeError — pickling of this
 * pointer-wrapping type is unsupported).  Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__reduce_cython__: unconditionally raises
 * TypeError, because the wrapped raw pointer (_ptr) cannot be pickled.
 * Generated by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  /* Raise TypeError with the interned message string; control always
   * transfers to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_18__setstate_cython__, "ClkMonStatus.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_18__setstate_cython__};
/* Python-callable wrapper for ClkMonStatus.__setstate_cython__(state).
 * Unpacks the single required argument and forwards to the implementation
 * (which always raises TypeError).  Generated by Cython; hand-edited only
 * to fix the misplaced parenthesis in the keyword-count error check (see
 * FIX below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose value is 0 or 1, so the old
     * comparison was always false and a negative (error) keyword count
     * was silently ignored.  The parenthesis belongs around the whole
     * comparison. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* `__pyx_state` (values[0]) is the single required argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ClkMonStatus.__setstate_cython__: unconditionally
 * raises TypeError — the wrapped raw pointer (_ptr) cannot be restored
 * from pickled state.  The `__pyx_state` argument is accepted but unused.
 * Generated by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12ClkMonStatus_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message string; control always
   * transfers to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ClkMonStatus.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16256
 * 
 * 
 * cdef _get_processes_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessesUtilizationInfo_v1_t pod = nvmlProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns the structured numpy dtype describing
 * nvmlProcessesUtilizationInfo_v1_t: field names, formats, byte offsets
 * (computed from a stack instance `pod`) and the total itemsize.
 * Returns a new reference to the dtype object, or NULL on error.
 *
 * FIX: the generated code declared the temporary `__pyx_t_1` without an
 * initializer and then copied it into `pod`; copying an object with
 * indeterminate value is undefined behavior in C++ even though only the
 * *addresses* of pod's fields are used below.  Value-initialize the
 * temporary instead, matching the pyx source
 * `pod = nvmlProcessesUtilizationInfo_v1_t()`. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_processes_utilization_info_v1_dtype_offsets(void) {
  nvmlProcessesUtilizationInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlProcessesUtilizationInfo_v1_t __pyx_t_1 = nvmlProcessesUtilizationInfo_v1_t(); /* value-initialized (was indeterminate) */
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_processes_utilization_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":16257
 * 
 * cdef _get_processes_utilization_info_v1_dtype_offsets():
 *     cdef nvmlProcessesUtilizationInfo_v1_t pod = nvmlProcessesUtilizationInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'process_samples_count', 'last_seen_time_stamp', 'proc_util_array'],
 */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":16258
 * cdef _get_processes_utilization_info_v1_dtype_offsets():
 *     cdef nvmlProcessesUtilizationInfo_v1_t pod = nvmlProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'process_samples_count', 'last_seen_time_stamp', 'proc_util_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype (t5 holds the bound callable for the final call). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":16259
 *     cdef nvmlProcessesUtilizationInfo_v1_t pod = nvmlProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'process_samples_count', 'last_seen_time_stamp', 'proc_util_array'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [
 */
  /* t4 = the dtype-spec dict; t6 = the 'names' list of four field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 16259, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_process_samples_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_process_samples_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_process_samples_count) != (0)) __PYX_ERR(0, 16259, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_last_seen_time_stamp) != (0)) __PYX_ERR(0, 16259, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_proc_util_array);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_proc_util_array);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_proc_util_array) != (0)) __PYX_ERR(0, 16259, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 16259, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16260
 *     return _numpy.dtype({
 *         'names': ['version', 'process_samples_count', 'last_seen_time_stamp', 'proc_util_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': [uint32, uint32, uint64, intp] looked up from numpy each time. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 16260, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 16260, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 16260, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 16260, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 16259, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16262
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.processSamplesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 */
  /* 'offsets': byte distance of each field from the start of pod. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16262, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":16263
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.processSamplesCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.procUtilArray)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.processSamplesCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16263, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":16264
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.processSamplesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.procUtilArray)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.lastSeenTimeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":16265
 *             (<intptr_t>&(pod.processSamplesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.procUtilArray)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlProcessesUtilizationInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.procUtilArray)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16265, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":16261
 *         'names': ['version', 'process_samples_count', 'last_seen_time_stamp', 'proc_util_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.processSamplesCount)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 16261, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 16261, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 16261, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 16261, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 16259, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":16267
 *             (<intptr_t>&(pod.procUtilArray)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlProcessesUtilizationInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlProcessesUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 16259, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall numpy.dtype(spec); if it is a bound method, unpack self into
   * slot 0 of the call args (t_11 becomes 0) per the vectorcall protocol. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16258, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16256
 * 
 * 
 * cdef _get_processes_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessesUtilizationInfo_v1_t pod = nvmlProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_processes_utilization_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16285
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ProcessesUtilizationInfo_v1.__init__: verifies the call
 * received no positional and no keyword arguments, then delegates to the
 * implementation.  Returns 0 on success, -1 (with a Python exception set)
 * on bad arguments or implementation failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-fast path must check for a negative
   * (error) result from PyTuple_Size. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments: reject any positionals ... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* ... and any keywords. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessesUtilizationInfo_v1.__init__:
 *   - calloc-allocates one zeroed nvmlProcessesUtilizationInfo_v1_t and
 *     stores it in self._ptr (raising MemoryError on allocation failure);
 *   - initializes bookkeeping: _owner = None, _owned = True (dealloc will
 *     free the buffer), _readonly = False, _refs = {} (an empty dict).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16286
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 */
  /* calloc zero-fills the struct, so all fields start at 0/NULL. */
  __pyx_v_self->_ptr = ((nvmlProcessesUtilizationInfo_v1_t *)calloc(1, (sizeof(nvmlProcessesUtilizationInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":16287
 *     def __init__(self):
 *         self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16288
 *         self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
     * via the vectorcall helper (t_5 selects bound-method unpacking). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16288, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ProcessesUtiliz};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16288, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16288, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16287
 *     def __init__(self):
 *         self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":16289
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref new value, decref old, assign. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16290
 *             raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16291
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16292
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16285
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16294
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-style wrapper for ProcessesUtilizationInfo_v1.__dealloc__:
 * casts self to the extension-type struct and delegates to the
 * implementation.  Cannot fail and returns nothing.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this
 * function; presumably __Pyx_KwValues_VARARGS discards its arguments at
 * preprocessing time so they never reach the compiler — confirm against
 * the macro definition if this is ever edited. */
static void __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  nvmlProcessesUtilizationInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlProcessesUtilizationInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16296
 *     def __dealloc__(self):
 *         cdef nvmlProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16297
 *         cdef nvmlProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16298
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16299
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16296
 *     def __dealloc__(self):
 *         cdef nvmlProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":16294
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16301
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for ProcessesUtilizationInfo_v1.__repr__: casts self and
 * delegates to the implementation.  Returns a new unicode reference, or
 * NULL on error.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are undeclared here; presumably
 * __Pyx_KwValues_VARARGS discards its arguments textually — confirm against
 * the macro definition before restructuring. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessesUtilizationInfo_v1.__repr__: builds the
 * f-string f"<{__name__}.ProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * by formatting the module __name__ and hex(id(self)) and joining the five
 * segments with __Pyx_PyUnicode_Join.  Returns a new unicode reference or
 * NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16302
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ProcessesUtilizationInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) formatted with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<", name, ".ProcessesUtilizationInfo_v1 object at ", hex, ">"
   * with a precomputed length hint and max-char-value union. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ProcessesUtilizationInfo_v1_obj;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 39 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16301
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16304
 *         return f"<{__name__}.ProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter wrapper for ProcessesUtilizationInfo_v1.ptr: casts self
 * and delegates to the implementation.  Returns a new int reference or
 * NULL on error.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are undeclared here; presumably
 * __Pyx_KwValues_VARARGS discards its arguments textually — confirm against
 * the macro definition before restructuring. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns self._ptr as a
 * Python int (the raw pointer address).  New reference, or NULL if the
 * int construction fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16307
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer value to a Python int via its intptr_t representation. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16304
 *         return f"<{__name__}.ProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16309
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for ProcessesUtilizationInfo_v1._get_ptr (from
 * cuda/bindings/_nvml.pyx:16309-16310): returns the raw struct pointer
 * reinterpreted as an integer address.  Pure read, cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  /* Single-expression body; the generated goto/label scaffolding was
   * redundant for a function with one return path. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":16312
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for ProcessesUtilizationInfo_v1.__int__: casts self and
 * delegates to the implementation.  Returns a new int reference or NULL
 * on error.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are undeclared here; presumably
 * __Pyx_KwValues_VARARGS discards its arguments textually — confirm against
 * the macro definition before restructuring. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessesUtilizationInfo_v1.__int__: returns self._ptr
 * as a Python int — identical behavior to the `ptr` property getter.
 * New reference, or NULL if the int construction fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16313
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer value to a Python int via its intptr_t representation. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16312
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16315
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, ProcessesUtilizationInfo_v1):
*/

/* Python wrapper */
/* Slot-level entry point for ProcessesUtilizationInfo_v1.__eq__ (tp_richcompare
 * path): casts self to the extension-type struct and delegates to the
 * __pyx_pf_..._8__eq__ implementation below.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
 * __Pyx_KwValues_VARARGS is presumably a macro whose expansion does not
 * evaluate its arguments in this configuration — confirm in Cython's
 * module setup code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of ProcessesUtilizationInfo_v1.__eq__.
 * Returns Py_False when `other` is not a ProcessesUtilizationInfo_v1 (or
 * subclass); otherwise compares the two wrapped structs byte-for-byte with
 * memcmp over sizeof(nvmlProcessesUtilizationInfo_v1_t).
 * NOTE(review): a raw memcmp also compares any struct padding bytes, so two
 * field-wise-equal structs could in principle compare unequal — this mirrors
 * the .pyx source at line 16320 and is presumably acceptable for this
 * binding; confirm against the generator template if it matters. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16317
 *     def __eq__(self, other):
 *         cdef ProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, ProcessesUtilizationInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  /* isinstance check: __Pyx_TypeCheck accepts subclasses as well. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16318
 *         cdef ProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, ProcessesUtilizationInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessesUtilizationInfo_v1_t)) == 0)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16317
 *     def __eq__(self, other):
 *         cdef ProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, ProcessesUtilizationInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
*/
  }

  /* "cuda/bindings/_nvml.pyx":16319
 *         if not isinstance(other, ProcessesUtilizationInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessesUtilizationInfo_v1_t)) == 0)
 * 
*/
  /* Typed assignment other_ = other; the redundant-looking TypeTest is the
   * generated cast check for the cdef-typed local (None is also accepted). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1))))) __PYX_ERR(0, 16319, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16320
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessesUtilizationInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlProcessesUtilizationInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16315
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, ProcessesUtilizationInfo_v1):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16322
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessesUtilizationInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/

/* Python wrapper */
/* Slot-level entry point for ProcessesUtilizationInfo_v1.__setitem__
 * (mp_ass_subscript path): casts self and forwards key/val to the
 * __pyx_pf_..._10__setitem__ implementation; returns 0 on success, -1 on
 * error, per the CPython slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated implementation of ProcessesUtilizationInfo_v1.__setitem__.
 * Two paths (mirrors .pyx lines 16322-16332):
 *   obj[0] = ndarray : malloc a fresh nvmlProcessesUtilizationInfo_v1_t,
 *     memcpy the ndarray's buffer (val.ctypes.data) into it, mark the
 *     instance as owning the allocation (_owned = True, _owner = None) and
 *     set _readonly from the array's writeable flag.
 *   anything else    : falls through to setattr(self, key, val).
 * NOTE(review): the ndarray path overwrites self->_ptr without freeing a
 * previously owned allocation — presumably callers only assign once or the
 * dealloc path elsewhere handles it; confirm in the generator template. */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16323
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
*/
  /* Short-circuit `and`: only look up numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16323, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16323, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16323, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16323, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16324
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
*/
    __pyx_v_self->_ptr = ((nvmlProcessesUtilizationInfo_v1_t *)malloc((sizeof(nvmlProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16325
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16326
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             self._owner = None
*/
      /* Generated vectorcall of MemoryError(message); looks MemoryError up as
       * a module global (could be shadowed), then raises the instance. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16326, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ProcessesUtiliz};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16326, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16326, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16325
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":16327
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
*/
    /* val.ctypes.data is the ndarray buffer address as a Python int; it is
     * converted to intptr_t and used as the memcpy source.
     * NOTE(review): assumes the array buffer holds at least
     * sizeof(nvmlProcessesUtilizationInfo_v1_t) bytes — not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16327, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16327, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16327, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16328
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
*/
    /* _owner = None: the copied data no longer references the ndarray. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16329
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
*/
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16330
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
*/
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16330, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16330, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16330, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16323
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
*/
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16332
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16332, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16322
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlProcessesUtilizationInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16334
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Getter slot for the `version` property: casts self and delegates to the
 * __pyx_pf_...version___get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter: reads the `version` field
 * (unsigned int) of the wrapped struct and boxes it as a Python int.
 * NOTE(review): dereferences self->_ptr unconditionally — assumes _ptr is
 * non-NULL by the time the property is read; confirm the allocation paths. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16337
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16334
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16339
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot for the `version` property: casts self and forwards val to the
 * __pyx_pf_...version_2__set__ implementation; returns 0/-1 per the slot
 * convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter: raises ValueError when the
 * instance is flagged read-only, otherwise converts val to unsigned int
 * (overflow/type errors propagate from the conversion helper) and stores it
 * into the wrapped struct's `version` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16341
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16342
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
*/
    /* Generated vectorcall of ValueError(message) using the builtin type
     * directly (no module-global lookup), then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ProcessesUtilizationInfo_v1};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16342, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16342, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16341
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":16343
 *         if self._readonly:
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16343, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16339
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16345
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
*/

/* Python wrapper */
/* Getter slot for the `last_seen_time_stamp` property: casts self and
 * delegates to the __pyx_pf_...last_seen_time_stamp___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `last_seen_time_stamp` property getter: reads the
 * `lastSeenTimeStamp` field (unsigned long long) of the wrapped struct and
 * boxes it as a Python int. Dereferences self->_ptr unconditionally, like the
 * sibling `version` getter. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16348
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
 *         return self._ptr[0].lastSeenTimeStamp             # <<<<<<<<<<<<<<
 * 
 *     @last_seen_time_stamp.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).lastSeenTimeStamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16345
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.last_seen_time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16350
 *         return self._ptr[0].lastSeenTimeStamp
 * 
 *     @last_seen_time_stamp.setter             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter slot for the `last_seen_time_stamp` property: casts self and
 * forwards val to the __pyx_pf_...last_seen_time_stamp_2__set__
 * implementation; returns 0/-1 per the slot convention. */
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `last_seen_time_stamp` property setter: raises
 * ValueError when the instance is flagged read-only, otherwise converts val
 * to unsigned long long (overflow/type errors propagate from the conversion
 * helper) and stores it into the wrapped struct's `lastSeenTimeStamp` field.
 * Structure mirrors the `version` setter above. */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16352
 *     @last_seen_time_stamp.setter
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16353
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
*/
    /* Generated vectorcall of ValueError(message), then raise. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ProcessesUtilizationInfo_v1};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16353, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16353, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16352
 *     @last_seen_time_stamp.setter
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":16354
 *         if self._readonly:
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16354, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).lastSeenTimeStamp = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16350
 *         return self._ptr[0].lastSeenTimeStamp
 * 
 *     @last_seen_time_stamp.setter             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.last_seen_time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16356
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def proc_util_array(self):
 *         """int: The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA."""
*/

/* Python wrapper */
/* Getter slot for the `proc_util_array` property: casts self and delegates to
 * the __pyx_pf_...proc_util_array___get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the proc_util_array property getter.
 * Returns an empty Python list when the wrapped struct's procUtilArray pointer
 * is NULL or processSamplesCount is 0; otherwise calls
 * ProcessUtilizationInfo_v1.from_ptr(<intptr_t>procUtilArray, processSamplesCount)
 * to build a Python view over the C array. Returns NULL on error (traceback set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16359
 *     def proc_util_array(self):
 *         """int: The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA."""
 *         if self._ptr[0].procUtilArray == NULL or self._ptr[0].processSamplesCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return ProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].procUtilArray), self._ptr[0].processSamplesCount)
*/
  /* Short-circuit "or": check the pointer first, then the count. */
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).procUtilArray == NULL);
  if (!__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).processSamplesCount == 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16360
 *         """int: The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA."""
 *         if self._ptr[0].procUtilArray == NULL or self._ptr[0].processSamplesCount == 0:
 *             return []             # <<<<<<<<<<<<<<
 *         return ProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].procUtilArray), self._ptr[0].processSamplesCount)
 * 
*/
    /* Empty/unset array: return a fresh empty list. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16360, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16359
 *     def proc_util_array(self):
 *         """int: The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA."""
 *         if self._ptr[0].procUtilArray == NULL or self._ptr[0].processSamplesCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return ProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].procUtilArray), self._ptr[0].processSamplesCount)
*/
  }

  /* "cuda/bindings/_nvml.pyx":16361
 *         if self._ptr[0].procUtilArray == NULL or self._ptr[0].processSamplesCount == 0:
 *             return []
 *         return ProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].procUtilArray), self._ptr[0].processSamplesCount)             # <<<<<<<<<<<<<<
 * 
 *     @proc_util_array.setter
*/
  /* Build the two call arguments (pointer as Python int, count as Python int)
   * and vectorcall the "from_ptr" method on the ProcessUtilizationInfo_v1 type. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  __Pyx_INCREF(__pyx_t_4);
  __pyx_t_5 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).procUtilArray)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16361, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).processSamplesCount); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16361, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 0;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_5, __pyx_t_6};
    __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16361, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
  }
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16356
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def proc_util_array(self):
 *         """int: The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.proc_util_array.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16363
 *         return ProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].procUtilArray), self._ptr[0].processSamplesCount)
 * 
 *     @proc_util_array.setter             # <<<<<<<<<<<<<<
 *     def proc_util_array(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level setter wrapper for ProcessesUtilizationInfo_v1.proc_util_array.
 * Casts the receiver to the concrete extension-type struct and forwards both
 * receiver and value to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared in this function;
   * presumably __Pyx_KwValues_VARARGS is a macro that discards its arguments in
   * this build configuration — confirm against the Cython utility headers. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the proc_util_array property setter.
 * Raises ValueError if the instance is read-only; otherwise type-checks `val`
 * as a ProcessUtilizationInfo_v1 (or None), stores its underlying pointer and
 * len() into the wrapped C struct's procUtilArray / processSamplesCount fields,
 * and keeps a reference to `val` in self._refs["proc_util_array"] so the Python
 * object outlives the raw pointer. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16365
 *     @proc_util_array.setter
 *     def proc_util_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef ProcessUtilizationInfo_v1 arr = val
*/
  /* Guard: mutation is forbidden on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16366
 *     def proc_util_array(self, val):
 *         if self._readonly:
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef ProcessUtilizationInfo_v1 arr = val
 *         self._ptr[0].procUtilArray = <nvmlProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
*/
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ProcessesUtilizationInfo_v1};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16366, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16366, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16365
 *     @proc_util_array.setter
 *     def proc_util_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef ProcessUtilizationInfo_v1 arr = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":16367
 *         if self._readonly:
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef ProcessUtilizationInfo_v1 arr = val             # <<<<<<<<<<<<<<
 *         self._ptr[0].procUtilArray = <nvmlProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].processSamplesCount = len(arr)
*/
  /* TypeTest accepts None as well as ProcessUtilizationInfo_v1 (Cython's
   * default "or None" typed-assignment semantics). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1))))) __PYX_ERR(0, 16367, __pyx_L1_error)
  __pyx_v_arr = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16368
 *             raise ValueError("This ProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef ProcessUtilizationInfo_v1 arr = val
 *         self._ptr[0].procUtilArray = <nvmlProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())             # <<<<<<<<<<<<<<
 *         self._ptr[0].processSamplesCount = len(arr)
 *         self._refs["proc_util_array"] = arr
*/
  /* Vtable call into arr._get_ptr(); the raw address is stored into the C struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v_arr->__pyx_vtab)->_get_ptr(__pyx_v_arr); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16368, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).procUtilArray = ((nvmlProcessUtilizationInfo_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":16369
 *         cdef ProcessUtilizationInfo_v1 arr = val
 *         self._ptr[0].procUtilArray = <nvmlProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].processSamplesCount = len(arr)             # <<<<<<<<<<<<<<
 *         self._refs["proc_util_array"] = arr
 * 
*/
  __pyx_t_5 = PyObject_Length(((PyObject *)__pyx_v_arr)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16369, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).processSamplesCount = __pyx_t_5;

  /* "cuda/bindings/_nvml.pyx":16370
 *         self._ptr[0].procUtilArray = <nvmlProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].processSamplesCount = len(arr)
 *         self._refs["proc_util_array"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* Anchor arr in self._refs so its buffer stays alive while this struct
   * points into it. */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 16370, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_proc_util_array, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 16370, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":16363
 *         return ProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].procUtilArray), self._ptr[0].processSamplesCount)
 * 
 *     @proc_util_array.setter             # <<<<<<<<<<<<<<
 *     def proc_util_array(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.proc_util_array.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16372
 *         self._refs["proc_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the staticmethod ProcessesUtilizationInfo_v1.from_data(data).
 * Parses the single "data" argument (positional or keyword) and forwards it to
 * the typed implementation. Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_12from_data, "ProcessesUtilizationInfo_v1.from_data(data)\n\nCreate an ProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `processes_utilization_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison must be inside unlikely(). The generated
     * "unlikely(__pyx_kwds_len) < 0" normalizes the value to 0/1 (via
     * __builtin_expect(!!(x), 0)) before comparing against 0, so the -1 error
     * return of __Pyx_NumKwargs_FASTCALL could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16372, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16372, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16372, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16372, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16372, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16372, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessesUtilizationInfo_v1.from_data(data).
 * Looks up the module-global dtype object and forwards everything to the shared
 * C helper __from_data together with the dtype name string and the wrapping
 * extension type. Returns the new instance, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16379
 *             data (_numpy.ndarray): a single-element array of dtype `processes_utilization_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "processes_utilization_info_v1_dtype", processes_utilization_info_v1_dtype, ProcessesUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch module-level global `processes_utilization_info_v1_dtype`. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_processes_utilization_info_v1_dt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_processes_utilization_info_v1_dt, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16372
 *         self._refs["proc_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16381
 *         return __from_data(data, "processes_utilization_info_v1_dtype", processes_utilization_info_v1_dtype, ProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level wrapper for the staticmethod
 * ProcessesUtilizationInfo_v1.from_ptr(ptr, readonly=False, owner=None).
 * Parses up to three positional/keyword arguments, converts ptr to intptr_t and
 * readonly to a C truth value, defaults owner to None, then forwards to the
 * typed implementation. Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_14from_ptr, "ProcessesUtilizationInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ProcessesUtilizationInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison must be inside unlikely(). The generated
     * "unlikely(__pyx_kwds_len) < 0" normalizes the value to 0/1 (via
     * __builtin_expect(!!(x), 0)) before comparing against 0, so the -1 error
     * return of __Pyx_NumKwargs_FASTCALL could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16381, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16381, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16381, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16381, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16381, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":16382
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16381, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16381, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16381, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16381, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr (Python int) to a C intptr_t; -1 with an exception set means failure. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16382, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16382, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16381, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":16381
 *         return __from_data(data, "processes_utilization_info_v1_dtype", processes_utilization_info_v1_dtype, ProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16390
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16391
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16391, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16391, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16390
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":16392
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16392, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16393
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16394
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlProcessesUtilizationInfo_v1_t *)malloc((sizeof(nvmlProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16395
 *         if owner is None:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16396
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16396, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ProcessesUtiliz};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16396, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16396, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16395
 *         if owner is None:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessesUtilizationInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":16397
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16398
 *                 raise MemoryError("Error allocating ProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16399
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16393
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ProcessesUtilizationInfo_v1 obj = ProcessesUtilizationInfo_v1.__new__(ProcessesUtilizationInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16401
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlProcessesUtilizationInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16402
 *         else:
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16403
 *             obj._ptr = <nvmlProcessesUtilizationInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16404
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16405
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16406
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16381
 *         return __from_data(data, "processes_utilization_info_v1_dtype", processes_utilization_info_v1_dtype, ProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python-level wrapper for ProcessesUtilizationInfo_v1.__reduce_cython__().
 * The argument signature differs depending on whether the METH_FASTCALL
 * (vectorcall) protocol is compiled in; either way the method accepts no
 * arguments and simply dispatches to the implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_16__reduce_cython__, "ProcessesUtilizationInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Under the tuple-based calling convention the positional count must be
   * recovered from the args tuple itself. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments: reject any positional or keyword
   * arguments before dispatching. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessesUtilizationInfo_v1.__reduce_cython__().
 * Pickling is intentionally unsupported because the object wraps a raw C
 * pointer (self._ptr), so this always raises TypeError and never returns
 * normally. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Unconditionally raise TypeError; __PYX_ERR records position info and
   * jumps to the error label. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python-level wrapper for ProcessesUtilizationInfo_v1.__setstate_cython__().
 * Parses exactly one argument (__pyx_state), accepted positionally or by
 * keyword, then dispatches to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_18__setstate_cython__, "ProcessesUtilizationInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the tuple-based calling convention the positional count must be
   * recovered from the args tuple itself. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must be inside unlikely().  unlikely(x)
     * expands to __builtin_expect(!!(x), 0), which normalizes its operand to
     * 0/1, so `unlikely(__pyx_kwds_len) < 0` was always false and a failure
     * return (-1) from __Pyx_NumKwargs_FASTCALL was silently ignored.  The
     * corrected form matches every other kwds-length check in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect any positional args first, then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was actually filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ProcessesUtilizationInfo_v1.__setstate_cython__().
 * Unpickling is intentionally unsupported because the object wraps a raw C
 * pointer (self._ptr); the parsed __pyx_state argument is ignored and
 * TypeError is always raised. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Unconditionally raise TypeError; __PYX_ERR records position info and
   * jumps to the error label. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ProcessesUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16409
 * 
 * 
 * cdef _get_gpu_dynamic_pstates_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuDynamicPstatesInfo_t pod = nvmlGpuDynamicPstatesInfo_t()
 *     return _numpy.dtype({
*/

/* Build a numpy structured dtype describing nvmlGpuDynamicPstatesInfo_t.
 * A stack instance `pod` is used purely to compute member byte offsets via
 * pointer arithmetic; the dict passed to numpy.dtype() carries 'names',
 * 'formats', 'offsets' and 'itemsize'.  Returns a new reference to the
 * dtype object, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_dynamic_pstates_info_dtype_offsets(void) {
  nvmlGpuDynamicPstatesInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpuDynamicPstatesInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_dynamic_pstates_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":16410
 * 
 * cdef _get_gpu_dynamic_pstates_info_dtype_offsets():
 *     cdef nvmlGpuDynamicPstatesInfo_t pod = nvmlGpuDynamicPstatesInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['flags_', 'utilization'],
*/
  /* BUGFIX: value-initialize the temporary before copying it into pod.  The
   * generated `pod = nvmlGpuDynamicPstatesInfo_t()` initializer was missing,
   * so an indeterminate __pyx_t_1 was copied.  Only pod's member addresses
   * are used below, but copying an uninitialized object is undefined
   * behavior territory and trips MSan/valgrind. */
  __pyx_t_1 = nvmlGpuDynamicPstatesInfo_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":16411
 * cdef _get_gpu_dynamic_pstates_info_dtype_offsets():
 *     cdef nvmlGpuDynamicPstatesInfo_t pod = nvmlGpuDynamicPstatesInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['flags_', 'utilization'],
 *         'formats': [_numpy.uint32, _py_anon_pod1_dtype],
*/
  /* Look up _numpy.dtype once; __pyx_t_5 holds the callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16411, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16411, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":16412
 *     cdef nvmlGpuDynamicPstatesInfo_t pod = nvmlGpuDynamicPstatesInfo_t()
 *     return _numpy.dtype({
 *         'names': ['flags_', 'utilization'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _py_anon_pod1_dtype],
 *         'offsets': [
*/
  /* __pyx_t_4 = spec dict; first entry: 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_flags_2);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_flags_2);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_flags_2) != (0)) __PYX_ERR(0, 16412, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_utilization);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_utilization);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_utilization) != (0)) __PYX_ERR(0, 16412, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 16412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16413
 *     return _numpy.dtype({
 *         'names': ['flags_', 'utilization'],
 *         'formats': [_numpy.uint32, _py_anon_pod1_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.flags)) - (<intptr_t>&pod),
*/
  /* 'formats' list: numpy.uint32 for flags, and the anonymous-struct dtype
   * (module global) for the utilization member. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_py_anon_pod1_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = PyList_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 16413, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 16413, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_8) < (0)) __PYX_ERR(0, 16412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;

  /* "cuda/bindings/_nvml.pyx":16415
 *         'formats': [_numpy.uint32, _py_anon_pod1_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.flags)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.utilization)) - (<intptr_t>&pod),
 *         ],
*/
  /* Member byte offsets computed from the stack instance's addresses. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.flags)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16415, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":16416
 *         'offsets': [
 *             (<intptr_t>&(pod.flags)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.utilization)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuDynamicPstatesInfo_t),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.utilization)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":16414
 *         'names': ['flags_', 'utilization'],
 *         'formats': [_numpy.uint32, _py_anon_pod1_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.flags)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.utilization)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 16414, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 16414, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 16412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":16418
 *             (<intptr_t>&(pod.utilization)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuDynamicPstatesInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuDynamicPstatesInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 16412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec).  If the callable is an unpacked bound method,
   * pass its self as the first (offset) vectorcall argument. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16411, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16409
 * 
 * 
 * cdef _get_gpu_dynamic_pstates_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuDynamicPstatesInfo_t pod = nvmlGpuDynamicPstatesInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_dynamic_pstates_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16435
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuDynamicPstatesInfo_t *>calloc(1, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for GpuDynamicPstatesInfo.__init__().  Accepts no
 * arguments beyond self; rejects positionals and keywords, then dispatches
 * to the implementation.  Returns 0 on success, -1 on error (tp_init
 * convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: any positional or keyword argument is an
   * error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuDynamicPstatesInfo.__init__(): zero-allocates the
 * wrapped nvmlGpuDynamicPstatesInfo_t with calloc, raises MemoryError on
 * allocation failure, and marks the instance as owning (and thus later
 * freeing) the buffer.  Returns 0 on success, -1 with an exception set on
 * failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16436
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpuDynamicPstatesInfo_t *>calloc(1, sizeof(nvmlGpuDynamicPstatesInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuDynamicPstatesInfo")
*/
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlGpuDynamicPstatesInfo_t *)calloc(1, (sizeof(nvmlGpuDynamicPstatesInfo_t))));

  /* "cuda/bindings/_nvml.pyx":16437
 *     def __init__(self):
 *         self._ptr = <nvmlGpuDynamicPstatesInfo_t *>calloc(1, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16438
 *         self._ptr = <nvmlGpuDynamicPstatesInfo_t *>calloc(1, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuDynamicPstatesInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError(...) via the generic vectorcall path
     * (MemoryError is looked up as a module global, so it may have been
     * shadowed). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16438, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuDynamicPstat};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16438, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16438, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16437
 *     def __init__(self):
 *         self._ptr = <nvmlGpuDynamicPstatesInfo_t *>calloc(1, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":16439
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Replace the previous _owner reference with None. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16440
 *             raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  /* _owned=True: __dealloc__ will free the calloc'd buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16441
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16435
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuDynamicPstatesInfo_t *>calloc(1, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16443
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuDynamicPstatesInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python-level wrapper for GpuDynamicPstatesInfo.__dealloc__(); simply
 * forwards to the implementation. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __Pyx_KwValues_VARARGS is a macro that discards its
   * arguments without evaluating them, which is why referencing __pyx_args
   * / __pyx_nargs (not parameters of this function) still compiles. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of GpuDynamicPstatesInfo.__dealloc__(): frees the wrapped
 * nvmlGpuDynamicPstatesInfo_t buffer, but only when this instance owns it
 * (_owned) and the pointer is non-NULL.  _ptr is cleared before free() so a
 * re-entrant or repeated call cannot double-free. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  nvmlGpuDynamicPstatesInfo_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGpuDynamicPstatesInfo_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16445
 *     def __dealloc__(self):
 *         cdef nvmlGpuDynamicPstatesInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit evaluation of `self._owned and self._ptr != NULL`:
   * if _owned is false the result is _owned itself (false). */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16446
 *         cdef nvmlGpuDynamicPstatesInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16447
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    /* Clear the member before freeing to guard against double-free. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16448
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16445
 *     def __dealloc__(self):
 *         cdef nvmlGpuDynamicPstatesInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":16443
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuDynamicPstatesInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16450
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuDynamicPstatesInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python-level entry point for GpuDynamicPstatesInfo.__repr__.
 * Cython-generated wrapper: downcasts the generic PyObject* self to the
 * extension-type struct and forwards to the implementation function
 * (__pyx_pf_..._4__repr__).  Do not edit by hand — regenerated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuDynamicPstatesInfo.__repr__.
 * Builds the f-string
 *   f"<{__name__}.GpuDynamicPstatesInfo object at {hex(id(self))}>"
 * by formatting the module's __name__, hex(id(self)), and three constant
 * string fragments, then joining the 5 pieces with __Pyx_PyUnicode_Join.
 * Cython-generated; on error jumps to __pyx_L1_error, decrefs temporaries,
 * records a traceback, and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16451
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpuDynamicPstatesInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up the module global __name__ and format it as a str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Evaluate hex(id(self)) and coerce the result to unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".GpuDynamicPstatesInfo object at " + hex + ">".
   * The precomputed length (1*2 + 33 + dynamic parts) and max-char value
   * let the join preallocate the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpuDynamicPstatesInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16451, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16450
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuDynamicPstatesInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16453
 *         return f"<{__name__}.GpuDynamicPstatesInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level entry point for the GpuDynamicPstatesInfo.ptr property getter.
 * Cython-generated wrapper: downcasts self and forwards to the getter
 * implementation.  Do not edit by hand — regenerated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuDynamicPstatesInfo.ptr property getter.
 * Returns the raw address of the wrapped nvmlGpuDynamicPstatesInfo_t
 * struct (self._ptr) as a Python int, via an intptr_t cast.
 * Cython-generated; the PyLong_FromSsize_t error path is the only way
 * this can fail. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16456
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16453
 *         return f"<{__name__}.GpuDynamicPstatesInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16458
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) fast path for GpuDynamicPstatesInfo._get_ptr.
 * Returns self._ptr as an intptr_t with no Python-object boxing and no
 * error path — unlike the .ptr property getter, this is callable directly
 * from other Cython code.  Cython-generated. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":16459
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16458
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16461
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level entry point for GpuDynamicPstatesInfo.__int__.
 * Cython-generated wrapper: downcasts self and forwards to the
 * implementation function.  Do not edit by hand — regenerated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuDynamicPstatesInfo.__int__.
 * int(obj) yields the raw address of the wrapped struct — identical in
 * value to the .ptr property getter above.  Cython-generated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16462
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16461
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16464
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuDynamicPstatesInfo other_
 *         if not isinstance(other, GpuDynamicPstatesInfo):
*/

/* Python wrapper */
/* Python-level entry point for GpuDynamicPstatesInfo.__eq__.
 * Cython-generated wrapper: downcasts self, passes `other` through as a
 * generic PyObject*, and forwards to the implementation.  Do not edit by
 * hand — regenerated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuDynamicPstatesInfo.__eq__.
 * Returns False when `other` is not a GpuDynamicPstatesInfo; otherwise
 * compares the two underlying structs bytewise with
 * memcmp(self._ptr, other._ptr, sizeof(nvmlGpuDynamicPstatesInfo_t)).
 * NOTE(review): the generated code does not null-check either _ptr before
 * the memcmp — comparing an unpopulated instance would dereference NULL;
 * this mirrors the .pyx source, so any fix belongs in the generator input.
 * Cython-generated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16466
 *     def __eq__(self, other):
 *         cdef GpuDynamicPstatesInfo other_
 *         if not isinstance(other, GpuDynamicPstatesInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16467
 *         cdef GpuDynamicPstatesInfo other_
 *         if not isinstance(other, GpuDynamicPstatesInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuDynamicPstatesInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16466
 *     def __eq__(self, other):
 *         cdef GpuDynamicPstatesInfo other_
 *         if not isinstance(other, GpuDynamicPstatesInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":16468
 *         if not isinstance(other, GpuDynamicPstatesInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuDynamicPstatesInfo_t)) == 0)
 * 
 */
  /* Checked cast of `other` into the typed local `other_` (TypeTest raises
   * on mismatch; the isinstance guard above makes failure unreachable in
   * practice, except for None which TypeTest lets through). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo))))) __PYX_ERR(0, 16468, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16469
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuDynamicPstatesInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpuDynamicPstatesInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16469, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16464
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuDynamicPstatesInfo other_
 *         if not isinstance(other, GpuDynamicPstatesInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16471
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuDynamicPstatesInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
*/

/* Python wrapper */
/* Python-level entry point for GpuDynamicPstatesInfo.__setitem__.
 * Cython-generated wrapper: downcasts self and forwards key/val to the
 * implementation; returns 0 on success, -1 on error (CPython mp_ass_subscript
 * convention).  Do not edit by hand — regenerated by Cython. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuDynamicPstatesInfo.__setitem__.
 * Two behaviors, per the .pyx source:
 *   obj[0] = ndarray  -> malloc a fresh nvmlGpuDynamicPstatesInfo_t, memcpy
 *                        sizeof(struct) bytes from the array's data pointer
 *                        (val.ctypes.data), mark the instance as owning the
 *                        allocation (_owned = True, _owner = None) and set
 *                        _readonly from the array's writeable flag;
 *   anything else     -> setattr(self, key, val), i.e. route to a property.
 * NOTE(review): the ndarray branch overwrites self._ptr without freeing a
 * previously owned allocation — repeated obj[0] = arr assignments leak;
 * this mirrors the .pyx source, so any fix belongs in the generator input.
 * Cython-generated; errors jump to __pyx_L1_error and return -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16472
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: evaluate key == 0 first; only if true, look up
   * _numpy.ndarray in the module globals and run the isinstance check. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16472, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16472, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16473
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 */
    __pyx_v_self->_ptr = ((nvmlGpuDynamicPstatesInfo_t *)malloc((sizeof(nvmlGpuDynamicPstatesInfo_t))));

    /* "cuda/bindings/_nvml.pyx":16474
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuDynamicPstatesInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16475
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             self._owner = None
 */
      /* raise MemoryError(...): MemoryError is looked up by name (it could
       * be shadowed), called via the vectorcall fast path, then raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16475, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuDynamicPstat};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16475, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16475, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16474
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuDynamicPstatesInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16476
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuDynamicPstatesInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (the array's base address as a Python int),
     * convert it to intptr_t, and copy sizeof(struct) bytes into _ptr. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16476, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16476, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16476, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpuDynamicPstatesInfo_t))));

    /* "cuda/bindings/_nvml.pyx":16477
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16478
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16479
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability: a read-only ndarray yields a
     * read-only GpuDynamicPstatesInfo (_readonly = not writeable). */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16479, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16479, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16479, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16472
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16481
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16481, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16471
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuDynamicPstatesInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16483
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def utilization(self):
 *         """_py_anon_pod1: """
*/

/* Python wrapper */
/* Python-level entry point for the GpuDynamicPstatesInfo.utilization
 * property getter.  Cython-generated wrapper: downcasts self and forwards
 * to the getter implementation.  Do not edit by hand — regenerated by
 * Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GpuDynamicPstatesInfo.utilization property getter.
 * Returns a _py_anon_pod1 view over the embedded `utilization` field of the
 * wrapped struct, built via the classmethod call
 *   _py_anon_pod1.from_ptr(<intptr_t>&self._ptr[0].utilization, 8,
 *                          self._readonly)
 * i.e. the view shares memory with self (no copy) and inherits this
 * instance's read-only flag.  The literal 8 is the element count passed to
 * from_ptr per the .pyx source.  Cython-generated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16486
 *     def utilization(self):
 *         """_py_anon_pod1: """
 *         return _py_anon_pod1.from_ptr(<intptr_t>&(self._ptr[0].utilization), 8, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @utilization.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Assemble the argument vector {type, addr, 8, readonly} and invoke the
   * bound method `from_ptr` on the _py_anon_pod1 type via vectorcall. */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).utilization))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16486, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16486, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_8, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16486, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16483
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def utilization(self):
 *         """_py_anon_pod1: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.utilization.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16488
 *         return _py_anon_pod1.from_ptr(<intptr_t>&(self._ptr[0].utilization), 8, self._readonly)
 * 
 *     @utilization.setter             # <<<<<<<<<<<<<<
 *     def utilization(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level entry point for the GpuDynamicPstatesInfo.utilization
 * property setter.  Cython-generated wrapper: downcasts self, forwards val
 * to the setter implementation, and returns 0/-1 per the descriptor
 * protocol.  Do not edit by hand — regenerated by Cython. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `utilization` property setter (embedded .pyx source,
 * lines 16488-16495). Behavior, in order:
 *   1. raise ValueError if self._readonly is set;
 *   2. type-check `val` as _py_anon_pod1 (None is also accepted by the test);
 *   3. raise ValueError unless len(val) == 8;
 *   4. memcpy 8 * sizeof(_anon_pod1) bytes from val_._get_ptr() into
 *      self._ptr[0].utilization.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16490
 *     @utilization.setter
 *     def utilization(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         cdef _py_anon_pod1 val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16491
 *     def utilization(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod1 val_ = val
 *         if len(val) != 8:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuDynamicPstatesInfo_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16491, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16491, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16490
 *     @utilization.setter
 *     def utilization(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         cdef _py_anon_pod1 val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16492
 *         if self._readonly:
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         cdef _py_anon_pod1 val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 8:
 *             raise ValueError(f"Expected length 8 for field utilization, got {len(val)}")
 */
  /* The test accepts Py_None as well as _py_anon_pod1 instances (standard
   * Cython cast semantics for a cdef class assignment). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1))))) __PYX_ERR(0, 16492, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16493
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         cdef _py_anon_pod1 val_ = val
 *         if len(val) != 8:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 8 for field utilization, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].utilization), <void *>(val_._get_ptr()), sizeof(_anon_pod1) * 8)
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16493, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 8);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":16494
 *         cdef _py_anon_pod1 val_ = val
 *         if len(val) != 8:
 *             raise ValueError(f"Expected length 8 for field utilization, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].utilization), <void *>(val_._get_ptr()), sizeof(_anon_pod1) * 8)
 * 
 */
    /* f-string: len(val) is re-evaluated, formatted, and concatenated onto
     * the constant message prefix. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16494, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16494, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_8_for_field_util, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16494, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16494, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16494, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16493
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         cdef _py_anon_pod1 val_ = val
 *         if len(val) != 8:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 8 for field utilization, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].utilization), <void *>(val_._get_ptr()), sizeof(_anon_pod1) * 8)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16495
 *         if len(val) != 8:
 *             raise ValueError(f"Expected length 8 for field utilization, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].utilization), <void *>(val_._get_ptr()), sizeof(_anon_pod1) * 8)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): val_ may legally be None after the cast above; the vtable
   * dereference below assumes it is not (consistent with Cython's default
   * nonecheck=False). Passing val=None would crash here — confirm callers
   * never do. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod1 *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16495, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).utilization)), ((void *)__pyx_t_8), ((sizeof(_anon_pod1)) * 8)));

  /* "cuda/bindings/_nvml.pyx":16488
 *         return _py_anon_pod1.from_ptr(<intptr_t>&(self._ptr[0].utilization), 8, self._readonly)
 * 
 *     @utilization.setter             # <<<<<<<<<<<<<<
 *     def utilization(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.utilization.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  /* val_ holds a strong reference on both success and error paths. */
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16497
 *         memcpy(<void *>&(self._ptr[0].utilization), <void *>(val_._get_ptr()), sizeof(_anon_pod1) * 8)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def flags_(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ slot wrapper for GpuDynamicPstatesInfo.flags_:
 * casts `self` to the extension-type struct and delegates to the
 * implementation. Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `flags_` property getter (.pyx line 16500):
 * reads self._ptr[0].flags (an unsigned int) and boxes it as a Python int.
 * Returns a new reference, or NULL if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16500
 *     def flags_(self):
 *         """int: """
 *         return self._ptr[0].flags             # <<<<<<<<<<<<<<
 * 
 *     @flags_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16500, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16497
 *         memcpy(<void *>&(self._ptr[0].utilization), <void *>(val_._get_ptr()), sizeof(_anon_pod1) * 8)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def flags_(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.flags_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16502
 *         return self._ptr[0].flags
 * 
 *     @flags_.setter             # <<<<<<<<<<<<<<
 *     def flags_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ slot wrapper for GpuDynamicPstatesInfo.flags_:
 * casts `self` and delegates to the implementation. Returns 0 on success,
 * -1 on error (CPython setter protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `flags_` property setter (.pyx lines 16504-16506):
 * raises ValueError when self._readonly is set, otherwise converts `val`
 * to unsigned int and stores it into self._ptr[0].flags.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16504
 *     @flags_.setter
 *     def flags_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         self._ptr[0].flags = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16505
 *     def flags_(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].flags = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuDynamicPstatesInfo_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16505, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16505, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16504
 *     @flags_.setter
 *     def flags_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         self._ptr[0].flags = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16506
 *         if self._readonly:
 *             raise ValueError("This GpuDynamicPstatesInfo instance is read-only")
 *         self._ptr[0].flags = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Raises (via __Pyx_PyLong_As_unsigned_int) on non-int or out-of-range
   * values before any store happens. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16506, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).flags = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16502
 *         return self._ptr[0].flags
 * 
 *     @flags_.setter             # <<<<<<<<<<<<<<
 *     def flags_(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.flags_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16508
 *         self._ptr[0].flags = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the static
 * method GpuDynamicPstatesInfo.from_data(data). The wrapper uses the
 * METH_FASTCALL|METH_KEYWORDS calling convention when available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_12from_data, "GpuDynamicPstatesInfo.from_data(data)\n\nCreate an GpuDynamicPstatesInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpu_dynamic_pstates_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_12from_data};
/* Python wrapper for the static method GpuDynamicPstatesInfo.from_data(data).
 * Parses the single argument `data` (accepted positionally or by keyword,
 * under either the FASTCALL or tuple calling convention depending on
 * CYTHON_METH_FASTCALL) and forwards it to the implementation function.
 *
 * Fix vs. generated code: the keyword-count error check read
 * `if (unlikely(__pyx_kwds_len) < 0)`. Since unlikely(x) expands to
 * `__builtin_expect(!!(x), 0)`, whose value is 0 or 1, that comparison is
 * always false, silently discarding the negative (error) return of
 * __Pyx_NumKwargs_FASTCALL. The parenthesization is corrected below to
 * `unlikely(__pyx_kwds_len < 0)` so the error path is reachable. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* corrected: the whole comparison belongs inside unlikely() */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16508, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: take any positional args first, then fill the
       * rest from keywords and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16508, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16508, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16508, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16508, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16508, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references we took. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuDynamicPstatesInfo.from_data (.pyx line 16515):
 * looks up the module-level global `gpu_dynamic_pstates_info_dtype` and
 * delegates to the shared helper __from_data with the dtype name, dtype
 * object, and the GpuDynamicPstatesInfo type. Returns a new reference or
 * NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16515
 *             data (_numpy.ndarray): a single-element array of dtype `gpu_dynamic_pstates_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpu_dynamic_pstates_info_dtype", gpu_dynamic_pstates_info_dtype, GpuDynamicPstatesInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_dynamic_pstates_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpu_dynamic_pstates_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16508
 *         self._ptr[0].flags = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16517
 *         return __from_data(data, "gpu_dynamic_pstates_info_dtype", gpu_dynamic_pstates_info_dtype, GpuDynamicPstatesInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for the static
 * method GpuDynamicPstatesInfo.from_ptr(ptr, readonly=False, owner=None). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_14from_ptr, "GpuDynamicPstatesInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpuDynamicPstatesInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_14from_ptr};
/* Python wrapper for GpuDynamicPstatesInfo.from_ptr(ptr, readonly=False,
 * owner=None). Unpacks up to three arguments (positional or keyword),
 * applies defaults (readonly=False, owner=None), converts `ptr` to
 * intptr_t and `readonly` to a C bool, then forwards to the
 * implementation function.
 *
 * Fix vs. generated code: the keyword-count error check read
 * `if (unlikely(__pyx_kwds_len) < 0)`. Since unlikely(x) expands to
 * `__builtin_expect(!!(x), 0)`, whose value is 0 or 1, the comparison is
 * always false, discarding the negative (error) return of
 * __Pyx_NumKwargs_FASTCALL. Corrected to `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* corrected: the whole comparison belongs inside unlikely() */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16517, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals (fallthrough switch), then
       * merge keywords and check required args. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16517, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16517, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16517, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16517, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":16518
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16517, __pyx_L3_error) }
      }
    } else {
      /* No keywords: purely positional dispatch (1 to 3 args). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16517, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16517, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16517, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — generated
     * code assumes intptr_t and Py_ssize_t have the same width on supported
     * platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16518, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16518, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16517, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references we took. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":16517
 *         return __from_data(data, "gpu_dynamic_pstates_info_dtype", gpu_dynamic_pstates_info_dtype, GpuDynamicPstatesInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated implementation of the Python static method
 * GpuDynamicPstatesInfo.from_ptr(ptr, readonly=False, owner=None).
 * Behavior (visible in the interleaved .pyx excerpts below):
 *   - raises ValueError if ptr == 0;
 *   - owner is None  -> malloc a private nvmlGpuDynamicPstatesInfo_t, memcpy
 *     the pointee of `ptr` into it, and mark the instance owning (_owned = 1)
 *     so dealloc can free it;
 *   - owner is not None -> alias `ptr` directly and hold a reference to
 *     `owner` to keep the underlying memory alive (_owned = 0);
 *   - records `readonly` and returns the new instance.
 * Do not hand-edit beyond comments: refcount bookkeeping (__Pyx_GOTREF /
 * __Pyx_GIVEREF) and the L1_error/L0 cleanup labels depend on the exact
 * statement order emitted by Cython. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16526
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16527
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16527, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16527, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16526
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16528
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 */
  /* tp_new is called directly (no __init__), so _ptr/_owner are whatever
   * tp_new leaves them as until the branches below assign them. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16528, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16529
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16530
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 */
    __pyx_v_obj->_ptr = ((nvmlGpuDynamicPstatesInfo_t *)malloc((sizeof(nvmlGpuDynamicPstatesInfo_t))));

    /* "cuda/bindings/_nvml.pyx":16531
 *         if owner is None:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuDynamicPstatesInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16532
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name (module global lookup falling back
       * to builtins), then called with the message string. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16532, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuDynamicPstat};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16532, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16532, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16531
 *         if owner is None:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuDynamicPstatesInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16533
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuDynamicPstatesInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the caller's struct so the instance is independent of `ptr`. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpuDynamicPstatesInfo_t))));

    /* "cuda/bindings/_nvml.pyx":16534
 *                 raise MemoryError("Error allocating GpuDynamicPstatesInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16535
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16529
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuDynamicPstatesInfo obj = GpuDynamicPstatesInfo.__new__(GpuDynamicPstatesInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>malloc(sizeof(nvmlGpuDynamicPstatesInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16537
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlGpuDynamicPstatesInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16538
 *         else:
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16539
 *             obj._ptr = <nvmlGpuDynamicPstatesInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16540
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16541
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16517
 *         return __from_data(data, "gpu_dynamic_pstates_info_dtype", gpu_dynamic_pstates_info_dtype, GpuDynamicPstatesInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop any live temporaries, record traceback, fall through to
   * L0 where the local `obj` reference (if created) is released too. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* NOTE(review): Cython-generated Python wrapper (prototype, docstring,
 * PyMethodDef entry, and METH_FASTCALL entry point) for
 * GpuDynamicPstatesInfo.__reduce_cython__.  It rejects any positional or
 * keyword argument and delegates to the implementation below, which always
 * raises TypeError (pickling a pointer-wrapping object is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_16__reduce_cython__, "GpuDynamicPstatesInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; recover the argument count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): implementation of __reduce_cython__.  The instance wraps a
 * raw C pointer (_ptr) that cannot be pickled, so this unconditionally
 * raises TypeError.  The __PYX_ERR file index 1 refers to the Cython
 * "(tree fragment)" pseudo-file, not the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* NOTE(review): Cython-generated Python wrapper for
 * GpuDynamicPstatesInfo.__setstate_cython__(self, __pyx_state).  It parses
 * exactly one positional-or-keyword argument (`__pyx_state`) and delegates to
 * the implementation, which always raises TypeError (unpickling is
 * unsupported for pointer-wrapping objects).
 *
 * FIX(review): the negative-kwargs-count check read
 *   `if (unlikely(__pyx_kwds_len) < 0)` —
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, which yields 0 or 1,
 * so the `< 0` comparison was always false and a failing
 * __Pyx_NumKwargs_FASTCALL was silently ignored.  The parenthesis now matches
 * the form used by the other wrappers in this file:
 *   `if (unlikely(__pyx_kwds_len < 0))`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_18__setstate_cython__, "GpuDynamicPstatesInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; recover the argument count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parenthesis moved so the error return of
     * __Pyx_NumKwargs_FASTCALL is actually detected (was
     * `unlikely(__pyx_kwds_len) < 0`, a dead comparison). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: bind positionals, then let
       * __Pyx_ParseKeywords fill the rest and reject duplicates/extras. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): implementation of __setstate_cython__.  Unpickling is
 * unsupported because the instance wraps a raw C pointer (_ptr), so this
 * unconditionally raises TypeError; `__pyx_state` is never inspected. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuDynamicPstatesInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16544
 * 
 * 
 * cdef _get_vgpu_processes_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t pod = nvmlVgpuProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

/* NOTE(review): builds a NumPy structured dtype mirroring
 * nvmlVgpuProcessesUtilizationInfo_v1_t: field names, formats
 * (uint32/uint32/uint64/intp), byte offsets computed from a stack `pod`
 * instance via address arithmetic, and itemsize = sizeof(struct).
 *
 * FIX(review): the generated code assigned `__pyx_v_pod = __pyx_t_1;` without
 * ever initializing `__pyx_t_1`, copying an indeterminate object (UB in C++)
 * — the value-initialization Cython emits for
 * `pod = nvmlVgpuProcessesUtilizationInfo_v1_t()` was missing.  Restored
 * below; the pod's *values* are never read (only member addresses), but the
 * copy itself must not read indeterminate state. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_processes_utilization_info_v1_dtype_offsets(void) {
  nvmlVgpuProcessesUtilizationInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuProcessesUtilizationInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_processes_utilization_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":16545
 * 
 * cdef _get_vgpu_processes_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t pod = nvmlVgpuProcessesUtilizationInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_process_count', 'last_seen_time_stamp', 'vgpu_proc_util_array'],
 */
  /* FIX(review): value-initialize the temporary before copying it into pod. */
  __pyx_t_1 = nvmlVgpuProcessesUtilizationInfo_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":16546
 * cdef _get_vgpu_processes_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t pod = nvmlVgpuProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'vgpu_process_count', 'last_seen_time_stamp', 'vgpu_proc_util_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16546, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16546, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":16547
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t pod = nvmlVgpuProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_process_count', 'last_seen_time_stamp', 'vgpu_proc_util_array'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [
 */
  /* Build the dtype spec dict: names / formats / offsets / itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 16547, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_process_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_process_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_vgpu_process_count) != (0)) __PYX_ERR(0, 16547, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_last_seen_time_stamp) != (0)) __PYX_ERR(0, 16547, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_proc_util_array);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_proc_util_array);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_vgpu_proc_util_array) != (0)) __PYX_ERR(0, 16547, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 16547, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16548
 *     return _numpy.dtype({
 *         'names': ['version', 'vgpu_process_count', 'last_seen_time_stamp', 'vgpu_proc_util_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 16548, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 16548, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 16548, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 16548, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 16547, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16550
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuProcessCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 */
  /* Field offsets computed via pointer arithmetic on the stack instance;
   * only addresses of pod members are taken, values are never read. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16550, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":16551
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuProcessCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuProcUtilArray)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuProcessCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16551, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":16552
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuProcessCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuProcUtilArray)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.lastSeenTimeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16552, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":16553
 *             (<intptr_t>&(pod.vgpuProcessCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuProcUtilArray)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuProcUtilArray)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16553, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":16549
 *         'names': ['version', 'vgpu_process_count', 'last_seen_time_stamp', 'vgpu_proc_util_array'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuProcessCount)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 16549, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 16549, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 16549, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 16549, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 16547, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":16555
 *             (<intptr_t>&(pod.vgpuProcUtilArray)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 16547, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec) via the vectorcall fast path. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16546, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16544
 * 
 * 
 * cdef _get_vgpu_processes_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t pod = nvmlVgpuProcessesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_processes_utilization_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16573
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init slot wrapper for VgpuProcessesUtilizationInfo_v1.__init__.
 * Validates that the call carries no positional and no keyword arguments
 * (the .pyx __init__ takes only `self`), then delegates to the
 * __pyx_pf_ implementation. Returns 0 on success, -1 with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-CYTHON_ASSUME_SAFE_SIZE path checks for
   * a negative (error) result from PyTuple_Size. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments besides self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuProcessesUtilizationInfo_v1.__init__:
 * allocates a zero-initialized nvmlVgpuProcessesUtilizationInfo_v1_t and
 * initializes the bookkeeping fields: _owner=None, _owned=True (this object
 * frees the buffer in __dealloc__), _readonly=False, _refs={}.
 * Raises MemoryError when the allocation fails.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16574
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 */
  /* calloc (not malloc) so the wrapped struct starts fully zeroed. */
  __pyx_v_self->_ptr = ((nvmlVgpuProcessesUtilizationInfo_v1_t *)calloc(1, (sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":16575
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16576
 *         self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in the module globals and call it with the message
     * (CYTHON_UNPACK_METHODS provides a fast path for bound methods). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16576, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuProcessesUt};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16576, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16576, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16575
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":16577
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Refcount-balanced swap: incref the new value (None) before dropping the
   * previous _owner reference. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16578
 *             raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16579
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16580
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  /* Fresh empty dict for _refs, swapped in with balanced refcounting. */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16573
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16582
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for __dealloc__: casts self to the extension-type
 * struct and delegates to the __pyx_pf_ implementation. Cannot fail. */
static void __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * __Pyx_KwValues_VARARGS is presumably a macro that ignores its arguments
   * in this configuration -- confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__: frees the wrapped struct, but only when this
 * object owns it (_owned) and the pointer is non-NULL. The pointer is copied
 * to a local and cleared on the object before free() is called, so the object
 * never holds a dangling pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  nvmlVgpuProcessesUtilizationInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuProcessesUtilizationInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16584
 *     def __dealloc__(self):
 *         cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit evaluation of `self._owned and self._ptr != NULL`. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16585
 *         cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16586
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the field before freeing the local copy. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16587
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16584
 *     def __dealloc__(self):
 *         cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":16582
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16589
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot wrapper: delegates straight to the __repr__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__: builds the f-string
 * f"<{__name__}.VgpuProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * by formatting __name__, applying hex(id(self)), and joining the five
 * unicode pieces with a precomputed length/max-char hint.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16590
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuProcessesUtilizationInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ into a unicode fragment. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) as a unicode fragment. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".VgpuProcessesUtilizationInfo_v1 object at " +
   * hex-address + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuProcessesUtilizationInfo_v1;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 43 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16589
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16592
 *         return f"<{__name__}.VgpuProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for `ptr`: delegates to the __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the address of the
 * wrapped nvmlVgpuProcessesUtilizationInfo_v1_t as a Python int.
 * The intptr_t address is boxed via PyLong_FromSsize_t (Cython emits this on
 * the assumption intptr_t and Py_ssize_t have the same width here). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16595
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16595, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16592
 *         return f"<{__name__}.VgpuProcessesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16597
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (cdef _get_ptr): exposes the wrapped
 * nvmlVgpuProcessesUtilizationInfo_v1_t pointer as an integer address.
 * Mirrors the Python-level `ptr` property but avoids any Python boxing;
 * cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  /* Direct cast of the stored struct pointer to its integer representation. */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":16600
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot wrapper for __int__: delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__: int(obj) yields the wrapped struct's address,
 * identical in value to the `ptr` property above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16601
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16601, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16600
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16603
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuProcessesUtilizationInfo_v1):
*/

/* Python wrapper */
/* __eq__ slot wrapper: forwards (self, other) to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__: returns False for operands that are not
 * VgpuProcessesUtilizationInfo_v1 instances (note: False, not NotImplemented,
 * per the .pyx source), otherwise byte-compares the two wrapped structs with
 * memcmp. Returns a new bool reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16605
 *     def __eq__(self, other):
 *         cdef VgpuProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuProcessesUtilizationInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16606
 *         cdef VgpuProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuProcessesUtilizationInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16605
 *     def __eq__(self, other):
 *         cdef VgpuProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuProcessesUtilizationInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":16607
 *         if not isinstance(other, VgpuProcessesUtilizationInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t)) == 0)
 * 
 */
  /* Typed local `other_` (None is also accepted by the TypeTest here). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1))))) __PYX_ERR(0, 16607, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16608
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Struct-wise byte equality of the two wrapped buffers. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16608, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16603
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuProcessesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuProcessesUtilizationInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16610
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript slot wrapper for __setitem__: forwards (self, key, val)
 * to the implementation below. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__: `obj[0] = ndarray` replaces the wrapped
 * struct with a private copy of the ndarray's bytes (new owned allocation,
 * _owner=None, _owned=True, _readonly mirroring the array's writeable flag);
 * any other key falls back to setattr(self, key, val).
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * FIX(leak): the generated code previously did `self->_ptr = malloc(...)`
 * unconditionally, discarding the buffer that __init__ always calloc's (or
 * that a previous `obj[0] = ...` malloc'ed) and leaking it on every
 * assignment. We now free the old buffer first, but only when this object
 * owns it — a borrowed (non-owned) pointer is merely dropped, never freed. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16611
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: key == 0 AND val is a numpy ndarray. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16611, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16611, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16612
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 */
    /* FIX(leak): release the previously owned buffer before replacing it;
     * never free a pointer this object does not own. */
    if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }
    __pyx_v_self->_ptr = ((nvmlVgpuProcessesUtilizationInfo_v1_t *)malloc((sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16613
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16614
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             self._owner = None
 */
      /* Look up MemoryError in the module globals and raise it. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16614, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuProcessesUt};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16614, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16614, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16613
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16615
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(struct) bytes from the ndarray's buffer (val.ctypes.data). */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16615, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16615, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16615, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16616
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* Refcount-balanced swap of _owner to None. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16617
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16618
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag (inverted). */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16618, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16618, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16618, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16611
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16620
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat the subscript assignment as attribute assignment. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16620, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16610
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16622
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Property getter wrapper for `version`: delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the macro
   * presumably ignores its arguments in this configuration. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for VgpuProcessesUtilizationInfo_v1.version (pyx line 16625):
 * boxes the C `unsigned int` field self._ptr[0].version into a new Python
 * int and returns a new reference (NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16625
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Int-boxing can only fail on allocation error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16622
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16627
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `version` property setter: casts self to the
 * extension-type struct and forwards (self, val) to the __set__ impl.
 * Returns 0 on success, -1 on error (descriptor-slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for VgpuProcessesUtilizationInfo_v1.version (pyx line 16629):
 * raises ValueError when the instance is read-only, otherwise converts the
 * Python value to `unsigned int` and stores it in self._ptr[0].version.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16629
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16630
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build ValueError(msg) via vectorcall (one interned-string argument),
     * then raise it and jump to the error path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuProcessesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16630, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16630, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16629
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16631
 *         if self._readonly:
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 doubles as the error sentinel, so PyErr_Occurred()
   * disambiguates a genuine conversion of UINT_MAX from a failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16631, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16627
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16633
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
*/

/* Python wrapper for the `last_seen_time_stamp` property getter: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * __get__ impl below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for VgpuProcessesUtilizationInfo_v1.last_seen_time_stamp
 * (pyx line 16636): boxes the C `unsigned long long` field
 * self._ptr[0].lastSeenTimeStamp into a new Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16636
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
 *         return self._ptr[0].lastSeenTimeStamp             # <<<<<<<<<<<<<<
 * 
 *     @last_seen_time_stamp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Int-boxing can only fail on allocation error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).lastSeenTimeStamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16633
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.last_seen_time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16638
 *         return self._ptr[0].lastSeenTimeStamp
 * 
 *     @last_seen_time_stamp.setter             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `last_seen_time_stamp` property setter: casts self
 * to the extension-type struct and forwards (self, val) to the __set__ impl.
 * Returns 0 on success, -1 on error (descriptor-slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for VgpuProcessesUtilizationInfo_v1.last_seen_time_stamp
 * (pyx line 16640): raises ValueError when the instance is read-only,
 * otherwise converts the Python value to `unsigned long long` and stores it
 * in self._ptr[0].lastSeenTimeStamp. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16640
 *     @last_seen_time_stamp.setter
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16641
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 */
    /* Build ValueError(msg) via vectorcall, raise, jump to error path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuProcessesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16641, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16641, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16640
 *     @last_seen_time_stamp.setter
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16642
 *         if self._readonly:
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* -1 doubles as the error sentinel; PyErr_Occurred() disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16642, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).lastSeenTimeStamp = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":16638
 *         return self._ptr[0].lastSeenTimeStamp
 * 
 *     @last_seen_time_stamp.setter             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.last_seen_time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16644
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_proc_util_array(self):
 *         """int: The array (allocated by caller) in which utilization of processes running on vGPU instances are returned."""
*/

/* Python wrapper for the `vgpu_proc_util_array` property getter: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * __get__ impl below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for VgpuProcessesUtilizationInfo_v1.vgpu_proc_util_array
 * (pyx line 16647): returns [] when the underlying pointer is NULL or the
 * count is 0; otherwise wraps the C array by calling
 * VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>ptr, count).
 * The returned wrapper aliases self's memory (no copy is visible here). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16647
 *     def vgpu_proc_util_array(self):
 *         """int: The array (allocated by caller) in which utilization of processes running on vGPU instances are returned."""
 *         if self._ptr[0].vgpuProcUtilArray == NULL or self._ptr[0].vgpuProcessCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuProcUtilArray), self._ptr[0].vgpuProcessCount)
 */
  /* Short-circuit `or`: second operand only evaluated when ptr != NULL. */
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).vgpuProcUtilArray == NULL);
  if (!__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).vgpuProcessCount == 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16648
 *         """int: The array (allocated by caller) in which utilization of processes running on vGPU instances are returned."""
 *         if self._ptr[0].vgpuProcUtilArray == NULL or self._ptr[0].vgpuProcessCount == 0:
 *             return []             # <<<<<<<<<<<<<<
 *         return VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuProcUtilArray), self._ptr[0].vgpuProcessCount)
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16648, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16647
 *     def vgpu_proc_util_array(self):
 *         """int: The array (allocated by caller) in which utilization of processes running on vGPU instances are returned."""
 *         if self._ptr[0].vgpuProcUtilArray == NULL or self._ptr[0].vgpuProcessCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuProcUtilArray), self._ptr[0].vgpuProcessCount)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16649
 *         if self._ptr[0].vgpuProcUtilArray == NULL or self._ptr[0].vgpuProcessCount == 0:
 *             return []
 *         return VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuProcUtilArray), self._ptr[0].vgpuProcessCount)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_proc_util_array.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Vectorcall of the bound classmethod-style `from_ptr` with
   * (ptr-as-int, count) positional arguments. */
  __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  __Pyx_INCREF(__pyx_t_4);
  __pyx_t_5 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).vgpuProcUtilArray)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).vgpuProcessCount); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 0;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_5, __pyx_t_6};
    __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16649, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
  }
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16644
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_proc_util_array(self):
 *         """int: The array (allocated by caller) in which utilization of processes running on vGPU instances are returned."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.vgpu_proc_util_array.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16651
 *         return VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuProcUtilArray), self._ptr[0].vgpuProcessCount)
 * 
 *     @vgpu_proc_util_array.setter             # <<<<<<<<<<<<<<
 *     def vgpu_proc_util_array(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `vgpu_proc_util_array` property setter: casts self
 * to the extension-type struct and forwards (self, val) to the __set__ impl.
 * Returns 0 on success, -1 on error (descriptor-slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for VgpuProcessesUtilizationInfo_v1.vgpu_proc_util_array
 * (pyx line 16653): raises ValueError when read-only; otherwise type-checks
 * val as VgpuProcessUtilizationInfo_v1 (None also passes the generated
 * check), stores its underlying pointer and len() into the C struct, and
 * keeps a reference in self._refs to tie the array's lifetime to self.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16653
 *     @vgpu_proc_util_array.setter
 *     def vgpu_proc_util_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuProcessUtilizationInfo_v1 arr = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16654
 *     def vgpu_proc_util_array(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuProcessUtilizationInfo_v1 arr = val
 *         self._ptr[0].vgpuProcUtilArray = <nvmlVgpuProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 */
    /* Build ValueError(msg) via vectorcall, raise, jump to error path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuProcessesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16654, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16654, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16653
 *     @vgpu_proc_util_array.setter
 *     def vgpu_proc_util_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuProcessUtilizationInfo_v1 arr = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16655
 *         if self._readonly:
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuProcessUtilizationInfo_v1 arr = val             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuProcUtilArray = <nvmlVgpuProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].vgpuProcessCount = len(arr)
 */
  /* TypeTest raises TypeError if val is neither None nor an instance of
   * VgpuProcessUtilizationInfo_v1. NOTE(review): None would pass this check
   * and crash in _get_ptr below -- standard cdef-assignment semantics. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1))))) __PYX_ERR(0, 16655, __pyx_L1_error)
  __pyx_v_arr = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16656
 *             raise ValueError("This VgpuProcessesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuProcessUtilizationInfo_v1 arr = val
 *         self._ptr[0].vgpuProcUtilArray = <nvmlVgpuProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuProcessCount = len(arr)
 *         self._refs["vgpu_proc_util_array"] = arr
 */
  /* C-level vtable call; the returned intptr_t is reinterpreted as the
   * nvml struct pointer. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v_arr->__pyx_vtab)->_get_ptr(__pyx_v_arr); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16656, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuProcUtilArray = ((nvmlVgpuProcessUtilizationInfo_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":16657
 *         cdef VgpuProcessUtilizationInfo_v1 arr = val
 *         self._ptr[0].vgpuProcUtilArray = <nvmlVgpuProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].vgpuProcessCount = len(arr)             # <<<<<<<<<<<<<<
 *         self._refs["vgpu_proc_util_array"] = arr
 * 
 */
  __pyx_t_5 = PyObject_Length(((PyObject *)__pyx_v_arr)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16657, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuProcessCount = __pyx_t_5;

  /* "cuda/bindings/_nvml.pyx":16658
 *         self._ptr[0].vgpuProcUtilArray = <nvmlVgpuProcessUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].vgpuProcessCount = len(arr)
 *         self._refs["vgpu_proc_util_array"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Keep `arr` alive for as long as self holds the raw pointer. */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 16658, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_vgpu_proc_util_array, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 16658, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":16651
 *         return VgpuProcessUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuProcUtilArray), self._ptr[0].vgpuProcessCount)
 * 
 *     @vgpu_proc_util_array.setter             # <<<<<<<<<<<<<<
 *     def vgpu_proc_util_array(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.vgpu_proc_util_array.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16660
 *         self._refs["vgpu_proc_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method
 * VgpuProcessesUtilizationInfo_v1.from_data(data): parses exactly one
 * positional-or-keyword argument ("data") from a METH_FASTCALL/varargs
 * call and delegates to the _12from_data impl. Returns a new reference,
 * or NULL with an exception set on argument errors. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_12from_data, "VgpuProcessesUtilizationInfo_v1.from_data(data)\n\nCreate an VgpuProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_processes_utilization_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: generated code read `unlikely(__pyx_kwds_len) < 0`, which
     * compares the 0/1 result of unlikely()/__builtin_expect against 0 and
     * can never be true, so a negative kwargs count was silently ignored.
     * The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16660, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call path: bind any positional args first, then let
       * __Pyx_ParseKeywords fill the rest and reject duplicates/unknowns. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16660, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16660, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16660, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16660, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16660, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of VgpuProcessesUtilizationInfo_v1.from_data(data) (pyx line 16667):
 * looks up the module-global dtype object and delegates to the shared
 * __from_data helper, passing the dtype name, the dtype, and the wrapping
 * extension type. Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16667
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_processes_utilization_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_processes_utilization_info_v1_dtype", vgpu_processes_utilization_info_v1_dtype, VgpuProcessesUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Module-global lookup of the dtype; raises NameError if missing. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_processes_utilization_info); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_processes_utilization_info, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16660
 *         self._refs["vgpu_proc_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16669
 *         return __from_data(data, "vgpu_processes_utilization_info_v1_dtype", vgpu_processes_utilization_info_v1_dtype, VgpuProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * Cython-generated entry point for the static method
 *     VgpuProcessesUtilizationInfo_v1.from_ptr(ptr, readonly=False, owner=None)
 * Parses positional/keyword arguments (fastcall or tuple/dict calling
 * convention depending on CYTHON_METH_FASTCALL), converts them to C values,
 * and delegates to __pyx_pf_..._14from_ptr below.  Returns NULL with a
 * Python exception set on argument-parsing or conversion failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_14from_ptr, "VgpuProcessesUtilizationInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuProcessesUtilizationInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references for the three parameters (ptr, readonly, owner);
   * released via Py_XDECREF on every exit path below. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  /* Without the fastcall convention the positional count must be read
   * from the args tuple itself. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* Unpack up to three positional arguments and merge keyword arguments
     * by the names "ptr", "readonly", "owner". */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16669, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword case: gather positionals first (the
       * switch falls through from the highest count down), then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16669, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16669, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16669, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16669, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":16670
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given pointer.
 * 
 */
      /* Apply the default owner=None, then verify the one required
       * argument (ptr) was supplied.  The readonly default is applied
       * during conversion below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16669, __pyx_L3_error) }
      }
    } else {
      /* Keyword-free fast path: only positional counts 1..3 are valid. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16669, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16669, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16669, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert the Python argument objects to C values.
     * NOTE(review): ptr is converted via PyLong_AsSsize_t, i.e. through
     * Py_ssize_t — this relies on Py_ssize_t and intptr_t having the same
     * width, which holds on the flat-memory platforms CPython targets. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16670, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16670, __pyx_L3_error)
    } else {
      /* Default: readonly=False. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16669, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing error path: release any references collected in
   * values[], record a traceback frame, and fail. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the actual implementation (a static method, so no self). */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":16669
 *         return __from_data(data, "vgpu_processes_utilization_info_v1_dtype", vgpu_processes_utilization_info_v1_dtype, VgpuProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuProcessesUtilizationInfo_v1.from_ptr (the embedded
 * Cython source from _nvml.pyx:16669-16694 is quoted inline below).
 * Behavior, as shown by the generated code:
 *   - ptr == 0            -> raise ValueError("ptr must not be null (0)");
 *   - owner is None       -> malloc a fresh nvmlVgpuProcessesUtilizationInfo_v1_t,
 *                            memcpy the pointee into it and mark the object
 *                            as owning the allocation (_owned = 1);
 *   - owner is not None   -> alias the caller's pointer directly and hold a
 *                            reference to `owner` to keep the backing memory
 *                            alive (_owned = 0);
 *   - finally record `readonly`, initialize an empty `_refs` dict, and
 *     return the new instance (or NULL with an exception set).
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16678
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)
 */
  /* Reject a null pointer up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16679
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      /* Instantiate ValueError via vectorcall with the message string. */
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16679, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16679, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16678
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16680
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 */
  /* __new__ without __init__: tp_new is called directly, so the allocator
   * in __init__ is bypassed and _ptr is set explicitly below. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16680, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16681
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16682
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 */
    /* No owner supplied: take a private copy of the struct. */
    __pyx_v_obj->_ptr = ((nvmlVgpuProcessesUtilizationInfo_v1_t *)malloc((sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16683
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16684
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name (module-global lookup), then
       * called with the message; the unpack-methods branch mirrors
       * Cython's generic bound-method fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16684, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuProcessesUt};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16684, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16684, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16683
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16685
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* NOTE(review): copies sizeof(struct) bytes only — any pointer fields
     * inside the struct are shallow-copied; the pointed-to data's lifetime
     * remains the caller's responsibility. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":16686
 *                 raise MemoryError("Error allocating VgpuProcessesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16687
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16681
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuProcessesUtilizationInfo_v1 obj = VgpuProcessesUtilizationInfo_v1.__new__(VgpuProcessesUtilizationInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16689
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner supplied: wrap the caller's memory without copying and keep a
   * reference to `owner` so it outlives this wrapper. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuProcessesUtilizationInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16690
 *         else:
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16691
 *             obj._ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16692
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16693
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  /* Fresh, empty dict for Python objects the instance must keep alive. */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16693, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16694
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16669
 *         return __from_data(data, "vgpu_processes_utilization_info_v1_dtype", vgpu_processes_utilization_info_v1_dtype, VgpuProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: drop temporaries; the partially built obj (if any) is
   * released below at __pyx_L0 via the shared XDECREF. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * Cython-generated wrapper for VgpuProcessesUtilizationInfo_v1.__reduce_cython__.
 * Takes no arguments; rejects any positionals or keywords, then delegates
 * to the implementation, which always raises TypeError (pickling of this
 * pointer-wrapping type is unsupported).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_16__reduce_cython__, "VgpuProcessesUtilizationInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Determine the positional count when not using fastcall. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__(self) accepts no arguments at all. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __reduce_cython__: unconditionally raises TypeError
 * because the wrapped C pointer (self._ptr) cannot be pickled.
 * Always returns NULL with the exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * Cython-generated wrapper for VgpuProcessesUtilizationInfo_v1.__setstate_cython__.
 * Requires exactly one argument (__pyx_state, positional or keyword),
 * then delegates to the implementation, which always raises TypeError
 * (unpickling of this pointer-wrapping type is unsupported).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_18__setstate_cython__, "VgpuProcessesUtilizationInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned reference for the single __pyx_state argument; released via
   * Py_XDECREF on every exit path below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Determine the positional count when not using fastcall. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* Accept __pyx_state either positionally or by keyword. */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing error path: release collected references and fail. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __setstate_cython__: unconditionally raises TypeError
 * because the wrapped C pointer (self._ptr) cannot be restored from a
 * pickle.  The __pyx_state argument is accepted but never read.
 * Always returns NULL with the exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16718
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerParams_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * Cython-generated tp_init wrapper for VgpuSchedulerParams.__init__(self).
 * Takes no arguments beyond self; rejects any positionals or keywords and
 * delegates to the implementation (which, per the embedded .pyx source
 * above, calloc's the underlying nvmlVgpuSchedulerParams_t).  Returns 0 on
 * success, -1 with an exception set on failure.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives a tuple/dict pair (no fastcall for slots). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) accepts no arguments at all. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__init__:
   zero-allocates one nvmlVgpuSchedulerParams_t via calloc and records that
   this instance owns the buffer (_owned = True, _owner = None, writable).
   Raises MemoryError if the allocation fails. Returns 0 / -1. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16719
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerParams_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerParams")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerParams_t *)calloc(1, (sizeof(nvmlVgpuSchedulerParams_t))));

  /* "cuda/bindings/_nvml.pyx":16720
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerParams_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerParams")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16721
 *         self._ptr = <nvmlVgpuSchedulerParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerParams_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerParams")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError in module globals and call it with the message,
       then raise the resulting exception instance. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16721, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerPa};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16721, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16721, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16720
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerParams_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerParams")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":16722
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerParams")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* self._owner = None: no other Python object keeps this buffer alive. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16723
 *             raise MemoryError("Error allocating VgpuSchedulerParams")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True: __dealloc__ will free() the buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16724
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16718
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerParams_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16726
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerParams_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for VgpuSchedulerParams.__dealloc__ (tp_dealloc body):
   casts self to the extension-type struct and calls the implementation.
   NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   this only compiles if __Pyx_KwValues_VARARGS expands without evaluating
   its arguments — confirm against the macro definition in the preamble. */
static void __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  nvmlVgpuSchedulerParams_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuSchedulerParams_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16728
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerParams_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16729
 *         cdef nvmlVgpuSchedulerParams_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16730
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16731
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16728
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerParams_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":16726
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerParams_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16733
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerParams object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Python wrapper for VgpuSchedulerParams.__repr__: casts self and forwards
   to the implementation. Returns a new unicode reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__repr__:
   builds f"<{__name__}.VgpuSchedulerParams object at {hex(id(self))}>" by
   formatting the module's __name__, computing hex(id(self)), and joining
   five unicode fragments with a precomputed length/max-char estimate. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16734
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerParams object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module-level __name__ and format it as a plain string. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) -> unicode, for the address portion of the repr. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".VgpuSchedulerParams object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerParams_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 31 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16733
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerParams object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16736
 *         return f"<{__name__}.VgpuSchedulerParams object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python wrapper for the VgpuSchedulerParams.ptr property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuSchedulerParams.ptr property getter:
   returns the raw nvmlVgpuSchedulerParams_t* address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16739
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t is converted via PyLong_FromSsize_t (same width as Py_ssize_t). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16736
 *         return f"<{__name__}.VgpuSchedulerParams object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16741
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) accessor: return the wrapped nvmlVgpuSchedulerParams_t*
   as an integer address. Pure C fast path — no Python objects involved. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19VgpuSchedulerParams__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":16744
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python wrapper for VgpuSchedulerParams.__int__ (nb_int slot body). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__int__:
   int(obj) yields the raw _ptr address, same value as the .ptr property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16745
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16744
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16747
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams other_
 *         if not isinstance(other, VgpuSchedulerParams):
*/

/* Python wrapper */
/* Python wrapper for VgpuSchedulerParams.__eq__: forwards self and other. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__eq__:
   returns False for non-VgpuSchedulerParams operands, otherwise compares
   the two underlying structs bytewise with memcmp.
   NOTE(review): memcmp dereferences both _ptr values; if either instance's
   _ptr is NULL (e.g. after __dealloc__ began or an external reassignment),
   this is undefined behavior — confirm the upstream .pyx guarantees
   non-NULL pointers here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16749
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerParams other_
 *         if not isinstance(other, VgpuSchedulerParams):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16750
 *         cdef VgpuSchedulerParams other_
 *         if not isinstance(other, VgpuSchedulerParams):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerParams_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16749
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerParams other_
 *         if not isinstance(other, VgpuSchedulerParams):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":16751
 *         if not isinstance(other, VgpuSchedulerParams):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerParams_t)) == 0)
 * 
 */
  /* Cast other to the typed cdef local (TypeTest re-verifies the class). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams))))) __PYX_ERR(0, 16751, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16752
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerParams_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerParams_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16752, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16747
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams other_
 *         if not isinstance(other, VgpuSchedulerParams):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16754
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerParams_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
*/

/* Python wrapper */
/* Python wrapper for VgpuSchedulerParams.__setitem__ (mp_ass_subscript). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__setitem__:
   - obj[0] = <numpy.ndarray>: malloc a fresh struct, memcpy the array's
     buffer (via val.ctypes.data) into it, mark the instance as owning the
     new buffer, and mirror the array's writeable flag into _readonly.
   - any other key: fall back to setattr(self, key, val).
   Returns 0 on success, -1 with a Python exception on error.
   NOTE(review): the ndarray branch overwrites self._ptr without freeing a
   previously owned buffer — looks like a potential leak if __init__ already
   allocated one; confirm against the upstream .pyx. It also copies
   sizeof(nvmlVgpuSchedulerParams_t) bytes without checking val's size. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16755
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only look up numpy.ndarray if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16755, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16755, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16756
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerParams_t *)malloc((sizeof(nvmlVgpuSchedulerParams_t))));

    /* "cuda/bindings/_nvml.pyx":16757
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerParams_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16758
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerParams_t))
 *             self._owner = None
 */
      /* Raise MemoryError("Error allocating VgpuSchedulerParams"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16758, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerPa};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16758, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16758, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16757
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerParams_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16759
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerParams_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; convert
       it back to a pointer and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16759, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerParams_t))));

    /* "cuda/bindings/_nvml.pyx":16760
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerParams_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16761
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerParams_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16762
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Propagate the source array's writeability into this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16762, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16762, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16755
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16764
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16764, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16754
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerParams_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16766
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self):
 *         """_py_anon_pod2: """
*/

/* Python wrapper */
/* Python-level wrapper for the `vgpu_sched_data_with_arr` property getter:
 * casts the bare PyObject* self to the VgpuSchedulerParams extension struct
 * and delegates to the typed implementation below.
 * NOTE: generated by Cython 3.2.2 — do not hand-edit without need. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __pyx_args/__pyx_nargs are not parameters of this getter; the
   * __Pyx_KwValues_VARARGS macro presumably discards its arguments
   * (result is unused here) — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for VgpuSchedulerParams.vgpu_sched_data_with_arr.
 * Implements _nvml.pyx:16769:
 *   return _py_anon_pod2.from_ptr(<intptr_t>&self._ptr[0].vgpuSchedDataWithARR,
 *                                 self._readonly, self)
 * i.e. wraps the address of the nested vgpuSchedDataWithARR field in a
 * _py_anon_pod2 view, passing `self` as the owner argument (presumably to
 * keep the backing struct alive — see from_ptr's contract in the .pyx). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16769
 *     def vgpu_sched_data_with_arr(self):
 *         """_py_anon_pod2: """
 *         return _py_anon_pod2.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedDataWithARR), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_sched_data_with_arr.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the interior field address as a Python int; no data is copied.
   * Uses PyLong_FromSsize_t, so this presumably relies on intptr_t and
   * Py_ssize_t having the same width on supported platforms. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).vgpuSchedDataWithARR))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16769, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16769, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    /* Vectorcall of _py_anon_pod2.from_ptr(ptr, readonly, self). */
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16769, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16766
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self):
 *         """_py_anon_pod2: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.vgpu_sched_data_with_arr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16771
 *         return _py_anon_pod2.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedDataWithARR), self._readonly, self)
 * 
 *     @vgpu_sched_data_with_arr.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `vgpu_sched_data_with_arr` property setter:
 * casts self to the extension struct and forwards (self, val) to the typed
 * implementation.  Returns 0 on success, -1 on error (descriptor protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __Pyx_KwValues_VARARGS presumably ignores its (out-of-scope) arguments;
   * result unused — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter body for VgpuSchedulerParams.vgpu_sched_data_with_arr
 * (_nvml.pyx:16772-16776):
 *   1. raise ValueError if the instance is read-only;
 *   2. type-check val as _py_anon_pod2;
 *   3. memcpy one _anon_pod2 from val's backing pointer into
 *      self._ptr[0].vgpuSchedDataWithARR. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16773
 *     @vgpu_sched_data_with_arr.setter
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod2 val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16774
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod2 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod2) * 1)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerParams_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16774, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16774, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16773
 *     @vgpu_sched_data_with_arr.setter
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod2 val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16775
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod2 val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod2) * 1)
 * 
 */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  /* NOTE(review): Py_None passes this test (standard Cython semantics for a
   * plain `cdef X val_ = val`); a None val would then be cast below and its
   * __pyx_vtab dereferenced — presumably the .pyx should declare `not None`.
   * Confirm against callers before relying on None being rejected. */
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2))))) __PYX_ERR(0, 16775, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16776
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod2 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod2) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod2 *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16776, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).vgpuSchedDataWithARR)), ((void *)__pyx_t_4), ((sizeof(_anon_pod2)) * 1)));

  /* "cuda/bindings/_nvml.pyx":16771
 *         return _py_anon_pod2.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedDataWithARR), self._readonly, self)
 * 
 *     @vgpu_sched_data_with_arr.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.vgpu_sched_data_with_arr.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16778
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod2) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self):
 *         """_py_anon_pod3: """
*/

/* Python wrapper */
/* Python-level wrapper for the `vgpu_sched_data` property getter: casts
 * self to the extension struct and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* __Pyx_KwValues_VARARGS presumably discards its (out-of-scope)
   * arguments; result unused — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for VgpuSchedulerParams.vgpu_sched_data (_nvml.pyx:16781):
 *   return _py_anon_pod3.from_ptr(<intptr_t>&self._ptr[0].vgpuSchedData,
 *                                 self._readonly, self)
 * Wraps the address of the nested vgpuSchedData field in a _py_anon_pod3
 * view; `self` is passed as the owner argument (presumably to keep the
 * backing struct alive — see from_ptr's contract in the .pyx). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16781
 *     def vgpu_sched_data(self):
 *         """_py_anon_pod3: """
 *         return _py_anon_pod3.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedData), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_sched_data.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the interior field address as a Python int; no data is copied. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).vgpuSchedData))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16781, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16781, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    /* Vectorcall of _py_anon_pod3.from_ptr(ptr, readonly, self). */
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16781, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16778
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod2) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self):
 *         """_py_anon_pod3: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.vgpu_sched_data.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16783
 *         return _py_anon_pod3.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedData), self._readonly, self)
 * 
 *     @vgpu_sched_data.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `vgpu_sched_data` property setter: casts
 * self and forwards (self, val) to the typed implementation.  Returns 0 on
 * success, -1 on error (descriptor protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* __Pyx_KwValues_VARARGS presumably ignores its (out-of-scope) arguments;
   * result unused — confirm against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter body for VgpuSchedulerParams.vgpu_sched_data (_nvml.pyx:16784-16788):
 *   1. raise ValueError if the instance is read-only;
 *   2. type-check val as _py_anon_pod3;
 *   3. memcpy one _anon_pod3 from val's backing pointer into
 *      self._ptr[0].vgpuSchedData. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16785
 *     @vgpu_sched_data.setter
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod3 val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16786
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod3 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod3) * 1)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerParams_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16786, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16786, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16785
 *     @vgpu_sched_data.setter
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod3 val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16787
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod3 val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod3) * 1)
 * 
 */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  /* NOTE(review): Py_None passes this test (plain `cdef X val_ = val`
   * semantics); a None val would be cast below and its __pyx_vtab
   * dereferenced — presumably the .pyx should declare `not None`. */
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3))))) __PYX_ERR(0, 16787, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16788
 *             raise ValueError("This VgpuSchedulerParams instance is read-only")
 *         cdef _py_anon_pod3 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod3) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod3 *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16788, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).vgpuSchedData)), ((void *)__pyx_t_4), ((sizeof(_anon_pod3)) * 1)));

  /* "cuda/bindings/_nvml.pyx":16783
 *         return _py_anon_pod3.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedData), self._readonly, self)
 * 
 *     @vgpu_sched_data.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.vgpu_sched_data.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16790
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod3) * 1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerParams instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod VgpuSchedulerParams.from_data(data):
 * unpacks exactly one positional-or-keyword argument "data" under the
 * METH_FASTCALL|METH_KEYWORDS calling convention and forwards it to the
 * typed implementation.  Generated by Cython 3.2.2; one hand-fix below.
 *
 * FIX: the keyword-count error check was emitted as
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the usual definition `unlikely(x) == __builtin_expect(!!(x), 0)` the
 * operand is normalized to 0/1 before the comparison, so `< 0` can never be
 * true and a negative (error) result from __Pyx_NumKwargs_FASTCALL would be
 * silently ignored.  The comparison is moved inside the macro, matching the
 * correct pattern already used for `__pyx_nargs` a few lines above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_12from_data, "VgpuSchedulerParams.from_data(data)\n\nCreate an VgpuSchedulerParams instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_params_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely() so a negative keyword
     * count is actually detected (see header comment). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16790, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16790, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16790, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16790, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16790, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16790, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Body of VgpuSchedulerParams.from_data(data) (_nvml.pyx:16797):
 *   return __from_data(data, "vgpu_scheduler_params_dtype",
 *                      vgpu_scheduler_params_dtype, VgpuSchedulerParams)
 * Looks up the module global `vgpu_scheduler_params_dtype` and forwards it,
 * its name, and the VgpuSchedulerParams type object to the shared
 * __from_data helper (defined elsewhere in this module). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16797
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_params_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_params_dtype", vgpu_scheduler_params_dtype, VgpuSchedulerParams)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16797, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16797, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16790
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod3) * 1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerParams instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16799
 *         return __from_data(data, "vgpu_scheduler_params_dtype", vgpu_scheduler_params_dtype, VgpuSchedulerParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerParams instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod
 * VgpuSchedulerParams.from_ptr(ptr, readonly=False, owner=None):
 * unpacks up to three positional-or-keyword arguments, converts `ptr` to
 * intptr_t (via PyLong_AsSsize_t) and `readonly` to a C int, defaults
 * `owner` to None, then forwards to the typed implementation.
 * Generated by Cython 3.2.2; one hand-fix below.
 *
 * FIX: the keyword-count error check was emitted as
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the usual definition `unlikely(x) == __builtin_expect(!!(x), 0)` the
 * operand is normalized to 0/1 before the comparison, so `< 0` can never be
 * true and a negative (error) result from __Pyx_NumKwargs_FASTCALL would be
 * silently ignored.  The comparison is moved inside the macro, matching the
 * correct pattern already used for `__pyx_nargs` a few lines above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_14from_ptr, "VgpuSchedulerParams.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerParams instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely() so a negative keyword
     * count is actually detected (see header comment). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 16799, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16799, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16799, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16799, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16799, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":16800
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerParams instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16799, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16799, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16799, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16799, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* ptr: Python int -> intptr_t (presumably same width as Py_ssize_t on
     * supported platforms, hence PyLong_AsSsize_t). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16800, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16800, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16799, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":16799
 *         return __from_data(data, "vgpu_scheduler_params_dtype", vgpu_scheduler_params_dtype, VgpuSchedulerParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerParams instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated code — fix cuda/bindings/_nvml.pyx and
 * regenerate rather than hand-editing this file.
 *
 * Implementation of the static method
 * VgpuSchedulerParams.from_ptr(ptr, readonly=False, owner=None):
 *   - ptr == 0            -> raises ValueError;
 *   - owner is None       -> malloc()s a private nvmlVgpuSchedulerParams_t,
 *                            memcpy()s the struct from `ptr`, and marks the
 *                            object as owning the copy (_owned = 1);
 *   - owner is not None   -> wraps `ptr` directly and holds a reference to
 *                            `owner` (so the backing memory stays alive),
 *                            _owned = 0.
 * Returns a new reference on success, NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16808
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)
 */
  /* Reject null pointers before doing any allocation. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16809
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16809, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16809, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16808
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16810
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerParams(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16810, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16811
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16812
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerParams_t *)malloc((sizeof(nvmlVgpuSchedulerParams_t))));

    /* "cuda/bindings/_nvml.pyx":16813
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerParams_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16814
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerParams_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16814, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerPa};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16814, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16814, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16813
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerParams_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16815
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerParams_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the caller's struct into the freshly-owned buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerParams_t))));

    /* "cuda/bindings/_nvml.pyx":16816
 *                 raise MemoryError("Error allocating VgpuSchedulerParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerParams_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16817
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerParams_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16811
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerParams obj = VgpuSchedulerParams.__new__(VgpuSchedulerParams)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>malloc(sizeof(nvmlVgpuSchedulerParams_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16819
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Non-owning path: alias the caller's pointer and keep `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerParams_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16820
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16821
 *             obj._ptr = <nvmlVgpuSchedulerParams_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16822
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16823
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16799
 *         return __from_data(data, "vgpu_scheduler_params_dtype", vgpu_scheduler_params_dtype, VgpuSchedulerParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerParams instance wrapping the given pointer.
 */

  /* function exit code */
  /* Error path: release temporaries and record the traceback; the partially
   * constructed obj (if any) is dropped at __pyx_L0 below. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* NOTE(review): Cython-generated wrapper for
 * VgpuSchedulerParams.__reduce_cython__(self).  Rejects any positional or
 * keyword arguments, then delegates to the implementation (which raises
 * TypeError — the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_16__reduce_cython__, "VgpuSchedulerParams.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover nargs from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__reduce_cython__: unconditionally
 * raises TypeError because the wrapped raw C pointer (_ptr) cannot be
 * pickled.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* NOTE(review): Cython-generated wrapper for
 * VgpuSchedulerParams.__setstate_cython__(self, __pyx_state).  Unpacks
 * exactly one positional-or-keyword argument ("__pyx_state") and delegates
 * to the implementation (which always raises TypeError — not picklable).
 *
 * Fixed vs. the generated original, to match the sibling __reduce_cython__
 * wrapper above:
 *   - `if (unlikely(__pyx_kwds_len) < 0)` wrapped only the operand in
 *     unlikely(); since unlikely() normalizes its argument to 0/1, the `< 0`
 *     comparison was always false and the error check was dead.  The whole
 *     comparison now sits inside unlikely().
 *   - the __pyx_kwds_len initializer now uses unlikely(__pyx_kwds), matching
 *     the other wrappers in this file (behavior unchanged). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_18__setstate_cython__, "VgpuSchedulerParams.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds receive a tuple; recover nargs from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); previously `unlikely(len) < 0`
     * was always false, silently skipping the error check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keywords present — merge positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerParams.__setstate_cython__: unconditionally
 * raises TypeError because the wrapped raw C pointer (_ptr) cannot be
 * restored from pickled state.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerParams.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16847
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerSetParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerSetParams_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuSchedulerSetParams.__init__(self).
 * Rejects any positional or keyword arguments (the constructor takes none),
 * then delegates to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__init__: calloc()s a zeroed
 * nvmlVgpuSchedulerSetParams_t that this object owns (freed by __dealloc__),
 * and initializes _owner=None, _owned=True, _readonly=False.
 * Raises MemoryError (returns -1) if the allocation fails. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16848
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerSetParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerSetParams_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerSetParams")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerSetParams_t *)calloc(1, (sizeof(nvmlVgpuSchedulerSetParams_t))));

  /* "cuda/bindings/_nvml.pyx":16849
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerSetParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerSetParams_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16850
 *         self._ptr = <nvmlVgpuSchedulerSetParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerSetParams_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerSetParams")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16850, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSe};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16850, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16850, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16849
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerSetParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerSetParams_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":16851
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16852
 *             raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16853
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16847
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerSetParams_t *>calloc(1, sizeof(nvmlVgpuSchedulerSetParams_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16855
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerSetParams_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuSchedulerSetParams.__dealloc__.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this function;
 * presumably __Pyx_KwValues_VARARGS is a macro whose expansion discards its
 * arguments, so those tokens never reach the compiler — confirm against the
 * macro definition earlier in this file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuSchedulerSetParams.__dealloc__: frees the underlying
 * nvmlVgpuSchedulerSetParams_t only when this object owns it (_owned set and
 * _ptr non-NULL).  _ptr is cleared before free() so a reentrant call cannot
 * double-free the same pointer. */
static void __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  nvmlVgpuSchedulerSetParams_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuSchedulerSetParams_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":16857
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerSetParams_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: evaluate _ptr != NULL only when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16858
 *         cdef nvmlVgpuSchedulerSetParams_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":16859
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":16860
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16857
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerSetParams_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":16855
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerSetParams_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":16862
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerSetParams object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuSchedulerSetParams.__repr__; delegates
 * straight to the implementation (no arguments to unpack for __repr__).
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared here; presumably
 * __Pyx_KwValues_VARARGS discards its arguments at the macro level — confirm
 * against the macro definition earlier in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__repr__.
 * Builds the f-string "<{__name__}.VgpuSchedulerSetParams object at {hex(id(self))}>"
 * by formatting the module's __name__ global and hex(id(self)), then joining
 * five unicode fragments. Returns a new unicode reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16863
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerSetParams object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up the module global __name__ and format it as str(__name__). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce to unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + str(__name__) + ".VgpuSchedulerSetParams object at " + hex(...) + ">".
   * The length/max-char hints let the join pre-size the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerSetParams_object_a;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 34 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16862
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerSetParams object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16865
 *         return f"<{__name__}.VgpuSchedulerSetParams object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-level wrapper for the VgpuSchedulerSetParams.ptr property getter.
 * Casts self to the extension-type struct and forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter.
 * Returns the raw address stored in self->_ptr as a Python int
 * (the pointer is cast through intptr_t, then boxed). Returns NULL
 * with an exception set only if the int allocation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16868
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value; intptr_t and Py_ssize_t share a representation
   * on the supported platforms, hence PyLong_FromSsize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16868, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16865
 *         return f"<{__name__}.VgpuSchedulerSetParams object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16870
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) accessor: returns self->_ptr as an intptr_t with no
 * Python-object boxing. Cannot fail; used internally where the raw
 * address is needed without the overhead of the `ptr` property. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":16871
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16870
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16873
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerSetParams.__int__.
 * Casts self to the extension-type struct and forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__int__.
 * int(obj) yields the raw address in self->_ptr, identical in value to
 * the `ptr` property. Returns a new int reference, or NULL on allocation
 * failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":16874
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address as a Python int. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16874, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16873
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16876
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerSetParams other_
 *         if not isinstance(other, VgpuSchedulerSetParams):
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerSetParams.__eq__.
 * Casts self to the extension-type struct and forwards self/other
 * to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__eq__.
 * Returns False if `other` is not a VgpuSchedulerSetParams; otherwise
 * compares the two underlying structs byte-for-byte with memcmp over
 * sizeof(nvmlVgpuSchedulerSetParams_t) and returns the resulting bool.
 * NOTE(review): neither self->_ptr nor other_->_ptr is checked for NULL
 * before the memcmp — presumably callers only compare fully-initialized
 * instances; verify against the .pyx source's invariants. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":16878
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerSetParams other_
 *         if not isinstance(other, VgpuSchedulerSetParams):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":16879
 *         cdef VgpuSchedulerSetParams other_
 *         if not isinstance(other, VgpuSchedulerSetParams):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerSetParams_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":16878
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerSetParams other_
 *         if not isinstance(other, VgpuSchedulerSetParams):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":16880
 *         if not isinstance(other, VgpuSchedulerSetParams):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerSetParams_t)) == 0)
 * 
 */
  /* Downcast `other` to the extension type (None also passes the check,
   * matching the cdef assignment semantics from the .pyx source). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams))))) __PYX_ERR(0, 16880, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":16881
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerSetParams_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct comparison: equal iff all bytes match. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerSetParams_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16881, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16876
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerSetParams other_
 *         if not isinstance(other, VgpuSchedulerSetParams):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16883
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerSetParams_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerSetParams.__setitem__.
 * Casts self to the extension-type struct and forwards key/val to the
 * typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__setitem__.
 * Two behaviors, selected by (key, val):
 *   - obj[0] = ndarray: malloc a fresh nvmlVgpuSchedulerSetParams_t,
 *     raise MemoryError on allocation failure, memcpy the struct bytes
 *     from val.ctypes.data, then mark self as owning the buffer
 *     (_owner=None, _owned=True) with _readonly mirroring the array's
 *     writeable flag.
 *   - anything else: delegate to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE(review): the ndarray branch overwrites self->_ptr without freeing
 * a previously owned allocation — looks like a potential leak on repeated
 * assignment; confirm against the .pyx source's intended usage. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":16884
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, numpy.ndarray)`:
   * the isinstance half is only evaluated when key compared equal to 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 16884, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16884, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 16884, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16885
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 */
    /* Allocate a heap copy of the struct; ownership is recorded below. */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerSetParams_t *)malloc((sizeof(nvmlVgpuSchedulerSetParams_t))));

    /* "cuda/bindings/_nvml.pyx":16886
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerSetParams_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16887
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerSetParams_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating VgpuSchedulerSetParams"). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16887, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSe};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16887, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 16887, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16886
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerSetParams_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16888
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerSetParams_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch the array's buffer address (val.ctypes.data, a Python int)
     * and copy sizeof(struct) bytes into the fresh allocation.
     * Assumes the ndarray buffer is at least that large — TODO confirm
     * the caller guarantees this (not checked here). */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16888, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16888, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16888, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerSetParams_t))));

    /* "cuda/bindings/_nvml.pyx":16889
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerSetParams_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No external owner: this object now owns the heap copy itself. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16890
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerSetParams_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16891
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability into _readonly. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16891, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16891, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 16891, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":16884
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":16893
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fallback: treat key as an attribute name (setattr semantics). */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 16893, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":16883
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerSetParams_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16895
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self):
 *         """_py_anon_pod4: """
*/

/* Python wrapper */
/* Python-level wrapper for the vgpu_sched_data_with_arr property getter.
 * Casts self to the extension-type struct and forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the vgpu_sched_data_with_arr property getter.
 * Returns a _py_anon_pod4 view over the vgpuSchedDataWithARR member of
 * the underlying struct, created via the classmethod
 * _py_anon_pod4.from_ptr(address, self._readonly, self). Passing `self`
 * as the third argument keeps this object alive while the view exists.
 * NOTE(review): self->_ptr is dereferenced without a NULL check —
 * presumably the property is only read on initialized instances. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16898
 *     def vgpu_sched_data_with_arr(self):
 *         """_py_anon_pod4: """
 *         return _py_anon_pod4.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedDataWithARR), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_sched_data_with_arr.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the address of the embedded member as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).vgpuSchedDataWithARR))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16898, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16898, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16898, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16895
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self):
 *         """_py_anon_pod4: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.vgpu_sched_data_with_arr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16900
 *         return _py_anon_pod4.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedDataWithARR), self._readonly, self)
 * 
 *     @vgpu_sched_data_with_arr.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the vgpu_sched_data_with_arr property setter.
 * Casts self to the extension-type struct and forwards val to the impl;
 * returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the vgpu_sched_data_with_arr property setter.
 * Raises ValueError when the instance is read-only; otherwise downcasts
 * val to _py_anon_pod4 (TypeError via TypeTest on mismatch) and memcpys
 * sizeof(_anon_pod4) bytes from val's buffer into the vgpuSchedDataWithARR
 * member of the underlying struct. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16902
 *     @vgpu_sched_data_with_arr.setter
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod4 val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16903
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod4 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod4) * 1)
 */
    /* Build and raise ValueError("This VgpuSchedulerSetParams instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerSetParams_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16903, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16903, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16902
 *     @vgpu_sched_data_with_arr.setter
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod4 val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16904
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod4 val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod4) * 1)
 * 
 */
  /* Downcast val to _py_anon_pod4 (None also passes, matching cdef
   * assignment semantics from the .pyx source). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4))))) __PYX_ERR(0, 16904, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16905
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod4 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod4) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Obtain the source address via the vtable's _get_ptr, then copy the
   * member bytes into this struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod4 *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16905, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).vgpuSchedDataWithARR)), ((void *)__pyx_t_4), ((sizeof(_anon_pod4)) * 1)));

  /* "cuda/bindings/_nvml.pyx":16900
 *         return _py_anon_pod4.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedDataWithARR), self._readonly, self)
 * 
 *     @vgpu_sched_data_with_arr.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data_with_arr(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.vgpu_sched_data_with_arr.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16907
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod4) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self):
 *         """_py_anon_pod5: """
*/

/* Python wrapper */
/* Python-level entry point for the `vgpu_sched_data` property getter:
 * casts the generic PyObject* self to the concrete extension-type struct
 * and delegates to the generated implementation function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate; the implementation's result (new reference, or NULL on
   * error with an exception set) is returned unchanged. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerSetParams.vgpu_sched_data.__get__`.
 * Builds a `_py_anon_pod5` view over the embedded `vgpuSchedData` member of
 * the wrapped struct by calling `_py_anon_pod5.from_ptr(addr, readonly, self)`;
 * passing `self` as the third argument lets the view hold a reference to the
 * owning object. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":16910
 *     def vgpu_sched_data(self):
 *         """_py_anon_pod5: """
 *         return _py_anon_pod5.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedData), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_sched_data.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2: the _py_anon_pod5 extension type (call target). */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5);
  __Pyx_INCREF(__pyx_t_2);
  /* __pyx_t_3: address of the embedded vgpuSchedData field as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).vgpuSchedData))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  /* __pyx_t_4: the readonly flag propagated to the view. */
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    /* Vectorcall of the bound classmethod `from_ptr` with 3 arguments. */
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16910, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16907
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedDataWithARR), <void *>(val_._get_ptr()), sizeof(_anon_pod4) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self):
 *         """_py_anon_pod5: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.vgpu_sched_data.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16912
 *         return _py_anon_pod5.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedData), self._readonly, self)
 * 
 *     @vgpu_sched_data.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level entry point for the `vgpu_sched_data` property setter:
 * casts self to the concrete extension-type struct and forwards `val`
 * to the generated implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerSetParams.vgpu_sched_data.__set__`.
 * Rejects the assignment with ValueError when the instance is read-only,
 * type-checks `val` as `_py_anon_pod5`, then memcpy's one `_anon_pod5`
 * struct from the value's backing pointer into the embedded
 * `vgpuSchedData` field. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":16914
 *     @vgpu_sched_data.setter
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod5 val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":16915
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef _py_anon_pod5 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod5) * 1)
 */
    /* Construct and raise ValueError via vectorcall with the interned
     * message string. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerSetParams_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16915, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 16915, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16914
 *     @vgpu_sched_data.setter
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod5 val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":16916
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod5 val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod5) * 1)
 * 
 */
  /* Standard Cython coercion for `cdef _py_anon_pod5 val_ = val`.
   * NOTE(review): the generated test also accepts Py_None; the vtable call
   * below would then dereference a null object — presumably callers never
   * pass None here. Verify against the .pyx contract. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5))))) __PYX_ERR(0, 16916, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":16917
 *             raise ValueError("This VgpuSchedulerSetParams instance is read-only")
 *         cdef _py_anon_pod5 val_ = val
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod5) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Fetch the value's backing pointer via its vtable, then copy exactly
   * one `_anon_pod5` into our embedded field. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod5 *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16917, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).vgpuSchedData)), ((void *)__pyx_t_4), ((sizeof(_anon_pod5)) * 1)));

  /* "cuda/bindings/_nvml.pyx":16912
 *         return _py_anon_pod5.from_ptr(<intptr_t>&(self._ptr[0].vgpuSchedData), self._readonly, self)
 * 
 *     @vgpu_sched_data.setter             # <<<<<<<<<<<<<<
 *     def vgpu_sched_data(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.vgpu_sched_data.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16919
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod5) * 1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for the static method
 * `VgpuSchedulerSetParams.from_data(data)`: parses exactly one positional
 * or keyword argument named `data` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_12from_data, "VgpuSchedulerSetParams.from_data(data)\n\nCreate an VgpuSchedulerSetParams instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_set_params_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* Accepted argument names (NULL-terminated): only `data`. */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16919, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16919, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 16919, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 16919, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16919, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16919, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerSetParams.from_data(data)`.
 * Looks up the module-global `vgpu_scheduler_set_params_dtype` object and
 * forwards it, together with `data`, the dtype's name, and the
 * VgpuSchedulerSetParams extension type, to the shared `__from_data`
 * helper. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":16926
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_set_params_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_set_params_dtype", vgpu_scheduler_set_params_dtype, VgpuSchedulerSetParams)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_set_params_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16926, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_set_params_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16926, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16919
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod5) * 1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16928
 *         return __from_data(data, "vgpu_scheduler_set_params_dtype", vgpu_scheduler_set_params_dtype, VgpuSchedulerSetParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL wrapper for the static method
 * `VgpuSchedulerSetParams.from_ptr(ptr, readonly=False, owner=None)`:
 * parses 1-3 positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C int (default 0), defaults `owner` to None, then
 * delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* Accepted argument names, in declaration order: ptr, readonly, owner. */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16928, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: gather positionals then merge
       * keyword arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16928, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16928, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16928, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 16928, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":16929
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerSetParams instance wrapping the given pointer.
 * 
 */
      /* Apply the `owner=None` default and verify the single required
       * argument `ptr` was supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 16928, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1-3 arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16928, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16928, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16928, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr -> intptr_t (via Py_ssize_t) and readonly -> C truth value. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 16929, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16929, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 16928, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":16928
 *         return __from_data(data, "vgpu_scheduler_set_params_dtype", vgpu_scheduler_set_params_dtype, VgpuSchedulerSetParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerSetParams.from_ptr(ptr, readonly, owner)`.
 * Raises ValueError for a null pointer. With owner=None: allocates a fresh
 * nvmlVgpuSchedulerSetParams_t, copies the pointed-to data into it, and
 * marks the new object as owning that allocation (_owned=1). Otherwise:
 * wraps the caller's pointer directly and holds a reference to `owner`
 * (_owned=0). Finally records the readonly flag and returns the new
 * instance (new reference), or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":16937
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)
 */
  /* Reject null pointers up front. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16938
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16938, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16938, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16937
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)
 */
  }

  /* "cuda/bindings/_nvml.pyx":16939
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 */
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16939, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":16940
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":16941
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 */
    /* No owner: take a private copy so the caller's buffer may go away. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerSetParams_t *)malloc((sizeof(nvmlVgpuSchedulerSetParams_t))));

    /* "cuda/bindings/_nvml.pyx":16942
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerSetParams_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":16943
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerSetParams_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved by name from module globals here because
       * the .pyx raises it as a regular name. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16943, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSe};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16943, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 16943, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":16942
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerSetParams_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":16944
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerSetParams_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Deep-copy the caller's struct into the private allocation. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerSetParams_t))));

    /* "cuda/bindings/_nvml.pyx":16945
 *                 raise MemoryError("Error allocating VgpuSchedulerSetParams")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerSetParams_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":16946
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerSetParams_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":16940
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerSetParams obj = VgpuSchedulerSetParams.__new__(VgpuSchedulerSetParams)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>malloc(sizeof(nvmlVgpuSchedulerSetParams_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":16948
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: alias the pointer and keep the owner alive instead
     * of copying. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerSetParams_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":16949
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":16950
 *             obj._ptr = <nvmlVgpuSchedulerSetParams_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":16951
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":16952
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16928
 *         return __from_data(data, "vgpu_scheduler_set_params_dtype", vgpu_scheduler_set_params_dtype, VgpuSchedulerSetParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython-facing wrapper for VgpuSchedulerSetParams.__reduce_cython__(self).
   Accepts no arguments: any positional or keyword argument is rejected before
   delegating to the implementation function.  The parameter list differs by
   calling convention: METH_FASTCALL passes a C array + count, the fallback
   passes a classic args tuple. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_16__reduce_cython__, "VgpuSchedulerSetParams.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Without FASTCALL, recover the positional-argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments: reject positionals, then keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__reduce_cython__: pickling is not
   supported for this wrapper type, so it unconditionally raises TypeError and
   returns NULL with a traceback entry appended. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Always raises; control falls through to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython-facing wrapper for VgpuSchedulerSetParams.__setstate_cython__(self, __pyx_state).
   Parses exactly one argument (positional or the keyword "__pyx_state") and
   forwards it to the implementation.  The parameter list differs by calling
   convention: METH_FASTCALL passes a C array + count, the fallback passes a
   classic args tuple. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_18__setstate_cython__, "VgpuSchedulerSetParams.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Without FASTCALL, recover the positional-argument count from the tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the error check must wrap the whole comparison.  The original
       "if (unlikely(__pyx_kwds_len) < 0)" normalizes the operand to 0/1 via
       __builtin_expect(!!(x), 0), so the condition could never be true and a
       failed (-1) keyword-count query was silently swallowed.  Compare the
       correct form used by the sibling __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then merge
         keyword arguments and verify all required values are present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerSetParams.__setstate_cython__: unpickling is
   not supported for this wrapper type, so the state argument is ignored and a
   TypeError is raised unconditionally. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Always raises; control falls through to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerSetParams.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16955
 * 
 * 
 * cdef _get_vgpu_license_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuLicenseInfo_t pod = nvmlVgpuLicenseInfo_t()
 *     return _numpy.dtype({
*/

/* Build and return a NumPy structured dtype mirroring nvmlVgpuLicenseInfo_t.
   The dict passed to numpy.dtype() carries field 'names', 'formats', byte
   'offsets' (computed from member addresses of a stack instance "pod"), and
   'itemsize' = sizeof(nvmlVgpuLicenseInfo_t).  Note: only the *addresses* of
   pod's members are used; its contents are never read, so the copy from the
   default-constructed temporary __pyx_t_1 below carries no meaningful value. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_license_info_dtype_offsets(void) {
  nvmlVgpuLicenseInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuLicenseInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_license_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":16956
 * 
 * cdef _get_vgpu_license_info_dtype_offsets():
 *     cdef nvmlVgpuLicenseInfo_t pod = nvmlVgpuLicenseInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['is_licensed', 'license_expiry', 'current_state'],
*/
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":16957
 * cdef _get_vgpu_license_info_dtype_offsets():
 *     cdef nvmlVgpuLicenseInfo_t pod = nvmlVgpuLicenseInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['is_licensed', 'license_expiry', 'current_state'],
 *         'formats': [_numpy.uint8, vgpu_license_expiry_dtype, _numpy.uint32],
*/
  /* Look up numpy.dtype (t_5 holds the bound callable). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16957, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16957, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":16958
 *     cdef nvmlVgpuLicenseInfo_t pod = nvmlVgpuLicenseInfo_t()
 *     return _numpy.dtype({
 *         'names': ['is_licensed', 'license_expiry', 'current_state'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint8, vgpu_license_expiry_dtype, _numpy.uint32],
 *         'offsets': [
*/
  /* t_4 = the 4-entry spec dict; t_6 = the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_licensed);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_licensed);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_is_licensed) != (0)) __PYX_ERR(0, 16958, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_license_expiry);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_license_expiry);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_license_expiry) != (0)) __PYX_ERR(0, 16958, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_current_state);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_current_state);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_current_state) != (0)) __PYX_ERR(0, 16958, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 16958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":16959
 *     return _numpy.dtype({
 *         'names': ['is_licensed', 'license_expiry', 'current_state'],
 *         'formats': [_numpy.uint8, vgpu_license_expiry_dtype, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.isLicensed)) - (<intptr_t>&pod),
*/
  /* Build the 'formats' list: numpy.uint8, module-level
     vgpu_license_expiry_dtype, numpy.uint32. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_license_expiry_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  __pyx_t_8 = PyList_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 16959, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 16959, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 16959, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_8) < (0)) __PYX_ERR(0, 16958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;

  /* "cuda/bindings/_nvml.pyx":16961
 *         'formats': [_numpy.uint8, vgpu_license_expiry_dtype, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.isLicensed)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.currentState)) - (<intptr_t>&pod),
*/
  /* Byte offset of each member = member address minus struct base address. */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isLicensed)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 16961, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":16962
 *         'offsets': [
 *             (<intptr_t>&(pod.isLicensed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.currentState)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.licenseExpiry)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 16962, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":16963
 *             (<intptr_t>&(pod.isLicensed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.currentState)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuLicenseInfo_t),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.currentState)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16963, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":16960
 *         'names': ['is_licensed', 'license_expiry', 'current_state'],
 *         'formats': [_numpy.uint8, vgpu_license_expiry_dtype, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isLicensed)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16960, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 16960, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 16960, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 16960, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 16958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":16965
 *             (<intptr_t>&(pod.currentState)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuLicenseInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuLicenseInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 16965, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 16958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict).  __pyx_t_10 is 1 for a plain callable
     (no self slot) and 0 when a bound method was unpacked below. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16957, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16955
 * 
 * 
 * cdef _get_vgpu_license_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuLicenseInfo_t pod = nvmlVgpuLicenseInfo_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_license_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16982
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuLicenseInfo_t *>calloc(1, sizeof(nvmlVgpuLicenseInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for VgpuLicenseInfo.__init__(self).  Rejects any positional
   or keyword arguments, then delegates to the implementation.  Returns 0 on
   success, -1 on error (per the tp_init protocol). */
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Recover the positional-argument count from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: reject positionals, then keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.__init__: allocate a zeroed
   nvmlVgpuLicenseInfo_t with calloc and mark the instance as owning it
   (_owner=None, _owned=True, _readonly=False).  Raises MemoryError if the
   allocation fails.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":16983
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuLicenseInfo_t *>calloc(1, sizeof(nvmlVgpuLicenseInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuLicenseInfo")
*/
  /* calloc zero-initializes the POD struct. */
  __pyx_v_self->_ptr = ((nvmlVgpuLicenseInfo_t *)calloc(1, (sizeof(nvmlVgpuLicenseInfo_t))));

  /* "cuda/bindings/_nvml.pyx":16984
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuLicenseInfo_t *>calloc(1, sizeof(nvmlVgpuLicenseInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuLicenseInfo")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":16985
 *         self._ptr = <nvmlVgpuLicenseInfo_t *>calloc(1, sizeof(nvmlVgpuLicenseInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuLicenseInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating VgpuLicenseInfo").
       __pyx_t_5 is 1 for a plain callable, 0 when a bound method was
       unpacked into (self, function) below. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16985, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuLicenseInfo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16985, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 16985, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":16984
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuLicenseInfo_t *>calloc(1, sizeof(nvmlVgpuLicenseInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuLicenseInfo")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":16986
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuLicenseInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Swap the previous _owner reference for None. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":16987
 *             raise MemoryError("Error allocating VgpuLicenseInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":16988
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":16982
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuLicenseInfo_t *>calloc(1, sizeof(nvmlVgpuLicenseInfo_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":16990
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuLicenseInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for VgpuLicenseInfo.__dealloc__: forwards to the
   implementation after a refnanny setup.  NOTE(review): __pyx_args and
   __pyx_nargs are not parameters here; presumably __Pyx_KwValues_VARARGS is a
   macro that discards its arguments in this configuration — confirm against
   the generated utility-code definitions. */
static void __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuLicenseInfo.__dealloc__ (generated from
   cuda/bindings/_nvml.pyx:16990-16995).
   If this instance owns its nvmlVgpuLicenseInfo_t buffer ("_owned" set and
   "_ptr" non-NULL), detach the pointer from the instance first and only then
   free() it, so the instance never holds a dangling pointer while the buffer
   is being released.  Instances that merely wrap foreign memory
   (_owned == false) leave their pointer untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  /* "if self._owned and self._ptr != NULL:" */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    /* "ptr = self._ptr; self._ptr = NULL" — detach before freeing. */
    nvmlVgpuLicenseInfo_t *__pyx_owned_buf = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    /* "free(ptr)" */
    free(__pyx_owned_buf);
  }
}

/* "cuda/bindings/_nvml.pyx":16997
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuLicenseInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for VgpuLicenseInfo.__repr__: forwards to the
   implementation.  NOTE(review): __pyx_args and __pyx_nargs are not
   parameters here; presumably __Pyx_KwValues_VARARGS is a macro that discards
   its arguments in this configuration — confirm against the generated
   utility-code definitions. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.__repr__: builds the f-string
   "<{__name__}.VgpuLicenseInfo object at {hex(id(self))}>" by formatting the
   module __name__, computing hex(id(self)), and joining five unicode pieces
   with a precomputed minimum length and max-char value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":16998
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuLicenseInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) formatted with no format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", name, ".VgpuLicenseInfo object at ", hex-id, ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuLicenseInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":16997
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuLicenseInfo object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17000
 *         return f"<{__name__}.VgpuLicenseInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper for the `ptr` property getter (VgpuLicenseInfo.ptr.__get__).
 * Casts the generic PyObject* receiver to the extension-type struct and
 * delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE: __pyx_args/__pyx_nargs are not parameters of this wrapper; the
   * macro expansion evidently does not evaluate them (generated boilerplate). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.ptr.__get__: boxes the wrapped
 * nvmlVgpuLicenseInfo_t* address (self._ptr) as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17003
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): boxes intptr_t via PyLong_FromSsize_t — assumes
   * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms (generated code). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17003, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17000
 *         return f"<{__name__}.VgpuLicenseInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17005
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

/* C-level counterpart of the `ptr` property: yields the address of the
 * wrapped nvmlVgpuLicenseInfo_t struct as an integer. Touches no Python
 * state, so no error label or refnanny bookkeeping is needed. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15VgpuLicenseInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":17008
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper for VgpuLicenseInfo.__int__: casts the receiver and
 * delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.__int__: identical result to the `ptr`
 * property getter — the wrapped struct pointer address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":17009
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17009, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17008
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17011
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseInfo other_
 *         if not isinstance(other, VgpuLicenseInfo):
*/

/* Python wrapper for VgpuLicenseInfo.__eq__: casts the receiver, forwards
 * `other` untyped, and delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.__eq__: returns False for non-VgpuLicenseInfo
 * operands (not NotImplemented — intentional in the generator's template),
 * otherwise compares the two wrapped structs byte-wise with memcmp.
 * NOTE(review): a byte-wise memcmp includes any padding bytes in the struct;
 * confirm upstream that both structs are fully initialized before comparison,
 * and that both _ptr values are non-NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":17013
 *     def __eq__(self, other):
 *         cdef VgpuLicenseInfo other_
 *         if not isinstance(other, VgpuLicenseInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17014
 *         cdef VgpuLicenseInfo other_
 *         if not isinstance(other, VgpuLicenseInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17013
 *     def __eq__(self, other):
 *         cdef VgpuLicenseInfo other_
 *         if not isinstance(other, VgpuLicenseInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":17015
 *         if not isinstance(other, VgpuLicenseInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseInfo_t)) == 0)
 * 
 */
  /* Redundant-looking second type test: Cython always re-checks on the
   * typed assignment `other_ = other` (None is also accepted here). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo))))) __PYX_ERR(0, 17015, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":17016
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuLicenseInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17011
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseInfo other_
 *         if not isinstance(other, VgpuLicenseInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17018
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
*/

/* Python wrapper for VgpuLicenseInfo.__setitem__: casts the receiver and
 * delegates. Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.__setitem__:
 *   obj[0] = ndarray  -> malloc a fresh nvmlVgpuLicenseInfo_t, memcpy the
 *                        array's buffer into it, mark self as owner
 *                        (readonly mirrors the array's writeable flag);
 *   anything else     -> falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): on the ndarray path any previous self->_ptr is overwritten
 * without being freed — potential leak if the object already owned a buffer;
 * this comes from the generator's template, confirm against the .pyx intent.
 * NOTE(review): the ndarray's buffer is assumed to hold at least
 * sizeof(nvmlVgpuLicenseInfo_t) contiguous bytes — no size check is made. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":17019
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the isinstance check only runs when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17019, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17019, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 17019, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17020
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuLicenseInfo_t *)malloc((sizeof(nvmlVgpuLicenseInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17021
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17022
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseInfo_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17022, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuLicenseInfo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17022, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 17022, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17021
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17023
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the ndarray's base address as a Python int;
     * unbox it and copy one struct's worth of bytes. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17023, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17023, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17023, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuLicenseInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17024
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17025
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuLicenseInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17026
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17026, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17026, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17026, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":17019
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":17028
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 17028, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":17018
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuLicenseInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17030
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def license_expiry(self):
 *         """VgpuLicenseExpiry: """
*/

/* Python wrapper for the `license_expiry` property getter: casts the
 * receiver and delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `license_expiry` property getter: builds a
 * VgpuLicenseExpiry view over the embedded licenseExpiry field via
 * VgpuLicenseExpiry.from_ptr(address, self._readonly, self). Passing
 * `self` as the third argument keeps this object alive as the owner of
 * the memory the view points into. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17033
 *     def license_expiry(self):
 *         """VgpuLicenseExpiry: """
 *         return VgpuLicenseExpiry.from_ptr(<intptr_t>&(self._ptr[0].licenseExpiry), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @license_expiry.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the field's address as a Python int for from_ptr(). */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).licenseExpiry))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17033, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17030
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def license_expiry(self):
 *         """VgpuLicenseExpiry: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.license_expiry.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17035
 *         return VgpuLicenseExpiry.from_ptr(<intptr_t>&(self._ptr[0].licenseExpiry), self._readonly, self)
 * 
 *     @license_expiry.setter             # <<<<<<<<<<<<<<
 *     def license_expiry(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `license_expiry` property setter: casts the
 * receiver and delegates. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `license_expiry` property setter: raises ValueError
 * on a read-only instance, otherwise copies the VgpuLicenseExpiry value's
 * underlying struct into this struct's licenseExpiry field with memcpy.
 * NOTE(review): the typed assignment accepts None; a None `val` would make
 * the vtable _get_ptr call below dereference a NULL object — confirm callers
 * never pass None. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17037
 *     @license_expiry.setter
 *     def license_expiry(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         cdef VgpuLicenseExpiry val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17038
 *     def license_expiry(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseExpiry val_ = val
 *         memcpy(<void *>&(self._ptr[0].licenseExpiry), <void *>(val_._get_ptr()), sizeof(nvmlVgpuLicenseExpiry_t) * 1)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17038, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17038, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17037
 *     @license_expiry.setter
 *     def license_expiry(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         cdef VgpuLicenseExpiry val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17039
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         cdef VgpuLicenseExpiry val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].licenseExpiry), <void *>(val_._get_ptr()), sizeof(nvmlVgpuLicenseExpiry_t) * 1)
 * 
 */
  /* Typed assignment: TypeError unless val is a VgpuLicenseExpiry (or None). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry))))) __PYX_ERR(0, 17039, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17040
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         cdef VgpuLicenseExpiry val_ = val
 *         memcpy(<void *>&(self._ptr[0].licenseExpiry), <void *>(val_._get_ptr()), sizeof(nvmlVgpuLicenseExpiry_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17040, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).licenseExpiry)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuLicenseExpiry_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":17035
 *         return VgpuLicenseExpiry.from_ptr(<intptr_t>&(self._ptr[0].licenseExpiry), self._readonly, self)
 * 
 *     @license_expiry.setter             # <<<<<<<<<<<<<<
 *     def license_expiry(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.license_expiry.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17042
 *         memcpy(<void *>&(self._ptr[0].licenseExpiry), <void *>(val_._get_ptr()), sizeof(nvmlVgpuLicenseExpiry_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_licensed(self):
 *         """int: """
*/

/* Python wrapper for the `is_licensed` property getter: casts the receiver
 * and delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `is_licensed` property getter: reads the struct's
 * isLicensed field (an unsigned char per the boxing helper used below) and
 * returns it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17045
 *     def is_licensed(self):
 *         """int: """
 *         return self._ptr[0].isLicensed             # <<<<<<<<<<<<<<
 * 
 *     @is_licensed.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_char((__pyx_v_self->_ptr[0]).isLicensed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17045, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17042
 *         memcpy(<void *>&(self._ptr[0].licenseExpiry), <void *>(val_._get_ptr()), sizeof(nvmlVgpuLicenseExpiry_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_licensed(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.is_licensed.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17047
 *         return self._ptr[0].isLicensed
 * 
 *     @is_licensed.setter             # <<<<<<<<<<<<<<
 *     def is_licensed(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `is_licensed` property setter: casts the receiver
 * and delegates. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuLicenseInfo.is_licensed setter.
 * Raises ValueError when the wrapper is read-only; otherwise converts the
 * Python value to unsigned char and stores it into the underlying
 * nvmlVgpuLicenseInfo_t struct field `isLicensed`.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned char __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17049
 *     @is_licensed.setter
 *     def is_licensed(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         self._ptr[0].isLicensed = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17050
 *     def is_licensed(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isLicensed = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path; the message
     * is an interned unicode constant from the module state. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17050, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17050, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17049
 *     @is_licensed.setter
 *     def is_licensed(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         self._ptr[0].isLicensed = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17051
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         self._ptr[0].isLicensed = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to unsigned char; (unsigned char)-1 doubles as the error
   * sentinel, so PyErr_Occurred() disambiguates a legitimate 255. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_char(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned char)-1) && PyErr_Occurred())) __PYX_ERR(0, 17051, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isLicensed = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17047
 *         return self._ptr[0].isLicensed
 * 
 *     @is_licensed.setter             # <<<<<<<<<<<<<<
 *     def is_licensed(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.is_licensed.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17053
 *         self._ptr[0].isLicensed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current_state(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor __get__ slot wrapper for VgpuLicenseInfo.current_state.
 * Casts self to the extension-type struct and delegates to the impl.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuLicenseInfo.current_state getter.
 * Reads the `currentState` field (unsigned int) from the wrapped
 * nvmlVgpuLicenseInfo_t struct and returns it as a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17056
 *     def current_state(self):
 *         """int: """
 *         return self._ptr[0].currentState             # <<<<<<<<<<<<<<
 * 
 *     @current_state.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).currentState); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17056, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17053
 *         self._ptr[0].isLicensed = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def current_state(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.current_state.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17058
 *         return self._ptr[0].currentState
 * 
 *     @current_state.setter             # <<<<<<<<<<<<<<
 *     def current_state(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor __set__ slot wrapper for VgpuLicenseInfo.current_state.
 * Casts self to the extension-type struct and delegates to the impl.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuLicenseInfo.current_state setter.
 * Raises ValueError when the wrapper is read-only; otherwise converts the
 * Python value to unsigned int and stores it into the underlying
 * nvmlVgpuLicenseInfo_t struct field `currentState`.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17060
 *     @current_state.setter
 *     def current_state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         self._ptr[0].currentState = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17061
 *     def current_state(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].currentState = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast path; the message
     * is an interned unicode constant shared with the other setters. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuLicenseInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17061, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17061, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17060
 *     @current_state.setter
 *     def current_state(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         self._ptr[0].currentState = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17062
 *         if self._readonly:
 *             raise ValueError("This VgpuLicenseInfo instance is read-only")
 *         self._ptr[0].currentState = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 doubles as the conversion-error sentinel, so
   * PyErr_Occurred() disambiguates a legitimate UINT_MAX value. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17062, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).currentState = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17058
 *         return self._ptr[0].currentState
 * 
 *     @current_state.setter             # <<<<<<<<<<<<<<
 *     def current_state(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.current_state.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17064
 *         self._ptr[0].currentState = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuLicenseInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the staticmethod VgpuLicenseInfo.from_data(data).
 * Accepts exactly one positional-or-keyword argument `data` and forwards it
 * to the impl. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_12from_data, "VgpuLicenseInfo.from_data(data)\n\nCreate an VgpuLicenseInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_license_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields 0/1, so `< 0` was always false
     * and a -1 error return from __Pyx_NumKwargs_FASTCALL was silently
     * ignored. Parenthesization now matches the correct form used by other
     * generated wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17064, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17064, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 17064, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 17064, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17064, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17064, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the staticmethod VgpuLicenseInfo.from_data(data).
 * Looks up the module-level `vgpu_license_info_dtype` global and delegates
 * to the shared __from_data helper to wrap the given NumPy array in a
 * VgpuLicenseInfo instance. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":17071
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_license_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_license_info_dtype", vgpu_license_info_dtype, VgpuLicenseInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_license_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17071, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_license_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17071, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17064
 *         self._ptr[0].currentState = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuLicenseInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17073
 *         return __from_data(data, "vgpu_license_info_dtype", vgpu_license_info_dtype, VgpuLicenseInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the staticmethod
 * VgpuLicenseInfo.from_ptr(intptr_t ptr, bint readonly=False, owner=None).
 * Unpacks up to three positional/keyword arguments, applies defaults,
 * converts `ptr` to intptr_t and `readonly` to a C truth value, then
 * forwards to the impl. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14from_ptr, "VgpuLicenseInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuLicenseInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields 0/1, so `< 0` was always false
     * and a -1 error return from __Pyx_NumKwargs_FASTCALL was silently
     * ignored. Parenthesization now matches the correct form used by other
     * generated wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17073, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17073, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17073, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17073, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 17073, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":17074
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuLicenseInfo instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 17073, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17073, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17073, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17073, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): ptr is converted via PyLong_AsSsize_t and stored in an
     * intptr_t; assumes Py_ssize_t and intptr_t have the same width on all
     * supported platforms (true for mainstream ABIs) — generated by Cython. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17074, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17074, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 17073, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":17073
 *         return __from_data(data, "vgpu_license_info_dtype", vgpu_license_info_dtype, VgpuLicenseInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuLicenseInfo.from_ptr(ptr, readonly, owner).
 * Rejects a null pointer with ValueError. With owner=None it mallocs a
 * private nvmlVgpuLicenseInfo_t, copies the pointed-to data in, and marks
 * the instance as owning that allocation (_owned = True, presumably freed
 * in the type's dealloc — TODO confirm, dealloc is outside this chunk).
 * With an owner it aliases the caller's memory and keeps a reference to
 * `owner` to pin its lifetime. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17082
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17083
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17083, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17083, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17082
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":17084
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 */
  /* Direct tp_new call bypassing __init__, as generated for cls.__new__(cls). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17084, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17085
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17086
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 */
    /* No owner: take a private copy so the instance does not dangle. */
    __pyx_v_obj->_ptr = ((nvmlVgpuLicenseInfo_t *)malloc((sizeof(nvmlVgpuLicenseInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17087
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17088
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseInfo_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module-global name (the .pyx source
       * raised the builtin by name), hence the method-unpack fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17088, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuLicenseInfo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17088, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 17088, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17087
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17089
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    /* Copy the caller's struct; assumes `ptr` points at a readable,
     * fully-initialized nvmlVgpuLicenseInfo_t (caller's contract). */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuLicenseInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17090
 *                 raise MemoryError("Error allocating VgpuLicenseInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17091
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuLicenseInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17085
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuLicenseInfo obj = VgpuLicenseInfo.__new__(VgpuLicenseInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>malloc(sizeof(nvmlVgpuLicenseInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":17093
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner provided: alias the caller's memory; `owner` keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlVgpuLicenseInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17094
 *         else:
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":17095
 *             obj._ptr = <nvmlVgpuLicenseInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":17096
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":17097
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17073
 *         return __from_data(data, "vgpu_license_info_dtype", vgpu_license_info_dtype, VgpuLicenseInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * CPython entry point for VgpuLicenseInfo.__reduce_cython__().
 * Validates that the call carries no positional and no keyword arguments,
 * then delegates to the implementation function below, which always raises
 * TypeError (the wrapped struct pointer cannot be pickled).
 * Machine-generated by Cython 3.2.2 -- edit cuda/bindings/_nvml.pyx, not
 * this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_16__reduce_cython__, "VgpuLicenseInfo.__reduce_cython__(self)");
/* Method-table entry binding the fastcall wrapper and its docstring. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds must derive the positional count from the args
   * tuple; PyTuple_Size can signal an error, hence the < 0 check. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__(self) takes no extra arguments: reject any
   * positionals and any keywords before dispatching. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuLicenseInfo.__reduce_cython__().
 * Unconditionally raises TypeError: the object wraps a raw
 * nvmlVgpuLicenseInfo_t pointer (self._ptr), which cannot be serialized,
 * so pickling is explicitly unsupported.  Always returns NULL via the
 * error path.  Machine-generated by Cython 3.2.2.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message, then jump to the error
   * exit -- there is no success path out of this function. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * CPython entry point for VgpuLicenseInfo.__setstate_cython__(__pyx_state).
 * Unpacks exactly one argument (positional or the keyword "__pyx_state"),
 * then delegates to the implementation, which always raises TypeError
 * (unpickling is unsupported for pointer-wrapping objects).
 * Machine-generated by Cython 3.2.2 -- edit cuda/bindings/_nvml.pyx, not
 * this file.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_18__setstate_cython__, "VgpuLicenseInfo.__setstate_cython__(self, __pyx_state)");
/* Method-table entry binding the fastcall wrapper and its docstring. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds the single unpacked argument; entries are owned
   * references released on every exit path below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* NULL-terminated list of accepted keyword names ("__pyx_state"). */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the macro
     * around the value, so the expanded `!!(len) < 0` can never be true;
     * apparently harmless here since the length cannot be negative for a
     * valid kwds object -- confirm against upstream Cython codegen. */
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: take up to one positional, then merge keywords
       * and verify every required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuLicenseInfo.__setstate_cython__(__pyx_state).
 * Unconditionally raises TypeError: the object wraps a raw pointer
 * (self._ptr) that cannot be reconstructed from pickled state.  The state
 * argument is accepted but never used.  Always returns NULL via the error
 * path.  Machine-generated by Cython 3.2.2.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the interned message; no success path exists. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuLicenseInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17100
 * 
 * 
 * cdef _get_grid_licensable_feature_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicensableFeature_t pod = nvmlGridLicensableFeature_t()
 *     return _numpy.dtype({
*/

/*
 * C-level implementation of _get_grid_licensable_feature_dtype_offsets().
 * Builds and returns a numpy structured dtype mirroring the C layout of
 * nvmlGridLicensableFeature_t: a dict with 'names', 'formats', per-field
 * byte 'offsets' (computed as &pod.field - &pod on a stack instance), and
 * 'itemsize' = sizeof(nvmlGridLicensableFeature_t) is passed to
 * _numpy.dtype(...).  Returns NULL with a Python exception set on failure.
 * Machine-generated by Cython 3.2.2 -- edit cuda/bindings/_nvml.pyx, not
 * this file.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_grid_licensable_feature_dtype_offsets(void) {
  nvmlGridLicensableFeature_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGridLicensableFeature_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  size_t __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_grid_licensable_feature_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":17101
 * 
 * cdef _get_grid_licensable_feature_dtype_offsets():
 *     cdef nvmlGridLicensableFeature_t pod = nvmlGridLicensableFeature_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['feature_code', 'feature_state', 'license_info', 'product_name', 'feature_enabled', 'license_expiry'],
 */
  /* NOTE(review): __pyx_t_1 is never written before this copy, so pod holds
   * indeterminate bytes.  Only the *addresses* of pod's fields are taken
   * below (to compute byte offsets); the values are never read, so this
   * appears harmless -- confirm it matches the intended Cython codegen for
   * `nvmlGridLicensableFeature_t()` value-initialization. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":17102
 * cdef _get_grid_licensable_feature_dtype_offsets():
 *     cdef nvmlGridLicensableFeature_t pod = nvmlGridLicensableFeature_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['feature_code', 'feature_state', 'license_info', 'product_name', 'feature_enabled', 'license_expiry'],
 *         'formats': [_numpy.int32, _numpy.uint32, _numpy.int8, _numpy.int8, _numpy.uint32, grid_license_expiry_dtype],
 */
  /* Resolve _numpy.dtype: __pyx_t_5 holds the bound callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":17103
 *     cdef nvmlGridLicensableFeature_t pod = nvmlGridLicensableFeature_t()
 *     return _numpy.dtype({
 *         'names': ['feature_code', 'feature_state', 'license_info', 'product_name', 'feature_enabled', 'license_expiry'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int32, _numpy.uint32, _numpy.int8, _numpy.int8, _numpy.uint32, grid_license_expiry_dtype],
 *         'offsets': [
 */
  /* __pyx_t_4 = the dtype-spec dict (4 keys); __pyx_t_6 = 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17103, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17103, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_feature_code);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_feature_code);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_feature_code) != (0)) __PYX_ERR(0, 17103, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_feature_state);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_feature_state);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_feature_state) != (0)) __PYX_ERR(0, 17103, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_license_info);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_license_info);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_license_info) != (0)) __PYX_ERR(0, 17103, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_product_name);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_product_name);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_product_name) != (0)) __PYX_ERR(0, 17103, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_feature_enabled);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_feature_enabled);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_feature_enabled) != (0)) __PYX_ERR(0, 17103, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_license_expiry);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_license_expiry);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_license_expiry) != (0)) __PYX_ERR(0, 17103, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 17103, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17104
 *     return _numpy.dtype({
 *         'names': ['feature_code', 'feature_state', 'license_info', 'product_name', 'feature_enabled', 'license_expiry'],
 *         'formats': [_numpy.int32, _numpy.uint32, _numpy.int8, _numpy.int8, _numpy.uint32, grid_license_expiry_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.featureCode)) - (<intptr_t>&pod),
 */
  /* Collect the six per-field numpy format objects (t_7..t_11, t_6),
   * then move them into the 'formats' list __pyx_t_12. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_grid_license_expiry_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_12 = PyList_New(6); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 17104, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17104, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 17104, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 17104, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 17104, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 5, __pyx_t_6) != (0)) __PYX_ERR(0, 17104, __pyx_L1_error);
  /* Ownership of t_6..t_11 moved into the list; clear the temporaries so
   * the error path does not double-free them. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_12) < (0)) __PYX_ERR(0, 17103, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;

  /* "cuda/bindings/_nvml.pyx":17106
 *         'formats': [_numpy.int32, _numpy.uint32, _numpy.int8, _numpy.int8, _numpy.uint32, grid_license_expiry_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.featureCode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.featureState)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseInfo)) - (<intptr_t>&pod),
 */
  /* Each offset is the byte distance from &pod to the field's address
   * (equivalent to offsetof), converted to a Python int. */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.featureCode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":17107
 *         'offsets': [
 *             (<intptr_t>&(pod.featureCode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.featureState)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.licenseInfo)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.productName)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.featureState)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":17108
 *             (<intptr_t>&(pod.featureCode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.featureState)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseInfo)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.productName)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.featureEnabled)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.licenseInfo)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17108, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":17109
 *             (<intptr_t>&(pod.featureState)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseInfo)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.productName)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.featureEnabled)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.productName)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":17110
 *             (<intptr_t>&(pod.licenseInfo)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.productName)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.featureEnabled)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.featureEnabled)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17110, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":17111
 *             (<intptr_t>&(pod.productName)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.featureEnabled)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGridLicensableFeature_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.licenseExpiry)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17111, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":17105
 *         'names': ['feature_code', 'feature_state', 'license_info', 'product_name', 'feature_enabled', 'license_expiry'],
 *         'formats': [_numpy.int32, _numpy.uint32, _numpy.int8, _numpy.int8, _numpy.uint32, grid_license_expiry_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.featureCode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.featureState)) - (<intptr_t>&pod),
 */
  /* Move the six offset ints into the 'offsets' list __pyx_t_7. */
  __pyx_t_7 = PyList_New(6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_12) != (0)) __PYX_ERR(0, 17105, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 17105, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_11) != (0)) __PYX_ERR(0, 17105, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 17105, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_9) != (0)) __PYX_ERR(0, 17105, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_8) != (0)) __PYX_ERR(0, 17105, __pyx_L1_error);
  __pyx_t_12 = 0;
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 17103, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":17113
 *             (<intptr_t>&(pod.licenseExpiry)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGridLicensableFeature_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGridLicensableFeature_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 17103, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec).  If the callable is a bound method, unpack
   * it so `self` can be passed as the first vectorcall argument. */
  __pyx_t_13 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_13 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_13, (2-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17102, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17100
 * 
 * 
 * cdef _get_grid_licensable_feature_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicensableFeature_t pod = nvmlGridLicensableFeature_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release every live temporary, record the traceback,
   * and fall through to the common exit with __pyx_r == NULL. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_grid_licensable_feature_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17135
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=grid_licensable_feature_dtype)
 *         self._data = arr.view(_numpy.recarray)
*/

/* Python wrapper */
/*
 * CPython tp_init wrapper for GridLicensableFeature.__init__(self, size=1).
 * Unpacks at most one argument ("size", positional or keyword, defaulting
 * to the interned int 1), then delegates to the implementation function.
 * Returns 0 on success, -1 with an exception set on failure.
 * Machine-generated by Cython 3.2.2 -- edit cuda/bindings/_nvml.pyx, not
 * this file.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_size = 0;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds the single unpacked argument (owned references,
   * released on every exit path below). */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    /* NULL-terminated list of accepted keyword names ("size"). */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size,0};
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the macro
     * around the value, so the expanded `!!(len) < 0` can never be true;
     * apparently harmless since the length cannot be negative here --
     * confirm against upstream Cython codegen. */
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17135, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: take up to one positional, merge keywords,
       * then apply the default size=1 if still unset. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17135, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 17135, __pyx_L3_error)
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    } else {
      /* No keywords: accept zero or one positional; default size=1. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17135, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[0]) values[0] = __Pyx_NewRef(((PyObject *)__pyx_mstate_global->__pyx_int_1));
    }
    __pyx_v_size = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 17135, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__init__(self, size=1)
 * (cuda/bindings/_nvml.pyx:17135, Cython-generated).
 *
 * Allocates a NumPy array of `size` elements with dtype
 * `grid_licensable_feature_dtype` via _numpy.empty(), stores a recarray
 * view of it in self->_data, and — when Python assertions are enabled —
 * verifies that the array itemsize equals sizeof(nvmlGridLicensableFeature_t),
 * raising AssertionError with a formatted message otherwise.
 *
 * Returns 0 on success; returns -1 with a Python exception set on failure
 * (all temporaries are released on the error path via __pyx_L1_error). */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_size) {
  PyObject *__pyx_v_arr = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8[4];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":17136
 * 
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=grid_licensable_feature_dtype)             # <<<<<<<<<<<<<<
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlGridLicensableFeature_t), \
 */
  /* Look up numpy.empty and the module-level dtype, then call
   * empty(size, dtype=...) through the vectorcall kwarg builder. */
  __pyx_t_2 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_grid_licensable_feature_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If the callable is a bound method, unpack it so self becomes the
   * first positional argument (avoids a temporary bound-method call). */
  if (unlikely(PyMethod_Check(__pyx_t_4))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
    assert(__pyx_t_2);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
    __pyx_t_5 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, __pyx_v_size};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17136, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_3, __pyx_t_6, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 17136, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17136, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_v_arr = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17137
 *     def __init__(self, size=1):
 *         arr = _numpy.empty(size, dtype=grid_licensable_feature_dtype)
 *         self._data = arr.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 *         assert self._data.itemsize == sizeof(nvmlGridLicensableFeature_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGridLicensableFeature_t) }"
 */
  /* Call arr.view(numpy.recarray) as an unbound fast method call. */
  __pyx_t_4 = __pyx_v_arr;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17137, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* Swap the new recarray view into self->_data, dropping the old
   * reference (standard Cython attribute-assignment sequence). */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v_self->_data);
  __Pyx_DECREF(__pyx_v_self->_data);
  __pyx_v_self->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17138
 *         arr = _numpy.empty(size, dtype=grid_licensable_feature_dtype)
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlGridLicensableFeature_t), \             # <<<<<<<<<<<<<<
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGridLicensableFeature_t) }"
 * 
 */
  /* Assertion body is compiled out entirely when CYTHON_WITHOUT_ASSERTIONS
   * is defined, and skipped at runtime under `python -O`. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(__pyx_assertions_enabled())) {
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17138, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGridLicensableFeature_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17138, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17138, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 17138, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_7)) {

      /* "cuda/bindings/_nvml.pyx":17139
 *         self._data = arr.view(_numpy.recarray)
 *         assert self._data.itemsize == sizeof(nvmlGridLicensableFeature_t), \
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGridLicensableFeature_t) }"             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
      /* Build the f-string assertion message from four fragments, then
       * raise AssertionError with it. */
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17139, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_t_4, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17139, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyUnicode_From_size_t((sizeof(nvmlGridLicensableFeature_t)), 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17139, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8[0] = __pyx_mstate_global->__pyx_kp_u_itemsize_2;
      __pyx_t_8[1] = __pyx_t_3;
      __pyx_t_8[2] = __pyx_mstate_global->__pyx_kp_u_mismatches_struct_size;
      __pyx_t_8[3] = __pyx_t_4;
      /* NOTE: max-char only considers __pyx_t_3; __pyx_t_4 is a decimal
       * size_t rendering, so it is ASCII by construction. */
      __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, 9 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3) + 24 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_3));
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17139, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), __pyx_t_1, 0, 0);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __PYX_ERR(0, 17138, __pyx_L1_error)
    }
  }
  #else
  if ((1)); else __PYX_ERR(0, 17138, __pyx_L1_error)
  #endif

  /* "cuda/bindings/_nvml.pyx":17135
 * 
 * 
 *     def __init__(self, size=1):             # <<<<<<<<<<<<<<
 *         arr = _numpy.empty(size, dtype=grid_licensable_feature_dtype)
 *         self._data = arr.view(_numpy.recarray)
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17141
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGridLicensableFeature_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"
*/

/* Python wrapper */
/* CPython-facing wrapper for GridLicensableFeature.__repr__: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * implementation function.  Returns a new unicode reference, or NULL with
 * an exception set.
 * NOTE(review): __Pyx_KwValues_VARARGS references __pyx_args/__pyx_nargs,
 * which are not parameters here — presumably the macro expands without
 * evaluating its arguments in this configuration; confirm against the
 * Cython utility-code definitions. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_2__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__repr__
 * (cuda/bindings/_nvml.pyx:17141).
 *
 * If self._data.size > 1, returns
 *   f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"
 * otherwise returns
 *   f"<{__name__}.GridLicensableFeature object at {hex(id(self))}>".
 * Each f-string is assembled by formatting the parts and joining them with
 * the interned literal fragments via __Pyx_PyUnicode_Join.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_2__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6[7];
  PyObject *__pyx_t_7[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":17142
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17142, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 17142, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/_nvml.pyx":17143
 *     def __repr__(self):
 *         if self._data.size > 1:
 *             return f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 *         else:
 *             return f"<{__name__}.GridLicensableFeature object at {hex(id(self))}>"
 */
    /* Array case: format __name__, self._data.size and hex(id(self)),
     * then join seven fragments (literals + formatted values). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_t_2, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_5 = __Pyx_PyNumber_Hex(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyUnicode_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_6[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_6[1] = __pyx_t_1;
    __pyx_t_6[2] = __pyx_mstate_global->__pyx_kp_u_GridLicensableFeature_Array;
    __pyx_t_6[3] = __pyx_t_4;
    __pyx_t_6[4] = __pyx_mstate_global->__pyx_kp_u_object_at;
    __pyx_t_6[5] = __pyx_t_2;
    __pyx_t_6[6] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_6, 7, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1) + 29 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4) + 11 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2));
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17143, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17142
 * 
 *     def __repr__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             return f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 */
  }

  /* "cuda/bindings/_nvml.pyx":17145
 *             return f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"
 *         else:
 *             return f"<{__name__}.GridLicensableFeature object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Scalar case: same construction, without the size component. */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_5, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyNumber_Hex(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_5 = __Pyx_PyUnicode_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7[0] = __pyx_mstate_global->__pyx_kp_u__6;
    __pyx_t_7[1] = __pyx_t_2;
    __pyx_t_7[2] = __pyx_mstate_global->__pyx_kp_u_GridLicensableFeature_object_at;
    __pyx_t_7[3] = __pyx_t_5;
    __pyx_t_7[4] = __pyx_mstate_global->__pyx_kp_u__3;
    __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_7, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5));
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17145, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/_nvml.pyx":17141
 *             f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlGridLicensableFeature_t) }"
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             return f"<{__name__}.GridLicensableFeature_Array_{self._data.size} object at {hex(id(self))}>"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17147
 *             return f"<{__name__}.GridLicensableFeature object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* CPython-facing wrapper for the `ptr` property getter: casts self to the
 * extension-type struct and delegates to the implementation below.
 * Returns a new reference, or NULL with an exception set.
 * NOTE(review): __Pyx_KwValues_VARARGS references __pyx_args/__pyx_nargs,
 * which are not parameters here — presumably the macro expands without
 * evaluating its arguments in this configuration; confirm. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter
 * (cuda/bindings/_nvml.pyx:17147-17150): returns self._data.ctypes.data,
 * i.e. the underlying buffer address of the NumPy array as a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17150
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  /* Two chained attribute lookups: self._data.ctypes, then .data. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17147
 *             return f"<{__name__}.GridLicensableFeature object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17152
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
*/

/* C-level helper for `cdef intptr_t _get_ptr(self)`
 * (cuda/bindings/_nvml.pyx:17152-17153): fetches self._data.ctypes.data and
 * converts it to intptr_t via PyLong_AsSsize_t.
 * On failure it returns 0 with a Python exception set (and a traceback
 * frame added) — callers must check the error indicator, since 0 is not a
 * distinguishable sentinel by itself. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21GridLicensableFeature__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  intptr_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17153
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17153, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17153, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* -1 from PyLong_AsSsize_t is ambiguous, hence the PyErr_Occurred() check. */
  __pyx_t_3 = PyLong_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17153, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17152
 *         return self._data.ctypes.data
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return self._data.ctypes.data
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature._get_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17155
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
*/

/* Python wrapper */
/* CPython-facing wrapper for GridLicensableFeature.__int__: casts self to
 * the extension-type struct and delegates to the implementation below.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_4__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__int__
 * (cuda/bindings/_nvml.pyx:17155-17159).
 *
 * Raises TypeError if self._data.size > 1 (arrays must use .ptr instead);
 * otherwise returns self._data.ctypes.data — the buffer address — as a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_4__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  size_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":17156
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17156, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 17156, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":17157
 *     def __int__(self):
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "             # <<<<<<<<<<<<<<
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data
 */
    /* Instantiate TypeError with the interned message via a fast call,
     * then raise it. */
    __pyx_t_1 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_int_argument_must_be_a_bytes_lik};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17157, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17157, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17156
 * 
 *     def __int__(self):
 *         if self._data.size > 1:             # <<<<<<<<<<<<<<
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 */
  }

  /* "cuda/bindings/_nvml.pyx":17159
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 *                             "To get the pointer address of an array, use .ptr")
 *         return self._data.ctypes.data             # <<<<<<<<<<<<<<
 * 
 *     def __len__(self):
 */
  /* Single-element case: return the buffer address like the .ptr getter. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17155
 *         return self._data.ctypes.data
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         if self._data.size > 1:
 *             raise TypeError("int() argument must be a bytes-like object of size 1. "
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17161
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
*/

/* Python wrapper */
/* CPython-facing wrapper for GridLicensableFeature.__len__ (sq_length
 * slot signature): casts self to the extension-type struct and delegates
 * to the implementation below.  Returns the length, or -1 with an
 * exception set. */
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_7__len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_7__len__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_6__len__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__len__
 * (cuda/bindings/_nvml.pyx:17161-17162): returns self._data.size (the
 * NumPy element count) coerced to Py_ssize_t.
 * Returns -1 with a Python exception set on failure. */
static Py_ssize_t __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_6__len__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "cuda/bindings/_nvml.pyx":17162
 * 
 *     def __len__(self):
 *         return self._data.size             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17162, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* -1 from __Pyx_PyIndex_AsSsize_t is ambiguous, hence the
   * PyErr_Occurred() check. */
  __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17162, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17161
 *         return self._data.ctypes.data
 * 
 *     def __len__(self):             # <<<<<<<<<<<<<<
 *         return self._data.size
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17164
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
*/

/* Python wrapper */
/* CPython-facing wrapper for GridLicensableFeature.__eq__: casts self to
 * the extension-type struct, forwards `other` unchanged, and delegates to
 * the implementation function.  Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__eq__ from _nvml.pyx:17164:
 *
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GridLicensableFeature)) or \
 *            self_data.size != other._data.size or \
 *            self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())
 *
 * Returns a new reference to a Python bool, or NULL with an exception set.
 * NOTE(review): `_data` is presumably a numpy structured array (it exposes
 * .size/.dtype/.all()), but that is defined elsewhere in the file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_other) {
  PyObject *__pyx_v_self_data = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":17165
 * 
 *     def __eq__(self, other):
 *         cdef object self_data = self._data             # <<<<<<<<<<<<<<
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 */
  /* Take an owned reference to self._data so repeated attribute lookups
   * below go through a local instead of the instance slot. */
  __pyx_t_1 = __pyx_v_self->_data;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_v_self_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17166
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  /* Short-circuit `or` chain: each clause jumps to __pyx_L4_bool_binop_done
   * as soon as the overall truth value is decided.
   * Clause 1: not isinstance(other, GridLicensableFeature). */
  __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature); 
  __pyx_t_4 = (!__pyx_t_3);
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Clause 2: self_data.size != other._data.size. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_6, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (!__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  /* Clause 3: self_data.dtype != other._data.dtype. */
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17166, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_2 = __pyx_t_4;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17167
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False             # <<<<<<<<<<<<<<
 *         return bool((self_data == other._data).all())
 * 
 */
    /* Type/size/dtype mismatch: return False (new reference to singleton). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17166
 *     def __eq__(self, other):
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:             # <<<<<<<<<<<<<<
 *             return False
 *         return bool((self_data == other._data).all())
 */
  }

  /* "cuda/bindings/_nvml.pyx":17168
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 *             return False
 *         return bool((self_data == other._data).all())             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Element-wise compare, then call .all() on the result via a vectorcall
   * method dispatch, then coerce its truth value into a Python bool. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_mstate_global->__pyx_n_u_data_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyObject_RichCompare(__pyx_v_self_data, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17168, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_7;
  __Pyx_INCREF(__pyx_t_1);
  __pyx_t_8 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, NULL};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_all, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17168, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17168, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* (!(!x)) normalizes the truth value to 0/1 for PyBool_FromLong. */
  __pyx_t_6 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17164
 *         return self._data.size
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef object self_data = self._data
 *         if (not isinstance(other, GridLicensableFeature)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype:
 */

  /* function exit code */
  /* Error path: release any temporaries still held, record a traceback
   * frame, and return NULL with the exception set. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_self_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17170
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def feature_code(self):
 *         """Union[~_numpy.int32, int]: """
*/

/* Python wrapper */
/* Generated CPython-level getter wrapper for the `feature_code` property:
 * casts `self` and delegates to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GridLicensableFeature.feature_code (_nvml.pyx:17170):
 *
 *     if self._data.size == 1:
 *         return int(self._data.feature_code[0])   # scalar convenience
 *     return self._data.feature_code               # array view otherwise
 *
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17173
 *     def feature_code(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.feature_code[0])
 *         return self._data.feature_code
 */
  /* Optimized `== 1` comparison against the cached int constant. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17173, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17174
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.feature_code[0])             # <<<<<<<<<<<<<<
 *         return self._data.feature_code
 * 
 */
    /* Single-element case: index [0] and convert to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_code); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17174, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17174, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17174, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17173
 *     def feature_code(self):
 *         """Union[~_numpy.int32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.feature_code[0])
 *         return self._data.feature_code
 */
  }

  /* "cuda/bindings/_nvml.pyx":17175
 *         if self._data.size == 1:
 *             return int(self._data.feature_code[0])
 *         return self._data.feature_code             # <<<<<<<<<<<<<<
 * 
 *     @feature_code.setter
 */
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_code); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17175, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17170
 *         return bool((self_data == other._data).all())
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def feature_code(self):
 *         """Union[~_numpy.int32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.feature_code.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17177
 *         return self._data.feature_code
 * 
 *     @feature_code.setter             # <<<<<<<<<<<<<<
 *     def feature_code(self, val):
 *         self._data.feature_code = val
*/

/* Python wrapper */
/* Generated CPython-level setter wrapper for the `feature_code` property:
 * casts `self` and delegates; returns 0 on success, -1 on error. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for GridLicensableFeature.feature_code (_nvml.pyx:17177):
 * forwards the assignment to self._data.feature_code.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17179
 *     @feature_code.setter
 *     def feature_code(self, val):
 *         self._data.feature_code = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_code, __pyx_v_val) < (0)) __PYX_ERR(0, 17179, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17177
 *         return self._data.feature_code
 * 
 *     @feature_code.setter             # <<<<<<<<<<<<<<
 *     def feature_code(self, val):
 *         self._data.feature_code = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.feature_code.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17181
 *         self._data.feature_code = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def feature_state(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Generated CPython-level getter wrapper for the `feature_state` property:
 * casts `self` and delegates to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GridLicensableFeature.feature_state (_nvml.pyx:17181):
 * same shape as the feature_code getter — scalar int when
 * self._data.size == 1, otherwise the raw attribute object.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17184
 *     def feature_state(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.feature_state[0])
 *         return self._data.feature_state
 */
  /* Optimized `== 1` comparison against the cached int constant. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17184, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17185
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.feature_state[0])             # <<<<<<<<<<<<<<
 *         return self._data.feature_state
 * 
 */
    /* Single-element case: index [0] and convert to a plain Python int. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17185, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17185, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17185, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17184
 *     def feature_state(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.feature_state[0])
 *         return self._data.feature_state
 */
  }

  /* "cuda/bindings/_nvml.pyx":17186
 *         if self._data.size == 1:
 *             return int(self._data.feature_state[0])
 *         return self._data.feature_state             # <<<<<<<<<<<<<<
 * 
 *     @feature_state.setter
 */
  /* Multi-element case: return the attribute object unchanged. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17186, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17181
 *         self._data.feature_code = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def feature_state(self):
 *         """Union[~_numpy.uint32, int]: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.feature_state.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17188
 *         return self._data.feature_state
 * 
 *     @feature_state.setter             # <<<<<<<<<<<<<<
 *     def feature_state(self, val):
 *         self._data.feature_state = val
*/

/* Python wrapper */
/* Generated CPython-level setter wrapper for the `feature_state` property:
 * casts `self` and delegates; returns 0 on success, -1 on error. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for GridLicensableFeature.feature_state (_nvml.pyx:17188):
 * forwards the assignment to self._data.feature_state.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17190
 *     @feature_state.setter
 *     def feature_state(self, val):
 *         self._data.feature_state = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_state, __pyx_v_val) < (0)) __PYX_ERR(0, 17190, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17188
 *         return self._data.feature_state
 * 
 *     @feature_state.setter             # <<<<<<<<<<<<<<
 *     def feature_state(self, val):
 *         self._data.feature_state = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.feature_state.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17192
 *         self._data.feature_state = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def license_info(self):
 *         """~_numpy.int8: (array of length 128)."""
*/

/* Python wrapper */
/* Generated CPython-level getter wrapper for the `license_info` property:
 * casts `self` and delegates to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GridLicensableFeature.license_info (_nvml.pyx:17192):
 * returns self._data.license_info unchanged (no scalar special case,
 * unlike feature_code/feature_state — per the .pyx docstring this is a
 * fixed-length array field).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17195
 *     def license_info(self):
 *         """~_numpy.int8: (array of length 128)."""
 *         return self._data.license_info             # <<<<<<<<<<<<<<
 * 
 *     @license_info.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_license_info); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17195, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17192
 *         self._data.feature_state = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def license_info(self):
 *         """~_numpy.int8: (array of length 128)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.license_info.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17197
 *         return self._data.license_info
 * 
 *     @license_info.setter             # <<<<<<<<<<<<<<
 *     def license_info(self, val):
 *         self._data.license_info = val
*/

/* Python wrapper */
/* Generated CPython-level setter wrapper for the `license_info` property:
 * casts `self` and delegates; returns 0 on success, -1 on error. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for GridLicensableFeature.license_info (_nvml.pyx:17197):
 * forwards the assignment to self._data.license_info.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17199
 *     @license_info.setter
 *     def license_info(self, val):
 *         self._data.license_info = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_license_info, __pyx_v_val) < (0)) __PYX_ERR(0, 17199, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17197
 *         return self._data.license_info
 * 
 *     @license_info.setter             # <<<<<<<<<<<<<<
 *     def license_info(self, val):
 *         self._data.license_info = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.license_info.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17201
 *         self._data.license_info = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def product_name(self):
 *         """~_numpy.int8: (array of length 128)."""
*/

/* Python wrapper */
/* Generated CPython-level getter wrapper for the `product_name` property:
 * casts `self` and delegates to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GridLicensableFeature.product_name (_nvml.pyx:17201):
 * returns self._data.product_name unchanged (fixed-length array field,
 * per the .pyx docstring, so no scalar special case).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17204
 *     def product_name(self):
 *         """~_numpy.int8: (array of length 128)."""
 *         return self._data.product_name             # <<<<<<<<<<<<<<
 * 
 *     @product_name.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_product_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17204, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17201
 *         self._data.license_info = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def product_name(self):
 *         """~_numpy.int8: (array of length 128)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.product_name.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17206
 *         return self._data.product_name
 * 
 *     @product_name.setter             # <<<<<<<<<<<<<<
 *     def product_name(self, val):
 *         self._data.product_name = val
*/

/* Python wrapper */
/* Generated CPython-level setter wrapper for the `product_name` property:
 * casts `self` and delegates; returns 0 on success, -1 on error. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for GridLicensableFeature.product_name (_nvml.pyx:17206):
 * forwards the assignment to self._data.product_name.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17208
 *     @product_name.setter
 *     def product_name(self, val):
 *         self._data.product_name = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_product_name, __pyx_v_val) < (0)) __PYX_ERR(0, 17208, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17206
 *         return self._data.product_name
 * 
 *     @product_name.setter             # <<<<<<<<<<<<<<
 *     def product_name(self, val):
 *         self._data.product_name = val
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.product_name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17210
 *         self._data.product_name = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def feature_enabled(self):
 *         """Union[~_numpy.uint32, int]: """
*/

/* Python wrapper */
/* Generated CPython-level getter wrapper for the `feature_enabled` property:
 * casts `self` and delegates to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for GridLicensableFeature.feature_enabled.
 * Mirrors the Cython source: if `self._data.size == 1` it returns
 * `int(self._data.feature_enabled[0])` (a Python int scalar); otherwise it
 * returns the `feature_enabled` attribute of the backing array unchanged.
 * Returns a new reference, or NULL with a traceback recorded on error.
 * Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17213
 *     def feature_enabled(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.feature_enabled[0])
 *         return self._data.feature_enabled
*/
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Compare `size` against the cached int constant 1. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17213, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17214
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:
 *             return int(self._data.feature_enabled[0])             # <<<<<<<<<<<<<<
 *         return self._data.feature_enabled
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_enabled); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17214, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Index element 0, then coerce it with int(...). */
    __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17214, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17214, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_r = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17213
 *     def feature_enabled(self):
 *         """Union[~_numpy.uint32, int]: """
 *         if self._data.size == 1:             # <<<<<<<<<<<<<<
 *             return int(self._data.feature_enabled[0])
 *         return self._data.feature_enabled
*/
  }

  /* "cuda/bindings/_nvml.pyx":17215
 *         if self._data.size == 1:
 *             return int(self._data.feature_enabled[0])
 *         return self._data.feature_enabled             # <<<<<<<<<<<<<<
 * 
 *     @feature_enabled.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_enabled); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17210
 *         self._data.product_name = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def feature_enabled(self):
 *         """Union[~_numpy.uint32, int]: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.feature_enabled.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17217
 *         return self._data.feature_enabled
 * 
 *     @feature_enabled.setter             # <<<<<<<<<<<<<<
 *     def feature_enabled(self, val):
 *         self._data.feature_enabled = val
*/

/* Python wrapper */
/* Python-level wrapper for the feature_enabled setter: casts `self` and
 * delegates to the __set__ impl below. Returns 0/-1 per the descriptor
 * protocol. Generated by Cython — do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for GridLicensableFeature.feature_enabled.
 * Performs `self._data.feature_enabled = val` on the backing record object.
 * Returns 0 on success, -1 with a traceback recorded on failure.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17219
 *     @feature_enabled.setter
 *     def feature_enabled(self, val):
 *         self._data.feature_enabled = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_feature_enabled, __pyx_v_val) < (0)) __PYX_ERR(0, 17219, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17217
 *         return self._data.feature_enabled
 * 
 *     @feature_enabled.setter             # <<<<<<<<<<<<<<
 *     def feature_enabled(self, val):
 *         self._data.feature_enabled = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.feature_enabled.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17221
 *         self._data.feature_enabled = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def license_expiry(self):
 *         """grid_license_expiry_dtype: """
*/

/* Python wrapper */
/* Python-level wrapper for the license_expiry getter: casts `self` and
 * delegates to the __get__ impl below.
 * Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter impl for GridLicensableFeature.license_expiry.
 * Simply returns `self._data.license_expiry` (a new reference), or NULL with
 * a traceback on attribute-lookup failure. Unlike feature_enabled, no scalar
 * unwrapping is performed. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17224
 *     def license_expiry(self):
 *         """grid_license_expiry_dtype: """
 *         return self._data.license_expiry             # <<<<<<<<<<<<<<
 * 
 *     @license_expiry.setter
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_license_expiry); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17221
 *         self._data.feature_enabled = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def license_expiry(self):
 *         """grid_license_expiry_dtype: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.license_expiry.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17226
 *         return self._data.license_expiry
 * 
 *     @license_expiry.setter             # <<<<<<<<<<<<<<
 *     def license_expiry(self, val):
 *         self._data.license_expiry = val
*/

/* Python wrapper */
/* Python-level wrapper for the license_expiry setter: casts `self` and
 * delegates to the __set__ impl below. Returns 0/-1 per the descriptor
 * protocol. Generated by Cython — do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter impl for GridLicensableFeature.license_expiry.
 * Performs `self._data.license_expiry = val` on the backing record object.
 * Returns 0 on success, -1 with a traceback recorded on failure.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17228
 *     @license_expiry.setter
 *     def license_expiry(self, val):
 *         self._data.license_expiry = val             # <<<<<<<<<<<<<<
 * 
 *     def __getitem__(self, key):
*/
  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_license_expiry, __pyx_v_val) < (0)) __PYX_ERR(0, 17228, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17226
 *         return self._data.license_expiry
 * 
 *     @license_expiry.setter             # <<<<<<<<<<<<<<
 *     def license_expiry(self, val):
 *         self._data.license_expiry = val
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.license_expiry.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17230
 *         self._data.license_expiry = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

/* Python wrapper */
/* Python-level wrapper for GridLicensableFeature.__getitem__: casts `self`
 * and delegates to the impl below. Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_11__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_10__getitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_key));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GridLicensableFeature.__getitem__.
 * Two paths, mirroring the Cython source:
 *   1. Integer key: bounds-check against self._data.size (negative keys are
 *      normalized by adding size), then return
 *      GridLicensableFeature.from_data(self._data[key_:key_+1]) — a
 *      length-1 slice wrapped back into the extension type.
 *   2. Any other key (slice, mask, ...): index self._data directly; if the
 *      result is a numpy.recarray with dtype grid_licensable_feature_dtype,
 *      re-wrap it via from_data, otherwise return it as-is.
 * Returns a new reference, or NULL with a traceback on error.
 * Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_10__getitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_key) {
  Py_ssize_t __pyx_v_key_;
  Py_ssize_t __pyx_v_size;
  PyObject *__pyx_v_out = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "cuda/bindings/_nvml.pyx":17233
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  __pyx_t_1 = PyLong_Check(__pyx_v_key); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17234
 *         cdef ssize_t size
 *         if isinstance(key, int):
 *             key_ = key             # <<<<<<<<<<<<<<
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
*/
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17234, __pyx_L1_error)
    __pyx_v_key_ = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":17235
 *         if isinstance(key, int):
 *             key_ = key
 *             size = self._data.size             # <<<<<<<<<<<<<<
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
*/
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_data, __pyx_mstate_global->__pyx_n_u_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17235, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyLong_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17235, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_size = __pyx_t_2;

    /* "cuda/bindings/_nvml.pyx":17236
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    /* Short-circuit `or`: valid keys satisfy -size <= key_ < size. */
    __pyx_t_4 = (__pyx_v_key_ >= __pyx_v_size);
    if (!__pyx_t_4) {
    } else {
      __pyx_t_1 = __pyx_t_4;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_4 = (__pyx_v_key_ <= (-(__pyx_v_size + 1)));
    __pyx_t_1 = __pyx_t_4;
    __pyx_L5_bool_binop_done:;
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17237
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")             # <<<<<<<<<<<<<<
 *             if key_ < 0:
 *                 key_ += size
*/
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_index_is_out_of_bounds};
        __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_IndexError)), __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17237, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(0, 17237, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17236
 *             key_ = key
 *             size = self._data.size
 *             if key_ >= size or key_ <= -(size+1):             # <<<<<<<<<<<<<<
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
*/
    }

    /* "cuda/bindings/_nvml.pyx":17238
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])
*/
    __pyx_t_1 = (__pyx_v_key_ < 0);
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":17239
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:
 *                 key_ += size             # <<<<<<<<<<<<<<
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
*/
      __pyx_v_key_ = (__pyx_v_key_ + __pyx_v_size);

      /* "cuda/bindings/_nvml.pyx":17238
 *             if key_ >= size or key_ <= -(size+1):
 *                 raise IndexError("index is out of bounds")
 *             if key_ < 0:             # <<<<<<<<<<<<<<
 *                 key_ += size
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])
*/
    }

    /* "cuda/bindings/_nvml.pyx":17240
 *             if key_ < 0:
 *                 key_ += size
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])             # <<<<<<<<<<<<<<
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == grid_licensable_feature_dtype:
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
    __Pyx_INCREF(__pyx_t_5);
    /* Length-1 slice [key_:key_+1], then wrap via from_data. */
    __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_v_self->_data, __pyx_v_key_, (__pyx_v_key_ + 1), NULL, NULL, NULL, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17240, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_7};
      __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17240, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17233
 *         cdef ssize_t key_
 *         cdef ssize_t size
 *         if isinstance(key, int):             # <<<<<<<<<<<<<<
 *             key_ = key
 *             size = self._data.size
*/
  }

  /* "cuda/bindings/_nvml.pyx":17241
 *                 key_ += size
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])
 *         out = self._data[key]             # <<<<<<<<<<<<<<
 *         if isinstance(out, _numpy.recarray) and out.dtype == grid_licensable_feature_dtype:
 *             return GridLicensableFeature.from_data(out)
*/
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_self->_data, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_out = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":17242
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == grid_licensable_feature_dtype:             # <<<<<<<<<<<<<<
 *             return GridLicensableFeature.from_data(out)
 *         return out
*/
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = PyObject_IsInstance(__pyx_v_out, __pyx_t_7); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  if (__pyx_t_4) {
  } else {
    __pyx_t_1 = __pyx_t_4;
    goto __pyx_L9_bool_binop_done;
  }
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_out, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_grid_licensable_feature_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = PyObject_RichCompare(__pyx_t_7, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17242, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_1 = __pyx_t_4;
  __pyx_L9_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17243
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == grid_licensable_feature_dtype:
 *             return GridLicensableFeature.from_data(out)             # <<<<<<<<<<<<<<
 *         return out
 * 
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_t_6 = 0;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_out};
      __pyx_t_5 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_data, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17243, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
    }
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17242
 *             return GridLicensableFeature.from_data(self._data[key_:key_+1])
 *         out = self._data[key]
 *         if isinstance(out, _numpy.recarray) and out.dtype == grid_licensable_feature_dtype:             # <<<<<<<<<<<<<<
 *             return GridLicensableFeature.from_data(out)
 *         return out
*/
  }

  /* "cuda/bindings/_nvml.pyx":17244
 *         if isinstance(out, _numpy.recarray) and out.dtype == grid_licensable_feature_dtype:
 *             return GridLicensableFeature.from_data(out)
 *         return out             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_out);
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17230
 *         self._data.license_expiry = val
 * 
 *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
 *         cdef ssize_t key_
 *         cdef ssize_t size
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_out);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17246
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

/* Python wrapper */
/* Python-level wrapper for GridLicensableFeature.__setitem__: casts `self`
 * and delegates to the impl below. Returns 0/-1 per the mapping protocol.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of GridLicensableFeature.__setitem__.
 * Forwards the assignment straight to the backing object:
 * `self._data[key] = val`. No key normalization or bounds checks are done
 * here (unlike __getitem__); NumPy's own indexing rules apply. Returns 0 on
 * success, -1 with a traceback on failure.
 * Generated by Cython — do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_12__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":17247
 * 
 *     def __setitem__(self, key, val):
 *         self._data[key] = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  if (unlikely((PyObject_SetItem(__pyx_v_self->_data, __pyx_v_key, __pyx_v_val) < 0))) __PYX_ERR(0, 17247, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17246
 *         return out
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         self._data[key] = val
 * 
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17249
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicensableFeature instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the staticmethod GridLicensableFeature.from_data.
 * Supports both the fastcall (vectorcall) and tuple/dict calling conventions
 * depending on CYTHON_METH_FASTCALL. Unpacks exactly one argument ("data",
 * positional or keyword), then delegates to the impl. On an argument-count
 * or keyword error it raises and returns NULL with a traceback.
 * Generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_14from_data, "GridLicensableFeature.from_data(data)\n\nCreate an GridLicensableFeature instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a 1D array of dtype `grid_licensable_feature_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_15from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_14from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17249, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then parse keywords into values[]. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17249, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 17249, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 17249, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17249, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17249, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method GridLicensableFeature.from_data(data)
 * (generated by Cython from cuda/bindings/_nvml.pyx:17249).
 *
 * Validates that `data` is a 1-D NumPy ndarray of dtype
 * grid_licensable_feature_dtype, stores a numpy.recarray view of it on a
 * freshly allocated GridLicensableFeature, and returns the new instance.
 * Returns NULL with a Python exception set on failure (TypeError for a
 * non-ndarray argument, ValueError for wrong ndim or dtype). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_14from_data(PyObject *__pyx_v_data) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Scratch temporaries; owned references still held in them are released
   * on the __pyx_L1_error path at the bottom of the function. */
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":17256
 *             data (_numpy.ndarray): a 1D array of dtype `grid_licensable_feature_dtype` holding the data.
 *         """
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)             # <<<<<<<<<<<<<<
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 */
  /* Allocate the instance directly through the type's tp_new slot,
   * bypassing any Python-level __init__ (equivalent of calling __new__). */
  __pyx_t_1 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeature(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17256, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_1);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17257
 *         """
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  /* isinstance(data, numpy.ndarray): the numpy module and its `ndarray`
   * attribute are looked up from the module globals on every call. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 17257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_4 = (!__pyx_t_3);
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":17258
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")             # <<<<<<<<<<<<<<
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_argument_must_be_a_NumPy_nd};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17258, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17258, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17257
 *         """
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 *         if not isinstance(data, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 */
  }

  /* "cuda/bindings/_nvml.pyx":17259
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != grid_licensable_feature_dtype:
 */
  /* data.ndim != 1, compared directly against the cached int constant 1. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__Pyx_PyLong_BoolNeObjC(__pyx_t_2, __pyx_mstate_global->__pyx_int_1, 1, 0)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17259, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":17260
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")             # <<<<<<<<<<<<<<
 *         if data.dtype != grid_licensable_feature_dtype:
 *             raise ValueError("data array must be of dtype grid_licensable_feature_dtype")
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_1D};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17260, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17260, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17259
 *         if not isinstance(data, _numpy.ndarray):
 *             raise TypeError("data argument must be a NumPy ndarray")
 *         if data.ndim != 1:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != grid_licensable_feature_dtype:
 */
  }

  /* "cuda/bindings/_nvml.pyx":17261
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != grid_licensable_feature_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype grid_licensable_feature_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  /* data.dtype != grid_licensable_feature_dtype via rich comparison; dtype
   * comparison must go through PyObject_RichCompare, not pointer equality. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_grid_licensable_feature_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_6 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17261, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17261, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(__pyx_t_4)) {

    /* "cuda/bindings/_nvml.pyx":17262
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != grid_licensable_feature_dtype:
 *             raise ValueError("data array must be of dtype grid_licensable_feature_dtype")             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
    __pyx_t_1 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_data_array_must_be_of_dtype_grid};
      __pyx_t_6 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
      if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17262, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
    }
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 17262, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17261
 *         if data.ndim != 1:
 *             raise ValueError("data array must be 1D")
 *         if data.dtype != grid_licensable_feature_dtype:             # <<<<<<<<<<<<<<
 *             raise ValueError("data array must be of dtype grid_licensable_feature_dtype")
 *         obj._data = data.view(_numpy.recarray)
 */
  }

  /* "cuda/bindings/_nvml.pyx":17263
 *         if data.dtype != grid_licensable_feature_dtype:
 *             raise ValueError("data array must be of dtype grid_licensable_feature_dtype")
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  __pyx_t_1 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17263, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17263, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_7};
    __pyx_t_6 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
  }
  /* Move the new recarray view into obj->_data, dropping the previous
   * value (the reference held in __pyx_t_6 is transferred, not copied). */
  __Pyx_GIVEREF(__pyx_t_6);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_6;
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17265
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17249
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicensableFeature instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release any live temporaries and record the traceback. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17267
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GridLicensableFeature instance wrapping the given pointer.
 */

/* CPython-facing wrapper for GridLicensableFeature.from_ptr.  Registered
 * with METH_FASTCALL|METH_KEYWORDS through the PyMethodDef below.  It
 * collects the (ptr, size=1, readonly=False) arguments from the fastcall
 * positional array and/or the keyword values, converts them to C types
 * (intptr_t, size_t, int), and delegates to the implementation function
 * __pyx_pf_..._16from_ptr.  Returns NULL with an exception set on bad
 * argument counts, unknown keywords, or failed conversions. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_16from_ptr, "GridLicensableFeature.from_ptr(intptr_t ptr, size_t size=1, bool readonly=False)\n\nCreate an GridLicensableFeature instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    size (int): number of structs, default=1.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_17from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_16from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_17from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  size_t __pyx_v_size;
  int __pyx_v_readonly;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0..2] hold the Python objects for ptr, size, readonly; slots
   * left NULL fall back to the declared defaults during conversion. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size,&__pyx_mstate_global->__pyx_n_u_readonly,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17267, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: gather positionals (fallthrough
       * switch), then merge keyword arguments into the remaining slots. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17267, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17267, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17267, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 17267, __pyx_L3_error)
      /* `ptr` (slot 0) is the only required argument; verify it arrived. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 17267, __pyx_L3_error) }
      }
    } else {
      /* Purely positional call: 1 to 3 arguments accepted. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17267, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17267, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17267, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* NOTE(review): `ptr` is converted with PyLong_AsSsize_t, i.e. via
     * Py_ssize_t; this assumes Py_ssize_t and intptr_t have the same
     * width, which Cython presumably guarantees on supported platforms —
     * confirm if porting to an unusual ABI. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17268, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17268, __pyx_L3_error)
    } else {
      __pyx_v_size = ((size_t)1);
    }
    if (values[2]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17268, __pyx_L3_error)
    } else {

      /* "cuda/bindings/_nvml.pyx":17268
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):             # <<<<<<<<<<<<<<
 *         """Create an GridLicensableFeature instance wrapping the given pointer.
 * 
 */
      __pyx_v_readonly = ((int)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 17267, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: release the argument references collected in
   * values[] so far, then report the traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_16from_ptr(__pyx_v_ptr, __pyx_v_size, __pyx_v_readonly);

  /* "cuda/bindings/_nvml.pyx":17267
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GridLicensableFeature instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.from_ptr(ptr, size=1,
 * readonly=False) (generated from cuda/bindings/_nvml.pyx:17268).
 *
 * Rejects a NULL pointer with ValueError, allocates a new
 * GridLicensableFeature, wraps the raw memory at `ptr` (spanning
 * sizeof(nvmlGridLicensableFeature_t) * size bytes) in a memoryview that
 * is read-only iff `readonly`, builds a NumPy ndarray of dtype
 * grid_licensable_feature_dtype over that buffer, and stores a recarray
 * view of it in obj._data.  The memory is NOT copied: the caller must
 * keep the allocation behind `ptr` alive for as long as the returned
 * object (or any view of its data) is in use. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_16from_ptr(intptr_t __pyx_v_ptr, size_t __pyx_v_size, int __pyx_v_readonly) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_obj = 0;
  PyObject *__pyx_v_flag = 0;
  PyObject *__pyx_v_buf = 0;
  PyObject *__pyx_v_data = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Scratch temporaries; owned references left in them are released on
   * the __pyx_L1_error path at the bottom of the function. */
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17276
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17277
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17277, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17277, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17276
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 */
  }

  /* "cuda/bindings/_nvml.pyx":17278
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)             # <<<<<<<<<<<<<<
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 */
  /* Allocate the instance directly through tp_new, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeature(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17278, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17279
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE             # <<<<<<<<<<<<<<
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlGridLicensableFeature_t) * size, flag)
 */
  /* flag is boxed into a Python int here and unboxed again below; that
   * round-trip is how the .pyx source declared it (untyped cdef). */
  if (__pyx_v_readonly) {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_READ); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17279, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  } else {
    __pyx_t_3 = __Pyx_PyLong_From___pyx_anon_enum(PyBUF_WRITE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17279, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __pyx_t_3 = 0;
  }
  __pyx_v_flag = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17281
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlGridLicensableFeature_t) * size, flag)             # <<<<<<<<<<<<<<
 *         data = _numpy.ndarray(size, buffer=buf, dtype=grid_licensable_feature_dtype)
 *         obj._data = data.view(_numpy.recarray)
 */
  __pyx_t_5 = __Pyx_PyLong_As_int(__pyx_v_flag); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17281, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":17280
 *         cdef GridLicensableFeature obj = GridLicensableFeature.__new__(GridLicensableFeature)
 *         cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(             # <<<<<<<<<<<<<<
 *             <char*>ptr, sizeof(nvmlGridLicensableFeature_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=grid_licensable_feature_dtype)
 */
  /* NOTE(review): the byte count sizeof(nvmlGridLicensableFeature_t) *
   * size is an unchecked size_t multiplication; an enormous `size` would
   * wrap.  This mirrors the .pyx source — confirm upstream if it matters. */
  __pyx_t_2 = PyMemoryView_FromMemory(((char *)__pyx_v_ptr), ((sizeof(nvmlGridLicensableFeature_t)) * __pyx_v_size), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_buf = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17282
 *         cdef object buf = cpython.memoryview.PyMemoryView_FromMemory(
 *             <char*>ptr, sizeof(nvmlGridLicensableFeature_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=grid_licensable_feature_dtype)             # <<<<<<<<<<<<<<
 *         obj._data = data.view(_numpy.recarray)
 * 
 */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17282, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17282, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17282, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_grid_licensable_feature_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17282, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_4 = 1;
  #if CYTHON_UNPACK_METHODS
  /* If the callable is a bound method, split it into (self, func) so the
   * vectorcall below can pass self as the first positional argument. */
  if (unlikely(PyMethod_Check(__pyx_t_7))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_7, __pyx__function);
    __pyx_t_4 = 0;
  }
  #endif
  {
    /* ndarray(size, buffer=buf, dtype=grid_licensable_feature_dtype)
     * built through the vectorcall keyword-builder helpers. */
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 2 : 0)] = {__pyx_t_3, __pyx_t_6};
    __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17282, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_buffer, __pyx_v_buf, __pyx_t_9, __pyx_callargs+2, 0) < (0)) __PYX_ERR(0, 17282, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_dtype, __pyx_t_8, __pyx_t_9, __pyx_callargs+2, 1) < (0)) __PYX_ERR(0, 17282, __pyx_L1_error)
    __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_t_7, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17282, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17283
 *             <char*>ptr, sizeof(nvmlGridLicensableFeature_t) * size, flag)
 *         data = _numpy.ndarray(size, buffer=buf, dtype=grid_licensable_feature_dtype)
 *         obj._data = data.view(_numpy.recarray)             # <<<<<<<<<<<<<<
 * 
 *         return obj
 */
  __pyx_t_7 = __pyx_v_data;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17283, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_recarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17283, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_view, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17283, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Move the recarray view into obj->_data, dropping the old value. */
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_data);
  __Pyx_DECREF(__pyx_v_obj->_data);
  __pyx_v_obj->_data = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17285
 *         obj._data = data.view(_numpy.recarray)
 * 
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17267
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GridLicensableFeature instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop any live temporaries and record the traceback. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_flag);
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_XDECREF(__pyx_v_data);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17131
 *     """
 *     cdef:
 *         readonly object _data             # <<<<<<<<<<<<<<
 * 
 * 
*/

/* Python wrapper */
/* Getter wrapper for the readonly `_data` cdef attribute (declared at
 * cuda/bindings/_nvml.pyx:17131); simply casts self and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * getter; this line can only compile because __Pyx_KwValues_VARARGS
   * presumably expands to something that discards its arguments —
   * confirm against the macro definition earlier in this generated file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_5_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter body for GridLicensableFeature._data: returns a new (incref'd)
 * reference to the stored self->_data object.  Cannot fail. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_5_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_data);
  __pyx_r = __pyx_v_self->_data;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

/* Python wrapper */
/* Wrapper for the pickling helper GridLicensableFeature.__reduce_cython__.
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation function __pyx_pf_..._18__reduce_cython__. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_18__reduce_cython__, "GridLicensableFeature.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_19__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_18__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_19__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: refuse both
   * positionals and keywords before delegating. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_18__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__reduce_cython__ (pickle protocol).
 * Builds state = (self._data,), appends the instance __dict__ when it exists and
 * is non-empty, and returns a reduction tuple referencing the module-level
 * __pyx_unpickle_GridLicensableFeature helper with checksum 0xa75e18a (175497610).
 * Two shapes are produced:
 *   use_setstate true:  (helper, (type(self), 0xa75e18a, None), state)
 *   use_setstate false: (helper, (type(self), 0xa75e18a, state))          */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_18__reduce_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 *     cdef object _dict
 *     cdef bint use_setstate
 *     state = (self._data,)             # <<<<<<<<<<<<<<
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
*/
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->_data);
  __Pyx_GIVEREF(__pyx_v_self->_data);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->_data) != (0)) __PYX_ERR(1, 5, __pyx_L1_error);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "(tree fragment)":6
 *     cdef bint use_setstate
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 *     if _dict is not None and _dict:
 *         state += (_dict,)
*/
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
  /* Short-circuit `and`: only truth-test _dict when it is not None. */
  __pyx_t_3 = (__pyx_v__dict != Py_None);
  if (__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v__dict); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 7, __pyx_L1_error)
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":8
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:
 *         state += (_dict,)             # <<<<<<<<<<<<<<
 *         use_setstate = True
 *     else:
*/
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict) != (0)) __PYX_ERR(1, 8, __pyx_L1_error);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;

    /* "(tree fragment)":9
 *     if _dict is not None and _dict:
 *         state += (_dict,)
 *         use_setstate = True             # <<<<<<<<<<<<<<
 *     else:
 *         use_setstate = ('self._data is not None',)
*/
    __pyx_v_use_setstate = 1;

    /* "(tree fragment)":7
 *     state = (self._data,)
 *     _dict = getattr(self, '__dict__', None)
 *     if _dict is not None and _dict:             # <<<<<<<<<<<<<<
 *         state += (_dict,)
 *         use_setstate = True
*/
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, None), state
*/
  /*else*/ {
    /* NOTE(review): this truth-tests a pre-built constant tuple from the module
     * state; a non-empty tuple is always truthy, so this branch appears to set
     * use_setstate unconditionally — confirm against the generating .pyx. */
    __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_mstate_global->__pyx_tuple[2]); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 11, __pyx_L1_error)
    __pyx_v_use_setstate = __pyx_t_2;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, None), state
 *     else:
*/
  if (__pyx_v_use_setstate) {

    /* "(tree fragment)":13
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, None), state             # <<<<<<<<<<<<<<
 *     else:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, state)
*/
    /* 3-tuple form: state is delivered separately so __setstate__ is invoked. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GridLicensableFea); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 13, __pyx_L1_error);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "(tree fragment)":12
 *     else:
 *         use_setstate = ('self._data is not None',)
 *     if use_setstate:             # <<<<<<<<<<<<<<
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, None), state
 *     else:
*/
  }

  /* "(tree fragment)":15
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, None), state
 *     else:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_GridLicensableFeature__set_state(self, __pyx_state)
*/
  /*else*/ {
    /* 2-tuple form: state is embedded in the unpickle helper's argument tuple. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GridLicensableFea); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_175497610);
    __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_175497610);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_mstate_global->__pyx_int_175497610) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_1);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1) != (0)) __PYX_ERR(1, 15, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_GridLicensableFeature__set_state(self, __pyx_state)
*/

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry for
 * GridLicensableFeature.__setstate_cython__ (pickle support). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_20__setstate_cython__, "GridLicensableFeature.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_21__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_20__setstate_cython__};
/* Python wrapper for GridLicensableFeature.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one argument (positional or keyword "__pyx_state") under either
 * the METH_FASTCALL or tuple-args convention, then forwards to the implementation.
 *
 * Fix: the keyword-count error check used `unlikely(__pyx_kwds_len) < 0`.
 * Since unlikely(x) expands to __builtin_expect(!!(x), 0), its value is 0 or 1
 * and can never be negative, making the check dead code. Parenthesization is
 * corrected to `unlikely(__pyx_kwds_len < 0)`, matching the sibling wrapper
 * for __reduce_cython__ above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_21__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected: check the count itself for a negative (error) value. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals, then merge keyword values. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 16, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 16, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 16, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_20__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeature.__setstate_cython__.
 * Validates that __pyx_state is a tuple (and not None), then delegates to the
 * module-level __pyx_unpickle_GridLicensableFeature__set_state helper.
 * Returns None on success, NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21GridLicensableFeature_20__setstate_cython__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_GridLicensableFeature__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
*/
  __pyx_t_1 = __pyx_v___pyx_state;
  __Pyx_INCREF(__pyx_t_1);
  /* Type-check: the set_state helper requires a tuple that is not None. */
  if (!(likely(PyTuple_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_t_1))) __PYX_ERR(1, 17, __pyx_L1_error)
  if (unlikely(__pyx_t_1 == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
    __PYX_ERR(1, 17, __pyx_L1_error)
  }
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GridLicensableFeature__set_state(__pyx_v_self, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_GridLicensableFeature__set_state(self, __pyx_state)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeature.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17288
 * 
 * 
 * cdef _get_unit_fan_speeds_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitFanSpeeds_t pod = nvmlUnitFanSpeeds_t()
 *     return _numpy.dtype({
*/

/* Builds a numpy structured dtype describing nvmlUnitFanSpeeds_t:
 * names ['fans', 'count'], formats [unit_fan_info_dtype, numpy.uint32],
 * byte offsets computed from a stack instance's field addresses, and
 * itemsize = sizeof(nvmlUnitFanSpeeds_t). Returns the dtype object, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_unit_fan_speeds_dtype_offsets(void) {
  nvmlUnitFanSpeeds_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlUnitFanSpeeds_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_unit_fan_speeds_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":17289
 * 
 * cdef _get_unit_fan_speeds_dtype_offsets():
 *     cdef nvmlUnitFanSpeeds_t pod = nvmlUnitFanSpeeds_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['fans', 'count'],
*/
  /* NOTE(review): __pyx_t_1 has no visible assignment before this copy, so
   * pod's field *values* are indeterminate here. Only pod's field *addresses*
   * are used below to compute offsets, so the values are never read — confirm
   * against Cython's C++ default-construction codegen for POD structs. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":17290
 * cdef _get_unit_fan_speeds_dtype_offsets():
 *     cdef nvmlUnitFanSpeeds_t pod = nvmlUnitFanSpeeds_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['fans', 'count'],
 *         'formats': [unit_fan_info_dtype, _numpy.uint32],
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up _numpy.dtype from module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17290, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17290, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":17291
 *     cdef nvmlUnitFanSpeeds_t pod = nvmlUnitFanSpeeds_t()
 *     return _numpy.dtype({
 *         'names': ['fans', 'count'],             # <<<<<<<<<<<<<<
 *         'formats': [unit_fan_info_dtype, _numpy.uint32],
 *         'offsets': [
*/
  /* Spec dict with 4 keys: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_fans);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_fans);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_fans) != (0)) __PYX_ERR(0, 17291, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_count) != (0)) __PYX_ERR(0, 17291, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 17291, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17292
 *     return _numpy.dtype({
 *         'names': ['fans', 'count'],
 *         'formats': [unit_fan_info_dtype, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.fans)) - (<intptr_t>&pod),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_unit_fan_info_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 17292, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17292, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_7) < (0)) __PYX_ERR(0, 17291, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":17294
 *         'formats': [unit_fan_info_dtype, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.fans)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *         ],
*/
  /* Field offsets via address arithmetic on the stack instance. */
  __pyx_t_7 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.fans)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);

  /* "cuda/bindings/_nvml.pyx":17295
 *         'offsets': [
 *             (<intptr_t>&(pod.fans)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlUnitFanSpeeds_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.count)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":17293
 *         'names': ['fans', 'count'],
 *         'formats': [unit_fan_info_dtype, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.fans)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17293, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 17293, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17293, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_6) < (0)) __PYX_ERR(0, 17291, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17297
 *             (<intptr_t>&(pod.count)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlUnitFanSpeeds_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(nvmlUnitFanSpeeds_t))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6) < (0)) __PYX_ERR(0, 17291, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Call _numpy.dtype(spec); bound-method unpacking is an optimization only. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17290, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17288
 * 
 * 
 * cdef _get_unit_fan_speeds_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitFanSpeeds_t pod = nvmlUnitFanSpeeds_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_unit_fan_speeds_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17314
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlUnitFanSpeeds_t *>calloc(1, sizeof(nvmlUnitFanSpeeds_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for UnitFanSpeeds.__init__ (tuple-args convention).
 * Rejects all positional and keyword arguments, then forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanSpeeds.__init__.
 * Allocates a zeroed nvmlUnitFanSpeeds_t with calloc and records ownership
 * (_owner=None, _owned=True, _readonly=False). Raises MemoryError if the
 * allocation fails. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":17315
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlUnitFanSpeeds_t *>calloc(1, sizeof(nvmlUnitFanSpeeds_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating UnitFanSpeeds")
*/
  /* calloc zero-initializes the struct. */
  __pyx_v_self->_ptr = ((nvmlUnitFanSpeeds_t *)calloc(1, (sizeof(nvmlUnitFanSpeeds_t))));

  /* "cuda/bindings/_nvml.pyx":17316
 *     def __init__(self):
 *         self._ptr = <nvmlUnitFanSpeeds_t *>calloc(1, sizeof(nvmlUnitFanSpeeds_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating UnitFanSpeeds")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17317
 *         self._ptr = <nvmlUnitFanSpeeds_t *>calloc(1, sizeof(nvmlUnitFanSpeeds_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating UnitFanSpeeds")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating UnitFanSpeeds"). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17317, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_UnitFanSpeeds};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17317, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17317, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17316
 *     def __init__(self):
 *         self._ptr = <nvmlUnitFanSpeeds_t *>calloc(1, sizeof(nvmlUnitFanSpeeds_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating UnitFanSpeeds")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":17318
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating UnitFanSpeeds")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* No external owner: this object frees _ptr itself (see __dealloc__). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":17319
 *             raise MemoryError("Error allocating UnitFanSpeeds")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":17320
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":17314
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlUnitFanSpeeds_t *>calloc(1, sizeof(nvmlUnitFanSpeeds_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17322
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlUnitFanSpeeds_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for UnitFanSpeeds.__dealloc__ — forwards to the implementation.
 * NOTE(review): the __Pyx_KwValues_VARARGS call below names __pyx_args and
 * __pyx_nargs, which are not declared in this function; presumably the macro
 * expands without evaluating its arguments (e.g. to NULL) — confirm against
 * the macro definition earlier in this generated file. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Impl of UnitFanSpeeds.__dealloc__ (from _nvml.pyx:17322-17327): if this
 * wrapper owns its nvmlUnitFanSpeeds_t buffer (_owned) and the pointer is
 * non-NULL, it NULLs self._ptr first and then free()s the saved copy —
 * the NULL-before-free ordering guards against double-free on re-entry. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  nvmlUnitFanSpeeds_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlUnitFanSpeeds_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":17324
 *     def __dealloc__(self):
 *         cdef nvmlUnitFanSpeeds_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Cython short-circuit codegen for `self._owned and self._ptr != NULL`:
   * the empty then-branch falls through to evaluate the second operand. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17325
 *         cdef nvmlUnitFanSpeeds_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":17326
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":17327
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17324
 *     def __dealloc__(self):
 *         cdef nvmlUnitFanSpeeds_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":17322
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlUnitFanSpeeds_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":17329
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.UnitFanSpeeds object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython-facing tp_repr trampoline for UnitFanSpeeds.__repr__: casts self to
 * the concrete struct and forwards to the impl; returns a new unicode
 * reference, or NULL on error (propagated from the impl). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of UnitFanSpeeds.__repr__ (f"<{__name__}.UnitFanSpeeds object at
 * {hex(id(self))}>"): looks up the module's __name__, formats hex(id(self)),
 * and joins the five f-string pieces with __Pyx_PyUnicode_Join. Returns a new
 * unicode reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":17330
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.UnitFanSpeeds object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 := format(__name__, ''); t_1 := str(hex(id(self))) */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<" + name + ".UnitFanSpeeds object at " + hexaddr + ">"; the length
   * hint (2 + 25 + dynamic parts) and max-char mask presize the result. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_UnitFanSpeeds_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17329
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.UnitFanSpeeds object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17332
 *         return f"<{__name__}.UnitFanSpeeds object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter trampoline for the `ptr` property: casts self and forwards to the
 * impl; returns a new int reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `ptr` property getter: returns the raw nvmlUnitFanSpeeds_t*
 * address as a Python int (via intptr_t -> PyLong_FromSsize_t; intptr_t and
 * Py_ssize_t are the same width on supported platforms). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17335
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17332
 *         return f"<{__name__}.UnitFanSpeeds object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17337
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* cdef-level fast path equivalent of the `ptr` property: returns the raw
 * struct pointer as intptr_t with no Python-object boxing. Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13UnitFanSpeeds__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":17338
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17337
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17340
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int trampoline for UnitFanSpeeds.__int__: casts self and forwards to the
 * impl; returns a new int reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of UnitFanSpeeds.__int__: int(obj) yields the raw struct pointer
 * address, identical in value to the `ptr` property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":17341
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17340
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17343
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef UnitFanSpeeds other_
 *         if not isinstance(other, UnitFanSpeeds):
*/

/* Python wrapper */
/* Rich-compare (==) trampoline for UnitFanSpeeds.__eq__: casts self and
 * forwards both operands to the impl; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of UnitFanSpeeds.__eq__ (_nvml.pyx:17343-17348): returns False when
 * `other` is not a UnitFanSpeeds; otherwise compares the two wrapped structs
 * byte-for-byte with memcmp(sizeof(nvmlUnitFanSpeeds_t)).
 * NOTE(review): neither _ptr is NULL-checked before memcmp — comparing an
 * instance whose pointer was freed/never set would dereference NULL; the fix
 * belongs in the generating .pyx, not this generated file. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":17345
 *     def __eq__(self, other):
 *         cdef UnitFanSpeeds other_
 *         if not isinstance(other, UnitFanSpeeds):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17346
 *         cdef UnitFanSpeeds other_
 *         if not isinstance(other, UnitFanSpeeds):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitFanSpeeds_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17345
 *     def __eq__(self, other):
 *         cdef UnitFanSpeeds other_
 *         if not isinstance(other, UnitFanSpeeds):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":17347
 *         if not isinstance(other, UnitFanSpeeds):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitFanSpeeds_t)) == 0)
 * 
 */
  /* Typed re-check for the cdef assignment (accepts None per Cython rules). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds))))) __PYX_ERR(0, 17347, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":17348
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitFanSpeeds_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlUnitFanSpeeds_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17343
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef UnitFanSpeeds other_
 *         if not isinstance(other, UnitFanSpeeds):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17350
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitFanSpeeds_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
*/

/* Python wrapper */
/* mp_ass_subscript trampoline for UnitFanSpeeds.__setitem__: casts self and
 * forwards key/value to the impl; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of UnitFanSpeeds.__setitem__ (_nvml.pyx:17350-17360). Two paths:
 *   self[0] = ndarray  -> malloc a fresh nvmlUnitFanSpeeds_t, memcpy the
 *       array's buffer (val.ctypes.data) into it, mark self as owner, and set
 *       _readonly from the array's writeable flag;
 *   anything else      -> setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray path overwrites self._ptr without freeing a
 * previously owned buffer (and memcpy does not check the array's byte size) —
 * presumably callers pass a fresh, correctly-sized array; any fix belongs in
 * the generating .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":17351
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: `key == 0` first, then the (lazy) numpy-ndarray check. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17351, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17351, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17351, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 17351, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17352
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 */
    __pyx_v_self->_ptr = ((nvmlUnitFanSpeeds_t *)malloc((sizeof(nvmlUnitFanSpeeds_t))));

    /* "cuda/bindings/_nvml.pyx":17353
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitFanSpeeds_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17354
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitFanSpeeds")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitFanSpeeds_t))
 *             self._owner = None
 */
      /* MemoryError is looked up by name and called via the vectorcall
       * fast path; PyMethod unpacking shifts the args when bound. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17354, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_UnitFanSpeeds};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17354, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 17354, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17353
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitFanSpeeds_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17355
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitFanSpeeds_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data -> integer address of the ndarray buffer; copy exactly
     * sizeof(nvmlUnitFanSpeeds_t) bytes into the fresh allocation. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17355, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17355, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17355, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlUnitFanSpeeds_t))));

    /* "cuda/bindings/_nvml.pyx":17356
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitFanSpeeds_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17357
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlUnitFanSpeeds_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17358
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17358, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17358, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17358, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":17351
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":17360
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 17360, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":17350
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlUnitFanSpeeds_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17362
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def fans(self):
 *         """UnitFanInfo: """
*/

/* Python wrapper */
/* Getter trampoline for the `fans` property: casts self and forwards to the
 * impl; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl of the `fans` property getter: returns a UnitFanInfo view created by
 * UnitFanInfo.from_ptr(&self._ptr[0].fans, 24, self._readonly) — a zero-copy
 * view over the embedded fans array (24 is the generated element count),
 * inheriting this instance's read-only flag. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17365
 *     def fans(self):
 *         """UnitFanInfo: """
 *         return UnitFanInfo.from_ptr(<intptr_t>&(self._ptr[0].fans), 24, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @fans.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Vectorcall UnitFanInfo.from_ptr(address, 24, readonly). */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).fans))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_24, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17365, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17362
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def fans(self):
 *         """UnitFanInfo: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.fans.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17367
 *         return UnitFanInfo.from_ptr(<intptr_t>&(self._ptr[0].fans), 24, self._readonly)
 * 
 *     @fans.setter             # <<<<<<<<<<<<<<
 *     def fans(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter trampoline for the `fans` property: casts self and forwards the new
 * value to the impl; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `UnitFanSpeeds.fans` setter (generated from
 * _nvml.pyx lines 17367-17374). Behavior, per the embedded Cython source:
 *   1. Raise ValueError if this instance is read-only.
 *   2. Type-check `val` as a UnitFanInfo instance (None also passes the
 *      TypeTest here; downstream len()/memcpy would then fail).
 *   3. Raise ValueError unless len(val) == 24.
 *   4. memcpy 24 nvmlUnitFanInfo_t elements from val's backing pointer
 *      into self._ptr->fans.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17369
 *     @fans.setter
 *     def fans(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         cdef UnitFanInfo val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17370
 *     def fans(self, val):
 *         if self._readonly:
 *             raise ValueError("This UnitFanSpeeds instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef UnitFanInfo val_ = val
 *         if len(val) != 24:
 */
    /* Build and raise ValueError via the vectorcall fast path; the interned
     * message string lives in the module-state string table. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_UnitFanSpeeds_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17370, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17370, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17369
 *     @fans.setter
 *     def fans(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         cdef UnitFanInfo val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17371
 *         if self._readonly:
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         cdef UnitFanInfo val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 24:
 *             raise ValueError(f"Expected length 24 for field fans, got {len(val)}")
 */
  /* Typed assignment: TypeError (via __Pyx_TypeTest) unless val is a
   * UnitFanInfo instance or None. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo))))) __PYX_ERR(0, 17371, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17372
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         cdef UnitFanInfo val_ = val
 *         if len(val) != 24:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 24 for field fans, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].fans), <void *>(val_._get_ptr()), sizeof(nvmlUnitFanInfo_t) * 24)
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17372, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 24);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":17373
 *         cdef UnitFanInfo val_ = val
 *         if len(val) != 24:
 *             raise ValueError(f"Expected length 24 for field fans, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].fans), <void *>(val_._get_ptr()), sizeof(nvmlUnitFanInfo_t) * 24)
 * 
 */
    /* f-string expansion: format len(val) as a decimal unicode string and
     * concatenate it onto the constant message prefix before raising. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17373, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_24_for_field_fan, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17373, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17373, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17373, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17372
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         cdef UnitFanInfo val_ = val
 *         if len(val) != 24:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 24 for field fans, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].fans), <void *>(val_._get_ptr()), sizeof(nvmlUnitFanInfo_t) * 24)
 */
  }

  /* "cuda/bindings/_nvml.pyx":17374
 *         if len(val) != 24:
 *             raise ValueError(f"Expected length 24 for field fans, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].fans), <void *>(val_._get_ptr()), sizeof(nvmlUnitFanInfo_t) * 24)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* _get_ptr() is a cdef vtable method returning the source address as an
   * intptr_t; the bulk copy overwrites the whole 24-element fans array. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17374, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).fans)), ((void *)__pyx_t_8), ((sizeof(nvmlUnitFanInfo_t)) * 24)));

  /* "cuda/bindings/_nvml.pyx":17367
 *         return UnitFanInfo.from_ptr(<intptr_t>&(self._ptr[0].fans), 24, self._readonly)
 * 
 *     @fans.setter             # <<<<<<<<<<<<<<
 *     def fans(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.fans.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17376
 *         memcpy(<void *>&(self._ptr[0].fans), <void *>(val_._get_ptr()), sizeof(nvmlUnitFanInfo_t) * 24)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-level wrapper for the `UnitFanSpeeds.count` property getter.
 * Casts self to the extension-type struct and delegates to the impl.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `UnitFanSpeeds.count` getter (from _nvml.pyx
 * line 17379): reads the `count` member of the wrapped nvml struct and
 * boxes it as a Python int. Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17379
 *     def count(self):
 *         """int: """
 *         return self._ptr[0].count             # <<<<<<<<<<<<<<
 * 
 *     @count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* `count` is an unsigned int in the C struct; conversion cannot lose
   * range but may still fail on allocation, hence the error check. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17376
 *         memcpy(<void *>&(self._ptr[0].fans), <void *>(val_._get_ptr()), sizeof(nvmlUnitFanInfo_t) * 24)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17381
 *         return self._ptr[0].count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-level wrapper for the `UnitFanSpeeds.count` property setter.
 * Casts self to the extension-type struct and delegates to the impl.
 * Returns 0 on success, -1 on error (descriptor `__set__` convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `UnitFanSpeeds.count` setter (from _nvml.pyx
 * lines 17381-17385): raises ValueError if the instance is read-only,
 * otherwise converts `val` to unsigned int (OverflowError/TypeError on
 * bad input, raised by the conversion helper) and stores it into the
 * wrapped struct. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17383
 *     @count.setter
 *     def count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         self._ptr[0].count = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17384
 *     def count(self, val):
 *         if self._readonly:
 *             raise ValueError("This UnitFanSpeeds instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].count = val
 * 
 */
    /* Raise ValueError via the vectorcall fast path with the interned
     * read-only message from the module-state string table. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_UnitFanSpeeds_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17384, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17384, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17383
 *     @count.setter
 *     def count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         self._ptr[0].count = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17385
 *         if self._readonly:
 *             raise ValueError("This UnitFanSpeeds instance is read-only")
 *         self._ptr[0].count = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates
   * a genuine -1 result from a conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17385, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).count = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17381
 *         return self._ptr[0].count
 * 
 *     @count.setter             # <<<<<<<<<<<<<<
 *     def count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17387
 *         self._ptr[0].count = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitFanSpeeds instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * `UnitFanSpeeds.from_data(data)`: parses the single positional-or-keyword
 * argument `data` (no default, so exactly one argument is required) and
 * delegates to the impl. The PyDoc_STRVAR/PyMethodDef pair below registers
 * the method and its docstring on the type. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_12from_data, "UnitFanSpeeds.from_data(data)\n\nCreate an UnitFanSpeeds instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `unit_fan_speeds_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17387, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args, then merge keywords, then
       * verify every required slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17387, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 17387, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 17387, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17387, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17387, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `UnitFanSpeeds.from_data` (from _nvml.pyx line 17394):
 * looks up the module-level `unit_fan_speeds_dtype` global and forwards to
 * the shared __from_data helper together with the dtype name and the
 * UnitFanSpeeds type object. Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":17394
 *             data (_numpy.ndarray): a single-element array of dtype `unit_fan_speeds_dtype` holding the data.
 *         """
 *         return __from_data(data, "unit_fan_speeds_dtype", unit_fan_speeds_dtype, UnitFanSpeeds)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* The dtype is resolved at call time from module globals, so a user
   * rebinding `unit_fan_speeds_dtype` is honored here. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_unit_fan_speeds_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_unit_fan_speeds_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17387
 *         self._ptr[0].count = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitFanSpeeds instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17396
 *         return __from_data(data, "unit_fan_speeds_dtype", unit_fan_speeds_dtype, UnitFanSpeeds)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitFanSpeeds instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_14from_ptr, "UnitFanSpeeds.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an UnitFanSpeeds instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17396, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17396, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17396, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17396, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 17396, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":17397
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an UnitFanSpeeds instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 17396, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17396, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17396, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17396, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17397, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17397, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 17396, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":17396
 *         return __from_data(data, "unit_fan_speeds_dtype", unit_fan_speeds_dtype, UnitFanSpeeds)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitFanSpeeds instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `UnitFanSpeeds.from_ptr` (from _nvml.pyx 17396-17420).
 * Behavior, per the embedded Cython source:
 *   - Raise ValueError if ptr == 0.
 *   - Allocate a new UnitFanSpeeds via tp_new (bypassing __init__).
 *   - owner is None  -> malloc a private nvmlUnitFanSpeeds_t, memcpy the
 *     caller's data into it, mark the object as owning (_owned = True) so
 *     the struct is presumably freed in dealloc. MemoryError on malloc
 *     failure (note: the just-malloc'd NULL check happens before memcpy).
 *   - owner is not None -> alias the caller's pointer directly and keep a
 *     reference to `owner` to pin its lifetime (_owned = False).
 *   - Record the readonly flag and return the new instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17405
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17406
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17406, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17406, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17405
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)
 */
  }

  /* "cuda/bindings/_nvml.pyx":17407
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 */
  /* Direct tp_new call: allocates and default-initializes the object
   * without running any Python-level __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanSpeeds(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17407, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17408
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17409
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)
 *         if owner is None:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 */
    /* No owner: take a private copy so the caller's buffer may be freed
     * immediately after this call returns. */
    __pyx_v_obj->_ptr = ((nvmlUnitFanSpeeds_t *)malloc((sizeof(nvmlUnitFanSpeeds_t))));

    /* "cuda/bindings/_nvml.pyx":17410
 *         if owner is None:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitFanSpeeds_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17411
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitFanSpeeds")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitFanSpeeds_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * so the call goes through the generic method-unpacking path rather
       * than the direct PyExc_* fast path used for ValueError above. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17411, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_UnitFanSpeeds};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17411, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 17411, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17410
 *         if owner is None:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitFanSpeeds_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17412
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitFanSpeeds_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlUnitFanSpeeds_t))));

    /* "cuda/bindings/_nvml.pyx":17413
 *                 raise MemoryError("Error allocating UnitFanSpeeds")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitFanSpeeds_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17414
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlUnitFanSpeeds_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17408
 *             raise ValueError("ptr must not be null (0)")
 *         cdef UnitFanSpeeds obj = UnitFanSpeeds.__new__(UnitFanSpeeds)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>malloc(sizeof(nvmlUnitFanSpeeds_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":17416
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: alias the foreign buffer; the held reference to
     * `owner` keeps the underlying memory alive. */
    __pyx_v_obj->_ptr = ((nvmlUnitFanSpeeds_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17417
 *         else:
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":17418
 *             obj._ptr = <nvmlUnitFanSpeeds_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":17419
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":17420
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17396
 *         return __from_data(data, "unit_fan_speeds_dtype", unit_fan_speeds_dtype, UnitFanSpeeds)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitFanSpeeds instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Forward declaration of the Python-level wrapper for
 * UnitFanSpeeds.__reduce_cython__().  The parameter list differs by build
 * configuration: under CYTHON_METH_FASTCALL it receives a C array of
 * argument pointers plus a count, otherwise the classic args-tuple /
 * kwargs-dict pair. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring exposed to Python via help()/__doc__. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_16__reduce_cython__, "UnitFanSpeeds.__reduce_cython__(self)");
/* Method-table entry registering the wrapper under the name
 * "__reduce_cython__" with the FASTCALL|KEYWORDS calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_16__reduce_cython__};
/* Wrapper body: rejects any positional or keyword arguments (the method
 * takes only `self`), then delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-fastcall builds recover the argument count from the args tuple;
   * PyTuple_Size returns a negative value (with an exception set) on a
   * non-tuple, hence the check. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument raises TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanSpeeds.__reduce_cython__: unconditionally
 * raises TypeError because the wrapped C pointer (self._ptr) cannot be
 * converted to a Python object for pickling.  Always returns NULL with
 * an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message string, then jump to the
   * error label so the traceback records this function. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Forward declaration of the wrapper for
 * UnitFanSpeeds.__setstate_cython__(self, __pyx_state); parameter list
 * depends on CYTHON_METH_FASTCALL as for the other wrappers. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring exposed to Python via help()/__doc__. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_18__setstate_cython__, "UnitFanSpeeds.__setstate_cython__(self, __pyx_state)");
/* Method-table entry for "__setstate_cython__" (FASTCALL|KEYWORDS). */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_18__setstate_cython__};
/* Wrapper body: unpacks exactly one argument (__pyx_state), accepted
 * either positionally or by keyword, then delegates to the __pyx_pf_
 * implementation.  Labels: L3 = unpack error, L4 = unpack done,
 * L5 = wrong positional count, L6 = skip over L5. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-fastcall builds recover the argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    /* NULL-terminated table of accepted keyword names ("__pyx_state"). */
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect any positional args first, then let
       * __Pyx_ParseKeywords fill the remaining slots from keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Any slot still empty after keyword parsing means a required
       * argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any references collected so far and record
   * the traceback before returning NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of UnitFanSpeeds.__setstate_cython__: unconditionally
 * raises TypeError (the wrapped C pointer cannot be restored from a
 * pickle state).  The __pyx_state argument is accepted but unused.
 * Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13UnitFanSpeeds_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the shared interned message string. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.UnitFanSpeeds.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17423
 * 
 * 
 * cdef _get_vgpu_pgpu_metadata_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuMetadata_t pod = nvmlVgpuPgpuMetadata_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing the memory
 * layout of nvmlVgpuPgpuMetadata_t, using a dict of the form
 * {'names': [...], 'formats': [...], 'offsets': [...], 'itemsize': N}.
 * Field offsets are computed at runtime by subtracting the address of a
 * stack-local `pod` instance from each member's address, so the result
 * matches whatever padding/alignment this build's compiler chose.
 * Returns a new reference to the dtype, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_pgpu_metadata_dtype_offsets(void) {
  nvmlVgpuPgpuMetadata_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuPgpuMetadata_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  size_t __pyx_t_15;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_pgpu_metadata_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":17424
 * 
 * cdef _get_vgpu_pgpu_metadata_dtype_offsets():
 *     cdef nvmlVgpuPgpuMetadata_t pod = nvmlVgpuPgpuMetadata_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'revision', 'host_driver_version', 'pgpu_virtualization_caps', 'reserved', 'host_supported_vgpu_range', 'opaque_data_size', 'opaque_data'],
*/
  /* NOTE(review): __pyx_t_1 is never written before this copy in the
   * visible code; the offset arithmetic below only takes addresses
   * within `pod`, so the indeterminate contents are never read — but
   * confirm against Cython's C++ default-construction codegen. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":17425
 * cdef _get_vgpu_pgpu_metadata_dtype_offsets():
 *     cdef nvmlVgpuPgpuMetadata_t pod = nvmlVgpuPgpuMetadata_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'revision', 'host_driver_version', 'pgpu_virtualization_caps', 'reserved', 'host_supported_vgpu_range', 'opaque_data_size', 'opaque_data'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int8, _numpy.uint32, _numpy.uint32, vgpu_version_dtype, _numpy.uint32, _numpy.int8],
*/
  /* Look up the numpy module and its `dtype` attribute; kept in __pyx_t_5
   * until the final call at the bottom of the function. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":17426
 *     cdef nvmlVgpuPgpuMetadata_t pod = nvmlVgpuPgpuMetadata_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'revision', 'host_driver_version', 'pgpu_virtualization_caps', 'reserved', 'host_supported_vgpu_range', 'opaque_data_size', 'opaque_data'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int8, _numpy.uint32, _numpy.uint32, vgpu_version_dtype, _numpy.uint32, _numpy.int8],
 *         'offsets': [
*/
  /* Build the spec dict (__pyx_t_4) and its 'names' list of the eight
   * snake_case field names, from interned unicode constants. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17426, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17426, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_revision);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_revision);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_revision) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_host_driver_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_host_driver_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_host_driver_version) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_pgpu_virtualization_caps);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_pgpu_virtualization_caps);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_pgpu_virtualization_caps) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_reserved);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_reserved);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_reserved) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_host_supported_vgpu_range);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_host_supported_vgpu_range);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_host_supported_vgpu_range) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_opaque_data_size);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_opaque_data_size);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_opaque_data_size) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_opaque_data);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_opaque_data);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 7, __pyx_mstate_global->__pyx_n_u_opaque_data) != (0)) __PYX_ERR(0, 17426, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 17426, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17427
 *     return _numpy.dtype({
 *         'names': ['version', 'revision', 'host_driver_version', 'pgpu_virtualization_caps', 'reserved', 'host_supported_vgpu_range', 'opaque_data_size', 'opaque_data'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int8, _numpy.uint32, _numpy.uint32, vgpu_version_dtype, _numpy.uint32, _numpy.int8],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  /* Fetch the eight per-field format objects into __pyx_t_7..__pyx_t_14
   * (numpy scalar types, plus the module-level vgpu_version_dtype for
   * the nested struct field), then assemble the 'formats' list. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_version_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_int8); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  __pyx_t_12 = PyList_New(8); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17427, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 5, __pyx_t_6) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 6, __pyx_t_13) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 7, __pyx_t_14) != (0)) __PYX_ERR(0, 17427, __pyx_L1_error);
  /* References were transferred into the list above; clear the temps
   * without decref so ownership is held by the list alone. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_6 = 0;
  __pyx_t_13 = 0;
  __pyx_t_14 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_12) < (0)) __PYX_ERR(0, 17426, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;

  /* "cuda/bindings/_nvml.pyx":17429
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int8, _numpy.uint32, _numpy.uint32, vgpu_version_dtype, _numpy.uint32, _numpy.int8],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
*/
  /* Each offset is the byte distance from &pod to the member's address
   * (equivalent to offsetof for this struct), boxed as a Python int. */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17429, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":17430
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pgpuVirtualizationCaps)) - (<intptr_t>&pod),
*/
  __pyx_t_14 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.revision)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17430, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);

  /* "cuda/bindings/_nvml.pyx":17431
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.pgpuVirtualizationCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
*/
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hostDriverVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17431, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":17432
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pgpuVirtualizationCaps)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostSupportedVgpuRange)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.pgpuVirtualizationCaps)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17432, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":17433
 *             (<intptr_t>&(pod.hostDriverVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.pgpuVirtualizationCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.hostSupportedVgpuRange)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.reserved)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":17434
 *             (<intptr_t>&(pod.pgpuVirtualizationCaps)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostSupportedVgpuRange)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.hostSupportedVgpuRange)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":17435
 *             (<intptr_t>&(pod.reserved)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.hostSupportedVgpuRange)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.opaqueDataSize)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":17436
 *             (<intptr_t>&(pod.hostSupportedVgpuRange)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueDataSize)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPgpuMetadata_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.opaqueData)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":17428
 *         'names': ['version', 'revision', 'host_driver_version', 'pgpu_virtualization_caps', 'reserved', 'host_supported_vgpu_range', 'opaque_data_size', 'opaque_data'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.int8, _numpy.uint32, _numpy.uint32, vgpu_version_dtype, _numpy.uint32, _numpy.int8],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.revision)) - (<intptr_t>&pod),
*/
  /* Assemble the 'offsets' list (same field order as 'names'/'formats'). */
  __pyx_t_7 = PyList_New(8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_12) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_14);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_14) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_13) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_6) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_10) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_9) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 7, __pyx_t_8) != (0)) __PYX_ERR(0, 17428, __pyx_L1_error);
  /* Ownership transferred into the list; clear temps without decref. */
  __pyx_t_12 = 0;
  __pyx_t_14 = 0;
  __pyx_t_13 = 0;
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 17426, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":17438
 *             (<intptr_t>&(pod.opaqueData)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuPgpuMetadata_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  /* 'itemsize' = total struct size, so the dtype spans any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuPgpuMetadata_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 17426, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(spec_dict).  __pyx_t_15 selects between the bound-
   * method (self + dict) and plain-callable (dict only) argument layouts. */
  __pyx_t_15 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_15 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_15, (2-__pyx_t_15) | (__pyx_t_15*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17425, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17423
 * 
 * 
 * cdef _get_vgpu_pgpu_metadata_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuMetadata_t pod = nvmlVgpuPgpuMetadata_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release every temporary that may still hold a reference. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_pgpu_metadata_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17455
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuPgpuMetadata_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for VgpuPgpuMetadata.__init__ (classic VARARGS
 * convention, as required for the init slot): rejects any positional or
 * keyword arguments, then delegates to the __pyx_pf_ implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count the positional arguments in the args tuple; PyTuple_Size can
   * fail on a non-tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes only `self`: any extra argument is a TypeError. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__init__ (generated from _nvml.pyx:17455).
 * Zero-allocates one nvmlVgpuPgpuMetadata_t via calloc, raises MemoryError on
 * allocation failure, and initializes the ownership flags:
 * _owner=None, _owned=True (dealloc will free), _readonly=False. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":17456
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuPgpuMetadata_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPgpuMetadata")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuPgpuMetadata_t *)calloc(1, (sizeof(nvmlVgpuPgpuMetadata_t))));

  /* "cuda/bindings/_nvml.pyx":17457
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuPgpuMetadata_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPgpuMetadata")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17458
 *         self._ptr = <nvmlVgpuPgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuPgpuMetadata_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPgpuMetadata")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError, call it with the message, and raise the result.
     * The CYTHON_UNPACK_METHODS branch unpacks a bound method into
     * (self, function) so the vectorcall below can prepend self. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17458, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPgpuMetadat};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17458, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17458, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17457
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuPgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuPgpuMetadata_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuPgpuMetadata")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":17459
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuPgpuMetadata")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref the new value before
   * decref'ing the old one so the slot never holds a dead reference. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":17460
 *             raise MemoryError("Error allocating VgpuPgpuMetadata")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":17461
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":17455
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuPgpuMetadata_t *>calloc(1, sizeof(nvmlVgpuPgpuMetadata_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17463
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuPgpuMetadata_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper for VgpuPgpuMetadata.__dealloc__: casts self to the
 * extension-type struct and delegates to the implementation.
 * Note: __pyx_args/__pyx_nargs below appear only as arguments to the
 * __Pyx_KwValues_VARARGS macro, which does not expand them, so no
 * identifiers need to exist for them. */
static void __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuPgpuMetadata.__dealloc__ (from _nvml.pyx:17463).
 * Frees the underlying nvmlVgpuPgpuMetadata_t only when this wrapper owns
 * it (_owned) and the pointer is live.  _ptr is cleared before free() so
 * the object never holds a dangling pointer, even transiently. */
static void __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":17465-17468 */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlVgpuPgpuMetadata_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":17470
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPgpuMetadata object at {hex(id(self))}>"
 * 
*/

/* Python wrapper for VgpuPgpuMetadata.__repr__: delegates to the
 * implementation function after casting self to the extension-type struct. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__repr__ (from _nvml.pyx:17470).
 * Builds the f-string "<{__name__}.VgpuPgpuMetadata object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * the five pieces with the generated __Pyx_PyUnicode_Join helper. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":17471
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuPgpuMetadata object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) formatted with an empty format spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal and formatted pieces; length/max-char hints are
   * precomputed by the code generator for a one-pass concatenation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuPgpuMetadata_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 28 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17470
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuPgpuMetadata object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17473
 *         return f"<{__name__}.VgpuPgpuMetadata object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper for the VgpuPgpuMetadata.ptr property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuPgpuMetadata.ptr property getter
 * (from _nvml.pyx:17473): returns the raw _ptr address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17476
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t fits in Py_ssize_t on supported platforms, hence FromSsize_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17473
 *         return f"<{__name__}.VgpuPgpuMetadata object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17478
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level helper (cdef _get_ptr, from _nvml.pyx:17478): returns the raw
 * nvmlVgpuPgpuMetadata_t pointer as an integer address, without touching
 * the Python API. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":17481
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper for VgpuPgpuMetadata.__int__. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__int__ (from _nvml.pyx:17481):
 * int(obj) yields the raw _ptr address, same value as the .ptr property. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":17482
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17481
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17484
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPgpuMetadata other_
 *         if not isinstance(other, VgpuPgpuMetadata):
*/

/* Python wrapper for VgpuPgpuMetadata.__eq__(self, other). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__eq__ (from _nvml.pyx:17484).
 * Returns False for non-VgpuPgpuMetadata operands; otherwise compares the
 * two underlying C structs bytewise with memcmp.
 * NOTE(review): memcmp assumes both _ptr values are non-NULL; confirm the
 * .pyx-level invariant that a live wrapper always holds a valid pointer. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":17486
 *     def __eq__(self, other):
 *         cdef VgpuPgpuMetadata other_
 *         if not isinstance(other, VgpuPgpuMetadata):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17487
 *         cdef VgpuPgpuMetadata other_
 *         if not isinstance(other, VgpuPgpuMetadata):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuMetadata_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17486
 *     def __eq__(self, other):
 *         cdef VgpuPgpuMetadata other_
 *         if not isinstance(other, VgpuPgpuMetadata):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":17488
 *         if not isinstance(other, VgpuPgpuMetadata):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuMetadata_t)) == 0)
 * 
 */
  /* Re-check the cast (TypeTest) even after the isinstance guard; this is
   * the generated pattern for a typed cdef assignment. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata))))) __PYX_ERR(0, 17488, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":17489
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuMetadata_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuPgpuMetadata_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17489, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17484
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuPgpuMetadata other_
 *         if not isinstance(other, VgpuPgpuMetadata):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17491
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuMetadata_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
*/

/* Python wrapper for VgpuPgpuMetadata.__setitem__(self, key, val). */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__setitem__ (from _nvml.pyx:17491).
 * obj[0] = ndarray  -> allocate a fresh struct and memcpy the array's data
 *                      into it (ownership flags reset; _readonly mirrors the
 *                      array's writeable flag).
 * any other key     -> falls through to setattr(self, key, val).
 * NOTE(review): generated from the .pyx as-is — the previous self._ptr is
 * not freed before the malloc reassignment, so an already-owned buffer
 * leaks on repeated obj[0] = ... assignment; confirm upstream intent. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":17492
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit: only test isinstance when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17492, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 17492, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17493
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuPgpuMetadata_t *)malloc((sizeof(nvmlVgpuPgpuMetadata_t))));

    /* "cuda/bindings/_nvml.pyx":17494
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuMetadata_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17495
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuMetadata_t))
 *             self._owner = None
 */
      /* Raise MemoryError("Error allocating VgpuPgpuMetadata") — same
       * lookup/unpack/vectorcall pattern as in __init__. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17495, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPgpuMetadat};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17495, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 17495, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17494
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuMetadata_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17496
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuMetadata_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch the array's buffer address via val.ctypes.data (a Python int),
     * convert to intptr_t, and copy one struct's worth of bytes.
     * NOTE(review): assumes the ndarray holds at least
     * sizeof(nvmlVgpuPgpuMetadata_t) contiguous bytes — enforced at the
     * .pyx/caller level, not here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17496, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17496, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17496, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuPgpuMetadata_t))));

    /* "cuda/bindings/_nvml.pyx":17497
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuMetadata_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17498
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuPgpuMetadata_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17499
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the numpy array's writeable flag: read-only array => read-only view. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17499, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17499, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17499, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":17492
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":17501
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 17501, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":17491
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuPgpuMetadata_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17503
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_supported_vgpu_range(self):
 *         """VgpuVersion: """
*/

/* Python wrapper for the VgpuPgpuMetadata.host_supported_vgpu_range
 * property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.host_supported_vgpu_range.__get__ (.pyx:17506):
 * returns VgpuVersion.from_ptr(<address of embedded hostSupportedVgpuRange
 * field>, self._readonly, self) — i.e. a zero-copy view object that keeps
 * `self` alive as its owner. Cython-generated; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17506
 *     def host_supported_vgpu_range(self):
 *         """VgpuVersion: """
 *         return VgpuVersion.from_ptr(<intptr_t>&(self._ptr[0].hostSupportedVgpuRange), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @host_supported_vgpu_range.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the address of the embedded struct field as a Python int, then
   * vector-call VgpuVersion.from_ptr(addr, self._readonly, self). */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).hostSupportedVgpuRange))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17506, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17506, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17503
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_supported_vgpu_range(self):
 *         """VgpuVersion: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.host_supported_vgpu_range.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17508
 *         return VgpuVersion.from_ptr(<intptr_t>&(self._ptr[0].hostSupportedVgpuRange), self._readonly, self)
 * 
 *     @host_supported_vgpu_range.setter             # <<<<<<<<<<<<<<
 *     def host_supported_vgpu_range(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `host_supported_vgpu_range` property
 * setter: casts `self` and forwards `val` to the implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.host_supported_vgpu_range.__set__ (.pyx:17509):
 * raises ValueError when the instance is read-only, type-checks `val` as a
 * VgpuVersion (None also passes the test; NOTE(review): a None val would then
 * be dereferenced below — presumably callers never pass None; verify in the
 * .pyx generator), and memcpy's the wrapped nvmlVgpuVersion_t into the
 * embedded struct field. Cython-generated; do not edit by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17510
 *     @host_supported_vgpu_range.setter
 *     def host_supported_vgpu_range(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef VgpuVersion val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17511
 *     def host_supported_vgpu_range(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuVersion val_ = val
 *         memcpy(<void *>&(self._ptr[0].hostSupportedVgpuRange), <void *>(val_._get_ptr()), sizeof(nvmlVgpuVersion_t) * 1)
 */
    /* Read-only guard: build and raise ValueError, then jump to error exit. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17511, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17511, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17510
 *     @host_supported_vgpu_range.setter
 *     def host_supported_vgpu_range(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef VgpuVersion val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17512
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef VgpuVersion val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].hostSupportedVgpuRange), <void *>(val_._get_ptr()), sizeof(nvmlVgpuVersion_t) * 1)
 * 
 */
  /* Cython typed assignment: TypeError if val is neither None nor VgpuVersion. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion))))) __PYX_ERR(0, 17512, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17513
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef VgpuVersion val_ = val
 *         memcpy(<void *>&(self._ptr[0].hostSupportedVgpuRange), <void *>(val_._get_ptr()), sizeof(nvmlVgpuVersion_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Copy the value's underlying nvmlVgpuVersion_t into the embedded field. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17513, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).hostSupportedVgpuRange)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuVersion_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":17508
 *         return VgpuVersion.from_ptr(<intptr_t>&(self._ptr[0].hostSupportedVgpuRange), self._readonly, self)
 * 
 *     @host_supported_vgpu_range.setter             # <<<<<<<<<<<<<<
 *     def host_supported_vgpu_range(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.host_supported_vgpu_range.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17515
 *         memcpy(<void *>&(self._ptr[0].hostSupportedVgpuRange), <void *>(val_._get_ptr()), sizeof(nvmlVgpuVersion_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `version` property getter:
 * casts `self` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.version.__get__ (.pyx:17518): returns the
 * struct's unsigned `version` field as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17518
 *     def version(self):
 *         """int: """
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17515
 *         memcpy(<void *>&(self._ptr[0].hostSupportedVgpuRange), <void *>(val_._get_ptr()), sizeof(nvmlVgpuVersion_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17520
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `version` property setter:
 * casts `self` and forwards `val` to the implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.version.__set__ (.pyx:17521): raises ValueError
 * when read-only, otherwise converts `val` to unsigned int (OverflowError/
 * TypeError propagate from the conversion) and stores it in the struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17522
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17523
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Read-only guard: build and raise ValueError. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17523, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17523, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17522
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17524
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17524, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17520
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17526
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def revision(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `revision` property getter:
 * casts `self` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.revision.__get__ (.pyx:17529): returns the
 * struct's unsigned `revision` field as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17529
 *     def revision(self):
 *         """int: """
 *         return self._ptr[0].revision             # <<<<<<<<<<<<<<
 * 
 *     @revision.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).revision); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17529, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17526
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def revision(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.revision.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17531
 *         return self._ptr[0].revision
 * 
 *     @revision.setter             # <<<<<<<<<<<<<<
 *     def revision(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `revision` property setter:
 * casts `self` and forwards `val` to the implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.revision.__set__ (.pyx:17532): raises ValueError
 * when read-only, otherwise converts `val` to unsigned int and stores it. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17533
 *     @revision.setter
 *     def revision(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].revision = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17534
 *     def revision(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].revision = val
 * 
 */
    /* Read-only guard: build and raise ValueError. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17534, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17534, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17533
 *     @revision.setter
 *     def revision(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].revision = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17535
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].revision = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17535, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).revision = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17531
 *         return self._ptr[0].revision
 * 
 *     @revision.setter             # <<<<<<<<<<<<<<
 *     def revision(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.revision.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17537
 *         self._ptr[0].revision = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `host_driver_version` property
 * getter: casts `self` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.host_driver_version.__get__ (.pyx:17540):
 * decodes the NUL-terminated char[80] field as UTF-8 via PyUnicode_FromString.
 * NOTE(review): assumes the field is always NUL-terminated; the setter below
 * is responsible for guaranteeing that. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17540
 *     def host_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].hostDriverVersion)             # <<<<<<<<<<<<<<
 * 
 *     @host_driver_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).hostDriverVersion); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17537
 *         self._ptr[0].revision = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def host_driver_version(self):
 *         """~_numpy.int8: (array of length 80)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.host_driver_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17542
 *         return cpython.PyUnicode_FromString(self._ptr[0].hostDriverVersion)
 * 
 *     @host_driver_version.setter             # <<<<<<<<<<<<<<
 *     def host_driver_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `host_driver_version` property
 * setter: casts `self` and forwards `val` to the implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements VgpuPgpuMetadata.host_driver_version.__set__ (.pyx:17543):
 * raises ValueError when the instance is read-only, encodes `val` to bytes,
 * rejects strings of encoded length >= 80, and copies the C string into the
 * char[80] hostDriverVersion field.
 *
 * BUGFIX vs. generated code: the original copied a fixed 80 bytes from the
 * bytes object's buffer even though only len+1 bytes (len < 80) are valid —
 * an out-of-bounds read — and the copied trailing garbage made the
 * struct-wide memcmp used by __eq__ (see .pyx:17490) nondeterministic.
 * We now copy exactly len+1 bytes (string + NUL) and zero-fill the rest of
 * the field, which is deterministic and reads no memory out of bounds. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17544
 *     @host_driver_version.setter
 *     def host_driver_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* Read-only guard: build and raise ValueError. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17545, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17545, __pyx_L1_error)
  }

  /* "cuda/bindings/_nvml.pyx":17546
 *         cdef bytes buf = val.encode()
 */
  /* Call val.encode() (default UTF-8) and require an exact bytes result. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17546, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 17546, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17547
 *         if len(buf) >= 80:
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 17547, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17547, __pyx_L1_error)
  /* Reserve one byte for the NUL terminator: encoded length must be <= 79. */
  __pyx_t_5 = (__pyx_t_4 >= 80);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":17548
 *             raise ValueError("String too long for field host_driver_version, max length is 79")
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_host_d};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17548, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17548, __pyx_L1_error)
  }

  /* "cuda/bindings/_nvml.pyx":17549
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 17549, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 17549, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":17550
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 */
  /* BUGFIX: copy only the len+1 valid bytes (string + NUL) of the bytes
   * buffer instead of a fixed 80 (which read past the end of the object),
   * then zero-fill the remainder of the char[80] field so the struct-wide
   * memcmp in __eq__ sees deterministic contents. __pyx_t_4 still holds the
   * length computed for the bounds check above. */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).hostDriverVersion), ((void *)__pyx_v_ptr), ((size_t)__pyx_t_4) + 1));
  (void)(memset(((void *)((__pyx_v_self->_ptr[0]).hostDriverVersion + __pyx_t_4 + 1)), 0, (size_t)(80 - (__pyx_t_4 + 1))));

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.host_driver_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17552
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pgpu_virtualization_caps(self):
 *         """int: """
*/

/* Python wrapper */
/* Cython-generated Python wrapper for the `pgpu_virtualization_caps` property
 * getter: casts `self` and delegates to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `pgpu_virtualization_caps.__get__`: converts the wrapped
   struct's `pgpuVirtualizationCaps` field (C unsigned int) to a Python int.
   Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17555
 *     def pgpu_virtualization_caps(self):
 *         """int: """
 *         return self._ptr[0].pgpuVirtualizationCaps             # <<<<<<<<<<<<<<
 * 
 *     @pgpu_virtualization_caps.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).pgpuVirtualizationCaps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17552
 *         memcpy(<void *>(self._ptr[0].hostDriverVersion), <void *>ptr, 80)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def pgpu_virtualization_caps(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.pgpu_virtualization_caps.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17557
 *         return self._ptr[0].pgpuVirtualizationCaps
 * 
 *     @pgpu_virtualization_caps.setter             # <<<<<<<<<<<<<<
 *     def pgpu_virtualization_caps(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for `VgpuPgpuMetadata.pgpu_virtualization_caps`: casts self
   and delegates to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `pgpu_virtualization_caps.__set__`: raises ValueError if the
   instance is read-only, otherwise converts `val` to a C unsigned int and
   stores it in the wrapped struct.  Returns 0 on success, -1 with a Python
   exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17559
 *     @pgpu_virtualization_caps.setter
 *     def pgpu_virtualization_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].pgpuVirtualizationCaps = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17560
 *     def pgpu_virtualization_caps(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].pgpuVirtualizationCaps = val
 * 
 */
    /* Instantiate ValueError via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17560, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17560, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17559
 *     @pgpu_virtualization_caps.setter
 *     def pgpu_virtualization_caps(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].pgpuVirtualizationCaps = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17561
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].pgpuVirtualizationCaps = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion failure is signalled by (unsigned int)-1 plus a pending
     exception (e.g. TypeError/OverflowError from the int coercion). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17561, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).pgpuVirtualizationCaps = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17557
 *         return self._ptr[0].pgpuVirtualizationCaps
 * 
 *     @pgpu_virtualization_caps.setter             # <<<<<<<<<<<<<<
 *     def pgpu_virtualization_caps(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.pgpu_virtualization_caps.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17563
 *         self._ptr[0].pgpuVirtualizationCaps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter wrapper for `VgpuPgpuMetadata.opaque_data_size`: casts self and
   delegates to the implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `opaque_data_size.__get__`: converts the wrapped struct's
   `opaqueDataSize` field (C unsigned int) to a Python int.  Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17566
 *     def opaque_data_size(self):
 *         """int: """
 *         return self._ptr[0].opaqueDataSize             # <<<<<<<<<<<<<<
 * 
 *     @opaque_data_size.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int object. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).opaqueDataSize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17563
 *         self._ptr[0].pgpuVirtualizationCaps = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.opaque_data_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17568
 *         return self._ptr[0].opaqueDataSize
 * 
 *     @opaque_data_size.setter             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for `VgpuPgpuMetadata.opaque_data_size`: casts self and
   delegates to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `opaque_data_size.__set__`: raises ValueError if the instance
   is read-only, otherwise converts `val` to a C unsigned int and stores it
   in the wrapped struct.  Returns 0 on success, -1 with a Python exception
   set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17570
 *     @opaque_data_size.setter
 *     def opaque_data_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].opaqueDataSize = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17571
 *     def opaque_data_size(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].opaqueDataSize = val
 * 
 */
    /* Instantiate ValueError via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17571, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17571, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17570
 *     @opaque_data_size.setter
 *     def opaque_data_size(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].opaqueDataSize = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17572
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         self._ptr[0].opaqueDataSize = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Conversion failure is signalled by (unsigned int)-1 plus a pending
     exception (e.g. TypeError/OverflowError from the int coercion). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17572, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).opaqueDataSize = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17568
 *         return self._ptr[0].opaqueDataSize
 * 
 *     @opaque_data_size.setter             # <<<<<<<<<<<<<<
 *     def opaque_data_size(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.opaque_data_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17574
 *         self._ptr[0].opaqueDataSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data(self):
 *         """~_numpy.int8: (array of length 4)."""
*/

/* Python wrapper */
/* Getter wrapper for `VgpuPgpuMetadata.opaque_data`: casts self and
   delegates to the implementation.  Returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `opaque_data.__get__`: builds a Python str from the struct's
   `opaqueData` buffer via PyUnicode_FromString.
   NOTE(review): the .pyx docstring describes this field as an int8 array of
   length 4, but PyUnicode_FromString requires a NUL-terminated UTF-8 buffer;
   if the 4 bytes contain no NUL this reads past the array, and arbitrary
   binary data may fail UTF-8 decoding -- confirm intent in the generating
   .pyx (fix belongs there, not in this generated file). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17577
 *     def opaque_data(self):
 *         """~_numpy.int8: (array of length 4)."""
 *         return cpython.PyUnicode_FromString(self._ptr[0].opaqueData)             # <<<<<<<<<<<<<<
 * 
 *     @opaque_data.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyUnicode_FromString((__pyx_v_self->_ptr[0]).opaqueData); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17574
 *         self._ptr[0].opaqueDataSize = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def opaque_data(self):
 *         """~_numpy.int8: (array of length 4)."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.opaque_data.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17579
 *         return cpython.PyUnicode_FromString(self._ptr[0].opaqueData)
 * 
 *     @opaque_data.setter             # <<<<<<<<<<<<<<
 *     def opaque_data(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter wrapper for `VgpuPgpuMetadata.opaque_data`: casts self and
   delegates to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `opaque_data.__set__`: rejects writes on read-only instances,
   encodes `val` via its .encode() method (must yield bytes), rejects encoded
   lengths >= 4, then memcpy's a fixed 4 bytes into the struct's `opaqueData`
   array.  Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, PyObject *__pyx_v_val) {
  PyObject *__pyx_v_buf = 0;
  char *__pyx_v_ptr;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17581
 *     @opaque_data.setter
 *     def opaque_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17582
 *     def opaque_data(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:
 */
    /* Instantiate ValueError via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuPgpuMetadata_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17582, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17582, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17581
 *     @opaque_data.setter
 *     def opaque_data(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":17583
 *         if self._readonly:
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()             # <<<<<<<<<<<<<<
 *         if len(buf) >= 4:
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 */
  /* Call val.encode() with no arguments; the result must be exactly bytes
     (or None), enforced by the type check below. */
  __pyx_t_2 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_encode, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17583, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  if (!(likely(PyBytes_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_1))) __PYX_ERR(0, 17583, __pyx_L1_error)
  __pyx_v_buf = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17584
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 17584, __pyx_L1_error)
  }
  __pyx_t_4 = __Pyx_PyBytes_GET_SIZE(__pyx_v_buf); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17584, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 >= 4);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":17585
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:
 *             raise ValueError("String too long for field opaque_data, max length is 3")             # <<<<<<<<<<<<<<
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_String_too_long_for_field_opaque};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17585, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17585, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17584
 *             raise ValueError("This VgpuPgpuMetadata instance is read-only")
 *         cdef bytes buf = val.encode()
 *         if len(buf) >= 4:             # <<<<<<<<<<<<<<
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf
 */
  }

  /* "cuda/bindings/_nvml.pyx":17586
 *         if len(buf) >= 4:
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf             # <<<<<<<<<<<<<<
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 */
  if (unlikely(__pyx_v_buf == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(0, 17586, __pyx_L1_error)
  }
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 17586, __pyx_L1_error)
  __pyx_v_ptr = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":17587
 *             raise ValueError("String too long for field opaque_data, max length is 3")
 *         cdef char *ptr = buf
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* NOTE(review): always copies 4 bytes, but a bytes object of length L only
     guarantees L+1 valid bytes (data + NUL); for L < 3 this over-reads the
     source buffer -- confirm against the generating .pyx (fix belongs there,
     not in this generated file). */
  (void)(memcpy(((void *)(__pyx_v_self->_ptr[0]).opaqueData), ((void *)__pyx_v_ptr), 4));

  /* "cuda/bindings/_nvml.pyx":17579
 *         return cpython.PyUnicode_FromString(self._ptr[0].opaqueData)
 * 
 *     @opaque_data.setter             # <<<<<<<<<<<<<<
 *     def opaque_data(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.opaque_data.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_buf);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17589
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPgpuMetadata instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python wrapper for the static method `VgpuPgpuMetadata.from_data(data)`.
   Parses exactly one positional-or-keyword argument `data` (FASTCALL or
   tuple/dict calling convention, selected at compile time) and delegates to
   the implementation function.  Returns a new reference, or NULL with an
   exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_12from_data, "VgpuPgpuMetadata.from_data(data)\n\nCreate an VgpuPgpuMetadata instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_pgpu_metadata_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fix: the `< 0` comparison must sit inside unlikely().  With
       `unlikely(x)` defined as `__builtin_expect(!!(x), 0)` the previous
       form `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value with 0 and
       could never trigger, silently disabling this error check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17589, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17589, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 17589, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 17589, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17589, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17589, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implements `VgpuPgpuMetadata.from_data`: looks up the module-level global
   `vgpu_pgpu_metadata_dtype` and forwards it, together with the data object
   and the VgpuPgpuMetadata type object, to the module-internal `__from_data`
   helper.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":17596
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_pgpu_metadata_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_pgpu_metadata_dtype", vgpu_pgpu_metadata_dtype, VgpuPgpuMetadata)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_pgpu_metadata_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17596, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_pgpu_metadata_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17596, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17589
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPgpuMetadata instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17598
 *         return __from_data(data, "vgpu_pgpu_metadata_dtype", vgpu_pgpu_metadata_dtype, VgpuPgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuMetadata instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for VgpuPgpuMetadata.from_ptr(ptr, readonly=False, owner=None).
 *
 * Unpacks positional and keyword arguments (fast-call vector or tuple form,
 * selected by CYTHON_METH_FASTCALL), applies the defaults (readonly=False,
 * owner=None), converts `ptr` to intptr_t and `readonly` to a C int, then
 * delegates to the implementation function __pyx_pf_..._14from_ptr.
 * On any parsing/conversion error it releases the collected argument
 * references and returns NULL with a traceback entry added. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_14from_ptr, "VgpuPgpuMetadata.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuPgpuMetadata instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x) < 0` expands to
     * `__builtin_expect(!!(x), 0) < 0`, which is 0/1 and never negative, so an
     * error return from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * (Matches the correct form used by the no-keyword wrappers in this file.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17598, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional args first, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17598, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17598, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17598, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 17598, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":17599
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuPgpuMetadata instance wrapping the given pointer.
 * 
*/
      /* Default for `owner` (None); `readonly`'s default is applied after conversion. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Verify the single required argument (`ptr`) was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 17598, __pyx_L3_error) }
      }
    } else {
      /* Positional-only path: 1..3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17598, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17598, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17598, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python objects to the C argument types. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17599, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17599, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 17598, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop the collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":17598
 *         return __from_data(data, "vgpu_pgpu_metadata_dtype", vgpu_pgpu_metadata_dtype, VgpuPgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuMetadata instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.from_ptr.
 *
 * Behavior (from the visible code):
 *   - rejects ptr == 0 with ValueError;
 *   - allocates a new VgpuPgpuMetadata instance via the type's tp_new;
 *   - owner is None  -> malloc()s a private nvmlVgpuPgpuMetadata_t, memcpy()s
 *     the pointed-to struct into it, marks the object as owning (_owned = 1);
 *     a failed malloc raises MemoryError;
 *   - owner is given -> stores `ptr` directly (zero-copy), keeps a reference
 *     to `owner` to pin the underlying memory, _owned = 0;
 *   - copies `readonly` into the instance and returns it.
 * On any error path __pyx_L1_error releases temporaries and the half-built
 * object, records a traceback entry, and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17607
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)
*/
  /* Guard: a null pointer is never a valid source address. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17608
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      /* Vectorcall-style invocation of ValueError(message). */
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17608, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17608, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17607
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)
*/
  }

  /* "cuda/bindings/_nvml.pyx":17609
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
*/
  /* Allocate the instance directly through tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17609, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17610
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17611
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
*/
    /* Copying path: no owner provided, so take a private heap copy of the struct. */
    __pyx_v_obj->_ptr = ((nvmlVgpuPgpuMetadata_t *)malloc((sizeof(nvmlVgpuPgpuMetadata_t))));

    /* "cuda/bindings/_nvml.pyx":17612
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuMetadata_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17613
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuMetadata_t))
 *             obj._owner = None
*/
      /* Look up MemoryError by name (module globals, falling back to builtins). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17613, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuPgpuMetadat};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17613, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 17613, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17612
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuMetadata_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":17614
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuMetadata_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy one struct's worth of bytes from the caller-supplied address. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuPgpuMetadata_t))));

    /* "cuda/bindings/_nvml.pyx":17615
 *                 raise MemoryError("Error allocating VgpuPgpuMetadata")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuMetadata_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17616
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuPgpuMetadata_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>ptr
*/
    /* _owned = True: the object is responsible for freeing _ptr later. */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17610
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuPgpuMetadata obj = VgpuPgpuMetadata.__new__(VgpuPgpuMetadata)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>malloc(sizeof(nvmlVgpuPgpuMetadata_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":17618
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Zero-copy path: wrap the caller's memory and hold a reference to `owner`
     * so the backing allocation stays alive as long as this object does. */
    __pyx_v_obj->_ptr = ((nvmlVgpuPgpuMetadata_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17619
 *         else:
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":17620
 *             obj._ptr = <nvmlVgpuPgpuMetadata_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":17621
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":17622
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17598
 *         return __from_data(data, "vgpu_pgpu_metadata_dtype", vgpu_pgpu_metadata_dtype, VgpuPgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuMetadata instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Drop the working reference to obj; on success __pyx_r already holds one. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Argument-checking wrapper for VgpuPgpuMetadata.__reduce_cython__(self).
 * Rejects any positional or keyword arguments, then forwards `self`
 * to the implementation (which unconditionally raises TypeError). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16__reduce_cython__, "VgpuPgpuMetadata.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments besides self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__reduce_cython__: pickling is
 * unsupported because the instance wraps a raw C pointer, so this
 * always raises TypeError. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Unconditional raise; control always transfers to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-parsing wrapper for VgpuPgpuMetadata.__setstate_cython__(self, __pyx_state).
 * Accepts exactly one argument (positional or by the keyword `__pyx_state`),
 * then forwards to the implementation (which unconditionally raises TypeError). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_18__setstate_cython__, "VgpuPgpuMetadata.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x) < 0` expands to
     * `__builtin_expect(!!(x), 0) < 0`, which is 0/1 and never negative, so an
     * error return from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * (Matches the correct form used by the no-keyword wrappers in this file.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional arg (if any), then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify the single required argument was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop the collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuPgpuMetadata.__setstate_cython__: unpickling is
 * unsupported because the instance wraps a raw C pointer, so this always
 * raises TypeError; `__pyx_state` is accepted but never used. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Unconditional raise; control always transfers to the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuPgpuMetadata.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17625
 * 
 * 
 * cdef _get_gpu_instance_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceInfo_t pod = nvmlGpuInstanceInfo_t()
 *     return _numpy.dtype({
*/

/* Build the numpy structured dtype describing nvmlGpuInstanceInfo_t:
 * field names, per-field numpy formats, byte offsets computed from a
 * stack instance `pod`, and 'itemsize' = sizeof(nvmlGpuInstanceInfo_t).
 * Returns a new reference to the dtype, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_info_dtype_offsets(void) {
  nvmlGpuInstanceInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGpuInstanceInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  size_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_gpu_instance_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":17626
 * 
 * cdef _get_gpu_instance_info_dtype_offsets():
 *     cdef nvmlGpuInstanceInfo_t pod = nvmlGpuInstanceInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['device_', 'id', 'profile_id', 'placement'],
 */
  /* FIX: __pyx_t_1 was copied into pod without ever being assigned, which
   * reads an uninitialized object (UB). Restore the value-initialization
   * Cython emits for `nvmlGpuInstanceInfo_t()` (this TU is compiled as C++,
   * so T() zero-initializes the POD). Only member addresses of `pod` are
   * used below, but the copy itself must read a valid object. */
  __pyx_t_1 = nvmlGpuInstanceInfo_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":17627
 * cdef _get_gpu_instance_info_dtype_offsets():
 *     cdef nvmlGpuInstanceInfo_t pod = nvmlGpuInstanceInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['device_', 'id', 'profile_id', 'placement'],
 *         'formats': [_numpy.intp, _numpy.uint32, _numpy.uint32, gpu_instance_placement_dtype],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype; __pyx_t_5 holds the callable. */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":17628
 *     cdef nvmlGpuInstanceInfo_t pod = nvmlGpuInstanceInfo_t()
 *     return _numpy.dtype({
 *         'names': ['device_', 'id', 'profile_id', 'placement'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.intp, _numpy.uint32, _numpy.uint32, gpu_instance_placement_dtype],
 *         'offsets': [
 */
  /* __pyx_t_4 = the dict argument; __pyx_t_6 = the 'names' list. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_device);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_device);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_device) != (0)) __PYX_ERR(0, 17628, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 17628, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_profile_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_profile_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_profile_id) != (0)) __PYX_ERR(0, 17628, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_placement) != (0)) __PYX_ERR(0, 17628, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 17628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17629
 *     return _numpy.dtype({
 *         'names': ['device_', 'id', 'profile_id', 'placement'],
 *         'formats': [_numpy.intp, _numpy.uint32, _numpy.uint32, gpu_instance_placement_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 */
  /* Build the 'formats' list: numpy.intp, numpy.uint32, numpy.uint32,
   * and the nested placement dtype from the module global. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_gpu_instance_placement_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = PyList_New(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 17629, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17629, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 17629, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 3, __pyx_t_6) != (0)) __PYX_ERR(0, 17629, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_10) < (0)) __PYX_ERR(0, 17628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":17631
 *         'formats': [_numpy.intp, _numpy.uint32, _numpy.uint32, gpu_instance_placement_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),
 */
  /* Offsets are computed as member address minus struct base address;
   * only addresses of `pod` are used, never its contents. */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.device)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17631, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":17632
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":17633
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.profileId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17633, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":17634
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstanceInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placement)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17634, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":17630
 *         'names': ['device_', 'id', 'profile_id', 'placement'],
 *         'formats': [_numpy.intp, _numpy.uint32, _numpy.uint32, gpu_instance_placement_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17630, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_10) != (0)) __PYX_ERR(0, 17630, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 17630, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 17630, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_8) != (0)) __PYX_ERR(0, 17630, __pyx_L1_error);
  __pyx_t_10 = 0;
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 17628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":17636
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGpuInstanceInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGpuInstanceInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 17628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call numpy.dtype(dict) via the vectorcall fast path; if dtype is a
   * bound method, unpack it so self rides in the argument array. */
  __pyx_t_11 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_11 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_11, (2-__pyx_t_11) | (__pyx_t_11*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17627, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17625
 * 
 * 
 * cdef _get_gpu_instance_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceInfo_t pod = nvmlGpuInstanceInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_gpu_instance_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17653
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuInstanceInfo_t *>calloc(1, sizeof(nvmlGpuInstanceInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for GpuInstanceInfo.__init__: rejects any positional
 * or keyword arguments, then dispatches to the no-arg implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: any positional or keyword
   * argument raises TypeError (returns -1 per the tp_init protocol). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__init__: zero-allocates the wrapped
 * nvmlGpuInstanceInfo_t (raising MemoryError on calloc failure) and marks
 * the instance as owning, writable, and owner-less.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":17654
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceInfo_t *>calloc(1, sizeof(nvmlGpuInstanceInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceInfo")
 */
  /* calloc zero-fills the struct, so all fields start at 0/NULL. */
  __pyx_v_self->_ptr = ((nvmlGpuInstanceInfo_t *)calloc(1, (sizeof(nvmlGpuInstanceInfo_t))));

  /* "cuda/bindings/_nvml.pyx":17655
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceInfo_t *>calloc(1, sizeof(nvmlGpuInstanceInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuInstanceInfo")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17656
 *         self._ptr = <nvmlGpuInstanceInfo_t *>calloc(1, sizeof(nvmlGpuInstanceInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating GpuInstanceInfo")
     * via the vectorcall fast path (method-unpack mirrors other call
     * sites in this generated module). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17656, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceInfo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17656, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17656, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17655
 *     def __init__(self):
 *         self._ptr = <nvmlGpuInstanceInfo_t *>calloc(1, sizeof(nvmlGpuInstanceInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GpuInstanceInfo")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":17657
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GpuInstanceInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace any previous _owner reference with None (incref new,
   * decref old, then store). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":17658
 *             raise MemoryError("Error allocating GpuInstanceInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* _owned = True: __dealloc__ will free() the calloc'd buffer. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":17659
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":17653
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGpuInstanceInfo_t *>calloc(1, sizeof(nvmlGpuInstanceInfo_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17661
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for GpuInstanceInfo.__dealloc__: casts self and
 * forwards to the implementation. */
static void __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * wrapper; this only compiles if __Pyx_KwValues_VARARGS is a macro whose
   * expansion ignores its arguments — confirm against the Cython utility
   * code for this version. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of GpuInstanceInfo.__dealloc__: frees the wrapped struct
 * only if this instance owns it (_owned) and the pointer is non-NULL.
 * The pointer is nulled before free() so the field never dangles. */
static void __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  nvmlGpuInstanceInfo_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGpuInstanceInfo_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":17663
 *     def __dealloc__(self):
 *         cdef nvmlGpuInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: if _owned is false the result is _owned itself
   * and the pointer test is skipped. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17664
 *         cdef nvmlGpuInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":17665
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear the field before freeing to avoid leaving a dangling pointer. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":17666
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17663
 *     def __dealloc__(self):
 *         cdef nvmlGpuInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":17661
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGpuInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":17668
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuInstanceInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for GpuInstanceInfo.__repr__: casts self and forwards
 * to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
   * __Pyx_KwValues_VARARGS expanding without using its arguments — confirm
   * against the Cython utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__repr__: builds the f-string
 * "<{__name__}.GpuInstanceInfo object at {hex(id(self))}>" by formatting
 * the module __name__ and hex(id(self)) and joining five fragments.
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":17669
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GpuInstanceInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__) via format-with-empty-spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five fragments: "<", __name__, ".GpuInstanceInfo object at ",
   * hex-id, ">", with a precomputed length/max-char hint. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GpuInstanceInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 27 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17668
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GpuInstanceInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17671
 *         return f"<{__name__}.GpuInstanceInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the `ptr` property: casts self and forwards to the
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
   * __Pyx_KwValues_VARARGS expanding without using its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the wrapped
 * struct's address as a Python int (intptr_t -> PyLong). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17674
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17674, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17671
 *         return f"<{__name__}.GpuInstanceInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17676
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* "cuda/bindings/_nvml.pyx":17676
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

/* C-level accessor for GpuInstanceInfo: the wrapped struct's address as an
 * intptr_t. Pure read of self->_ptr; no Python API calls, cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_15GpuInstanceInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":17679
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for GpuInstanceInfo.__int__: casts self and forwards to
 * the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
   * __Pyx_KwValues_VARARGS expanding without using its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__int__: returns the wrapped struct's
 * address as a Python int (same value as the `ptr` property). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":17680
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17679
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17682
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceInfo other_
 *         if not isinstance(other, GpuInstanceInfo):
*/

/* Python wrapper */
/* Wrapper for GpuInstanceInfo.__eq__: casts self and forwards self/other
 * to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; relies on
   * __Pyx_KwValues_VARARGS expanding without using its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__eq__ (generated from _nvml.pyx:17682).
   Returns Py_False when `other` is not a GpuInstanceInfo; otherwise compares
   the two wrapped nvmlGpuInstanceInfo_t structs byte-wise with memcmp.
   NOTE(review): a byte-wise memcmp also compares struct padding bytes —
   presumably the buffers are fully initialized upstream; confirm in the .pyx
   template if equality ever misbehaves. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":17684
 *     def __eq__(self, other):
 *         cdef GpuInstanceInfo other_
 *         if not isinstance(other, GpuInstanceInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Generated isinstance() check against the GpuInstanceInfo extension type. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17685
 *         cdef GpuInstanceInfo other_
 *         if not isinstance(other, GpuInstanceInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17684
 *     def __eq__(self, other):
 *         cdef GpuInstanceInfo other_
 *         if not isinstance(other, GpuInstanceInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":17686
 *         if not isinstance(other, GpuInstanceInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceInfo_t)) == 0)
 * 
 */
  /* Typed assignment: re-checks the type (None would pass the test but is
     already excluded by the isinstance branch above). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo))))) __PYX_ERR(0, 17686, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":17687
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Byte-wise struct comparison of the two wrapped nvmlGpuInstanceInfo_t. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGpuInstanceInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17682
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceInfo other_
 *         if not isinstance(other, GpuInstanceInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17689
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
*/

/* Python wrapper */
/* Slot wrapper for GpuInstanceInfo.__setitem__: casts `self` and forwards
   key/val to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* Keyword values are irrelevant for the mp_ass_subscript slot. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__setitem__ (generated from _nvml.pyx:17689).
   Special case: key == 0 with a numpy.ndarray value mallocs a fresh
   nvmlGpuInstanceInfo_t, copies the array's buffer into it (via val.ctypes.data),
   marks the object as owning the buffer, and mirrors the array's writeable flag
   into _readonly.  Any other key falls through to setattr(self, key, val).
   NOTE(review): the malloc path overwrites self._ptr without freeing a buffer
   the object may already own (_owned) — looks like a leak; if so, the fix
   belongs in the .pyx template, not this generated C. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":17690
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuiting `and`: first key == 0, then isinstance(val, numpy.ndarray)
     looked up dynamically from the module globals. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17690, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 17690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17691
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 */
    /* Fresh allocation; previous _ptr value (if owned) is not freed here. */
    __pyx_v_self->_ptr = ((nvmlGpuInstanceInfo_t *)malloc((sizeof(nvmlGpuInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17692
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17693
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceInfo_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating GpuInstanceInfo") using
         the generated fastcall sequence (MemoryError is resolved dynamically). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17693, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceInfo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17693, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 17693, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17692
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17694
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(nvmlGpuInstanceInfo_t) bytes from the ndarray's data pointer.
       NOTE(review): assumes the array buffer is contiguous and at least that
       large — TODO confirm the caller validates this. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17694, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17694, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17694, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGpuInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17695
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-owned: drop any owner reference, mark _owned. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17696
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGpuInstanceInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17697
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag: read-only array => read-only wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17697, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17697, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17697, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":17690
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":17699
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat the key as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 17699, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":17689
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGpuInstanceInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17701
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement(self):
 *         """GpuInstancePlacement: """
*/

/* Python wrapper */
/* Property-getter wrapper for GpuInstanceInfo.placement: casts `self` and
   forwards to the implementation; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Keyword values are irrelevant for a property getter. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GpuInstanceInfo.placement (generated from _nvml.pyx:17701).
   Calls GpuInstancePlacement.from_ptr(<intptr_t>&self._ptr[0].placement,
   self._readonly, self) — i.e. a zero-copy view over the embedded placement
   field; `self` is passed as the third argument, presumably so the view keeps
   the backing memory alive (verify against from_ptr's contract). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17704
 *     def placement(self):
 *         """GpuInstancePlacement: """
 *         return GpuInstancePlacement.from_ptr(<intptr_t>&(self._ptr[0].placement), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @placement.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Assemble args (type object, address-as-int, readonly bool, owner) and
     fastcall the `from_ptr` method on the GpuInstancePlacement type. */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).placement))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17704, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17701
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement(self):
 *         """GpuInstancePlacement: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.placement.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17706
 *         return GpuInstancePlacement.from_ptr(<intptr_t>&(self._ptr[0].placement), self._readonly, self)
 * 
 *     @placement.setter             # <<<<<<<<<<<<<<
 *     def placement(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter wrapper for GpuInstanceInfo.placement: casts `self` and
   forwards `val` to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Keyword values are irrelevant for a property setter. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for GpuInstanceInfo.placement (generated from _nvml.pyx:17706).
   Raises ValueError if this instance is read-only; otherwise type-checks `val`
   as a GpuInstancePlacement and memcpy's one nvmlGpuInstancePlacement_t from
   val._get_ptr() into self._ptr[0].placement.
   NOTE(review): the typed assignment permits None, and None would reach
   _get_ptr() — presumably callers never pass None; confirm in the .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17708
 *     @placement.setter
 *     def placement(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         cdef GpuInstancePlacement val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17709
 *     def placement(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef GpuInstancePlacement val_ = val
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlGpuInstancePlacement_t) * 1)
 */
    /* Build and raise ValueError("This GpuInstanceInfo instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17709, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17709, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17708
 *     @placement.setter
 *     def placement(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         cdef GpuInstancePlacement val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17710
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         cdef GpuInstancePlacement val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlGpuInstancePlacement_t) * 1)
 * 
 */
  /* Typed assignment: TypeError unless val is a GpuInstancePlacement (or None). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement))))) __PYX_ERR(0, 17710, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17711
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         cdef GpuInstancePlacement val_ = val
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlGpuInstancePlacement_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* vtable call to the cdef method _get_ptr(), then copy one struct's worth. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17711, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).placement)), ((void *)__pyx_t_4), ((sizeof(nvmlGpuInstancePlacement_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":17706
 *         return GpuInstancePlacement.from_ptr(<intptr_t>&(self._ptr[0].placement), self._readonly, self)
 * 
 *     @placement.setter             # <<<<<<<<<<<<<<
 *     def placement(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.placement.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17713
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlGpuInstancePlacement_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-getter wrapper for GpuInstanceInfo.device_: casts `self` and
   forwards to the implementation; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Keyword values are irrelevant for a property getter. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GpuInstanceInfo.device_ (generated from _nvml.pyx:17713).
   Exposes the opaque nvmlDevice_t handle stored in self._ptr[0].device as a
   Python int (the handle value cast through intptr_t). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17716
 *     def device_(self):
 *         """int: """
 *         return <intptr_t>(self._ptr[0].device)             # <<<<<<<<<<<<<<
 * 
 *     @device_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).device)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17713
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlGpuInstancePlacement_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.device_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17718
 *         return <intptr_t>(self._ptr[0].device)
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter wrapper for GpuInstanceInfo.device_: casts `self` and
   forwards `val` to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Keyword values are irrelevant for a property setter. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for GpuInstanceInfo.device_ (generated from _nvml.pyx:17718).
   Raises ValueError if this instance is read-only; otherwise converts the
   Python int `val` to a pointer-sized integer and stores it as the opaque
   nvmlDevice_t handle in self._ptr[0].device. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17720
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17721
 *     def device_(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 */
    /* Build and raise ValueError("This GpuInstanceInfo instance is read-only"). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17721, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17721, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17720
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17722
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* int -> intptr_t conversion; OverflowError/TypeError propagate via PYX_ERR. */
  __pyx_t_4 = PyLong_AsSsize_t(__pyx_v_val); if (unlikely((__pyx_t_4 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17722, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).device = ((nvmlDevice_t)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":17718
 *         return <intptr_t>(self._ptr[0].device)
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.device_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17724
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

/* Python wrapper */
/* Property-getter wrapper for GpuInstanceInfo.id: casts `self` and forwards
   to the implementation; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Keyword values are irrelevant for a property getter. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for GpuInstanceInfo.id (generated from _nvml.pyx:17724).
   Returns the unsigned-int `id` field of the wrapped struct as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17727
 *     def id(self):
 *         """int: """
 *         return self._ptr[0].id             # <<<<<<<<<<<<<<
 * 
 *     @id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17724
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17729
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property-setter wrapper for GpuInstanceInfo.id: casts `self` and forwards
   `val` to the implementation.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* Keyword values are irrelevant for a property setter. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.id.__set__ (generated from _nvml.pyx:17730ff).
 * Raises ValueError if the instance was created read-only; otherwise converts
 * `val` to unsigned int and stores it into the wrapped nvmlGpuInstanceInfo_t. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17731
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].id = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17732
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].id = val
 * 
 */
    /* Vectorcall ValueError(msg) directly on the PyExc_ValueError type object. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17732, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17732, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17731
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].id = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17733
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is both a valid conversion result and the error
   * sentinel; PyErr_Occurred() disambiguates the two cases. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17733, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).id = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17729
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17735
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def profile_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Descriptor-slot wrapper for GpuInstanceInfo.profile_id.__get__.
 * Adapts the CPython __get__ signature to the typed implementation below;
 * returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the concrete extension-type struct and delegate. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.profile_id.__get__ (from _nvml.pyx:17737ff).
 * Reads the unsigned int `profileId` field from the wrapped
 * nvmlGpuInstanceInfo_t and boxes it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17738
 *     def profile_id(self):
 *         """int: """
 *         return self._ptr[0].profileId             # <<<<<<<<<<<<<<
 * 
 *     @profile_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).profileId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17738, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17735
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def profile_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.profile_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17740
 *         return self._ptr[0].profileId
 * 
 *     @profile_id.setter             # <<<<<<<<<<<<<<
 *     def profile_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-slot wrapper for GpuInstanceInfo.profile_id.__set__.
 * Adapts the CPython __set__ signature (PyObject *self, PyObject *value)
 * to the typed implementation below; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the concrete extension-type struct and delegate. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.profile_id.__set__ (from _nvml.pyx:17741ff).
 * Raises ValueError if the instance was created read-only; otherwise converts
 * `val` to unsigned int and stores it into the wrapped nvmlGpuInstanceInfo_t. */
static int __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17742
 *     @profile_id.setter
 *     def profile_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].profileId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17743
 *     def profile_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].profileId = val
 * 
 */
    /* Vectorcall ValueError(msg) directly on the PyExc_ValueError type object. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GpuInstanceInfo_instance_is};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17743, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17743, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17742
 *     @profile_id.setter
 *     def profile_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].profileId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17744
 *         if self._readonly:
 *             raise ValueError("This GpuInstanceInfo instance is read-only")
 *         self._ptr[0].profileId = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 is both a valid conversion result and the error
   * sentinel; PyErr_Occurred() disambiguates the two cases. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17744, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).profileId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17740
 *         return self._ptr[0].profileId
 * 
 *     @profile_id.setter             # <<<<<<<<<<<<<<
 *     def profile_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.profile_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17746
 *         self._ptr[0].profileId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod GpuInstanceInfo.from_data(data).
 * Handles both the METH_FASTCALL and tuple calling conventions, accepts `data`
 * positionally or by keyword, then delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_12from_data, "GpuInstanceInfo.from_data(data)\n\nCreate an GpuInstanceInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `gpu_instance_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only __pyx_kwds_len, not the comparison;
     * __builtin_expect returns its argument, so behavior is unchanged and only
     * the branch hint is misplaced. Cython codegen artifact. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17746, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17746, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 17746, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 17746, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17746, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17746, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.from_data (from _nvml.pyx:17746ff).
 * Looks up the module-level `gpu_instance_info_dtype` and delegates to the
 * shared __from_data helper, passing the GpuInstanceInfo type so the helper
 * can construct the wrapping instance. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":17753
 *             data (_numpy.ndarray): a single-element array of dtype `gpu_instance_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "gpu_instance_info_dtype", gpu_instance_info_dtype, GpuInstanceInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Runtime lookup: the dtype is a module global, resolved at call time. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_gpu_instance_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_gpu_instance_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17746
 *         self._ptr[0].profileId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17755
 *         return __from_data(data, "gpu_instance_info_dtype", gpu_instance_info_dtype, GpuInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the staticmethod
 * GpuInstanceInfo.from_ptr(intptr_t ptr, bint readonly=False, object owner=None).
 * Accepts 1-3 arguments (positionally or by keyword), converts `ptr` to
 * intptr_t and `readonly` to a C truth value, defaults `owner` to None, then
 * delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_14from_ptr, "GpuInstanceInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GpuInstanceInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only __pyx_kwds_len, not the comparison;
     * __builtin_expect returns its argument, so behavior is unchanged and only
     * the branch hint is misplaced. Cython codegen artifact. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17755, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals (fallthrough switch),
       * then merge keyword values and verify the one required arg `ptr`. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17755, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17755, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17755, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 17755, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":17756
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GpuInstanceInfo instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 17755, __pyx_L3_error) }
      }
    } else {
      /* No keywords: positional-only fast path (1-3 args). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17755, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17755, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17755, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr via PyLong_AsSsize_t; assumes intptr_t and Py_ssize_t have
     * the same width -- TODO confirm for all supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17756, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17756, __pyx_L3_error)
    } else {
      /* readonly defaults to False when not supplied. */
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 17755, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":17755
 *         return __from_data(data, "gpu_instance_info_dtype", gpu_instance_info_dtype, GpuInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.from_ptr (from _nvml.pyx:17755ff).
 * Ownership semantics (as coded below):
 *   - ptr == 0                -> ValueError.
 *   - owner is None           -> malloc a private nvmlGpuInstanceInfo_t, copy
 *                                the pointee into it, _owner=None, _owned=True
 *                                (the instance frees its own copy later).
 *   - owner is not None       -> borrow `ptr` directly, keep a reference to
 *                                `owner` to keep the memory alive, _owned=False.
 * `readonly` is stored on the instance and gates the property setters above. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17764
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17765
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)
 *         if owner is None:
 */
    /* Vectorcall ValueError(msg) directly on the PyExc_ValueError type object. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17765, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17765, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17764
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":17766
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 */
  /* Allocate the instance via tp_new directly, bypassing __init__. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17766, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17767
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17768
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 */
    /* No owner: take a private copy so the data outlives the caller's buffer. */
    __pyx_v_obj->_ptr = ((nvmlGpuInstanceInfo_t *)malloc((sizeof(nvmlGpuInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17769
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17770
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceInfo_t))
 *             obj._owner = None
 */
      /* Unlike ValueError above, MemoryError is resolved as a module global at
       * runtime (so a module-level rebinding would be honored). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17770, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GpuInstanceInfo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17770, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 17770, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17769
 *         if owner is None:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17771
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGpuInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17772
 *                 raise MemoryError("Error allocating GpuInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17773
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGpuInstanceInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17767
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GpuInstanceInfo obj = GpuInstanceInfo.__new__(GpuInstanceInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>malloc(sizeof(nvmlGpuInstanceInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":17775
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner supplied: borrow the pointer; the owner reference keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlGpuInstanceInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17776
 *         else:
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":17777
 *             obj._ptr = <nvmlGpuInstanceInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":17778
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":17779
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17755
 *         return __from_data(data, "gpu_instance_info_dtype", gpu_instance_info_dtype, GpuInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Drop the local reference to obj; on success __pyx_r holds its own. */
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python wrapper for GpuInstanceInfo.__reduce_cython__(self).
 * Rejects every positional and keyword argument beyond `self`, then
 * forwards to the implementation function (which always raises TypeError,
 * since the wrapped C pointer cannot be pickled).  The METH_FASTCALL vs.
 * tuple/dict calling convention is selected at compile time via
 * CYTHON_METH_FASTCALL; this is Cython-generated code and must stay in
 * sync with the generator's conventions. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_16__reduce_cython__, "GpuInstanceInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple-based convention: recover the positional-argument count. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* No positional arguments are accepted. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  /* No keyword arguments are accepted either. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__reduce_cython__: unconditionally
 * raises TypeError because `self._ptr` (a raw nvml struct pointer) cannot
 * be converted to a Python object for pickling.  Always returns NULL with
 * an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Python wrapper for GpuInstanceInfo.__setstate_cython__(self, __pyx_state).
 * Parses exactly one positional-or-keyword argument ("__pyx_state") under
 * both the METH_FASTCALL and tuple/dict calling conventions, then forwards
 * to the implementation function (which always raises TypeError, since the
 * type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_18__setstate_cython__, "GpuInstanceInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Tuple-based convention: recover the positional-argument count. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the parenthesis was misplaced as `unlikely(__pyx_kwds_len) < 0`;
     * unlikely() normalizes its operand to 0/1, so the negative-length error
     * check could never fire.  The corrected form matches the check generated
     * in the sibling __reduce_cython__ wrapper above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals first, then let
       * the keyword parser fill the remainder. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GpuInstanceInfo.__setstate_cython__: unconditionally
 * raises TypeError because `self._ptr` (a raw nvml struct pointer) cannot
 * be restored from pickled state.  The `__pyx_state` argument is accepted
 * but never used.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_15GpuInstanceInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GpuInstanceInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17782
 * 
 * 
 * cdef _get_compute_instance_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceInfo_t pod = nvmlComputeInstanceInfo_t()
 *     return _numpy.dtype({
*/

/* Builds the numpy structured dtype describing nvmlComputeInstanceInfo_t:
 * field names, numpy formats, byte offsets of each C member (computed from
 * a stack instance `pod`), and the total itemsize.  Returns a new reference
 * to the numpy.dtype object, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_info_dtype_offsets(void) {
  nvmlComputeInstanceInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlComputeInstanceInfo_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_compute_instance_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":17783
 * 
 * cdef _get_compute_instance_info_dtype_offsets():
 *     cdef nvmlComputeInstanceInfo_t pod = nvmlComputeInstanceInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['device_', 'gpu_instance', 'id', 'profile_id', 'placement'],
 */
  /* BUGFIX: the temporary was copied into `pod` without ever being assigned,
   * i.e. an indeterminate-value read (UB), even though only field addresses
   * of `pod` are used below.  Restore the value-initialization that the
   * `nvmlComputeInstanceInfo_t()` source line calls for. */
  __pyx_t_1 = nvmlComputeInstanceInfo_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":17784
 * cdef _get_compute_instance_info_dtype_offsets():
 *     cdef nvmlComputeInstanceInfo_t pod = nvmlComputeInstanceInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['device_', 'gpu_instance', 'id', 'profile_id', 'placement'],
 *         'formats': [_numpy.intp, _numpy.intp, _numpy.uint32, _numpy.uint32, compute_instance_placement_dtype],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up the `_numpy.dtype` callable. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17784, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17784, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":17785
 *     cdef nvmlComputeInstanceInfo_t pod = nvmlComputeInstanceInfo_t()
 *     return _numpy.dtype({
 *         'names': ['device_', 'gpu_instance', 'id', 'profile_id', 'placement'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.intp, _numpy.intp, _numpy.uint32, _numpy.uint32, compute_instance_placement_dtype],
 *         'offsets': [
 */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17785, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17785, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_device);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_device);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_device) != (0)) __PYX_ERR(0, 17785, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_gpu_instance);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_gpu_instance);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_gpu_instance) != (0)) __PYX_ERR(0, 17785, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_id) != (0)) __PYX_ERR(0, 17785, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_profile_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_profile_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_profile_id) != (0)) __PYX_ERR(0, 17785, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_placement);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_placement);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_placement) != (0)) __PYX_ERR(0, 17785, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 17785, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17786
 *     return _numpy.dtype({
 *         'names': ['device_', 'gpu_instance', 'id', 'profile_id', 'placement'],
 *         'formats': [_numpy.intp, _numpy.intp, _numpy.uint32, _numpy.uint32, compute_instance_placement_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* The nested `placement` field reuses the module-level sub-dtype. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_compute_instance_placement_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = PyList_New(5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17786, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 17786, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17786, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 17786, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 17786, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 4, __pyx_t_6) != (0)) __PYX_ERR(0, 17786, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_11) < (0)) __PYX_ERR(0, 17785, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":17788
 *         'formats': [_numpy.intp, _numpy.intp, _numpy.uint32, _numpy.uint32, compute_instance_placement_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.device)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17788, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":17789
 *         'offsets': [
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstance)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gpuInstance)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17789, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":17790
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.id)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17790, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":17791
 *             (<intptr_t>&(pod.gpuInstance)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.profileId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17791, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":17792
 *             (<intptr_t>&(pod.id)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.profileId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlComputeInstanceInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.placement)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17792, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":17787
 *         'names': ['device_', 'gpu_instance', 'id', 'profile_id', 'placement'],
 *         'formats': [_numpy.intp, _numpy.intp, _numpy.uint32, _numpy.uint32, compute_instance_placement_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.device)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gpuInstance)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_11) != (0)) __PYX_ERR(0, 17787, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 17787, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 17787, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 17787, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 17787, __pyx_L1_error);
  __pyx_t_11 = 0;
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 17785, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":17794
 *             (<intptr_t>&(pod.placement)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlComputeInstanceInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlComputeInstanceInfo_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17794, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 17785, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call `_numpy.dtype(spec)`; the unpack below avoids a bound-method
   * allocation when the callable is a method object. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17784, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17782
 * 
 * 
 * cdef _get_compute_instance_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceInfo_t pod = nvmlComputeInstanceInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_compute_instance_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17811
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlComputeInstanceInfo_t *>calloc(1, sizeof(nvmlComputeInstanceInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ComputeInstanceInfo.__init__(self).
 * Rejects every positional and keyword argument, then forwards to the
 * implementation function.  Returns 0 on success, -1 with an exception
 * set on failure (tp_init convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond `self`. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__init__:
 * allocates a zero-filled nvmlComputeInstanceInfo_t on the heap, raises
 * MemoryError if the allocation fails, and initializes the ownership
 * bookkeeping (_owner=None, _owned=True, _readonly=False) so that
 * __dealloc__ later frees the buffer.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":17812
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceInfo_t *>calloc(1, sizeof(nvmlComputeInstanceInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceInfo")
 */
  /* calloc zero-initializes the POD struct, so all fields start at 0/NULL. */
  __pyx_v_self->_ptr = ((nvmlComputeInstanceInfo_t *)calloc(1, (sizeof(nvmlComputeInstanceInfo_t))));

  /* "cuda/bindings/_nvml.pyx":17813
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceInfo_t *>calloc(1, sizeof(nvmlComputeInstanceInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ComputeInstanceInfo")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17814
 *         self._ptr = <nvmlComputeInstanceInfo_t *>calloc(1, sizeof(nvmlComputeInstanceInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating ComputeInstanceInfo"). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17814, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance_3};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17814, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17814, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17813
 *     def __init__(self):
 *         self._ptr = <nvmlComputeInstanceInfo_t *>calloc(1, sizeof(nvmlComputeInstanceInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating ComputeInstanceInfo")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":17815
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating ComputeInstanceInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (INCREF new, DECREF old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":17816
 *             raise MemoryError("Error allocating ComputeInstanceInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":17817
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":17811
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlComputeInstanceInfo_t *>calloc(1, sizeof(nvmlComputeInstanceInfo_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17819
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlComputeInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Python wrapper for ComputeInstanceInfo.__dealloc__ (tp_dealloc helper).
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not declared in this
 * function; this only compiles because __Pyx_KwValues_VARARGS is presumably
 * a macro that discards its arguments (so they never expand) — confirm
 * against the Cython utility-code definitions before hand-editing. */
static void __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  nvmlComputeInstanceInfo_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlComputeInstanceInfo_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":17821
 *     def __dealloc__(self):
 *         cdef nvmlComputeInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17822
 *         cdef nvmlComputeInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":17823
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":17824
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17821
 *     def __dealloc__(self):
 *         cdef nvmlComputeInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":17819
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlComputeInstanceInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":17826
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ComputeInstanceInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* CPython-facing slot wrapper for ComputeInstanceInfo.__repr__: downcasts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * typed implementation (__pyx_pf_..._4__repr__). Generated by Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Delegate to the implementation; it returns a new reference (or NULL on error). */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__repr__.
 * Builds the f-string
 *   f"<{__name__}.ComputeInstanceInfo object at {hex(id(self))}>"
 * via Cython's unicode-join fast path: format __name__, compute hex(id(self)),
 * then join five fragments (literal "<", module name, literal
 * ".ComputeInstanceInfo object at ", hex address, literal ">").
 * Returns a new unicode reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":17827
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.ComputeInstanceInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up module-level __name__ and str-format it (empty format spec). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) -> unicode, forced to exact str via __Pyx_PyUnicode_Unicode. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the five fragments; the length hint is the sum of literal widths
   * (2 + 31 chars) plus the two dynamic parts, and the max-char mask picks
   * the narrowest internal unicode representation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_ComputeInstanceInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 31 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17826
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.ComputeInstanceInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17829
 *         return f"<{__name__}.ComputeInstanceInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* CPython getter wrapper for the ComputeInstanceInfo.ptr property:
 * downcasts self and delegates to the typed __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ComputeInstanceInfo.ptr property getter:
 * returns the underlying nvmlComputeInstanceInfo_t* address as a Python int
 * (cast through intptr_t, boxed with PyLong_FromSsize_t). A NULL pointer is
 * returned as 0; no ownership is transferred. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17832
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17832, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17829
 *         return f"<{__name__}.ComputeInstanceInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17834
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor (cdef _get_ptr): return the underlying
 * nvmlComputeInstanceInfo_t* as an integer address, without touching the
 * Python API. Behaviorally identical to the generated version; the
 * temporary/goto scaffolding is collapsed into a single return. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_19ComputeInstanceInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":17835 — return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":17837
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* CPython slot wrapper for ComputeInstanceInfo.__int__: downcasts self and
 * delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__int__: int(obj) yields the raw
 * pointer address, same value as the .ptr property (0 when _ptr is NULL). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":17838
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17838, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17837
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17840
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceInfo other_
 *         if not isinstance(other, ComputeInstanceInfo):
*/

/* Python wrapper */
/* CPython slot wrapper for ComputeInstanceInfo.__eq__: downcasts self and
 * forwards (self, other) to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__eq__:
 * returns False for non-ComputeInstanceInfo operands, otherwise compares the
 * two underlying nvmlComputeInstanceInfo_t structs bytewise with memcmp.
 *
 * NOTE(review): if either object's _ptr is NULL (e.g. a default-constructed
 * instance), memcmp dereferences a null pointer — undefined behavior. This
 * comes from the generated .pyx; a NULL guard would belong upstream. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":17842
 *     def __eq__(self, other):
 *         cdef ComputeInstanceInfo other_
 *         if not isinstance(other, ComputeInstanceInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":17843
 *         cdef ComputeInstanceInfo other_
 *         if not isinstance(other, ComputeInstanceInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":17842
 *     def __eq__(self, other):
 *         cdef ComputeInstanceInfo other_
 *         if not isinstance(other, ComputeInstanceInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":17844
 *         if not isinstance(other, ComputeInstanceInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceInfo_t)) == 0)
 * 
 */
  /* Re-checked cast to the extension type (isinstance above already ruled
   * out non-instances, including None). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo))))) __PYX_ERR(0, 17844, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":17845
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Bytewise struct equality; see NOTE(review) above about NULL pointers. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlComputeInstanceInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17840
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceInfo other_
 *         if not isinstance(other, ComputeInstanceInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17847
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
*/

/* Python wrapper */
/* CPython mp_ass_subscript wrapper for ComputeInstanceInfo.__setitem__:
 * downcasts self and forwards (key, val) to the typed implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__setitem__.
 * Two modes:
 *   obj[0] = ndarray  -> malloc a fresh nvmlComputeInstanceInfo_t, memcpy the
 *                        array's buffer into it, mark the struct as owned, and
 *                        propagate read-only-ness from the array's writeable flag;
 *   obj[key] = val    -> fall back to setattr(self, key, val) for any other key
 *                        (so obj["placement"] = ... routes to the property setter).
 *
 * NOTE(review): in the ndarray branch, self->_ptr is overwritten without
 * freeing a previously owned buffer — repeated obj[0] = arr assignments leak.
 * The memcpy also trusts val.ctypes.data without checking the array's size,
 * dtype, or contiguity. Both originate in the .pyx template; fix upstream. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":17848
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: key == 0 first, then the (lazily imported) numpy
   * ndarray isinstance check. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17848, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17848, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17848, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 17848, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17849
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 */
    /* See NOTE(review) above: overwrites any previously owned buffer. */
    __pyx_v_self->_ptr = ((nvmlComputeInstanceInfo_t *)malloc((sizeof(nvmlComputeInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17850
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17851
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceInfo_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating ComputeInstanceInfo")
       * via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17851, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance_3};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17851, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 17851, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17850
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":17852
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Copy sizeof(struct) bytes from the ndarray's buffer address
     * (val.ctypes.data as an integer); see NOTE(review) about missing
     * size/contiguity validation. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17852, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17852, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17852, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlComputeInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17853
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No external owner: this instance now owns the freshly copied buffer. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17854
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlComputeInstanceInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17855
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeability into the read-only flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17855, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17855, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17855, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":17848
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":17857
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-(0, ndarray) assignments go through normal attribute setting. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 17857, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":17847
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlComputeInstanceInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17859
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement(self):
 *         """ComputeInstancePlacement: """
*/

/* Python wrapper */
/* CPython getter wrapper for the ComputeInstanceInfo.placement property:
 * downcasts self and delegates to the typed __get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ComputeInstanceInfo.placement property getter.
 * Returns a ComputeInstancePlacement view created via
 * ComputeInstancePlacement.from_ptr(address_of(self._ptr[0].placement),
 * self._readonly, self): a non-owning wrapper over the embedded placement
 * field, with self passed as the owner to keep the backing buffer alive.
 * NOTE(review): dereferences self->_ptr without a NULL check — presumably
 * callers only use this on initialized instances; verify upstream. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17862
 *     def placement(self):
 *         """ComputeInstancePlacement: """
 *         return ComputeInstancePlacement.from_ptr(<intptr_t>&(self._ptr[0].placement), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @placement.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Assemble (cls, address, readonly, self) and vectorcall cls.from_ptr. */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).placement))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17862, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17862, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17862, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17859
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def placement(self):
 *         """ComputeInstancePlacement: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.placement.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17864
 *         return ComputeInstancePlacement.from_ptr(<intptr_t>&(self._ptr[0].placement), self._readonly, self)
 * 
 *     @placement.setter             # <<<<<<<<<<<<<<
 *     def placement(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython setter wrapper for the ComputeInstanceInfo.placement property:
 * downcasts self and forwards val to the typed __set__ implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the ComputeInstanceInfo.placement property setter.
 * Raises ValueError when the instance is read-only; otherwise casts val to
 * ComputeInstancePlacement and memcpy's its underlying struct into the
 * embedded placement field of self._ptr[0].
 * NOTE(review): the TypeTest cast also accepts None, in which case
 * val_._get_ptr() would dereference a NULL object — presumably callers never
 * pass None; verify against the .pyx contract. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17866
 *     @placement.setter
 *     def placement(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         cdef ComputeInstancePlacement val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17867
 *     def placement(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef ComputeInstancePlacement val_ = val
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlComputeInstancePlacement_t) * 1)
 */
    /* Build and raise the ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceInfo_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17867, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17867, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17866
 *     @placement.setter
 *     def placement(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         cdef ComputeInstancePlacement val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17868
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         cdef ComputeInstancePlacement val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlComputeInstancePlacement_t) * 1)
 * 
 */
  /* Checked cast to ComputeInstancePlacement (TypeError if neither an
   * instance nor None). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement))))) __PYX_ERR(0, 17868, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":17869
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         cdef ComputeInstancePlacement val_ = val
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlComputeInstancePlacement_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fetch the source struct address via the C-level vtable call, then copy
   * one nvmlComputeInstancePlacement_t into the embedded field. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17869, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).placement)), ((void *)__pyx_t_4), ((sizeof(nvmlComputeInstancePlacement_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":17864
 *         return ComputeInstancePlacement.from_ptr(<intptr_t>&(self._ptr[0].placement), self._readonly, self)
 * 
 *     @placement.setter             # <<<<<<<<<<<<<<
 *     def placement(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.placement.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17871
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlComputeInstancePlacement_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `device_` property getter: casts the generic
 * PyObject* receiver to the ComputeInstanceInfo extension-type struct and
 * delegates to the typed implementation. Cython-generated; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device____get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `device_` property getter: exposes the struct's
 * nvmlDevice_t handle (an opaque pointer) to Python as its integer address. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device____get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17874
 *     def device_(self):
 *         """int: """
 *         return <intptr_t>(self._ptr[0].device)             # <<<<<<<<<<<<<<
 * 
 *     @device_.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer-sized handle to a Python int; NULL signals an error. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).device)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17874, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17871
 *         memcpy(<void *>&(self._ptr[0].placement), <void *>(val_._get_ptr()), sizeof(nvmlComputeInstancePlacement_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def device_(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.device_.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17876
 *         return <intptr_t>(self._ptr[0].device)
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `device_` property setter: casts the receiver
 * to the extension-type struct and forwards the new value to the typed
 * implementation. Returns 0 on success, -1 on error (CPython setter ABI). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `device_` property setter: raises ValueError when the
 * wrapper is read-only, otherwise converts the Python int address back into an
 * nvmlDevice_t handle and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17878
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17879
 *     def device_(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceInfo_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17879, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17879, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17878
 *     @device_.setter
 *     def device_(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17880
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Interpret the Python int as a pointer-sized address and store the handle. */
  __pyx_t_4 = PyLong_AsSsize_t(__pyx_v_val); if (unlikely((__pyx_t_4 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17880, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).device = ((nvmlDevice_t)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":17876
 *         return <intptr_t>(self._ptr[0].device)
 * 
 *     @device_.setter             # <<<<<<<<<<<<<<
 *     def device_(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.device_.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17882
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `gpu_instance` property getter: casts the
 * receiver to the extension-type struct and delegates to the typed
 * implementation. Cython-generated; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `gpu_instance` property getter: exposes the struct's
 * nvmlGpuInstance_t handle (an opaque pointer) as its integer address. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17885
 *     def gpu_instance(self):
 *         """int: """
 *         return <intptr_t>(self._ptr[0].gpuInstance)             # <<<<<<<<<<<<<<
 * 
 *     @gpu_instance.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer-sized handle to a Python int; NULL signals an error. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).gpuInstance)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17882
 *         self._ptr[0].device = <nvmlDevice_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def gpu_instance(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.gpu_instance.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17887
 *         return <intptr_t>(self._ptr[0].gpuInstance)
 * 
 *     @gpu_instance.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `gpu_instance` property setter: casts the
 * receiver and forwards the value to the typed implementation. Returns 0 on
 * success, -1 on error (CPython setter ABI). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `gpu_instance` property setter: raises ValueError when
 * the wrapper is read-only, otherwise converts the Python int address back
 * into an nvmlGpuInstance_t handle and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17889
 *     @gpu_instance.setter
 *     def gpu_instance(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].gpuInstance = <nvmlGpuInstance_t><intptr_t>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17890
 *     def gpu_instance(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].gpuInstance = <nvmlGpuInstance_t><intptr_t>val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceInfo_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17890, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17890, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17889
 *     @gpu_instance.setter
 *     def gpu_instance(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].gpuInstance = <nvmlGpuInstance_t><intptr_t>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17891
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].gpuInstance = <nvmlGpuInstance_t><intptr_t>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Interpret the Python int as a pointer-sized address and store the handle. */
  __pyx_t_4 = PyLong_AsSsize_t(__pyx_v_val); if (unlikely((__pyx_t_4 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17891, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).gpuInstance = ((nvmlGpuInstance_t)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":17887
 *         return <intptr_t>(self._ptr[0].gpuInstance)
 * 
 *     @gpu_instance.setter             # <<<<<<<<<<<<<<
 *     def gpu_instance(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.gpu_instance.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17893
 *         self._ptr[0].gpuInstance = <nvmlGpuInstance_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `id` property getter: casts the receiver to
 * the extension-type struct and delegates to the typed implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `id` property getter: returns the struct's unsigned
 * int `id` field as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17896
 *     def id(self):
 *         """int: """
 *         return self._ptr[0].id             # <<<<<<<<<<<<<<
 * 
 *     @id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field as a Python int; NULL signals an error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17893
 *         self._ptr[0].gpuInstance = <nvmlGpuInstance_t><intptr_t>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17898
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `id` property setter: casts the receiver and
 * forwards the value to the typed implementation. Returns 0 on success, -1 on
 * error (CPython setter ABI). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `id` property setter: raises ValueError when the
 * wrapper is read-only, otherwise converts the Python int (with range check)
 * to unsigned int and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17900
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].id = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17901
 *     def id(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].id = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceInfo_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17901, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17901, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17900
 *     @id.setter
 *     def id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].id = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17902
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].id = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is an ambiguous sentinel, so PyErr_Occurred disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17902, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).id = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17898
 *         return self._ptr[0].id
 * 
 *     @id.setter             # <<<<<<<<<<<<<<
 *     def id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17904
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def profile_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `profile_id` property getter: casts the
 * receiver to the extension-type struct and delegates to the typed
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `profile_id` property getter: returns the struct's
 * unsigned int `profileId` field as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":17907
 *     def profile_id(self):
 *         """int: """
 *         return self._ptr[0].profileId             # <<<<<<<<<<<<<<
 * 
 *     @profile_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned int field as a Python int; NULL signals an error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).profileId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17907, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17904
 *         self._ptr[0].id = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def profile_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.profile_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17909
 *         return self._ptr[0].profileId
 * 
 *     @profile_id.setter             # <<<<<<<<<<<<<<
 *     def profile_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `profile_id` property setter: casts the
 * receiver and forwards the value to the typed implementation. Returns 0 on
 * success, -1 on error (CPython setter ABI). */
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `profile_id` property setter: raises ValueError when
 * the wrapper is read-only, otherwise converts the Python int (with range
 * check) to unsigned int and stores it in the wrapped struct. */
static int __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":17911
 *     @profile_id.setter
 *     def profile_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].profileId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":17912
 *     def profile_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].profileId = val
 * 
 */
    /* Build ValueError(msg) via the vectorcall helper, then raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_ComputeInstanceInfo_instanc};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17912, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 17912, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17911
 *     @profile_id.setter
 *     def profile_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].profileId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":17913
 *         if self._readonly:
 *             raise ValueError("This ComputeInstanceInfo instance is read-only")
 *         self._ptr[0].profileId = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 is an ambiguous sentinel, so PyErr_Occurred disambiguates. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17913, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).profileId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":17909
 *         return self._ptr[0].profileId
 * 
 *     @profile_id.setter             # <<<<<<<<<<<<<<
 *     def profile_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.profile_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17915
 *         self._ptr[0].profileId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method ComputeInstanceInfo.from_data(data).
 * Accepts exactly one positional or keyword argument named `data`, then calls the
 * typed implementation. Returns a new reference, or NULL with an exception set.
 *
 * FIX (review): the keyword-length error check was written as
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose value is 0 or 1,
 * so `< 0` was always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. The parenthesis is moved so
 * the comparison happens inside `unlikely`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12from_data, "ComputeInstanceInfo.from_data(data)\n\nCreate an ComputeInstanceInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `compute_instance_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside unlikely() so a negative error return is detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17915, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17915, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 17915, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 17915, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17915, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17915, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.from_data(data): looks up the module
 * global `compute_instance_info_dtype` and delegates to the shared
 * __from_data helper, which wraps the given NumPy array in a new
 * ComputeInstanceInfo instance. Returns a new reference or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":17922
 *             data (_numpy.ndarray): a single-element array of dtype `compute_instance_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "compute_instance_info_dtype", compute_instance_info_dtype, ComputeInstanceInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the expected dtype object from the module namespace. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_compute_instance_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17922, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_compute_instance_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17922, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17915
 *         self._ptr[0].profileId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17924
 *         return __from_data(data, "compute_instance_info_dtype", compute_instance_info_dtype, ComputeInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceInfo instance wrapping the given pointer.
*/

/* Python wrapper for ComputeInstanceInfo.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks positional/keyword arguments into C values (intptr_t, int, PyObject*)
 * and forwards to __pyx_pf_..._14from_ptr.  Owns a reference to each entry of
 * values[] until the __pyx_L3_error / exit cleanup loops release them. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_14from_ptr, "ComputeInstanceInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an ComputeInstanceInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): the comparison must be inside unlikely().  With GCC/Clang
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only 0 or 1,
     * so the previous form `unlikely(__pyx_kwds_len) < 0` could never detect a
     * failing __Pyx_NumKwargs_FASTCALL.  Matches the correct form used by the
     * other wrappers in this file (see __reduce_cython__). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17924, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 17924, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":17925
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an ComputeInstanceInfo instance wrapping the given pointer.
 * 
*/
      /* Apply default owner=None, then verify the one required argument (ptr)
       * was supplied either positionally or by keyword. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 17924, __pyx_L3_error) }
      }
    } else {
      /* No keyword arguments: purely positional dispatch on argument count. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17924, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python-level values to C: ptr -> intptr_t, readonly -> int (default 0). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17925, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17925, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 17924, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":17924
 *         return __from_data(data, "compute_instance_info_dtype", compute_instance_info_dtype, ComputeInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceInfo instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.from_ptr (pyx lines 17925-17948).
 * Raises ValueError for a null pointer.  If `owner` is None, the struct at
 * `ptr` is copied into freshly malloc'ed memory and the new object owns it
 * (_owned = True); otherwise the pointer is borrowed and `owner` is stored on
 * the object (presumably to keep the backing memory alive — per the docstring,
 * "The Python object that owns the pointer"). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":17933
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17934
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)
 *         if owner is None:
*/
    /* Vectorcall ValueError("ptr must not be null (0)") and raise it. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17934, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17934, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17933
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)
*/
  }

  /* "cuda/bindings/_nvml.pyx":17935
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
*/
  /* Allocate the wrapper instance directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17935, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17936
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* Copy path: no owner given, so duplicate the struct into memory the
     * wrapper owns and must later free. */
    /* "cuda/bindings/_nvml.pyx":17937
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
*/
    __pyx_v_obj->_ptr = ((nvmlComputeInstanceInfo_t *)malloc((sizeof(nvmlComputeInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17938
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceInfo_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":17939
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceInfo_t))
 *             obj._owner = None
*/
      /* Look up MemoryError as a module global (it may be shadowed), call it
       * with the message, and raise the resulting exception instance. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17939, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_ComputeInstance_3};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17939, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 17939, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":17938
 *         if owner is None:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceInfo_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":17940
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlComputeInstanceInfo_t))));

    /* "cuda/bindings/_nvml.pyx":17941
 *                 raise MemoryError("Error allocating ComputeInstanceInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":17942
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlComputeInstanceInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":17936
 *             raise ValueError("ptr must not be null (0)")
 *         cdef ComputeInstanceInfo obj = ComputeInstanceInfo.__new__(ComputeInstanceInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>malloc(sizeof(nvmlComputeInstanceInfo_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":17944
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Borrow path: keep the caller's pointer and hold a reference to `owner`. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlComputeInstanceInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17945
 *         else:
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":17946
 *             obj._ptr = <nvmlComputeInstanceInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":17947
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":17948
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17924
 *         return __from_data(data, "compute_instance_info_dtype", compute_instance_info_dtype, ComputeInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceInfo instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: release temporaries, record traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for ComputeInstanceInfo.__reduce_cython__(self).
 * Rejects any positional or keyword arguments, then delegates to the
 * implementation (which always raises TypeError — pickling is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_16__reduce_cython__, "ComputeInstanceInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__reduce_cython__: unconditionally
 * raises TypeError because the wrapped C pointer (_ptr) cannot be pickled. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with a fixed message; control always reaches L1_error. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper for ComputeInstanceInfo.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one argument (__pyx_state) and delegates to the
 * implementation (which always raises TypeError — pickling is unsupported). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_18__setstate_cython__, "ComputeInstanceInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): the comparison must be inside unlikely().  With GCC/Clang
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only 0 or 1,
     * so the previous form `unlikely(__pyx_kwds_len) < 0` could never detect a
     * failing __Pyx_NumKwargs_FASTCALL.  Matches the correct form used by the
     * other wrappers in this file (see __reduce_cython__). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of ComputeInstanceInfo.__setstate_cython__: unconditionally
 * raises TypeError because the wrapped C pointer (_ptr) cannot be unpickled;
 * the __pyx_state argument is ignored. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with a fixed message; control always reaches L1_error. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.ComputeInstanceInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17951
 * 
 * 
 * cdef _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t pod = nvmlEccSramUniqueUncorrectedErrorCounts_v1_t()
 *     return _numpy.dtype({
 */

/* Builds a numpy structured dtype describing the in-memory layout of
   nvmlEccSramUniqueUncorrectedErrorCounts_v1_t: field names
   ('version', 'entry_count', 'entries'), their numpy formats, byte offsets
   computed from a stack instance, and the struct's total itemsize.
   Returns a new reference to the dtype object, or NULL with an exception
   set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets(void) {
  nvmlEccSramUniqueUncorrectedErrorCounts_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* FIX: value-initialize the temporary. The Cython source is
     `pod = nvmlEccSramUniqueUncorrectedErrorCounts_v1_t()`, i.e. a
     value-initialized struct, but the generated code copied an
     uninitialized `__pyx_t_1` into `pod` (only field addresses are used
     below, yet copying indeterminate POD bytes is still wrong vs. the
     stated intent). Zero-init makes the copy well-defined and matches
     the .pyx semantics without changing any computed offset. */
  nvmlEccSramUniqueUncorrectedErrorCounts_v1_t __pyx_t_1 = {0};
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":17952
 * 
 * cdef _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets():
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t pod = nvmlEccSramUniqueUncorrectedErrorCounts_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'entry_count', 'entries'],
 */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":17953
 * cdef _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets():
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t pod = nvmlEccSramUniqueUncorrectedErrorCounts_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'entry_count', 'entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up the module-level `_numpy` and fetch its `dtype` callable. */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":17954
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t pod = nvmlEccSramUniqueUncorrectedErrorCounts_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'entry_count', 'entries'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 */
  /* Assemble the 4-key dtype spec dict: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17954, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17954, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 17954, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_entry_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_entry_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_entry_count) != (0)) __PYX_ERR(0, 17954, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_entries);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_entries);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_entries) != (0)) __PYX_ERR(0, 17954, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 17954, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17955
 *     return _numpy.dtype({
 *         'names': ['version', 'entry_count', 'entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 17955, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 17955, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 17955, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 17954, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":17957
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.entryCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entries)) - (<intptr_t>&pod),
 */
  /* Field offsets are derived from the addresses of the stack instance's
     members; only addresses are taken, member values are never read. */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17957, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":17958
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entryCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.entries)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.entryCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":17959
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entryCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entries)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.entries)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":17956
 *         'names': ['version', 'entry_count', 'entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entryCount)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17956, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 17956, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 17956, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 17956, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 17954, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":17961
 *             (<intptr_t>&(pod.entries)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17961, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 17954, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(spec); bound-method unpacking avoids an extra tuple. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17953, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17951
 * 
 * 
 * cdef _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t pod = nvmlEccSramUniqueUncorrectedErrorCounts_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17979
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>calloc(1, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *         if self._ptr == NULL:
 */

/* Python wrapper */
/* tp_init-level wrapper for EccSramUniqueUncorrectedErrorCounts_v1.__init__:
   rejects any positional or keyword arguments (the .pyx signature takes only
   self), then delegates to the implementation function. Returns 0 on
   success, -1 with an exception set on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the non-fast path must check for a negative
     result (error) from PyTuple_Size. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __init__ implementation: zero-allocates the wrapped
   nvmlEccSramUniqueUncorrectedErrorCounts_v1_t via calloc, raising
   MemoryError on allocation failure, then initializes the ownership
   bookkeeping fields (_owner=None, _owned=True, _readonly=False,
   _refs={}). _owned=True means __dealloc__ will free the allocation.
   Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":17980
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>calloc(1, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 */
  /* calloc zero-fills the struct, so all NVML fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *)calloc(1, (sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))));

  /* "cuda/bindings/_nvml.pyx":17981
 *     def __init__(self):
 *         self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>calloc(1, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":17982
 *         self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>calloc(1, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(msg). MemoryError is looked up as a
       module global (allowing shadowing), then called with one argument
       via the fast-call protocol. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17982, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EccSramUniqueUn};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17982, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 17982, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":17981
 *     def __init__(self):
 *         self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>calloc(1, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":17983
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: incref new value, decref old. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":17984
 *             raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":17985
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":17986
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":17979
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>calloc(1, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17988
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

/* Python wrapper */
/* tp_dealloc-level wrapper: forwards straight to the __dealloc__
   implementation; cannot fail and returns nothing.
   NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this
   wrapper; the line below presumably compiles only because
   __Pyx_KwValues_VARARGS is a macro that does not evaluate its
   arguments -- confirm against the Cython utility code. */
static void __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":17990
 *     def __dealloc__(self):
 *         cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":17991
 *         cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":17992
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":17993
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":17990
 *     def __dealloc__(self):
 *         cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":17988
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":17995
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.EccSramUniqueUncorrectedErrorCounts_v1 object at {hex(id(self))}>"
 * 
 */

/* Python wrapper */
/* tp_repr-level wrapper: forwards to the __repr__ implementation and
   returns its result (NULL on error).
   NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; this
   presumably compiles only because __Pyx_KwValues_VARARGS is a macro that
   ignores its arguments -- confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __repr__ implementation: builds the f-string
   f"<{__name__}.EccSramUniqueUncorrectedErrorCounts_v1 object at {hex(id(self))}>"
   by formatting the module __name__, hex(id(self)), and joining the five
   fragments (literal "<", name, literal middle, address, literal ">") into
   one unicode object. Returns a new reference, or NULL with an exception
   set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];   /* the five fragments passed to the unicode join */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":17996
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.EccSramUniqueUncorrectedErrorCounts_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module-level __name__ into a str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to str. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal/dynamic fragments; length/max-char hints are precomputed
     from the interned literals plus the two dynamic parts. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_EccSramUniqueUncorrectedErrorCo;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 50 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17995
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.EccSramUniqueUncorrectedErrorCounts_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":17998
 *         return f"<{__name__}.EccSramUniqueUncorrectedErrorCounts_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

/* Python wrapper */
/* Getter wrapper for the `ptr` property: forwards to the implementation
   and returns its result (NULL on error).
   NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; this
   presumably compiles only because __Pyx_KwValues_VARARGS is a macro that
   ignores its arguments -- confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `ptr` property getter implementation: returns <intptr_t>self._ptr as a
   Python int (the raw address of the wrapped struct). Returns a new
   reference, or NULL with an exception set.
   NOTE(review): the address is converted via PyLong_FromSsize_t, which
   assumes intptr_t and Py_ssize_t have the same width -- true on common
   platforms; confirm for any exotic target. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18001
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":17998
 *         return f"<{__name__}.EccSramUniqueUncorrectedErrorCounts_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level helper for EccSramUniqueUncorrectedErrorCounts_v1._get_ptr
   (cuda/bindings/_nvml.pyx:18003-18004):

       cdef intptr_t _get_ptr(self):
           return <intptr_t>(self._ptr)

   Returns the wrapped struct's raw address as an intptr_t; cannot fail
   and touches no Python state. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":18006
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

/* Python wrapper */
/* nb_int-level wrapper for __int__: forwards to the implementation and
   returns its result (NULL on error).
   NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; this
   presumably compiles only because __Pyx_KwValues_VARARGS is a macro that
   ignores its arguments -- confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __int__ implementation: int(obj) yields the raw address of the wrapped
   struct, mirroring the `ptr` property. Returns a new reference, or NULL
   with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18007
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18006
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18009
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 other_
 *         if not isinstance(other, EccSramUniqueUncorrectedErrorCounts_v1):
 */

/* Python wrapper */
/* __eq__ wrapper: forwards (self, other) to the implementation and
   returns its result (NULL on error).
   NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here; this
   presumably compiles only because __Pyx_KwValues_VARARGS is a macro that
   ignores its arguments -- confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of EccSramUniqueUncorrectedErrorCounts_v1.__eq__ (generated
 * from _nvml.pyx:18009).  Semantics of the original Cython source:
 *   - return False when `other` is not an EccSramUniqueUncorrectedErrorCounts_v1;
 *   - otherwise compare the two wrapped nvml structs bytewise with memcmp
 *     and return the resulting Python bool.
 * Returns a new reference on success, NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18011
 *     def __eq__(self, other):
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 other_
 *         if not isinstance(other, EccSramUniqueUncorrectedErrorCounts_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance check against the extension type; a None `other` fails this
     test too, so the memcmp path below never sees a None pointer. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18012
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 other_
 *         if not isinstance(other, EccSramUniqueUncorrectedErrorCounts_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18011
 *     def __eq__(self, other):
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 other_
 *         if not isinstance(other, EccSramUniqueUncorrectedErrorCounts_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18013
 *         if not isinstance(other, EccSramUniqueUncorrectedErrorCounts_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t)) == 0)
 * 
 */
  /* typed assignment `other_ = other` — re-checks the type (or None) per
     Cython's cast rules before storing the struct pointer. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1))))) __PYX_ERR(0, 18013, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18014
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* bytewise struct comparison: equal iff every byte of the two wrapped
     nvmlEccSramUniqueUncorrectedErrorCounts_v1_t structs matches. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18014, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18009
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 other_
 *         if not isinstance(other, EccSramUniqueUncorrectedErrorCounts_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18016
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
*/

/* Python wrapper */
/*
 * Python-level entry point for EccSramUniqueUncorrectedErrorCounts_v1.__setitem__.
 * Casts `self` to the extension-type struct and forwards (key, val) unchanged
 * to the implementation function.  Returns 0 on success, -1 on error, as
 * required by the sq_ass_item / mp_ass_subscript slot convention.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of EccSramUniqueUncorrectedErrorCounts_v1.__setitem__
 * (generated from _nvml.pyx:18016).  Original Cython semantics:
 *   - obj[0] = <numpy.ndarray>: malloc a fresh nvml struct, memcpy the
 *     array's buffer (via val.ctypes.data) into it, take ownership
 *     (_owner = None, _owned = True) and mirror the array's writeable flag
 *     into _readonly;
 *   - any other (key, val): fall back to setattr(self, key, val), i.e.
 *     route to the named property setters.
 * NOTE(review): the ndarray branch does not free a previously owned _ptr
 * before overwriting it — whether the old pointer can leak here depends on
 * this class's lifecycle elsewhere in the .pyx; not verifiable from this
 * chunk.  Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18017
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if self._ptr == NULL:
 */
  /* short-circuit `and`: check key == 0 first, then the ndarray isinstance
     (looked up dynamically from the imported numpy module). */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18017, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18017, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18018
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 */
    /* allocate owned storage for one nvml struct */
    __pyx_v_self->_ptr = ((nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *)malloc((sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18019
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18020
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             self._owner = None
 */
      /* raise MemoryError(...) — MemoryError is looked up as a module-level
         name here (not the C builtin slot), then called and raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18020, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EccSramUniqueUn};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18020, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18020, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18019
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18021
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* fetch val.ctypes.data (an int holding the array's buffer address),
       convert it to intptr_t, and copy one struct's worth of bytes from it.
       NOTE(review): assumes the ndarray buffer holds at least
       sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) bytes — this is
       a contract of the Cython-level caller, not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18021, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18021, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18021, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18022
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* the data was copied, so nothing external keeps it alive: drop the
       owner reference (replacing it with None). */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18023
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    /* mark the malloc'd buffer as owned by this instance */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18024
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* propagate the source array's writeability: a non-writeable ndarray
       yields a read-only wrapper (property setters will refuse writes). */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18024, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18024, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18024, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18017
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18026
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* generic path: delegate to attribute assignment (named properties) */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18026, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18016
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18028
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
*/

/* Python wrapper */
/*
 * Python-level getter wrapper for the `version` property.  Casts `self` to
 * the extension-type struct and delegates to the getter implementation.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Getter for the `version` property ("int: the API version number",
 * _nvml.pyx:18031).  Reads the unsigned int `version` field from the
 * wrapped nvml struct and boxes it as a Python int.  Returns a new
 * reference, or NULL with an exception set on failure.
 * NOTE(review): dereferences self._ptr[0] without a NULL check — callers
 * must ensure the instance has been initialized with a valid pointer.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18031
 *     def version(self):
 *         """int: the API version number"""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18031, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18028
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: the API version number"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18033
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/*
 * Python-level setter wrapper for the `version` property.  Casts `self` and
 * forwards the value to the setter implementation; returns 0/-1 per the
 * descriptor __set__ slot convention.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Setter for the `version` property (_nvml.pyx:18034).  Semantics:
 *   - raise ValueError when the instance is flagged read-only;
 *   - otherwise convert `val` to unsigned int (raising on overflow or
 *     non-integer input) and store it into the wrapped struct's `version`
 *     field.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18035
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18036
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* build and raise ValueError via a vectorcall on the exception type */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramUniqueUncorrectedErr};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18036, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18036, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18035
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18037
 *         if self._readonly:
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* convert-and-store; __Pyx_PyLong_As_unsigned_int raises for values that
     do not fit an unsigned int */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18037, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18033
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18039
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def entries(self):
 *         """int: pointer to caller-supplied buffer to return the SRAM unique uncorrected ECC error count entries"""
*/

/* Python wrapper */
/*
 * Python-level getter wrapper for the `entries` property.  Casts `self` to
 * the extension-type struct and delegates to the getter implementation.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Getter for the `entries` property (_nvml.pyx:18041).  Semantics:
 *   - return an empty list when the struct's `entries` pointer is NULL or
 *     `entryCount` is 0;
 *   - otherwise return
 *     EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<addr>, entryCount),
 *     i.e. a wrapper view over the caller-supplied entry buffer (no copy
 *     is made here; from_ptr's ownership semantics are defined elsewhere).
 * Returns a new reference, or NULL with an exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18042
 *     def entries(self):
 *         """int: pointer to caller-supplied buffer to return the SRAM unique uncorrected ECC error count entries"""
 *         if self._ptr[0].entries == NULL or self._ptr[0].entryCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<intptr_t>(self._ptr[0].entries), self._ptr[0].entryCount)
 */
  /* short-circuit `or`: NULL pointer first, then zero count */
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).entries == NULL);
  if (!__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).entryCount == 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18043
 *         """int: pointer to caller-supplied buffer to return the SRAM unique uncorrected ECC error count entries"""
 *         if self._ptr[0].entries == NULL or self._ptr[0].entryCount == 0:
 *             return []             # <<<<<<<<<<<<<<
 *         return EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<intptr_t>(self._ptr[0].entries), self._ptr[0].entryCount)
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18043, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18042
 *     def entries(self):
 *         """int: pointer to caller-supplied buffer to return the SRAM unique uncorrected ECC error count entries"""
 *         if self._ptr[0].entries == NULL or self._ptr[0].entryCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<intptr_t>(self._ptr[0].entries), self._ptr[0].entryCount)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18044
 *         if self._ptr[0].entries == NULL or self._ptr[0].entryCount == 0:
 *             return []
 *         return EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<intptr_t>(self._ptr[0].entries), self._ptr[0].entryCount)             # <<<<<<<<<<<<<<
 * 
 *     @entries.setter
 */
  /* box the raw pointer as a Python int and call
     EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(addr, count) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  __Pyx_INCREF(__pyx_t_4);
  __pyx_t_5 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).entries)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).entryCount); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 0;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_5, __pyx_t_6};
    __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18044, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
  }
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18039
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def entries(self):
 *         """int: pointer to caller-supplied buffer to return the SRAM unique uncorrected ECC error count entries"""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.entries.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18046
 *         return EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<intptr_t>(self._ptr[0].entries), self._ptr[0].entryCount)
 * 
 *     @entries.setter             # <<<<<<<<<<<<<<
 *     def entries(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/*
 * Python-level setter wrapper for the `entries` property.  Casts `self` and
 * forwards the value to the setter implementation; returns 0/-1 per the
 * descriptor __set__ slot convention.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Setter for the `entries` property (_nvml.pyx:18047).  Semantics:
 *   - raise ValueError when the instance is flagged read-only;
 *   - cast `val` to EccSramUniqueUncorrectedErrorEntry_v1 (TypeError if it
 *     is neither that type nor None);
 *   - store the entry array's raw pointer (arr._get_ptr()) into
 *     self._ptr[0].entries and len(arr) into entryCount;
 *   - keep `arr` alive by stashing it in self._refs["entries"] (the struct
 *     only borrows the buffer, so the dict reference prevents a dangling
 *     pointer while this instance exists).
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18048
 *     @entries.setter
 *     def entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 arr = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18049
 *     def entries(self, val):
 *         if self._readonly:
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 arr = val
 *         self._ptr[0].entries = <nvmlEccSramUniqueUncorrectedErrorEntry_v1_t*><intptr_t>(arr._get_ptr())
 */
    /* build and raise ValueError via a vectorcall on the exception type */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_EccSramUniqueUncorrectedErr};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18049, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18049, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18048
 *     @entries.setter
 *     def entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 arr = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18050
 *         if self._readonly:
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 arr = val             # <<<<<<<<<<<<<<
 *         self._ptr[0].entries = <nvmlEccSramUniqueUncorrectedErrorEntry_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].entryCount = len(arr)
 */
  /* typed assignment: TypeTest accepts the entry extension type or None */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1))))) __PYX_ERR(0, 18050, __pyx_L1_error)
  __pyx_v_arr = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18051
 *             raise ValueError("This EccSramUniqueUncorrectedErrorCounts_v1 instance is read-only")
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 arr = val
 *         self._ptr[0].entries = <nvmlEccSramUniqueUncorrectedErrorEntry_v1_t*><intptr_t>(arr._get_ptr())             # <<<<<<<<<<<<<<
 *         self._ptr[0].entryCount = len(arr)
 *         self._refs["entries"] = arr
 */
  /* virtual-table call to arr._get_ptr(); the returned intptr_t is stored
     as the raw entry-buffer pointer (borrowed, see _refs below) */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v_arr->__pyx_vtab)->_get_ptr(__pyx_v_arr); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18051, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).entries = ((nvmlEccSramUniqueUncorrectedErrorEntry_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":18052
 *         cdef EccSramUniqueUncorrectedErrorEntry_v1 arr = val
 *         self._ptr[0].entries = <nvmlEccSramUniqueUncorrectedErrorEntry_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].entryCount = len(arr)             # <<<<<<<<<<<<<<
 *         self._refs["entries"] = arr
 * 
 */
  __pyx_t_5 = PyObject_Length(((PyObject *)__pyx_v_arr)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18052, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).entryCount = __pyx_t_5;

  /* "cuda/bindings/_nvml.pyx":18053
 *         self._ptr[0].entries = <nvmlEccSramUniqueUncorrectedErrorEntry_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].entryCount = len(arr)
 *         self._refs["entries"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* keep the entry array alive for as long as this wrapper holds its
     pointer; _refs is expected to be a dict (TypeError if None) */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 18053, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_entries, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 18053, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":18046
 *         return EccSramUniqueUncorrectedErrorEntry_v1.from_ptr(<intptr_t>(self._ptr[0].entries), self._ptr[0].entryCount)
 * 
 *     @entries.setter             # <<<<<<<<<<<<<<
 *     def entries(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.entries.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18055
 *         self._refs["entries"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper for the static method
 * EccSramUniqueUncorrectedErrorCounts_v1.from_data(data).
 * Parses exactly one argument ("data", positional or keyword) under either
 * the METH_FASTCALL vectorcall convention or the classic args-tuple
 * convention, then delegates to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_12from_data, "EccSramUniqueUncorrectedErrorCounts_v1.from_data(data)\n\nCreate an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `ecc_sram_unique_uncorrected_error_counts_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose 0/1 result can never be < 0, so a
     * negative (error) count from __Pyx_NumKwargs_FASTCALL was silently
     * ignored. The comparison belongs inside unlikely() (cf. the correctly
     * parenthesized form used by __reduce_cython__'s wrapper). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18055, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: accept at most one positional, then let
       * __Pyx_ParseKeywords fill in / validate the rest by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18055, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 18055, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 18055, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18055, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18055, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramUniqueUncorrectedErrorCounts_v1.from_data(data).
 * Looks up the module-global dtype object
 * `ecc_sram_unique_uncorrected_error_counts_v1_dtype` and delegates to the
 * shared __from_data helper together with the dtype name and the target
 * extension type, which builds the wrapping instance (per the .pyx
 * docstring, `data` is expected to be a single-element NumPy array of that
 * dtype -- not validated here). Returns a new reference, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":18062
 *             data (_numpy.ndarray): a single-element array of dtype `ecc_sram_unique_uncorrected_error_counts_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "ecc_sram_unique_uncorrected_error_counts_v1_dtype", ecc_sram_unique_uncorrected_error_counts_v1_dtype, EccSramUniqueUncorrectedErrorCounts_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_1: new reference to the module-global dtype object. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18062, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro_2, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18062, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18055
 *         self._refs["entries"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18064
 *         return __from_data(data, "ecc_sram_unique_uncorrected_error_counts_v1_dtype", ecc_sram_unique_uncorrected_error_counts_v1_dtype, EccSramUniqueUncorrectedErrorCounts_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given pointer.
*/

/* Python wrapper for the static method
 * EccSramUniqueUncorrectedErrorCounts_v1.from_ptr(ptr, readonly=False,
 * owner=None). Parses 1-3 arguments (positional or keyword), converts
 * `ptr` to intptr_t and `readonly` to a C int via truth-testing, defaults
 * `owner` to None, then delegates to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_14from_ptr, "EccSramUniqueUncorrectedErrorCounts_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose 0/1 result can never be < 0, so a
     * negative (error) count from __Pyx_NumKwargs_FASTCALL was silently
     * ignored. The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18064, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect up to three positionals, then
       * let __Pyx_ParseKeywords fill in / validate the rest by name. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18064, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18064, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18064, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 18064, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":18065
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given pointer.
 * 
 */
      /* Default owner=None; readonly's default is applied below at
       * conversion time (values[1] left NULL means False). */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 18064, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18064, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18064, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18064, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert ptr via PyLong_AsSsize_t -- presumably Py_ssize_t and
     * intptr_t share a width on supported platforms; -1 doubles as the
     * error sentinel, disambiguated by PyErr_Occurred(). */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18065, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18065, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 18064, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":18064
 *         return __from_data(data, "ecc_sram_unique_uncorrected_error_counts_v1_dtype", ecc_sram_unique_uncorrected_error_counts_v1_dtype, EccSramUniqueUncorrectedErrorCounts_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of EccSramUniqueUncorrectedErrorCounts_v1.from_ptr().
 * Wraps a raw nvmlEccSramUniqueUncorrectedErrorCounts_v1_t pointer in a new
 * extension-type instance. Ownership semantics as coded below:
 *   - ptr == 0 raises ValueError up front;
 *   - owner is None: the struct is deep-copied (memcpy of sizeof(struct))
 *     into freshly malloc'ed storage, _owner = None, _owned = True
 *     (presumably freed in the type's dealloc -- not visible here);
 *   - otherwise: the pointer is borrowed as-is, _owner holds a reference to
 *     `owner` to keep the memory alive, _owned = False.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":18073
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18074
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18074, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18074, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18073
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18075
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 */
  /* Allocate via tp_new directly; __init__ is not run. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18075, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18076
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18077
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 */
    /* No owner: take a private copy so the wrapper's lifetime is
     * independent of the caller's buffer. */
    __pyx_v_obj->_ptr = ((nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *)malloc((sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18078
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18079
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             obj._owner = None
 */
      /* Note: "MemoryError" is resolved as a module global here (the .pyx
       * raises it by name), hence the lookup + call rather than a direct
       * PyErr_NoMemory. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18079, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_EccSramUniqueUn};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18079, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 18079, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18078
 *         if owner is None:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18080
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18081
 *                 raise MemoryError("Error allocating EccSramUniqueUncorrectedErrorCounts_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18082
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18076
 *             raise ValueError("ptr must not be null (0)")
 *         cdef EccSramUniqueUncorrectedErrorCounts_v1 obj = EccSramUniqueUncorrectedErrorCounts_v1.__new__(EccSramUniqueUncorrectedErrorCounts_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>malloc(sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":18084
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Borrow the caller's pointer; `owner` (held below) is expected to
     * keep the underlying memory alive for the wrapper's lifetime. */
    __pyx_v_obj->_ptr = ((nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18085
 *         else:
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":18086
 *             obj._ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":18087
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":18088
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  /* _refs: dict used elsewhere (e.g. the `entries` setter) to keep Python
   * objects referenced from nested struct pointers alive. */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18088, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18089
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18064
 *         return __from_data(data, "ecc_sram_unique_uncorrected_error_counts_v1_dtype", ecc_sram_unique_uncorrected_error_counts_v1_dtype, EccSramUniqueUncorrectedErrorCounts_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper for EccSramUniqueUncorrectedErrorCounts_v1.__reduce_cython__().
 * Takes no arguments: any positional or keyword argument is rejected before
 * delegating to the implementation (which unconditionally raises TypeError,
 * since the type cannot be pickled). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_16__reduce_cython__, "EccSramUniqueUncorrectedErrorCounts_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject positionals, then reject keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: the type wraps a raw C pointer
 * (_ptr) that cannot be meaningfully serialized, so pickling is blocked by
 * unconditionally raising TypeError. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * EccSramUniqueUncorrectedErrorCounts_v1.__setstate_cython__(self, __pyx_state)
 *
 * Argument-unpacking wrapper: accepts exactly one argument (positional or the
 * keyword "__pyx_state") and forwards to the implementation, which always
 * raises TypeError (the type wraps a raw C pointer and is not picklable).
 *
 * FIX: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * which can never be true, because unlikely(x) expands to
 * __builtin_expect(!!(x), 0) and therefore yields only 0 or 1 — a negative
 * (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored.  The
 * comparison now happens inside unlikely(), matching the correctly
 * parenthesized check used by the other wrappers in this file
 * (e.g. `if (unlikely(__pyx_kwds_len < 0)) return -1;` in __init__).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_18__setstate_cython__, "EccSramUniqueUncorrectedErrorCounts_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall build: recover the positional-arg count from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which is always false
       (unlikely() yields 0/1), so error returns were never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword case: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot got filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of EccSramUniqueUncorrectedErrorCounts_v1.__setstate_cython__.
 * Unconditionally raises TypeError: the instance wraps a raw C pointer
 * (_ptr) that cannot be reconstructed from pickled state.  Always returns
 * NULL with a Python exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message; control always reaches
     the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18092
 * 
 * 
 * cdef _get_nvlink_firmware_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkFirmwareInfo_t pod = nvmlNvlinkFirmwareInfo_t()
 *     return _numpy.dtype({
*/

/*
 * _get_nvlink_firmware_info_dtype_offsets()
 *
 * Builds and returns a numpy structured dtype that mirrors the C layout of
 * nvmlNvlinkFirmwareInfo_t: fields 'firmware_version' and
 * 'num_valid_entries', with byte offsets computed from a stack instance of
 * the struct and itemsize = sizeof(nvmlNvlinkFirmwareInfo_t).
 * Returns a new reference, or NULL with an exception set on failure.
 *
 * FIX: the temporary __pyx_t_1 was declared uninitialized and then copied
 * into `pod` — an indeterminate-value struct copy.  Only field ADDRESSES of
 * `pod` are used (for offsets), so the contents never mattered, but the
 * copy itself read an uninitialized object.  The temporary is now
 * zero-initialized; the computed offsets are unchanged.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nvlink_firmware_info_dtype_offsets(void) {
  nvmlNvlinkFirmwareInfo_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlNvlinkFirmwareInfo_t __pyx_t_1 = {0};  /* zero-init: was copied into pod while indeterminate */
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_nvlink_firmware_info_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":18093
 * 
 * cdef _get_nvlink_firmware_info_dtype_offsets():
 *     cdef nvmlNvlinkFirmwareInfo_t pod = nvmlNvlinkFirmwareInfo_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['firmware_version', 'num_valid_entries'],
 */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":18094
 * cdef _get_nvlink_firmware_info_dtype_offsets():
 *     cdef nvmlNvlinkFirmwareInfo_t pod = nvmlNvlinkFirmwareInfo_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['firmware_version', 'num_valid_entries'],
 *         'formats': [nvlink_firmware_version_dtype, _numpy.uint32],
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  /* Look up _numpy.dtype (module global + attribute). */
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":18095
 *     cdef nvmlNvlinkFirmwareInfo_t pod = nvmlNvlinkFirmwareInfo_t()
 *     return _numpy.dtype({
 *         'names': ['firmware_version', 'num_valid_entries'],             # <<<<<<<<<<<<<<
 *         'formats': [nvlink_firmware_version_dtype, _numpy.uint32],
 *         'offsets': [
 */
  /* Build the dtype-spec dict: {'names': [...], 'formats': [...],
     'offsets': [...], 'itemsize': ...}. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18095, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18095, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_firmware_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_firmware_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_firmware_version) != (0)) __PYX_ERR(0, 18095, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_num_valid_entries);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_num_valid_entries);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_num_valid_entries) != (0)) __PYX_ERR(0, 18095, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 18095, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18096
 *     return _numpy.dtype({
 *         'names': ['firmware_version', 'num_valid_entries'],
 *         'formats': [nvlink_firmware_version_dtype, _numpy.uint32],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_version_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 18096, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18096, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_7) < (0)) __PYX_ERR(0, 18095, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":18098
 *         'formats': [nvlink_firmware_version_dtype, _numpy.uint32],
 *         'offsets': [
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.numValidEntries)) - (<intptr_t>&pod),
 *         ],
 */
  /* Field offsets via address arithmetic on the stack instance
     (equivalent to offsetof for each member). */
  __pyx_t_7 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.firmwareVersion)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18098, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);

  /* "cuda/bindings/_nvml.pyx":18099
 *         'offsets': [
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.numValidEntries)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkFirmwareInfo_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.numValidEntries)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18099, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":18097
 *         'names': ['firmware_version', 'num_valid_entries'],
 *         'formats': [nvlink_firmware_version_dtype, _numpy.uint32],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.firmwareVersion)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.numValidEntries)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 18097, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18097, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_6) < (0)) __PYX_ERR(0, 18095, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18101
 *             (<intptr_t>&(pod.numValidEntries)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlNvlinkFirmwareInfo_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(nvmlNvlinkFirmwareInfo_t))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18101, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6) < (0)) __PYX_ERR(0, 18095, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Vectorcall _numpy.dtype(spec); unwrap bound methods when possible. */
  __pyx_t_9 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_9 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18094, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18092
 * 
 * 
 * cdef _get_nvlink_firmware_info_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkFirmwareInfo_t pod = nvmlNvlinkFirmwareInfo_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_nvlink_firmware_info_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18118
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkFirmwareInfo_t *>calloc(1, sizeof(nvmlNvlinkFirmwareInfo_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * NvlinkFirmwareInfo.__init__(self) — argument-checking wrapper.
 * Rejects any positional or keyword arguments, then forwards to the
 * implementation.  Returns 0 on success, -1 with an exception set on error.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args from the tuple (safe/unsafe variant per build). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments: any positional or keyword is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvlinkFirmwareInfo.__init__.
 * Allocates a zero-filled nvmlNvlinkFirmwareInfo_t via calloc and stores it
 * in self._ptr; raises MemoryError if the allocation fails.  Marks the
 * instance as owning the buffer (_owned = True), with no external owner
 * (_owner = None) and writable (_readonly = False).
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":18119
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkFirmwareInfo_t *>calloc(1, sizeof(nvmlNvlinkFirmwareInfo_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkFirmwareInfo")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlNvlinkFirmwareInfo_t *)calloc(1, (sizeof(nvmlNvlinkFirmwareInfo_t))));

  /* "cuda/bindings/_nvml.pyx":18120
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkFirmwareInfo_t *>calloc(1, sizeof(nvmlNvlinkFirmwareInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18121
 *         self._ptr = <nvmlNvlinkFirmwareInfo_t *>calloc(1, sizeof(nvmlNvlinkFirmwareInfo_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkFirmwareInfo")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Construct and raise MemoryError("Error allocating NvlinkFirmwareInfo").
       MemoryError is looked up as a module global (may be shadowed), then
       called via the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18121, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkFirmwareI};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18121, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18121, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18120
 *     def __init__(self):
 *         self._ptr = <nvmlNvlinkFirmwareInfo_t *>calloc(1, sizeof(nvmlNvlinkFirmwareInfo_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":18122
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (incref new,
     decref old — standard attribute-store sequence). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":18123
 *             raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":18124
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":18118
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvlinkFirmwareInfo_t *>calloc(1, sizeof(nvmlNvlinkFirmwareInfo_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18126
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvlinkFirmwareInfo_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/*
 * NvlinkFirmwareInfo.__dealloc__ — no-argument wrapper that casts self to
 * the extension-type struct and calls the implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS names __pyx_args/__pyx_nargs, which
 * are not parameters of this function — presumably the macro ignores its
 * arguments in this configuration; confirm against the Cython utility code.
 */
static void __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/*
 * Implementation of NvlinkFirmwareInfo.__dealloc__ (pyx line 18126).
 *
 * Frees the wrapped nvmlNvlinkFirmwareInfo_t buffer, but only when this
 * instance owns it (_owned) and the pointer is non-NULL.  The pointer is
 * stashed in a local and cleared on the object BEFORE free() so that no
 * path can observe a dangling _ptr.  Borrowed/non-owned pointers are left
 * untouched.
 */
static void __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  /* "cuda/bindings/_nvml.pyx":18128-18131
   *     if self._owned and self._ptr != NULL:
   *         ptr = self._ptr
   *         self._ptr = NULL
   *         free(ptr)
   */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlNvlinkFirmwareInfo_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;  /* clear first: never leave a dangling pointer */
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":18133
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkFirmwareInfo object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/*
 * NvlinkFirmwareInfo.__repr__ — no-argument wrapper forwarding to the
 * implementation; returns a new unicode reference or NULL on error.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* Macro-supplied no-op in this configuration (no real argument tuple). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvlinkFirmwareInfo.__repr__.
 * Builds the f-string "<{__name__}.NvlinkFirmwareInfo object at {hex(id(self))}>"
 * by formatting the module's __name__, computing hex(id(self)), and joining
 * the five pieces with Cython's fast unicode-join helper.
 * Returns a new unicode reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":18134
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.NvlinkFirmwareInfo object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* {__name__}: look up the module global and str-format it. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* {hex(id(self))}: id(self) -> hex() -> str(). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".NvlinkFirmwareInfo object at " + hexid + ">",
     pre-computing total length and max char width for a single allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_NvlinkFirmwareInfo_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 30 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18133
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvlinkFirmwareInfo object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18136
 *         return f"<{__name__}.NvlinkFirmwareInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/*
 * Getter wrapper for the NvlinkFirmwareInfo.ptr property — forwards to the
 * implementation; returns a new int reference or NULL on error.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Macro-supplied no-op in this configuration (no real argument tuple). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the NvlinkFirmwareInfo.ptr property getter.
 * Returns self._ptr reinterpreted as a Python int (the raw pointer
 * address); new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18139
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int conversion. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18136
 *         return f"<{__name__}.NvlinkFirmwareInfo object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18141
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/*
 * NvlinkFirmwareInfo._get_ptr (pyx line 18141) — C-level fast path that
 * returns the raw backing pointer as an intptr_t, mirroring the Python-level
 * `ptr` property without any Python-object overhead.
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  /* return <intptr_t>(self._ptr) */
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":18144
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/*
 * NvlinkFirmwareInfo.__int__ — no-argument wrapper forwarding to the
 * implementation; returns a new int reference or NULL on error.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* Macro-supplied no-op in this configuration (no real argument tuple). */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of NvlinkFirmwareInfo.__int__.
 * int(obj) yields the raw _ptr address — same value as the `ptr` property.
 * Returns a new int reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18145
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int conversion. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18144
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18147
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareInfo other_
 *         if not isinstance(other, NvlinkFirmwareInfo):
*/

/* Python wrapper */
/* Cython-generated Python slot wrapper for NvlinkFirmwareInfo.__eq__.
 * Down-casts the receiver and forwards (self, other) to the
 * implementation function (…_8__eq__) below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkFirmwareInfo.__eq__ (from _nvml.pyx:18147-18152).
 * Semantics: returns False if `other` is not an NvlinkFirmwareInfo instance;
 * otherwise compares the two wrapped nvmlNvlinkFirmwareInfo_t structs
 * byte-for-byte with memcmp and returns the resulting bool.
 * NOTE(review): both _ptr values are dereferenced by memcmp without a NULL
 * check here — presumably _ptr is always valid once constructed; confirm
 * against the type's __cinit__/from_ptr paths (not visible in this chunk). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18149
 *     def __eq__(self, other):
 *         cdef NvlinkFirmwareInfo other_
 *         if not isinstance(other, NvlinkFirmwareInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance(other, NvlinkFirmwareInfo) — accepts subclasses. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18150
 *         cdef NvlinkFirmwareInfo other_
 *         if not isinstance(other, NvlinkFirmwareInfo):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareInfo_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18149
 *     def __eq__(self, other):
 *         cdef NvlinkFirmwareInfo other_
 *         if not isinstance(other, NvlinkFirmwareInfo):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18151
 *         if not isinstance(other, NvlinkFirmwareInfo):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareInfo_t)) == 0)
 * 
 */
  /* Typed assignment: the TypeTest re-verifies (or passes None through),
   * then `other` is cast to the extension-type struct pointer. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo))))) __PYX_ERR(0, 18151, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18152
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareInfo_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Structural equality: raw byte comparison of the two C structs. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlNvlinkFirmwareInfo_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18147
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareInfo other_
 *         if not isinstance(other, NvlinkFirmwareInfo):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18154
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
*/

/* Python wrapper */
/* Cython-generated Python slot wrapper for NvlinkFirmwareInfo.__setitem__.
 * Down-casts the receiver and forwards (self, key, val) to the
 * implementation function (…_10__setitem__) below.
 * Returns 0 on success, -1 on error (mp_ass_subscript convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkFirmwareInfo.__setitem__ (_nvml.pyx:18154-18164).
 * Two behaviors:
 *   - obj[0] = ndarray: allocates a fresh nvmlNvlinkFirmwareInfo_t with
 *     malloc, memcpy's the array's buffer (via val.ctypes.data) into it,
 *     marks the struct as owned (self._owned = True, self._owner = None),
 *     and mirrors the array's writeable flag into self._readonly.
 *   - any other key: falls back to setattr(self, key, val), so field
 *     properties (e.g. num_valid_entries) can be set by name.
 * NOTE(review): the ndarray path overwrites self._ptr without freeing a
 * previously-owned allocation — verify the caller/ctor contract rules out
 * a leak here (dealloc path not visible in this chunk). The memcpy also
 * assumes the ndarray buffer holds at least sizeof(nvmlNvlinkFirmwareInfo_t)
 * bytes — TODO confirm callers guarantee that.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18155
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: key == 0 first, then the (runtime, attribute-based)
   * isinstance check against the numpy.ndarray type looked up from globals. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18155, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18155, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18156
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 */
    /* Allocate a fresh struct; the instance becomes the owner. */
    __pyx_v_self->_ptr = ((nvmlNvlinkFirmwareInfo_t *)malloc((sizeof(nvmlNvlinkFirmwareInfo_t))));

    /* "cuda/bindings/_nvml.pyx":18157
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareInfo_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18158
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareInfo_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating NvlinkFirmwareInfo")
       * via Cython's vectorcall fast-call scaffolding. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18158, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkFirmwareI};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18158, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18158, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18157
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18159
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareInfo_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Read the array's base address from val.ctypes.data (a Python int),
     * then copy one struct's worth of bytes into the new allocation. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18159, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlNvlinkFirmwareInfo_t))));

    /* "cuda/bindings/_nvml.pyx":18160
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareInfo_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No external owner keeps the memory alive; this instance owns it. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18161
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvlinkFirmwareInfo_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18162
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source array's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18162, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18162, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18162, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18155
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18164
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-ndarray / non-zero key: route through normal attribute setting. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18164, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18154
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvlinkFirmwareInfo_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18166
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_version(self):
 *         """NvlinkFirmwareVersion: OUT - NVLINK firmware version."""
*/

/* Python wrapper */
/* Cython-generated getter wrapper for the NvlinkFirmwareInfo.firmware_version
 * property.  Down-casts the receiver and delegates to the __get__
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for NvlinkFirmwareInfo.firmware_version (_nvml.pyx:18169):
 *     return NvlinkFirmwareVersion.from_ptr(<intptr_t>&(self._ptr[0].firmwareVersion), 100, self._readonly)
 * Wraps the embedded firmwareVersion array (length 100) of the underlying C
 * struct in a NvlinkFirmwareVersion view WITHOUT copying: the view aliases
 * self._ptr's memory and inherits this instance's read-only flag. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18169
 *     def firmware_version(self):
 *         """NvlinkFirmwareVersion: OUT - NVLINK firmware version."""
 *         return NvlinkFirmwareVersion.from_ptr(<intptr_t>&(self._ptr[0].firmwareVersion), 100, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @firmware_version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Assemble args (type object, field address as int, 100, readonly bool)
   * and vectorcall the classmethod-style factory `from_ptr`. */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).firmwareVersion))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18169, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18169, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_100, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18169, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18166
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_version(self):
 *         """NvlinkFirmwareVersion: OUT - NVLINK firmware version."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.firmware_version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18171
 *         return NvlinkFirmwareVersion.from_ptr(<intptr_t>&(self._ptr[0].firmwareVersion), 100, self._readonly)
 * 
 *     @firmware_version.setter             # <<<<<<<<<<<<<<
 *     def firmware_version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter wrapper for the NvlinkFirmwareInfo.firmware_version
 * property.  Down-casts the receiver and forwards `val` to the __set__
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for NvlinkFirmwareInfo.firmware_version (_nvml.pyx:18172-18178).
 * Steps:
 *   1. raise ValueError if this instance is read-only;
 *   2. cast `val` to NvlinkFirmwareVersion (TypeError via TypeTest if not);
 *   3. raise ValueError unless len(val) == 100;
 *   4. memcpy 100 * sizeof(nvmlNvlinkFirmwareVersion_t) bytes from the
 *      source's buffer (val_._get_ptr()) into self._ptr->firmwareVersion.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18173
 *     @firmware_version.setter
 *     def firmware_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         cdef NvlinkFirmwareVersion val_ = val
 */
  /* Guard: mutation of a read-only view is rejected up front. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18174
 *     def firmware_version(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareVersion val_ = val
 *         if len(val) != 100:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkFirmwareInfo_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18174, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18174, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18173
 *     @firmware_version.setter
 *     def firmware_version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         cdef NvlinkFirmwareVersion val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18175
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         cdef NvlinkFirmwareVersion val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 100:
 *             raise ValueError(f"Expected length 100 for field firmware_version, got {len(val)}")
 */
  /* Typed assignment: TypeTest raises TypeError unless val is a
   * NvlinkFirmwareVersion (None is also accepted by the generic cast). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion))))) __PYX_ERR(0, 18175, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18176
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         cdef NvlinkFirmwareVersion val_ = val
 *         if len(val) != 100:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 100 for field firmware_version, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].firmwareVersion), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareVersion_t) * 100)
 */
  /* 0x64 == 100: required element count for the fixed-size C array. */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18176, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 0x64);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":18177
 *         cdef NvlinkFirmwareVersion val_ = val
 *         if len(val) != 100:
 *             raise ValueError(f"Expected length 100 for field firmware_version, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].firmwareVersion), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareVersion_t) * 100)
 * 
 */
    /* f-string: format len(val) as decimal and concat onto the static
     * prefix, then raise ValueError with the assembled message. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18177, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18177, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_100_for_field_fi, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18177, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18177, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18177, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18176
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         cdef NvlinkFirmwareVersion val_ = val
 *         if len(val) != 100:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 100 for field firmware_version, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].firmwareVersion), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareVersion_t) * 100)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18178
 *         if len(val) != 100:
 *             raise ValueError(f"Expected length 100 for field firmware_version, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].firmwareVersion), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareVersion_t) * 100)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fetch the source buffer address via the vtable call _get_ptr(), then
   * bulk-copy all 100 entries into the embedded array. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18178, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).firmwareVersion)), ((void *)__pyx_t_8), ((sizeof(nvmlNvlinkFirmwareVersion_t)) * 0x64)));

  /* "cuda/bindings/_nvml.pyx":18171
 *         return NvlinkFirmwareVersion.from_ptr(<intptr_t>&(self._ptr[0].firmwareVersion), 100, self._readonly)
 * 
 *     @firmware_version.setter             # <<<<<<<<<<<<<<
 *     def firmware_version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.firmware_version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18180
 *         memcpy(<void *>&(self._ptr[0].firmwareVersion), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareVersion_t) * 100)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def num_valid_entries(self):
 *         """int: OUT - Number of valid firmware entries."""
*/

/* Python wrapper */
/* Cython-generated getter wrapper for the NvlinkFirmwareInfo.num_valid_entries
 * property.  Down-casts the receiver and delegates to the __get__
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for NvlinkFirmwareInfo.num_valid_entries (_nvml.pyx:18183):
 *     return self._ptr[0].numValidEntries
 * Reads the unsigned-int field from the wrapped C struct and boxes it as a
 * Python int.  NULL return signals an error with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18183
 *     def num_valid_entries(self):
 *         """int: OUT - Number of valid firmware entries."""
 *         return self._ptr[0].numValidEntries             # <<<<<<<<<<<<<<
 * 
 *     @num_valid_entries.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a new Python int reference. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).numValidEntries); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18183, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18180
 *         memcpy(<void *>&(self._ptr[0].firmwareVersion), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareVersion_t) * 100)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def num_valid_entries(self):
 *         """int: OUT - Number of valid firmware entries."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.num_valid_entries.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18185
 *         return self._ptr[0].numValidEntries
 * 
 *     @num_valid_entries.setter             # <<<<<<<<<<<<<<
 *     def num_valid_entries(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter wrapper for the NvlinkFirmwareInfo.num_valid_entries
 * property.  Down-casts the receiver and forwards `val` to the __set__
 * implementation below.  Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `num_valid_entries` setter: raises ValueError when the
 * instance is read-only, otherwise converts `val` to `unsigned int` and stores
 * it into `self._ptr[0].numValidEntries`.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18187
 *     @num_valid_entries.setter
 *     def num_valid_entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         self._ptr[0].numValidEntries = val
 */
  /* Guard: reject writes on read-only instances. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18188
 *     def num_valid_entries(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].numValidEntries = val
 * 
 */
    /* Build and raise ValueError(...) via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvlinkFirmwareInfo_instance};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18188, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18188, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18187
 *     @num_valid_entries.setter
 *     def num_valid_entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         self._ptr[0].numValidEntries = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18189
 *         if self._readonly:
 *             raise ValueError("This NvlinkFirmwareInfo instance is read-only")
 *         self._ptr[0].numValidEntries = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Convert the Python int to unsigned int; (unsigned int)-1 doubles as the
   * error sentinel, hence the extra PyErr_Occurred() check. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18189, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).numValidEntries = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18185
 *         return self._ptr[0].numValidEntries
 * 
 *     @num_valid_entries.setter             # <<<<<<<<<<<<<<
 *     def num_valid_entries(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.num_valid_entries.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18191
 *         self._ptr[0].numValidEntries = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method `NvlinkFirmwareInfo.from_data(data)`:
 * unpacks the single positional/keyword argument `data` and delegates to the
 * implementation (`..._12from_data`).  Returns a new reference, or NULL with an
 * exception set.
 *
 * FIX(review): the keyword-length error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; `unlikely(x)` wraps `__builtin_expect(!!(x), 0)`
 * which yields only 0 or 1, so the comparison was always false and a negative
 * (error) result from `__Pyx_NumKwargs_FASTCALL` was silently ignored.  The
 * comparison now lives inside `unlikely(...)`, matching the correct pattern used
 * elsewhere in this file (e.g. the `__reduce_cython__` wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_12from_data, "NvlinkFirmwareInfo.from_data(data)\n\nCreate an NvlinkFirmwareInfo instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `nvlink_firmware_info_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18191, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18191, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 18191, __pyx_L3_error)
      /* Verify the required argument was provided one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 18191, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18191, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18191, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvlinkFirmwareInfo.from_data`: looks up the module-global
 * `nvlink_firmware_info_dtype` and forwards everything to the shared
 * `__from_data` helper together with the dtype name and the extension type.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":18198
 *             data (_numpy.ndarray): a single-element array of dtype `nvlink_firmware_info_dtype` holding the data.
 *         """
 *         return __from_data(data, "nvlink_firmware_info_dtype", nvlink_firmware_info_dtype, NvlinkFirmwareInfo)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_info_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_info_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18191
 *         self._ptr[0].numValidEntries = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18200
 *         return __from_data(data, "nvlink_firmware_info_dtype", nvlink_firmware_info_dtype, NvlinkFirmwareInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given pointer.
*/

/* Python wrapper */
/* Python-level wrapper for the static method
 * `NvlinkFirmwareInfo.from_ptr(ptr, readonly=False, owner=None)`: unpacks up to
 * three positional/keyword arguments, converts `ptr` to intptr_t and `readonly`
 * to a C truth value, then delegates to the implementation (`..._14from_ptr`).
 * Returns a new reference, or NULL with an exception set.
 *
 * FIX(review): the keyword-length error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; since `unlikely(x)` is
 * `__builtin_expect(!!(x), 0)` (always 0 or 1), the comparison was always false
 * and a negative (error) result from `__Pyx_NumKwargs_FASTCALL` was silently
 * ignored.  The comparison is now inside `unlikely(...)`, matching the correct
 * pattern used elsewhere in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_14from_ptr, "NvlinkFirmwareInfo.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an NvlinkFirmwareInfo instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18200, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18200, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18200, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18200, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 18200, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":18201
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an NvlinkFirmwareInfo instance wrapping the given pointer.
 * 
 */
      /* Apply the `owner=None` default if not supplied. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      /* Only `ptr` (index 0) is required; verify it was provided. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 18200, __pyx_L3_error) }
      }
    } else {
      /* No keywords: accept 1-3 positional arguments. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18200, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18200, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18200, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): converts `ptr` via PyLong_AsSsize_t and assigns to
     * intptr_t — presumably Py_ssize_t and intptr_t have the same width on all
     * supported platforms; confirm if a new target is added. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18201, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18201, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 18200, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":18200
 *         return __from_data(data, "nvlink_firmware_info_dtype", nvlink_firmware_info_dtype, NvlinkFirmwareInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvlinkFirmwareInfo.from_ptr`: rejects a null `ptr`, then
 * creates a new NvlinkFirmwareInfo object.  When `owner` is None, the struct is
 * copied into freshly malloc'd storage and the object owns it (`_owned = 1`);
 * otherwise the object aliases `ptr` directly and keeps `owner` alive via a
 * stored reference (`_owned = 0`).  `_readonly` is set from the argument in
 * both cases.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":18209
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)
 */
  /* Guard: a null pointer cannot be wrapped. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18210
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18210, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18210, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18209
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18211
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 */
  /* Allocate the object via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18211, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18212
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18213
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 */
    /* No owner: copy the data so the new object has its own allocation. */
    __pyx_v_obj->_ptr = ((nvmlNvlinkFirmwareInfo_t *)malloc((sizeof(nvmlNvlinkFirmwareInfo_t))));

    /* "cuda/bindings/_nvml.pyx":18214
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareInfo_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18215
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareInfo_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved as a module global here (not PyExc_MemoryError
       * directly), so the bound-method unpack path below is kept. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18215, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvlinkFirmwareI};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18215, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 18215, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18214
 *         if owner is None:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareInfo_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18216
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareInfo_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlNvlinkFirmwareInfo_t))));

    /* "cuda/bindings/_nvml.pyx":18217
 *                 raise MemoryError("Error allocating NvlinkFirmwareInfo")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareInfo_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18218
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvlinkFirmwareInfo_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18212
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvlinkFirmwareInfo obj = NvlinkFirmwareInfo.__new__(NvlinkFirmwareInfo)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>malloc(sizeof(nvmlNvlinkFirmwareInfo_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":18220
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /* Owner given: alias the caller's memory and keep `owner` alive. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlNvlinkFirmwareInfo_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18221
 *         else:
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":18222
 *             obj._ptr = <nvmlNvlinkFirmwareInfo_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":18223
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":18224
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18200
 *         return __from_data(data, "nvlink_firmware_info_dtype", nvlink_firmware_info_dtype, NvlinkFirmwareInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Python-level wrapper for `NvlinkFirmwareInfo.__reduce_cython__`: rejects any
 * positional or keyword arguments, then delegates to the implementation (which
 * always raises TypeError — the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16__reduce_cython__, "NvlinkFirmwareInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error.
   * Note the correct `unlikely(expr < 0)` form here (the comparison is inside
   * the macro). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `__reduce_cython__`: unconditionally raises TypeError
 * because the wrapped `_ptr` cannot be pickled.  Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Cython-generated wrapper for NvlinkFirmwareInfo.__setstate_cython__(self, __pyx_state).
 * Unpacks exactly one argument (positional or keyword '__pyx_state') and forwards to the
 * implementation, which always raises TypeError (pickling of this extension type is
 * deliberately disabled).  Returns NULL with an exception set on argument errors.
 * NOTE(review): generated code -- prefer regenerating from cuda/bindings/_nvml.pyx;
 * the one hand-applied fix here is the misparenthesized unlikely() noted below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_18__setstate_cython__, "NvlinkFirmwareInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Under the tuple-based (non-fastcall) calling convention the positional
   * count must be recovered from the args tuple itself. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  With the GNU definition of
     * unlikely(x) == __builtin_expect(!!(x), 0), the !! collapses the value to
     * 0/1, making `< 0` unconditionally false and the error check dead.  The
     * sibling __init__ wrapper uses the correct `unlikely(len < 0)` form. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: slot any positional arg first, then let
       * ParseKeywords fill the remainder and reject duplicates/unknowns. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Any slot still empty after keyword parsing means a required argument
       * is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any references captured into values[] before bailing. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvlinkFirmwareInfo.__setstate_cython__.
 * Unconditionally raises TypeError: instances wrap a raw `_ptr` that cannot be
 * serialized, so unpickling is deliberately disabled.  Both parameters are
 * intentionally unused; the function always returns NULL with the exception set.
 * Generated by Cython -- regenerate from _nvml.pyx rather than editing by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message string; __PYX_ERR records the
   * source position and jumps to the (only) exit path below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvlinkFirmwareInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18227
 * 
 * 
 * cdef _get_vgpu_instances_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t pod = nvmlVgpuInstancesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
*/

/* Cython-generated body of _get_vgpu_instances_utilization_info_v1_dtype_offsets()
 * (cuda/bindings/_nvml.pyx:18227).  Builds and returns a numpy.dtype describing
 * the in-memory layout of nvmlVgpuInstancesUtilizationInfo_v1_t: field names,
 * numpy formats, the byte offset of each member (computed by pointer arithmetic
 * against a local instance) and the struct's total itemsize.  Returns a new
 * reference, or NULL with a Python exception set on failure.
 * NOTE(review): generated code -- prefer regenerating from the .pyx; the only
 * hand-applied change is the temporary's initialization noted below. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_instances_utilization_info_v1_dtype_offsets(void) {
  nvmlVgpuInstancesUtilizationInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* FIX: value-initialize the temporary (matches the source-level
   * `nvmlVgpuInstancesUtilizationInfo_v1_t()`).  The generated code copied an
   * indeterminate __pyx_t_1 into `pod` -- formally undefined behavior in C++,
   * even though only member *addresses* (never values) are read below. */
  nvmlVgpuInstancesUtilizationInfo_v1_t __pyx_t_1 = nvmlVgpuInstancesUtilizationInfo_v1_t();
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_instances_utilization_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":18228
 * 
 * cdef _get_vgpu_instances_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t pod = nvmlVgpuInstancesUtilizationInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'sample_val_type', 'vgpu_instance_count', 'last_seen_time_stamp', 'vgpu_util_array'],
 */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":18229
 * cdef _get_vgpu_instances_utilization_info_v1_dtype_offsets():
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t pod = nvmlVgpuInstancesUtilizationInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'sample_val_type', 'vgpu_instance_count', 'last_seen_time_stamp', 'vgpu_util_array'],
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 */
  /* Look up numpy.dtype to call at the end with the spec dict built below. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":18230
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t pod = nvmlVgpuInstancesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'sample_val_type', 'vgpu_instance_count', 'last_seen_time_stamp', 'vgpu_util_array'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [
 */
  /* 'names': the five snake_case field names, in struct order. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 18230, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_sample_val_type);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_sample_val_type);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_sample_val_type) != (0)) __PYX_ERR(0, 18230, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_instance_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_vgpu_instance_count) != (0)) __PYX_ERR(0, 18230, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_last_seen_time_stamp) != (0)) __PYX_ERR(0, 18230, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_vgpu_util_array);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_vgpu_util_array);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_vgpu_util_array) != (0)) __PYX_ERR(0, 18230, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 18230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18231
 *     return _numpy.dtype({
 *         'names': ['version', 'sample_val_type', 'vgpu_instance_count', 'last_seen_time_stamp', 'vgpu_util_array'],
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint64, _numpy.intp],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats': one numpy scalar type per field (the pointer-valued
   * vgpuUtilArray member is represented as intp). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_intp); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 18231, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18231, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 18231, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 18231, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 18231, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_6) < (0)) __PYX_ERR(0, 18230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18233
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.sampleValType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstanceCount)) - (<intptr_t>&pod),
 */
  /* 'offsets': byte offset of each member, derived from member addresses of
   * the local `pod` instance (equivalent to offsetof). */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":18234
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sampleValType)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuInstanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.sampleValType)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":18235
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sampleValType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstanceCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuUtilArray)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuInstanceCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":18236
 *             (<intptr_t>&(pod.sampleValType)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuInstanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.vgpuUtilArray)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.lastSeenTimeStamp)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":18237
 *             (<intptr_t>&(pod.vgpuInstanceCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.lastSeenTimeStamp)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.vgpuUtilArray)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.vgpuUtilArray)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":18232
 *         'names': ['version', 'sample_val_type', 'vgpu_instance_count', 'last_seen_time_stamp', 'vgpu_util_array'],
 *         'formats': [_numpy.uint32, _numpy.int32, _numpy.uint32, _numpy.uint64, _numpy.intp],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.sampleValType)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18232, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 18232, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 18232, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 18232, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 18232, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 18232, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 18230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":18239
 *             (<intptr_t>&(pod.vgpuUtilArray)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize': full struct size, including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 18230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(spec), unpacking a bound method into self+function
   * when possible so the vectorcall fast path can be used. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18229, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18227
 * 
 * 
 * cdef _get_vgpu_instances_utilization_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t pod = nvmlVgpuInstancesUtilizationInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop every temporary that may still hold a reference. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_instances_utilization_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18257
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuInstancesUtilizationInfo_v1.__init__(self).
 * Rejects any positional or keyword arguments, then forwards to the
 * implementation.  Returns 0 on success, -1 with an exception set otherwise.
 * Generated code -- regenerate from _nvml.pyx rather than editing by hand. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional arguments; __init__ accepts none beyond self. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  /* Keyword arguments are likewise rejected outright. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.__init__.
 * Allocates a zero-filled nvmlVgpuInstancesUtilizationInfo_v1_t via calloc and
 * records ownership state: _owner=None, _owned=True (so __dealloc__ frees the
 * buffer), _readonly=False, _refs={}.  Raises MemoryError if calloc fails.
 * Returns 0 on success, -1 with an exception set on error.
 * Generated code -- regenerate from _nvml.pyx rather than editing by hand. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":18258
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 */
  /* calloc zero-fills, so every struct field starts at 0/NULL. */
  __pyx_v_self->_ptr = ((nvmlVgpuInstancesUtilizationInfo_v1_t *)calloc(1, (sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":18259
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18260
 *         self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError(<message>) via the vectorcall fast path,
     * unpacking a bound method into self+function when applicable. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18260, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuInstancesUt};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18260, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18260, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18259
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":18261
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Standard Cython attribute store: take the new reference before dropping
   * the old one so the slot never holds a stale pointer. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":18262
 *             raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 *         self._refs = {}
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":18263
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 *         self._refs = {}
 * 
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":18264
 *         self._owned = True
 *         self._readonly = False
 *         self._refs = {}             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_self->_refs);
  __Pyx_DECREF(__pyx_v_self->_refs);
  __pyx_v_self->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18257
 *         dict _refs
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>calloc(1, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18266
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuInstancesUtilizationInfo_v1.__dealloc__;
 * forwards straight to the implementation with no argument handling. */
static void __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * this compiles only because __Pyx_KwValues_VARARGS is a macro that discards
   * its arguments in this configuration -- confirm against the Cython runtime
   * utility code before touching this line. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.__dealloc__.
 * Frees the calloc'ed struct only when this instance owns it (_owned) and the
 * pointer is non-NULL; instances that merely view memory owned elsewhere are
 * left untouched.  _ptr is cleared before free() so a re-entrant or repeated
 * call cannot double-free.  Generated code -- do not edit by hand. */
static void __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  nvmlVgpuInstancesUtilizationInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuInstancesUtilizationInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":18268
 *     def __dealloc__(self):
 *         cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit AND: the _ptr test runs only when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18269
 *         cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":18270
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Null out the member before freeing to defend against double-free. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":18271
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18268
 *     def __dealloc__(self):
 *         cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":18266
 *         self._refs = {}
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":18273
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuInstancesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuInstancesUtilizationInfo_v1.__repr__;
 * forwards straight to the implementation with no argument handling. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * this compiles only because __Pyx_KwValues_VARARGS is a macro that discards
   * its arguments in this configuration -- confirm against the Cython runtime
   * utility code before touching this line. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__: builds the f-string
 * f"<{__name__}.VgpuInstancesUtilizationInfo_v1 object at {hex(id(self))}>"
 * by joining 5 unicode fragments (2 interpolated, 3 constant).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];  /* fragment array fed to __Pyx_PyUnicode_Join */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":18274
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuInstancesUtilizationInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up module-level __name__ and format it into a str. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) -> unicode */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Assemble: "<" + __name__ + ".VgpuInstancesUtilizationInfo_v1 object at " + hex + ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuInstancesUtilizationInfo_v1;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  /* Length hint = constant fragment chars + the two variable fragments. */
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 43 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18273
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuInstancesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18276
 *         return f"<{__name__}.VgpuInstancesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Cython-generated getter entry point for the `ptr` property:
 * casts self to the extension-type struct and delegates to the
 * ___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the raw struct
 * pointer address (self._ptr) as a Python int. A NULL pointer yields 0. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18279
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Convert the pointer to a Python int via intptr_t. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18276
 *         return f"<{__name__}.VgpuInstancesUtilizationInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18281
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level (cdef) helper `_get_ptr`: returns self._ptr as an intptr_t with
 * no Python overhead — no error path, cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":18282
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18281
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18284
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Cython-generated Python-level entry point for __int__:
 * casts self to the extension-type struct and delegates to _6__int__. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__: same as the `ptr` getter — returns the raw
 * self._ptr address as a Python int (0 when the pointer is NULL). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18285
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18285, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18284
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18287
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuInstancesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuInstancesUtilizationInfo_v1):
*/

/* Python wrapper */
/* Cython-generated Python-level entry point for __eq__:
 * forwards self (cast to the extension-type struct) and `other`
 * to the _8__eq__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__: returns False when `other` is not a
 * VgpuInstancesUtilizationInfo_v1; otherwise compares the pointed-to
 * structs byte-for-byte with memcmp over sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t).
 * NOTE(review): memcmp is performed on self._ptr/other_._ptr without a NULL
 * check — comparing instances whose _ptr is NULL would dereference NULL;
 * confirm callers guarantee non-NULL pointers. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18289
 *     def __eq__(self, other):
 *         cdef VgpuInstancesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuInstancesUtilizationInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18290
 *         cdef VgpuInstancesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuInstancesUtilizationInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18289
 *     def __eq__(self, other):
 *         cdef VgpuInstancesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuInstancesUtilizationInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18291
 *         if not isinstance(other, VgpuInstancesUtilizationInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t)) == 0)
 * 
 */
  /* TypeTest is redundant after the isinstance check above but is emitted
   * by Cython for the typed assignment. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1))))) __PYX_ERR(0, 18291, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18292
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18287
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuInstancesUtilizationInfo_v1 other_
 *         if not isinstance(other, VgpuInstancesUtilizationInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18294
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
*/

/* Python wrapper */
/* Cython-generated Python-level entry point for __setitem__:
 * forwards self (cast), key, and val to the _10__setitem__ implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__:
 *   obj[0] = ndarray  -> malloc a fresh nvmlVgpuInstancesUtilizationInfo_v1_t,
 *                        memcpy the array's bytes into it (via val.ctypes.data),
 *                        mark the instance as owning the buffer, and mirror the
 *                        array's writeable flag into self._readonly;
 *   anything else     -> delegate to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): on the ndarray path the previous self._ptr is overwritten
 * without being freed first — confirm upstream that this cannot leak an
 * already-owned allocation. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18295
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: isinstance is only evaluated when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18295, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18295, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18296
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuInstancesUtilizationInfo_v1_t *)malloc((sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18297
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18298
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (possibly shadowed),
       * then called with the fixed message and raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18298, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuInstancesUt};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18298, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18298, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18297
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18299
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (the array's base address as a Python int)
     * and copy exactly sizeof(struct) bytes from it. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18299, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18299, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18299, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18300
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* No external owner: this instance now owns the copied buffer. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18301
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18302
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Inherit read-only status from the source ndarray's writeable flag. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18302, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18295
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18304
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18304, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18294
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18306
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
*/

/* Python wrapper */
/* Cython-generated getter entry point for the `version` property:
 * casts self and delegates to the ___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` getter: reads self._ptr[0].version
 * (unsigned int) and boxes it as a Python int.
 * NOTE(review): dereferences self._ptr without a NULL check — assumes the
 * instance was initialized with a valid buffer. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18309
 *     def version(self):
 *         """int: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18306
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18311
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Cython-generated setter entry point for the `version` property:
 * casts self and delegates to the _2__set__ implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` setter: raises ValueError when the
 * instance is read-only, otherwise converts `val` to unsigned int and
 * stores it into self._ptr[0].version. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18313
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18314
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Build and raise ValueError with the fixed read-only message. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuInstancesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18314, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18314, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18313
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18315
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Overflow / type errors from the int conversion propagate as exceptions. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18315, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18311
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18317
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sample_val_type(self):
 *         """int: Hold the type of returned sample values."""
*/

/* Python wrapper */
/* Cython-generated getter entry point for the `sample_val_type` property:
 * casts self and delegates to the ___get__ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.sample_val_type.__get__:
   reads _ptr[0].sampleValType, casts it to int, and boxes it as a Python int.
   Returns a new reference on success, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18320
 *     def sample_val_type(self):
 *         """int: Hold the type of returned sample values."""
 *         return <int>(self._ptr[0].sampleValType)             # <<<<<<<<<<<<<<
 * 
 *     @sample_val_type.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the enum field as a Python int (enum is truncated through int). */
  __pyx_t_1 = __Pyx_PyLong_From_int(((int)(__pyx_v_self->_ptr[0]).sampleValType)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18317
 *         self._ptr[0].sampleValType = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def sample_val_type(self):
 *         """int: Hold the type of returned sample values."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.sample_val_type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18322
 *         return <int>(self._ptr[0].sampleValType)
 * 
 *     @sample_val_type.setter             # <<<<<<<<<<<<<<
 *     def sample_val_type(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuInstancesUtilizationInfo_v1.sample_val_type.__set__.
   Casts self to the extension-type struct and delegates; returns 0 on success,
   -1 with an exception set on failure (standard descriptor setter contract). */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.sample_val_type.__set__:
   raises ValueError when the instance is read-only, otherwise converts the
   Python value to a C int and stores it into _ptr[0].sampleValType as
   nvmlValueType_t. Returns 0 on success, -1 with an exception on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18324
 *     @sample_val_type.setter
 *     def sample_val_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].sampleValType = <nvmlValueType_t><int>val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18325
 *     def sample_val_type(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].sampleValType = <nvmlValueType_t><int>val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast-call helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuInstancesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18325, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18325, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18324
 *     @sample_val_type.setter
 *     def sample_val_type(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].sampleValType = <nvmlValueType_t><int>val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18326
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].sampleValType = <nvmlValueType_t><int>val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert the Python object to C int (propagating OverflowError/TypeError),
     then store through the enum cast. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18326, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).sampleValType = ((nvmlValueType_t)((int)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":18322
 *         return <int>(self._ptr[0].sampleValType)
 * 
 *     @sample_val_type.setter             # <<<<<<<<<<<<<<
 *     def sample_val_type(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.sample_val_type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18328
 *         self._ptr[0].sampleValType = <nvmlValueType_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
*/

/* Python wrapper */
/* Python-level wrapper for VgpuInstancesUtilizationInfo_v1.last_seen_time_stamp.__get__.
   Casts self to the extension-type struct and delegates to the implementation;
   returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.last_seen_time_stamp.__get__:
   boxes the unsigned long long field _ptr[0].lastSeenTimeStamp as a Python int.
   Returns a new reference on success, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18331
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
 *         return self._ptr[0].lastSeenTimeStamp             # <<<<<<<<<<<<<<
 * 
 *     @last_seen_time_stamp.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_ptr[0]).lastSeenTimeStamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18331, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18328
 *         self._ptr[0].sampleValType = <nvmlValueType_t><int>val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self):
 *         """int: Return only samples with timestamp greater than lastSeenTimeStamp."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.last_seen_time_stamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18333
 *         return self._ptr[0].lastSeenTimeStamp
 * 
 *     @last_seen_time_stamp.setter             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuInstancesUtilizationInfo_v1.last_seen_time_stamp.__set__.
   Casts self to the extension-type struct and delegates; returns 0 on success,
   -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.last_seen_time_stamp.__set__:
   raises ValueError when the instance is read-only, otherwise converts the
   Python value to unsigned long long and stores it into
   _ptr[0].lastSeenTimeStamp. Returns 0 on success, -1 with an exception set. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned PY_LONG_LONG __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18335
 *     @last_seen_time_stamp.setter
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18336
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 */
    /* Build and raise ValueError via the vectorcall fast-call helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuInstancesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18336, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18336, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18335
 *     @last_seen_time_stamp.setter
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18337
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         self._ptr[0].lastSeenTimeStamp = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert to unsigned long long, propagating conversion errors. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18337, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).lastSeenTimeStamp = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18333
 *         return self._ptr[0].lastSeenTimeStamp
 * 
 *     @last_seen_time_stamp.setter             # <<<<<<<<<<<<<<
 *     def last_seen_time_stamp(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.last_seen_time_stamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18339
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_util_array(self):
 *         """int: The array (allocated by caller) in which vGPU utilization are returned."""
*/

/* Python wrapper */
/* Python-level wrapper for VgpuInstancesUtilizationInfo_v1.vgpu_util_array.__get__.
   Casts self to the extension-type struct and delegates to the implementation;
   returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.vgpu_util_array.__get__:
   returns an empty list when _ptr[0].vgpuUtilArray is NULL or
   _ptr[0].vgpuInstanceCount is 0; otherwise wraps the array by calling
   VgpuInstanceUtilizationInfo_v1.from_ptr(address, count). Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18342
 *     def vgpu_util_array(self):
 *         """int: The array (allocated by caller) in which vGPU utilization are returned."""
 *         if self._ptr[0].vgpuUtilArray == NULL or self._ptr[0].vgpuInstanceCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return VgpuInstanceUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuUtilArray), self._ptr[0].vgpuInstanceCount)
 */
  /* Short-circuit `or`: NULL pointer check first, then zero-count check. */
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).vgpuUtilArray == NULL);
  if (!__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = ((__pyx_v_self->_ptr[0]).vgpuInstanceCount == 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18343
 *         """int: The array (allocated by caller) in which vGPU utilization are returned."""
 *         if self._ptr[0].vgpuUtilArray == NULL or self._ptr[0].vgpuInstanceCount == 0:
 *             return []             # <<<<<<<<<<<<<<
 *         return VgpuInstanceUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuUtilArray), self._ptr[0].vgpuInstanceCount)
 * 
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18343, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18342
 *     def vgpu_util_array(self):
 *         """int: The array (allocated by caller) in which vGPU utilization are returned."""
 *         if self._ptr[0].vgpuUtilArray == NULL or self._ptr[0].vgpuInstanceCount == 0:             # <<<<<<<<<<<<<<
 *             return []
 *         return VgpuInstanceUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuUtilArray), self._ptr[0].vgpuInstanceCount)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18344
 *         if self._ptr[0].vgpuUtilArray == NULL or self._ptr[0].vgpuInstanceCount == 0:
 *             return []
 *         return VgpuInstanceUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuUtilArray), self._ptr[0].vgpuInstanceCount)             # <<<<<<<<<<<<<<
 * 
 *     @vgpu_util_array.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address and element count, then fast-call
     VgpuInstanceUtilizationInfo_v1.from_ptr(addr, count). */
  __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  __Pyx_INCREF(__pyx_t_4);
  __pyx_t_5 = PyLong_FromSsize_t(((intptr_t)(__pyx_v_self->_ptr[0]).vgpuUtilArray)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).vgpuInstanceCount); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 0;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_5, __pyx_t_6};
    __pyx_t_3 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18344, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
  }
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18339
 *         self._ptr[0].lastSeenTimeStamp = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def vgpu_util_array(self):
 *         """int: The array (allocated by caller) in which vGPU utilization are returned."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.vgpu_util_array.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18346
 *         return VgpuInstanceUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuUtilArray), self._ptr[0].vgpuInstanceCount)
 * 
 *     @vgpu_util_array.setter             # <<<<<<<<<<<<<<
 *     def vgpu_util_array(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for VgpuInstancesUtilizationInfo_v1.vgpu_util_array.__set__.
   Casts self to the extension-type struct and delegates; returns 0 on success,
   -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.vgpu_util_array.__set__:
   raises ValueError when the instance is read-only; otherwise type-checks val
   as VgpuInstanceUtilizationInfo_v1 (or None), stores its underlying pointer
   into _ptr[0].vgpuUtilArray, stores len(val) into _ptr[0].vgpuInstanceCount,
   and keeps a reference to val in self._refs["vgpu_util_array"] so the wrapped
   memory outlives this struct's pointer to it. Returns 0 on success, -1 with
   an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_arr = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18348
 *     @vgpu_util_array.setter
 *     def vgpu_util_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuInstanceUtilizationInfo_v1 arr = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18349
 *     def vgpu_util_array(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuInstanceUtilizationInfo_v1 arr = val
 *         self._ptr[0].vgpuUtilArray = <nvmlVgpuInstanceUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 */
    /* Build and raise ValueError via the vectorcall fast-call helper. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuInstancesUtilizationInf};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18349, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18349, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18348
 *     @vgpu_util_array.setter
 *     def vgpu_util_array(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuInstanceUtilizationInfo_v1 arr = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18350
 *         if self._readonly:
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuInstanceUtilizationInfo_v1 arr = val             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuUtilArray = <nvmlVgpuInstanceUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].vgpuInstanceCount = len(arr)
 */
  /* cdef typed assignment: TypeTest raises TypeError unless val is None or a
     VgpuInstanceUtilizationInfo_v1 instance. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1))))) __PYX_ERR(0, 18350, __pyx_L1_error)
  __pyx_v_arr = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18351
 *             raise ValueError("This VgpuInstancesUtilizationInfo_v1 instance is read-only")
 *         cdef VgpuInstanceUtilizationInfo_v1 arr = val
 *         self._ptr[0].vgpuUtilArray = <nvmlVgpuInstanceUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())             # <<<<<<<<<<<<<<
 *         self._ptr[0].vgpuInstanceCount = len(arr)
 *         self._refs["vgpu_util_array"] = arr
 */
  /* Fetch the wrapped buffer address through the vtable and store it. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v_arr->__pyx_vtab)->_get_ptr(__pyx_v_arr); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18351, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuUtilArray = ((nvmlVgpuInstanceUtilizationInfo_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":18352
 *         cdef VgpuInstanceUtilizationInfo_v1 arr = val
 *         self._ptr[0].vgpuUtilArray = <nvmlVgpuInstanceUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].vgpuInstanceCount = len(arr)             # <<<<<<<<<<<<<<
 *         self._refs["vgpu_util_array"] = arr
 * 
 */
  __pyx_t_5 = PyObject_Length(((PyObject *)__pyx_v_arr)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18352, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).vgpuInstanceCount = __pyx_t_5;

  /* "cuda/bindings/_nvml.pyx":18353
 *         self._ptr[0].vgpuUtilArray = <nvmlVgpuInstanceUtilizationInfo_v1_t*><intptr_t>(arr._get_ptr())
 *         self._ptr[0].vgpuInstanceCount = len(arr)
 *         self._refs["vgpu_util_array"] = arr             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* Keep arr alive in the _refs dict; subscripting None raises TypeError. */
  if (unlikely(__pyx_v_self->_refs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 18353, __pyx_L1_error)
  }
  if (unlikely((PyDict_SetItem(__pyx_v_self->_refs, __pyx_mstate_global->__pyx_n_u_vgpu_util_array, ((PyObject *)__pyx_v_arr)) < 0))) __PYX_ERR(0, 18353, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":18346
 *         return VgpuInstanceUtilizationInfo_v1.from_ptr(<intptr_t>(self._ptr[0].vgpuUtilArray), self._ptr[0].vgpuInstanceCount)
 * 
 *     @vgpu_util_array.setter             # <<<<<<<<<<<<<<
 *     def vgpu_util_array(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.vgpu_util_array.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18355
 *         self._refs["vgpu_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Python-level wrapper for the static method
   VgpuInstancesUtilizationInfo_v1.from_data(data): unpacks the single
   positional/keyword argument `data` (METH_FASTCALL calling convention) and
   delegates to the implementation function.

   Fix: the keyword-count error check previously read
   `if (unlikely(__pyx_kwds_len) < 0)`, which compares the 0/1 result of
   __builtin_expect against 0 and is therefore always false, silently ignoring
   a negative (error) return from __Pyx_NumKwargs_FASTCALL. The parenthesis now
   encloses the whole comparison so the error path is actually reachable. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_12from_data, "VgpuInstancesUtilizationInfo_v1.from_data(data)\n\nCreate an VgpuInstancesUtilizationInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_instances_utilization_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Propagate an error from counting the keyword arguments (was a no-op
       check due to a misplaced parenthesis: `unlikely(x) < 0`). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18355, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: bind any positional args, then parse keywords and
         verify all required arguments were supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18355, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 18355, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 18355, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18355, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18355, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuInstancesUtilizationInfo_v1.from_data(data): looks up
   the module-global dtype object `vgpu_instances_utilization_info_v1_dtype`
   and delegates to the shared __from_data helper with the dtype name, the
   dtype, and the VgpuInstancesUtilizationInfo_v1 type. Returns a new
   reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":18362
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_instances_utilization_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_instances_utilization_info_v1_dtype", vgpu_instances_utilization_info_v1_dtype, VgpuInstancesUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype from the module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_instances_utilization_info); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_instances_utilization_info, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18355
 *         self._refs["vgpu_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18364
 *         return __from_data(data, "vgpu_instances_utilization_info_v1_dtype", vgpu_instances_utilization_info_v1_dtype, VgpuInstancesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * Argument-unpacking wrapper for the Python-visible staticmethod
 *     VgpuInstancesUtilizationInfo_v1.from_ptr(ptr, readonly=False, owner=None)
 * Supports both calling conventions selected by CYTHON_METH_FASTCALL,
 * validates positional/keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C int, then delegates to the __pyx_pf_* implementation.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_14from_ptr, "VgpuInstancesUtilizationInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuInstancesUtilizationInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which evaluates to
     * 0 or 1, so the original comparison could never be true and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * The comparison belongs inside unlikely(), matching the correctly
     * generated check used by the other wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18364, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 18364, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":18365
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 18364, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18364, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18365, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18365, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 18364, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":18364
 *         return __from_data(data, "vgpu_instances_utilization_info_v1_dtype", vgpu_instances_utilization_info_v1_dtype, VgpuInstancesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of from_ptr: wraps a raw nvmlVgpuInstancesUtilizationInfo_v1_t*
 * address in a new VgpuInstancesUtilizationInfo_v1 object.
 * Ownership semantics (visible below): when `owner` is None the struct at `ptr`
 * is deep-copied into freshly malloc'd memory and the new object owns (and will
 * presumably free) it; otherwise the object aliases the caller's memory and
 * keeps `owner` alive via obj._owner.  A null ptr raises ValueError; a failed
 * malloc raises MemoryError.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* __pyx_t_* are scratch temporaries tracked by RefNanny; __pyx_lineno/
   * __pyx_filename/__pyx_clineno feed __Pyx_AddTraceback on the error path. */
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":18373
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18374
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via vectorcall. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18374, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18374, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18373
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":18375
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
*/
  /* Allocate the wrapper object directly via tp_new (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18375, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18376
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18377
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
*/
    __pyx_v_obj->_ptr = ((nvmlVgpuInstancesUtilizationInfo_v1_t *)malloc((sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18378
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18379
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             obj._owner = None
*/
      /* MemoryError is looked up as a module global (could be shadowed),
       * hence the generic call path rather than PyErr_NoMemory. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18379, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuInstancesUt};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18379, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 18379, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18378
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":18380
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* No owner supplied: deep-copy the struct at `ptr` into the malloc'd
     * buffer so the caller's memory is not retained. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18381
 *                 raise MemoryError("Error allocating VgpuInstancesUtilizationInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18382
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18376
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuInstancesUtilizationInfo_v1 obj = VgpuInstancesUtilizationInfo_v1.__new__(VgpuInstancesUtilizationInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>malloc(sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":18384
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Owner supplied: alias the caller's memory and hold a reference to owner. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlVgpuInstancesUtilizationInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18385
 *         else:
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":18386
 *             obj._ptr = <nvmlVgpuInstancesUtilizationInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         obj._refs = {}
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":18387
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         obj._refs = {}
 *         return obj
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":18388
 *             obj._owned = False
 *         obj._readonly = readonly
 *         obj._refs = {}             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_obj->_refs);
  __Pyx_DECREF(__pyx_v_obj->_refs);
  __pyx_v_obj->_refs = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18389
 *         obj._readonly = readonly
 *         obj._refs = {}
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18364
 *         return __from_data(data, "vgpu_instances_utilization_info_v1_dtype", vgpu_instances_utilization_info_v1_dtype, VgpuInstancesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  /* Error path: drop temporaries, record the traceback frame, return NULL.
   * The partially built obj (if any) is released by the XDECREF below. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * Wrapper for VgpuInstancesUtilizationInfo_v1.__reduce_cython__(self):
 * rejects any positional or keyword arguments, then delegates to the
 * __pyx_pf_* implementation (which unconditionally raises TypeError, since
 * the raw _ptr field makes instances unpicklable).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_16__reduce_cython__, "VgpuInstancesUtilizationInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self: reject extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __reduce_cython__: always raises TypeError because the
 * wrapped raw C pointer (_ptr) cannot be serialized, so instances refuse
 * pickling.  Never returns a value.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * Wrapper for VgpuInstancesUtilizationInfo_v1.__setstate_cython__(self, __pyx_state):
 * unpacks exactly one argument (positional or keyword `__pyx_state`), then
 * delegates to the __pyx_pf_* implementation, which unconditionally raises
 * TypeError (instances are not picklable because _ptr is a raw C pointer).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_18__setstate_cython__, "VgpuInstancesUtilizationInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which evaluates to
     * 0 or 1, so the original comparison could never be true and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * The comparison belongs inside unlikely(), matching the correctly
     * generated check in __reduce_cython__'s wrapper above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __setstate_cython__: always raises TypeError because the
 * wrapped raw C pointer (_ptr) cannot be restored from pickled state; the
 * __pyx_state argument is accepted but never used.  Never returns a value.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18392
 * 
 * 
 * cdef _get_vgpu_scheduler_log_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLog_t pod = nvmlVgpuSchedulerLog_t()
 *     return _numpy.dtype({
*/

/* Builds a numpy structured dtype describing nvmlVgpuSchedulerLog_t:
 * field names, formats, byte offsets (computed via address arithmetic on a
 * stack instance `pod`), and the struct's total itemsize.
 * Returns a new reference to the dtype, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_dtype_offsets(void) {
  nvmlVgpuSchedulerLog_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerLog_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  size_t __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_log_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":18393
 * 
 * cdef _get_vgpu_scheduler_log_dtype_offsets():
 *     cdef nvmlVgpuSchedulerLog_t pod = nvmlVgpuSchedulerLog_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 */
  /* Fix: value-initialize the temporary before copying it into `pod`.
   * The generated code read `__pyx_t_1` without ever assigning it, copying
   * an indeterminate object. Only field ADDRESSES of `pod` are used below,
   * so zero-initialization is sufficient and behavior-preserving. */
  __pyx_t_1 = nvmlVgpuSchedulerLog_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":18394
 * cdef _get_vgpu_scheduler_log_dtype_offsets():
 *     cdef nvmlVgpuSchedulerLog_t pod = nvmlVgpuSchedulerLog_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 */
  /* Look up _numpy.dtype (held in __pyx_t_5) for the final call. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":18395
 *     cdef nvmlVgpuSchedulerLog_t pod = nvmlVgpuSchedulerLog_t()
 *     return _numpy.dtype({
 *         'names': ['engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 *         'offsets': [
 */
  /* dict spec (__pyx_t_4) with the four keys: names, formats, offsets, itemsize. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_engine_id) != (0)) __PYX_ERR(0, 18395, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_scheduler_policy) != (0)) __PYX_ERR(0, 18395, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_arr_mode) != (0)) __PYX_ERR(0, 18395, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_scheduler_params) != (0)) __PYX_ERR(0, 18395, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_entries_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_entries_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_entries_count) != (0)) __PYX_ERR(0, 18395, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_log_entries);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_log_entries);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_log_entries) != (0)) __PYX_ERR(0, 18395, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 18395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18396
 *     return _numpy.dtype({
 *         'names': ['engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_12 = PyList_New(6); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 18396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 18396, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18396, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 18396, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 3, __pyx_t_6) != (0)) __PYX_ERR(0, 18396, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 4, __pyx_t_11) != (0)) __PYX_ERR(0, 18396, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_12, 5, __pyx_t_10) != (0)) __PYX_ERR(0, 18396, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  __pyx_t_11 = 0;
  __pyx_t_10 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_12) < (0)) __PYX_ERR(0, 18395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;

  /* "cuda/bindings/_nvml.pyx":18398
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 */
  /* Each offset is the byte distance from &pod to the member's address. */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.engineId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 18398, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":18399
 *         'offsets': [
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerPolicy)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":18400
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.arrMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":18401
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerParams)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":18402
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.entriesCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18402, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":18403
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerLog_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.logEntries)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":18397
 *         'names': ['engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_12) != (0)) __PYX_ERR(0, 18397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 18397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_11) != (0)) __PYX_ERR(0, 18397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_6) != (0)) __PYX_ERR(0, 18397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_9) != (0)) __PYX_ERR(0, 18397, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_8) != (0)) __PYX_ERR(0, 18397, __pyx_L1_error);
  __pyx_t_12 = 0;
  __pyx_t_10 = 0;
  __pyx_t_11 = 0;
  __pyx_t_6 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 18395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":18405
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerLog_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerLog_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 18395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall _numpy.dtype(spec); __pyx_t_13 selects bound-method vs plain call. */
  __pyx_t_13 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_13 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_13, (2-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18394, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18392
 * 
 * 
 * cdef _get_vgpu_scheduler_log_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLog_t pod = nvmlVgpuSchedulerLog_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_log_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18422
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerLog_t *>calloc(1, sizeof(nvmlVgpuSchedulerLog_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for VgpuSchedulerLog.__init__: rejects any positional or
 * keyword arguments, then dispatches to the argument-less implementation. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments besides self: raise on any positionals... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  /* ...and on any keywords. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLog.__init__: allocates a zeroed
 * nvmlVgpuSchedulerLog_t on the heap, marks the instance as owning it
 * (writable, no external owner). Returns 0 on success, -1 with
 * MemoryError set when the allocation fails. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":18423
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerLog_t *>calloc(1, sizeof(nvmlVgpuSchedulerLog_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerLog")
 */
  /* calloc zero-initializes the backing struct. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerLog_t *)calloc(1, (sizeof(nvmlVgpuSchedulerLog_t))));

  /* "cuda/bindings/_nvml.pyx":18424
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerLog_t *>calloc(1, sizeof(nvmlVgpuSchedulerLog_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerLog")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18425
 *         self._ptr = <nvmlVgpuSchedulerLog_t *>calloc(1, sizeof(nvmlVgpuSchedulerLog_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerLog")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build MemoryError("Error allocating VgpuSchedulerLog") via vectorcall
       and raise it; __pyx_t_5 distinguishes bound-method vs plain call. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18425, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerLo};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18425, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18425, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18424
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerLog_t *>calloc(1, sizeof(nvmlVgpuSchedulerLog_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerLog")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":18426
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerLog")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":18427
 *             raise MemoryError("Error allocating VgpuSchedulerLog")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":18428
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":18422
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerLog_t *>calloc(1, sizeof(nvmlVgpuSchedulerLog_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18430
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerLog_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-time wrapper for VgpuSchedulerLog.__dealloc__. */
static void __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not parameters of this
   * function; this compiles only because __Pyx_KwValues_VARARGS presumably
   * expands without evaluating its arguments (e.g. to NULL) — confirm
   * against the macro's definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  nvmlVgpuSchedulerLog_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuSchedulerLog_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":18432
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerLog_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18433
 *         cdef nvmlVgpuSchedulerLog_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":18434
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":18435
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18432
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerLog_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":18430
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerLog_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":18437
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerLog object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for VgpuSchedulerLog.__repr__: forwards straight to the
 * implementation; __repr__ takes no arguments besides self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments — verify macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLog.__repr__: builds the f-string
 * "<{__name__}.VgpuSchedulerLog object at {hex(id(self))}>" by formatting
 * the module's __name__ and hex(id(self)), then joining five fragments. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":18438
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerLog object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* __pyx_t_2 = str(__name__) via FormatSimple. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* __pyx_t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", name, " .VgpuSchedulerLog object at ", address, ">" with a
     precomputed total length and max char value. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerLog_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 28 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18437
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerLog object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18440
 *         return f"<{__name__}.VgpuSchedulerLog object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuSchedulerLog.ptr. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments — verify macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the VgpuSchedulerLog.ptr property: returns the raw
 * struct pointer address as a Python int (may be 0 after __dealloc__). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18443
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer -> intptr_t -> Python int; only fails on allocation error. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18440
 *         return f"<{__name__}.VgpuSchedulerLog object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18445
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* "cuda/bindings/_nvml.pyx":18445 — VgpuSchedulerLog._get_ptr
 * C-level accessor: returns the backing struct pointer as an integer
 * address (intptr_t). Cannot fail and touches no Python state. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_16VgpuSchedulerLog__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":18448
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for VgpuSchedulerLog.__int__. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; relies on
   * __Pyx_KwValues_VARARGS not evaluating its arguments — verify macro. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerLog.__int__` (_nvml.pyx:18448-18449):
 * returns `int(<intptr_t>self._ptr)` -- the raw struct address as a
 * Python int. Returns a new reference, or NULL with an exception set
 * if the int object cannot be allocated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18449
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address as a Python int; Ssize_t is wide enough for
   * intptr_t on the supported platforms. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18448
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18451
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLog other_
 *         if not isinstance(other, VgpuSchedulerLog):
*/

/* Python wrapper */
/* CPython slot wrapper for `VgpuSchedulerLog.__eq__` (_nvml.pyx:18451).
 * Casts the receiver and forwards both operands to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerLog.__eq__` (_nvml.pyx:18451-18456).
 * Returns False for any `other` that is not a VgpuSchedulerLog (or
 * subclass); otherwise compares the two underlying C structs bytewise
 * with memcmp over sizeof(nvmlVgpuSchedulerLog_t).
 * NOTE(review): if either `_ptr` were NULL, memcmp on it would be
 * undefined behavior -- presumably construction guarantees a valid
 * pointer; confirm in the .pyx. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18453
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerLog other_
 *         if not isinstance(other, VgpuSchedulerLog):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18454
 *         cdef VgpuSchedulerLog other_
 *         if not isinstance(other, VgpuSchedulerLog):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLog_t)) == 0)
 */
    /* Type mismatch: equality is False, never NotImplemented. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18453
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerLog other_
 *         if not isinstance(other, VgpuSchedulerLog):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18455
 *         if not isinstance(other, VgpuSchedulerLog):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLog_t)) == 0)
 * 
 */
  /* Downcast to the typed cdef local; the isinstance check above already
   * ruled out None and foreign types, so the TypeTest cannot fail here. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog))))) __PYX_ERR(0, 18455, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18456
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLog_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Bytewise struct comparison -- equal contents, not pointer identity. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerLog_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18451
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLog other_
 *         if not isinstance(other, VgpuSchedulerLog):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18458
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLog_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
*/

/* Python wrapper */
/* CPython slot wrapper for `VgpuSchedulerLog.__setitem__` (_nvml.pyx:18458).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerLog.__setitem__` (_nvml.pyx:18458-18468).
 * Special-cases `obj[0] = ndarray`: allocates a fresh
 * nvmlVgpuSchedulerLog_t, memcpy's the array's buffer into it (via
 * `val.ctypes.data`), and marks the instance as owning that buffer with
 * read-only mirroring the array's writeable flag. Any other key falls
 * back to `setattr(self, key, val)`.
 * NOTE(review): the ndarray path overwrites `self._ptr` with a new
 * malloc without freeing a previously owned buffer -- looks like a
 * potential leak if `_owned` was already True; verify against the
 * dealloc logic in the .pyx.
 * NOTE(review): the memcpy assumes `val`'s buffer holds at least
 * sizeof(nvmlVgpuSchedulerLog_t) bytes -- TODO confirm callers pass a
 * correctly sized array. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18459
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only look up numpy.ndarray when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18459, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18459, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18459, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18459, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18460
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerLog_t *)malloc((sizeof(nvmlVgpuSchedulerLog_t))));

    /* "cuda/bindings/_nvml.pyx":18461
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLog_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18462
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLog_t))
 *             self._owner = None
 */
      /* Allocation failed: build and raise MemoryError(message). MemoryError
       * is looked up as a module global (it could be shadowed), then called
       * through the generic vectorcall path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18462, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerLo};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18462, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18462, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18461
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLog_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18463
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLog_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch the array's base address (val.ctypes.data is a Python int)
     * and copy one struct's worth of bytes into the new allocation. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18463, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18463, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18463, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerLog_t))));

    /* "cuda/bindings/_nvml.pyx":18464
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLog_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The data was copied, so no external owner keeps it alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18465
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLog_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18466
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag as our read-only state. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18466, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18466, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18466, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18459
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18468
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Non-(0, ndarray) assignment: delegate to normal attribute setting. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18468, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18458
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLog_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18470
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: """
*/

/* Python wrapper */
/* Property getter wrapper for `VgpuSchedulerLog.scheduler_params`
 * (_nvml.pyx:18470). Casts the receiver and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for `VgpuSchedulerLog.scheduler_params` (_nvml.pyx:18470-18473).
 * Builds a VgpuSchedulerParams view via
 * `VgpuSchedulerParams.from_ptr(<intptr_t>&self._ptr[0].schedulerParams,
 * self._readonly, self)` -- a zero-copy view into the embedded struct
 * field; passing `self` as the third argument keeps this object (and
 * therefore the underlying buffer) alive while the view exists. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18473
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: """
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_params.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the field's address as a Python int for from_ptr(). */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).schedulerParams))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18473, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18470
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.scheduler_params.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18475
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for `VgpuSchedulerLog.scheduler_params`
 * (_nvml.pyx:18475). Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for `VgpuSchedulerLog.scheduler_params` (_nvml.pyx:18475-18480).
 * Raises ValueError when the instance is read-only; otherwise copies the
 * nvmlVgpuSchedulerParams_t struct out of `val` (a VgpuSchedulerParams)
 * into the embedded `schedulerParams` field.
 * NOTE(review): the cdef cast accepts None (TypeTest allows Py_None);
 * a None `val` would then crash at the vtable call below -- presumably
 * callers never pass None; confirm in the .pyx. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18477
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18478
 *     def scheduler_params(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 */
    /* Construct and raise ValueError(message) via vectorcall. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLog_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18478, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18478, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18477
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18479
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 */
  /* Typed cast with runtime type check (TypeError on mismatch). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams))))) __PYX_ERR(0, 18479, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18480
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fetch the source struct pointer via the vtable, then copy by value. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18480, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).schedulerParams)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuSchedulerParams_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":18475
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.scheduler_params.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18482
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def log_entries(self):
 *         """VgpuSchedulerLogEntry: """
*/

/* Python wrapper */
/* Property getter wrapper for `VgpuSchedulerLog.log_entries`
 * (_nvml.pyx:18482). Casts the receiver and delegates. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for `VgpuSchedulerLog.log_entries` (_nvml.pyx:18482-18485).
 * Builds a VgpuSchedulerLogEntry view over the embedded `logEntries`
 * array via `from_ptr(address, 200, self._readonly)` -- 200 is the
 * fixed element count of the array in nvmlVgpuSchedulerLog_t as
 * generated from the NVML header. Unlike scheduler_params, `self` is
 * not passed as an owner here; this from_ptr overload apparently takes
 * (ptr, count, readonly) instead -- see its definition. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18485
 *     def log_entries(self):
 *         """VgpuSchedulerLogEntry: """
 *         return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @log_entries.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  __Pyx_INCREF(__pyx_t_2);
  /* Box the array field's base address as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).logEntries))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_200, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18485, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18482
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def log_entries(self):
 *         """VgpuSchedulerLogEntry: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.log_entries.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18487
 *         return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)
 * 
 *     @log_entries.setter             # <<<<<<<<<<<<<<
 *     def log_entries(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Property setter wrapper for `VgpuSchedulerLog.log_entries`
 * (_nvml.pyx:18487). Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ implementation for VgpuSchedulerLog.log_entries.
 * Rejects writes when the instance is read-only, type-checks `val` as a
 * VgpuSchedulerLogEntry wrapper, requires len(val) == 200, then memcpy's
 * 200 * sizeof(nvmlVgpuSchedulerLogEntry_t) bytes from the wrapper's
 * underlying buffer (via its _get_ptr vtable slot) into
 * self._ptr[0].logEntries. Returns 0 on success, -1 with a Python
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18489
 *     @log_entries.setter
 *     def log_entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18490
 *     def log_entries(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLog_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18490, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18490, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18489
 *     @log_entries.setter
 *     def log_entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18491
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 200:
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  /* TypeTest also accepts None here; NOTE(review): a None val would reach the
   * vtable _get_ptr call below — confirm the Cython-level contract excludes None. */
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry))))) __PYX_ERR(0, 18491, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18492
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 */
  /* 0xC8 == 200: the fixed element count of the logEntries array. */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18492, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 0xC8);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":18493
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 * 
 */
    /* Build the f-string: static prefix concatenated with the formatted length. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18493, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18493, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_200_for_field_lo, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18493, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18493, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18493, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18492
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18494
 *         if len(val) != 200:
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18494, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).logEntries)), ((void *)__pyx_t_8), ((sizeof(nvmlVgpuSchedulerLogEntry_t)) * 0xC8)));

  /* "cuda/bindings/_nvml.pyx":18487
 *         return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)
 * 
 *     @log_entries.setter             # <<<<<<<<<<<<<<
 *     def log_entries(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.log_entries.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18496
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ entry point for VgpuSchedulerLog.engine_id.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ implementation for VgpuSchedulerLog.engine_id: boxes the C
 * `unsigned int` field self._ptr[0].engineId into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18499
 *     def engine_id(self):
 *         """int: """
 *         return self._ptr[0].engineId             # <<<<<<<<<<<<<<
 * 
 *     @engine_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).engineId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18499, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18496
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.engine_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18501
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ entry point for VgpuSchedulerLog.engine_id.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ implementation for VgpuSchedulerLog.engine_id.
 * Raises ValueError when the instance is read-only; otherwise converts the
 * Python value to `unsigned int` and stores it into self._ptr[0].engineId.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18503
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].engineId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18504
 *     def engine_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].engineId = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLog_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18504, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18504, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18503
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].engineId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18505
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].engineId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18505, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).engineId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18501
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.engine_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18507
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ entry point for VgpuSchedulerLog.scheduler_policy.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ implementation for VgpuSchedulerLog.scheduler_policy: boxes the C
 * `unsigned int` field self._ptr[0].schedulerPolicy into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18510
 *     def scheduler_policy(self):
 *         """int: """
 *         return self._ptr[0].schedulerPolicy             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_policy.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).schedulerPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18507
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.scheduler_policy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18512
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ entry point for VgpuSchedulerLog.scheduler_policy.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ implementation for VgpuSchedulerLog.scheduler_policy.
 * Raises ValueError when the instance is read-only; otherwise converts the
 * Python value to `unsigned int` and stores it into
 * self._ptr[0].schedulerPolicy. Returns 0 on success, -1 with a Python
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18514
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18515
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].schedulerPolicy = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLog_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18515, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18515, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18514
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18516
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].schedulerPolicy = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18516, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).schedulerPolicy = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18512
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.scheduler_policy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18518
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ entry point for VgpuSchedulerLog.arr_mode.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ implementation for VgpuSchedulerLog.arr_mode: boxes the C
 * `unsigned int` field self._ptr[0].arrMode into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18521
 *     def arr_mode(self):
 *         """int: """
 *         return self._ptr[0].arrMode             # <<<<<<<<<<<<<<
 * 
 *     @arr_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).arrMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18518
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.arr_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18523
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ entry point for VgpuSchedulerLog.arr_mode.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ implementation for VgpuSchedulerLog.arr_mode.
 * Raises ValueError when the instance is read-only; otherwise converts the
 * Python value to `unsigned int` and stores it into self._ptr[0].arrMode.
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18525
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].arrMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18526
 *     def arr_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].arrMode = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLog_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18526, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18526, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18525
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].arrMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18527
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].arrMode = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18527, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).arrMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18523
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.arr_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18529
 *         self._ptr[0].arrMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def entries_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level __get__ entry point for VgpuSchedulerLog.entries_count.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __get__ implementation for VgpuSchedulerLog.entries_count: boxes the C
 * `unsigned int` field self._ptr[0].entriesCount into a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18532
 *     def entries_count(self):
 *         """int: """
 *         return self._ptr[0].entriesCount             # <<<<<<<<<<<<<<
 * 
 *     @entries_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).entriesCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18529
 *         self._ptr[0].arrMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def entries_count(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.entries_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18534
 *         return self._ptr[0].entriesCount
 * 
 *     @entries_count.setter             # <<<<<<<<<<<<<<
 *     def entries_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level __set__ entry point for VgpuSchedulerLog.entries_count.
 * Casts self to the concrete extension-object struct and forwards to the
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __set__ implementation for VgpuSchedulerLog.entries_count.
 * Raises ValueError when the instance is read-only; otherwise converts the
 * Python value to `unsigned int` and stores it into
 * self._ptr[0].entriesCount. Returns 0 on success, -1 with a Python
 * exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18536
 *     @entries_count.setter
 *     def entries_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].entriesCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18537
 *     def entries_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].entriesCount = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLog_instance_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18537, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18537, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18536
 *     @entries_count.setter
 *     def entries_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].entriesCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18538
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLog instance is read-only")
 *         self._ptr[0].entriesCount = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18538, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).entriesCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18534
 *         return self._ptr[0].entriesCount
 * 
 *     @entries_count.setter             # <<<<<<<<<<<<<<
 *     def entries_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.entries_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18540
 *         self._ptr[0].entriesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLog instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method VgpuSchedulerLog.from_data(data).
 * Accepts the METH_FASTCALL|METH_KEYWORDS calling convention, extracts the single
 * required "data" argument (positional or keyword) into values[0], and forwards it
 * to the implementation function __pyx_pf_..._12from_data. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_12from_data, "VgpuSchedulerLog.from_data(data)\n\nCreate an VgpuSchedulerLog instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_log_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was 'unlikely(__pyx_kwds_len) < 0'. Under GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the
     * old comparison was always false and this error check was dead code.
     * The corrected form matches the check used at other call sites in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18540, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18540, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 18540, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 18540, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18540, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18540, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method VgpuSchedulerLog.from_data(data):
 * looks up the module-global `vgpu_scheduler_log_dtype`, then delegates to the
 * shared helper __from_data(), passing the VgpuSchedulerLog extension type so
 * the helper can construct the wrapping instance. Returns a new reference, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":18547
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_log_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_log_dtype", vgpu_scheduler_log_dtype, VgpuSchedulerLog)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the module-level dtype object; raises NameError/AttributeError if missing. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18540
 *         self._ptr[0].entriesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLog instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18549
 *         return __from_data(data, "vgpu_scheduler_log_dtype", vgpu_scheduler_log_dtype, VgpuSchedulerLog)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLog instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the static method
 * VgpuSchedulerLog.from_ptr(ptr, readonly=False, owner=None).
 * Accepts the METH_FASTCALL|METH_KEYWORDS calling convention, unpacks one
 * required argument ("ptr") and two optional ones ("readonly", "owner"),
 * converts ptr to intptr_t and readonly to a C int, then forwards to the
 * implementation function __pyx_pf_..._14from_ptr. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_14from_ptr, "VgpuSchedulerLog.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerLog instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was 'unlikely(__pyx_kwds_len) < 0'. Under GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the
     * old comparison was always false and this error check was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18549, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 18549, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":18550
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerLog instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 18549, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18549, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18550, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18550, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 18549, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":18549
 *         return __from_data(data, "vgpu_scheduler_log_dtype", vgpu_scheduler_log_dtype, VgpuSchedulerLog)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLog instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLog.from_ptr(ptr, readonly, owner):
 * - rejects a null (0) pointer with ValueError;
 * - if owner is None, malloc's a private nvmlVgpuSchedulerLog_t, memcpy's the
 *   pointed-to data into it, and marks the instance as owning the allocation
 *   (_owned = True) so it is responsible for freeing it;
 * - otherwise aliases the caller's pointer directly and keeps a reference to
 *   `owner` to keep the underlying memory alive (_owned = False);
 * - finally stores the readonly flag and returns the new instance.
 * Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":18558
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18559
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18559, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18559, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18558
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)
*/
  }

  /* "cuda/bindings/_nvml.pyx":18560
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
*/
  /* tp_new is called directly (bypassing __init__), so all C fields are set below. */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLog(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18560, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18561
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18562
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
*/
    /* No owner: take an owned deep copy of the struct behind `ptr`. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerLog_t *)malloc((sizeof(nvmlVgpuSchedulerLog_t))));

    /* "cuda/bindings/_nvml.pyx":18563
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLog_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18564
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLog_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18564, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerLo};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18564, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 18564, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18563
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLog_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":18565
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLog_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerLog_t))));

    /* "cuda/bindings/_nvml.pyx":18566
 *                 raise MemoryError("Error allocating VgpuSchedulerLog")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLog_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18567
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLog_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18561
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLog obj = VgpuSchedulerLog.__new__(VgpuSchedulerLog)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>malloc(sizeof(nvmlVgpuSchedulerLog_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":18569
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's memory and pin `owner` to keep it alive. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerLog_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18570
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":18571
 *             obj._ptr = <nvmlVgpuSchedulerLog_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":18572
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":18573
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18549
 *         return __from_data(data, "vgpu_scheduler_log_dtype", vgpu_scheduler_log_dtype, VgpuSchedulerLog)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLog instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for VgpuSchedulerLog.__reduce_cython__: rejects any positional or
 * keyword arguments, then delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16__reduce_cython__, "VgpuSchedulerLog.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments beyond self; fail fast on any extras. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: pickling is unsupported because the
 * instance wraps a raw C pointer (self._ptr); unconditionally raises TypeError
 * and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Argument-unpacking wrapper for VgpuSchedulerLog.__setstate_cython__(__pyx_state):
 * unpacks the single required "__pyx_state" argument (positional or keyword)
 * and forwards it to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_18__setstate_cython__, "VgpuSchedulerLog.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was 'unlikely(__pyx_kwds_len) < 0'. Under GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the
     * old comparison was always false and this error check was dead code.
     * The corrected form matches the check in __reduce_cython__'s wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLog.__setstate_cython__ (generated from a
 * Cython "tree fragment"): pickling is unsupported because the wrapper holds
 * a raw C struct pointer, so this unconditionally raises TypeError.
 * Both parameters are unused (CYTHON_UNUSED) — the error is raised before
 * the state argument could be consulted. Always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the fixed message; control then jumps to the error
   * label — the code below the raise is never reached on the success path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLog.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18576
 * 
 * 
 * cdef _get_vgpu_scheduler_get_state_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerGetState_t pod = nvmlVgpuSchedulerGetState_t()
 *     return _numpy.dtype({
*/

/* Module-private (cdef) helper: builds and returns the numpy structured dtype
 * describing nvmlVgpuSchedulerGetState_t, with field names, formats, byte
 * offsets (computed from a stack instance's field addresses) and itemsize.
 * Returns a new reference to the dtype object, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_get_state_dtype_offsets(void) {
  nvmlVgpuSchedulerGetState_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerGetState_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_get_state_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":18577
 * 
 * cdef _get_vgpu_scheduler_get_state_dtype_offsets():
 *     cdef nvmlVgpuSchedulerGetState_t pod = nvmlVgpuSchedulerGetState_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['scheduler_policy', 'arr_mode', 'scheduler_params'],
 */
  /* NOTE(review): __pyx_t_1 is copied here without visible initialization
   * (presumably Cython elided the trivial default-construction); benign in
   * practice because only the *addresses* of pod's fields are taken below,
   * never their values — TODO confirm against the generator's output. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":18578
 * cdef _get_vgpu_scheduler_get_state_dtype_offsets():
 *     cdef nvmlVgpuSchedulerGetState_t pod = nvmlVgpuSchedulerGetState_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['scheduler_policy', 'arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 */
  /* Look up _numpy.dtype (the callable used at the end of this function). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":18579
 *     cdef nvmlVgpuSchedulerGetState_t pod = nvmlVgpuSchedulerGetState_t()
 *     return _numpy.dtype({
 *         'names': ['scheduler_policy', 'arr_mode', 'scheduler_params'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 *         'offsets': [
 */
  /* Build the spec dict: 'names' -> list of the three field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_scheduler_policy) != (0)) __PYX_ERR(0, 18579, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_arr_mode) != (0)) __PYX_ERR(0, 18579, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_scheduler_params) != (0)) __PYX_ERR(0, 18579, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 18579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18580
 *     return _numpy.dtype({
 *         'names': ['scheduler_policy', 'arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 */
  /* 'formats' -> [uint32, uint32, vgpu_scheduler_params_dtype] (the nested
   * struct field reuses the module-level dtype for its own POD type). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = PyList_New(3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 18580, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18580, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 18580, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_9) < (0)) __PYX_ERR(0, 18579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":18582
 *         'formats': [_numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 */
  /* 'offsets' -> byte offset of each field, derived from pointer arithmetic
   * on the stack instance (equivalent to offsetof for each member). */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerPolicy)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":18583
 *         'offsets': [
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.arrMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":18584
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerGetState_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerParams)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18584, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":18581
 *         'names': ['scheduler_policy', 'arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 18581, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 18581, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 18581, __pyx_L1_error);
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 18579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":18586
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerGetState_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' -> total struct size so the dtype matches the C layout,
   * including any trailing padding. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerGetState_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18586, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 18579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec) via the vectorcall fast path; the bound-method
   * unpacking below prepends `self` when dtype happens to be a method. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18576
 * 
 * 
 * cdef _get_vgpu_scheduler_get_state_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerGetState_t pod = nvmlVgpuSchedulerGetState_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_get_state_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18603
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerGetState_t *>calloc(1, sizeof(nvmlVgpuSchedulerGetState_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init wrapper for VgpuSchedulerGetState.__init__: validates that no
 * positional or keyword arguments were passed (the method takes only self),
 * then dispatches to the implementation. Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the unsafe macro path skips the error check. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Reject any positional or keyword arguments — signature is __init__(self). */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.__init__: allocates a zeroed
 * nvmlVgpuSchedulerGetState_t on the heap, raises MemoryError on allocation
 * failure, and marks the instance as owning the buffer (_owner=None,
 * _owned=True, _readonly=False). Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":18604
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerGetState_t *>calloc(1, sizeof(nvmlVgpuSchedulerGetState_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerGetState")
 */
  /* calloc so the struct starts fully zero-initialized. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerGetState_t *)calloc(1, (sizeof(nvmlVgpuSchedulerGetState_t))));

  /* "cuda/bindings/_nvml.pyx":18605
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerGetState_t *>calloc(1, sizeof(nvmlVgpuSchedulerGetState_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerGetState")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18606
 *         self._ptr = <nvmlVgpuSchedulerGetState_t *>calloc(1, sizeof(nvmlVgpuSchedulerGetState_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerGetState")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating VgpuSchedulerGetState")
     * via the vectorcall fast path (with bound-method unpacking, as for any
     * generated call site). */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18606, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerGe};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18606, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18606, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18605
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerGetState_t *>calloc(1, sizeof(nvmlVgpuSchedulerGetState_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerGetState")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":18607
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerGetState")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None with correct refcounting. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":18608
 *             raise MemoryError("Error allocating VgpuSchedulerGetState")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":18609
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":18603
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerGetState_t *>calloc(1, sizeof(nvmlVgpuSchedulerGetState_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18611
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerGetState_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-path wrapper for VgpuSchedulerGetState.__dealloc__; forwards
 * directly to the implementation. NOTE(review): __pyx_args/__pyx_nargs are
 * not declared in this function — presumably __Pyx_KwValues_VARARGS is a
 * macro that ignores its arguments in this configuration; confirm against
 * the macro definition in the file preamble before touching this code. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuSchedulerGetState.__dealloc__ (from _nvml.pyx:18611):
 *
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerGetState_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)
 *
 * Frees the heap-allocated POD only when this wrapper owns it. The field is
 * cleared before free() so no stale pointer remains on the instance. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlVgpuSchedulerGetState_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;
    free(__pyx_v_ptr);
  }
}

/* "cuda/bindings/_nvml.pyx":18618
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerGetState object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr wrapper for VgpuSchedulerGetState.__repr__; no argument parsing is
 * needed (repr takes only self), so it forwards straight to the
 * implementation and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.__repr__: builds the f-string
 * f"<{__name__}.VgpuSchedulerGetState object at {hex(id(self))}>" by joining
 * five pre-sized unicode segments. Returns a new str reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":18619
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerGetState object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Segment 1: str(__name__) via format-with-empty-spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Segment 2: hex(id(self)), coerced to str. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join "<", name, ".VgpuSchedulerGetState object at ", hex-id, ">" with a
   * precomputed total length and max char value for a single allocation. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerGetState_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18618
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerGetState object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18621
 *         return f"<{__name__}.VgpuSchedulerGetState object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property getter wrapper for VgpuSchedulerGetState.ptr; forwards to the
 * implementation and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the raw struct
 * pointer address as a Python int (may be 0 if _ptr is NULL). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18624
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* intptr_t fits in Py_ssize_t on supported platforms, so the Ssize_t
   * constructor is used for the int conversion. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18624, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18621
 *         return f"<{__name__}.VgpuSchedulerGetState object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18626
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast path for VgpuSchedulerGetState._get_ptr (from
 * _nvml.pyx:18626): returns the raw struct pointer as an integer address.
 * No Python objects are created and the function cannot fail, so no
 * refnanny/error scaffolding is required. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":18629
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int wrapper for VgpuSchedulerGetState.__int__; forwards to the
 * implementation and returns its new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.__int__: identical contract to the
 * `ptr` property — the raw struct pointer address as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18630
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18630, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18629
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18632
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerGetState other_
 *         if not isinstance(other, VgpuSchedulerGetState):
*/

/* Python wrapper */
/* tp_richcompare(Py_EQ) wrapper for VgpuSchedulerGetState.__eq__; forwards
 * self and the comparand to the implementation unchanged. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.__eq__ (Cython-generated).
 * Returns Py_False when `other` is not a VgpuSchedulerGetState; otherwise
 * compares the two underlying nvmlVgpuSchedulerGetState_t structs bytewise
 * with memcmp and returns the resulting bool.
 * NOTE(review): bytewise comparison also includes any padding bytes inside
 * the struct -- assumes both structs were fully zero-initialized; confirm. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18634
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerGetState other_
 *         if not isinstance(other, VgpuSchedulerGetState):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* isinstance() check: exact/subclass type test against the extension type. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18635
 *         cdef VgpuSchedulerGetState other_
 *         if not isinstance(other, VgpuSchedulerGetState):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerGetState_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18634
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerGetState other_
 *         if not isinstance(other, VgpuSchedulerGetState):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18636
 *         if not isinstance(other, VgpuSchedulerGetState):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerGetState_t)) == 0)
 * 
 */
  /* Typed reassignment: raises TypeError (via __Pyx_TypeTest) on mismatch,
   * though the isinstance() guard above already filtered non-matching types. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState))))) __PYX_ERR(0, 18636, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18637
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerGetState_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Bytewise struct equality; both _ptr values are dereferenced by memcmp,
   * so both instances must wrap valid, allocated structs. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerGetState_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18632
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerGetState other_
 *         if not isinstance(other, VgpuSchedulerGetState):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18639
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerGetState_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
*/

/* Python wrapper */
/* Slot-level entry point for VgpuSchedulerGetState.__setitem__
 * (Cython-generated).  Forwards the three fixed arguments to the
 * __pyx_pf_* implementation; returns 0 on success, -1 on exception. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.__setitem__ (Cython-generated).
 * Two behaviors:
 *   - obj[0] = <numpy.ndarray>: malloc a fresh nvmlVgpuSchedulerGetState_t,
 *     memcpy the array's raw buffer (val.ctypes.data) into it, and take
 *     ownership (_owned = True, _owner = None); _readonly mirrors the
 *     array's writeable flag.
 *   - any other key: falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): the ndarray path assigns self._ptr without freeing a
 * previously owned pointer -- presumably callers only use this on fresh
 * instances; confirm against the .pyx ownership model.
 * NOTE(review): memcpy trusts that the ndarray buffer holds at least
 * sizeof(nvmlVgpuSchedulerGetState_t) bytes -- no size check is generated. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18640
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: key == 0 is evaluated first (may raise), then the
   * numpy.ndarray isinstance check via module-global lookup of `_numpy`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18640, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18640, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18640, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18640, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18641
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerGetState_t *)malloc((sizeof(nvmlVgpuSchedulerGetState_t))));

    /* "cuda/bindings/_nvml.pyx":18642
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerGetState_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18643
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerGetState_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * then called via the Cython vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18643, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerGe};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18643, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18643, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18642
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerGetState_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18644
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerGetState_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; it is
     * converted to intptr_t and copied from as a raw pointer. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18644, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18644, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18644, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerGetState_t))));

    /* "cuda/bindings/_nvml.pyx":18645
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerGetState_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The data was copied, so no owner object needs to be kept alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18646
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerGetState_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18647
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* Mirror the source array's writeable flag onto this wrapper. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18647, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18647, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18647, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18640
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18649
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat the key as an attribute name (field setter path). */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18649, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18639
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerGetState_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18651
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: """
*/

/* Python wrapper */
/* Getter-slot entry point for the `scheduler_params` property
 * (Cython-generated); delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for VgpuSchedulerGetState.scheduler_params (Cython-generated).
 * Builds a VgpuSchedulerParams view over the nested `schedulerParams` field
 * via VgpuSchedulerParams.from_ptr(<address>, self._readonly, self); passing
 * `self` as the third argument keeps this wrapper alive while the view is
 * in use (the view does not own or copy the memory). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18654
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: """
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_params.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  __Pyx_INCREF(__pyx_t_2);
  /* Field address is passed to Python as an integer (intptr_t). */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).schedulerParams))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18654, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18654, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18654, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18651
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.scheduler_params.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18656
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter-slot entry point for the `scheduler_params` property
 * (Cython-generated); delegates to the __pyx_pf_* implementation.
 * Returns 0 on success, -1 on exception. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for VgpuSchedulerGetState.scheduler_params (Cython-generated).
 * Raises ValueError when this wrapper is read-only; otherwise type-checks
 * `val` as a VgpuSchedulerParams and memcpy's one nvmlVgpuSchedulerParams_t
 * from val's backing pointer (val_._get_ptr()) into the nested
 * `schedulerParams` field of self's struct.
 * NOTE(review): a None `val` passes the generated None-or-type test and
 * would then be dereferenced via its vtable -- presumably callers never
 * assign None here; confirm against the .pyx contract. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18658
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18659
 *     def scheduler_params(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerGetState_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18659, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18659, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18658
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18660
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 */
  /* Typed assignment: TypeError (via __Pyx_TypeTest) unless val is None or
   * a VgpuSchedulerParams instance. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams))))) __PYX_ERR(0, 18660, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18661
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* _get_ptr() is a C-level vtable method returning the source address. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18661, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).schedulerParams)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuSchedulerParams_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":18656
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.scheduler_params.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18663
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter-slot entry point for the `scheduler_policy` property
 * (Cython-generated); delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for VgpuSchedulerGetState.scheduler_policy (Cython-generated).
 * Reads the unsigned-int `schedulerPolicy` field from the wrapped
 * nvmlVgpuSchedulerGetState_t and returns it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18666
 *     def scheduler_policy(self):
 *         """int: """
 *         return self._ptr[0].schedulerPolicy             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_policy.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Dereferences _ptr -- assumes the wrapper holds a valid struct pointer. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).schedulerPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18663
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.scheduler_policy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18668
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter-slot entry point for the `scheduler_policy` property
 * (Cython-generated); delegates to the __pyx_pf_* implementation.
 * Returns 0 on success, -1 on exception. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for VgpuSchedulerGetState.scheduler_policy (Cython-generated).
 * Raises ValueError when this wrapper is read-only; otherwise converts
 * `val` to unsigned int (OverflowError/TypeError on bad input, via
 * __Pyx_PyLong_As_unsigned_int) and stores it into the struct field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18670
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18671
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].schedulerPolicy = val
 * 
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerGetState_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18671, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18671, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18670
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18672
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         self._ptr[0].schedulerPolicy = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Convert first (may raise), then write through the struct pointer. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18672, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).schedulerPolicy = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18668
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.scheduler_policy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18674
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: """
*/

/* Python wrapper */
/* Getter-slot entry point for the `arr_mode` property (Cython-generated);
 * delegates to the __pyx_pf_* implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for VgpuSchedulerGetState.arr_mode (Cython-generated).
 * Reads the unsigned-int `arrMode` field from the wrapped
 * nvmlVgpuSchedulerGetState_t and returns it as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18677
 *     def arr_mode(self):
 *         """int: """
 *         return self._ptr[0].arrMode             # <<<<<<<<<<<<<<
 * 
 *     @arr_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Dereferences _ptr -- assumes the wrapper holds a valid struct pointer. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).arrMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18677, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18674
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.arr_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18679
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter-slot entry point for the `arr_mode` property (Cython-generated);
 * delegates to the __pyx_pf_* implementation.
 * Returns 0 on success, -1 on exception. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* kwvalues is unused here; the macro expands from generated globals. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation for VgpuSchedulerGetState.arr_mode.
 * Raises ValueError if the instance was created read-only; otherwise
 * converts `val` to a C `unsigned int` and stores it into the wrapped
 * struct's `arrMode` field.  Returns 0 on success, -1 with an exception
 * set on error (read-only instance or failed int conversion). */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18681
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         self._ptr[0].arrMode = val
*/
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18682
 *     def arr_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].arrMode = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerGetState_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18682, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18682, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18681
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         self._ptr[0].arrMode = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":18683
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerGetState instance is read-only")
 *         self._ptr[0].arrMode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* Convert the Python value to unsigned int, then write through to the struct. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18683, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).arrMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18679
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.arr_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18685
 *         self._ptr[0].arrMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerGetState instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * VgpuSchedulerGetState.from_data(data).  Accepts exactly one argument,
 * positionally or as the keyword `data`, then delegates to the
 * implementation function.  Returns the new wrapper object or NULL with
 * an exception set on bad arguments / inner failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_12from_data, "VgpuSchedulerGetState.from_data(data)\n\nCreate an VgpuSchedulerGetState instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_get_state_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis was misplaced as `unlikely(__pyx_kwds_len) < 0`;
     * since unlikely(x) normalizes x to 0/1, that comparison was always
     * false and a negative (error) kwargs length was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18685, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18685, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 18685, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 18685, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18685, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18685, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.from_data(data).
 * Looks up the module-level `vgpu_scheduler_get_state_dtype` global and
 * delegates to the shared `__from_data` helper together with the
 * VgpuSchedulerGetState extension type.  Returns a new reference, or
 * NULL with an exception set if the global lookup or helper fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":18692
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_get_state_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_get_state_dtype", vgpu_scheduler_get_state_dtype, VgpuSchedulerGetState)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Resolve the dtype object from module globals (new reference). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_get_state_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_get_state_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18685
 *         self._ptr[0].arrMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerGetState instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18694
 *         return __from_data(data, "vgpu_scheduler_get_state_dtype", vgpu_scheduler_get_state_dtype, VgpuSchedulerGetState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerGetState instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * VgpuSchedulerGetState.from_ptr(ptr, readonly=False, owner=None).
 * Accepts 1-3 arguments (positional or keyword), converts `ptr` to
 * intptr_t and `readonly` to a C bool, then delegates to the
 * implementation.  Returns the new wrapper object or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_14from_ptr, "VgpuSchedulerGetState.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerGetState instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis was misplaced as `unlikely(__pyx_kwds_len) < 0`;
     * since unlikely(x) normalizes x to 0/1, that comparison was always
     * false and a negative (error) kwargs length was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18694, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18694, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18694, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18694, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 18694, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":18695
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerGetState instance wrapping the given pointer.
 * 
*/
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 18694, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18694, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18694, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18694, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18695, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18695, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 18694, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":18694
 *         return __from_data(data, "vgpu_scheduler_get_state_dtype", vgpu_scheduler_get_state_dtype, VgpuSchedulerGetState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerGetState instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.from_ptr(ptr, readonly, owner).
 * Raises ValueError for a null pointer.  Ownership semantics:
 *   - owner is None: allocates a fresh nvmlVgpuSchedulerGetState_t with
 *     malloc, copies the pointed-to data in, and marks the instance as
 *     owning the allocation (_owned=1; presumably freed by the type's
 *     dealloc — confirm against tp_dealloc elsewhere in this file);
 *   - otherwise: borrows `ptr` directly and keeps a reference to `owner`
 *     to keep the underlying memory alive (_owned=0).
 * Returns the new instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":18703
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)
*/
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18704
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)
 *         if owner is None:
*/
    /* Reject null pointers early with ValueError. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18704, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18704, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18703
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)
*/
  }

  /* "cuda/bindings/_nvml.pyx":18705
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
*/
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18705, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18706
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18707
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
*/
    /* No owner: deep-copy the struct so the instance owns its own memory. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerGetState_t *)malloc((sizeof(nvmlVgpuSchedulerGetState_t))));

    /* "cuda/bindings/_nvml.pyx":18708
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerGetState_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18709
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerGetState_t))
 *             obj._owner = None
*/
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18709, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerGe};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18709, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 18709, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18708
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerGetState_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":18710
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerGetState_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerGetState_t))));

    /* "cuda/bindings/_nvml.pyx":18711
 *                 raise MemoryError("Error allocating VgpuSchedulerGetState")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerGetState_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18712
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerGetState_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18706
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerGetState obj = VgpuSchedulerGetState.__new__(VgpuSchedulerGetState)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>malloc(sizeof(nvmlVgpuSchedulerGetState_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":18714
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: alias the caller's memory and pin `owner` alive. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerGetState_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18715
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":18716
 *             obj._ptr = <nvmlVgpuSchedulerGetState_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":18717
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":18718
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18694
 *         return __from_data(data, "vgpu_scheduler_get_state_dtype", vgpu_scheduler_get_state_dtype, VgpuSchedulerGetState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerGetState instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Wrapper for VgpuSchedulerGetState.__reduce_cython__: rejects any
 * positional or keyword arguments, then delegates to the implementation
 * (which always raises TypeError — the type is not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16__reduce_cython__, "VgpuSchedulerGetState.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  /* Correctly-parenthesized kwargs-length error check (contrast with the
   * from_data/from_ptr wrappers earlier in this file). */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerGetState.__reduce_cython__: always
 * raises TypeError because the wrapped C pointer (`self._ptr`) cannot be
 * pickled.  Never returns a value (always NULL with an exception set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_18__setstate_cython__, "VgpuSchedulerGetState.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound implementation of ``VgpuSchedulerGetState.__setstate_cython__``.
 * Unpickling is deliberately unsupported (the object wraps a raw C
 * pointer); the state argument is ignored and TypeError is always raised. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  /* Raise TypeError with the interned message; there is no success path. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Record this frame in the traceback and return NULL (exception set). */
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerGetState.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18721
 * 
 * 
 * cdef _get_vgpu_scheduler_state_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerStateInfo_v1_t pod = nvmlVgpuSchedulerStateInfo_v1_t()
 *     return _numpy.dtype({
*/

/* C-level helper ``_get_vgpu_scheduler_state_info_v1_dtype_offsets``.
 * Builds and returns a NumPy structured dtype mirroring the in-memory
 * layout of ``nvmlVgpuSchedulerStateInfo_v1_t``: field names, per-field
 * NumPy formats, byte offsets computed from a stack instance ``pod``
 * (member address minus struct address), and the struct's total size as
 * ``itemsize``.  Returns a new reference, or NULL with an exception set.
 *
 * Fix: ``__pyx_t_1`` was copied into ``pod`` without ever being assigned,
 * so indeterminate bytes were read (undefined behavior; only the member
 * *addresses* of ``pod`` are used afterwards, but the copy itself was
 * still invalid).  The temporary is now value-initialized, matching the
 * Cython source ``pod = nvmlVgpuSchedulerStateInfo_v1_t()``. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_state_info_v1_dtype_offsets(void) {
  nvmlVgpuSchedulerStateInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerStateInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_state_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":18722
 * 
 * cdef _get_vgpu_scheduler_state_info_v1_dtype_offsets():
 *     cdef nvmlVgpuSchedulerStateInfo_v1_t pod = nvmlVgpuSchedulerStateInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params'],
*/
  /* Value-initialize the temporary before copying it into pod (the missing
   * construction was the bug being fixed here). */
  __pyx_t_1 = nvmlVgpuSchedulerStateInfo_v1_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":18723
 * cdef _get_vgpu_scheduler_state_info_v1_dtype_offsets():
 *     cdef nvmlVgpuSchedulerStateInfo_v1_t pod = nvmlVgpuSchedulerStateInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
*/
  /* Look up _numpy.dtype, the callable applied to the spec dict at the end. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":18724
 *     cdef nvmlVgpuSchedulerStateInfo_v1_t pod = nvmlVgpuSchedulerStateInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 *         'offsets': [
*/
  /* Build the 4-key spec dict; first key: 'names' (field name list). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 18724, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_engine_id) != (0)) __PYX_ERR(0, 18724, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_scheduler_policy) != (0)) __PYX_ERR(0, 18724, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_arr_mode) != (0)) __PYX_ERR(0, 18724, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_scheduler_params) != (0)) __PYX_ERR(0, 18724, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 18724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18725
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  /* Second key: 'formats' — four uint32 fields plus the nested
   * vgpu_scheduler_params_dtype for the union-typed last field. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = PyList_New(5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 18725, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18725, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 18725, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 18725, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 4, __pyx_t_6) != (0)) __PYX_ERR(0, 18725, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_11) < (0)) __PYX_ERR(0, 18724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* Third key: 'offsets' — each entry is the byte offset of a member of
   * pod, computed as (address of member) - (address of struct). */

  /* "cuda/bindings/_nvml.pyx":18727
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
*/
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":18728
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.engineId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18728, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":18729
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
*/
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerPolicy)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18729, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":18730
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.arrMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":18731
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerStateInfo_v1_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerParams)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":18726
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_11) != (0)) __PYX_ERR(0, 18726, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 18726, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 18726, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 18726, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 18726, __pyx_L1_error);
  __pyx_t_11 = 0;
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 18724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":18733
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerStateInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  /* Fourth key: 'itemsize' — total struct size so the dtype spans any
   * trailing padding as well. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerStateInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 18724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec) via the vectorcall fast path, unpacking a
   * bound method into (self, function) when possible. */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18723, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18721
 * 
 * 
 * cdef _get_vgpu_scheduler_state_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerStateInfo_v1_t pod = nvmlVgpuSchedulerStateInfo_v1_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release every live temporary, record the traceback frame,
   * and return NULL. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_state_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18750
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python wrapper for ``VgpuSchedulerStateInfo_v1.__init__``.
 * Rejects any positional or keyword arguments (the Cython __init__ takes
 * only self), then delegates to the bound implementation.  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positionals from the args tuple (checked variant when unsafe
   * macros are disabled). */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Any positional argument or any keyword is an error for this __init__. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound implementation of ``VgpuSchedulerStateInfo_v1.__init__``.
 * Allocates a zero-initialized nvmlVgpuSchedulerStateInfo_v1_t with
 * calloc and marks the wrapper as the owning holder (_owned = True,
 * _owner = None, _readonly = False).  Raises MemoryError if the
 * allocation fails.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":18751
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
*/
  /* calloc zero-fills the POD, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerStateInfo_v1_t *)calloc(1, (sizeof(nvmlVgpuSchedulerStateInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":18752
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18753
 *         self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
     * through the vectorcall fast path, then raise it. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18753, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSt};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18753, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18753, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18752
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":18754
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Replace the previous _owner reference with None (incref new, decref
   * old — standard Cython attribute-store sequence). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":18755
 *             raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":18756
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":18750
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18758
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerStateInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ``VgpuSchedulerStateInfo_v1.__dealloc__``; simply
 * forwards to the bound implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS is invoked with __pyx_args and
 * __pyx_nargs, which are NOT parameters of this function — this only
 * compiles if the macro discards its arguments (e.g. expands to NULL).
 * Confirm against the utility-code definition of __Pyx_KwValues_VARARGS. */
static void __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  nvmlVgpuSchedulerStateInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuSchedulerStateInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":18760
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerStateInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18761
 *         cdef nvmlVgpuSchedulerStateInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":18762
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":18763
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18760
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerStateInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":18758
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerStateInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":18765
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerStateInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
/* Python wrapper for ``VgpuSchedulerStateInfo_v1.__repr__``; forwards to
 * the bound implementation.
 * NOTE(review): as in the __dealloc__ wrapper, __Pyx_KwValues_VARARGS is
 * passed __pyx_args/__pyx_nargs which are not in scope here — valid only
 * if the macro discards its arguments; confirm against its definition. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound implementation of ``VgpuSchedulerStateInfo_v1.__repr__``.
 * Builds the f-string
 *   f"<{__name__}.VgpuSchedulerStateInfo_v1 object at {hex(id(self))}>"
 * by joining the constant fragments with the formatted module name and
 * the hex()-formatted id of self.  Returns a new unicode reference, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":18766
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerStateInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ into a str ({__name__} interpolation). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Compute hex(id(self)) and coerce it to a unicode object. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".VgpuSchedulerStateInfo_v1 object at " + hex + ">".
   * The length argument pre-sizes the result (2 constant bracket chars +
   * 37-char middle constant + the two dynamic parts); the last argument
   * is the maximum code point for the target unicode kind. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerStateInfo_v1_objec;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 37 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18766, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18765
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerStateInfo_v1 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18768
 *         return f"<{__name__}.VgpuSchedulerStateInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Python-visible wrapper for the `ptr` property getter of
 * VgpuSchedulerStateInfo_v1: casts the generic PyObject* self to the
 * extension-type struct and delegates to the pf-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * the __Pyx_KwValues_VARARGS macro presumably discards its arguments in this
   * configuration — verify against Cython's generated utility code. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerStateInfo_v1.ptr.__get__`: boxes the raw
 * struct pointer (self._ptr) as a Python int via PyLong_FromSsize_t.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18771
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address as a Python int; jumps to the error label if the
   * allocation of the int object fails. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18768
 *         return f"<{__name__}.VgpuSchedulerStateInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18773
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level implementation of the Cython `cdef intptr_t _get_ptr(self)` method
 * of VgpuSchedulerStateInfo_v1 (_nvml.pyx:18773): returns the wrapped
 * nvmlVgpuSchedulerStateInfo_v1_t pointer as an integer address.
 * Pure accessor — no Python API calls, cannot raise. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":18776
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-visible wrapper for `VgpuSchedulerStateInfo_v1.__int__`: casts the
 * generic PyObject* self to the extension-type struct and delegates to the
 * pf-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerStateInfo_v1.__int__`: same value as the
 * `ptr` property — the raw struct pointer boxed as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18777
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer address; error label handles a failed int allocation. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18777, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18776
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18779
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerStateInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerStateInfo_v1):
*/

/* Python wrapper */
/* Python-visible wrapper for `VgpuSchedulerStateInfo_v1.__eq__`: casts self
 * to the extension-type struct and forwards `other` unchanged to the
 * pf-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerStateInfo_v1.__eq__`: returns False when
 * `other` is not a VgpuSchedulerStateInfo_v1, otherwise compares the two
 * wrapped structs byte-for-byte with memcmp over
 * sizeof(nvmlVgpuSchedulerStateInfo_v1_t). Returns a new reference to a bool,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18781
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerStateInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerStateInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  /* Type gate: anything that is not a VgpuSchedulerStateInfo_v1 (including
   * None) is handled by the early `return False` below, so the memcmp path
   * only ever sees a real instance. */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18782
 *         cdef VgpuSchedulerStateInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerStateInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerStateInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18781
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerStateInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerStateInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18783
 *         if not isinstance(other, VgpuSchedulerStateInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerStateInfo_v1_t)) == 0)
 * 
 */
  /* Typed re-check from the generated `other_ = other` assignment; the cast
   * is guaranteed to succeed here because of the gate above. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1))))) __PYX_ERR(0, 18783, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18784
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerStateInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerStateInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18784, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18779
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerStateInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerStateInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18786
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerStateInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
*/

/* Python wrapper */
/* Python-visible wrapper for `VgpuSchedulerStateInfo_v1.__setitem__`: casts
 * self to the extension-type struct and forwards key/val to the pf-level
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `VgpuSchedulerStateInfo_v1.__setitem__`.
 * When key == 0 and val is a numpy.ndarray: mallocs a fresh
 * nvmlVgpuSchedulerStateInfo_v1_t, memcpys the array's data buffer into it
 * (address taken from val.ctypes.data), and marks the instance as owning the
 * allocation (_owner=None, _owned=True, _readonly mirrors the array's
 * writeable flag). Any other key is routed to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18787
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `key == 0 and isinstance(val, numpy.ndarray)`. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18787, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18787, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18788
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 */
    /* NOTE(review): the previous self->_ptr is overwritten here without being
     * freed even when _owned was true — presumably the .pyx-level ownership
     * rules make this safe, but it looks like a potential leak; verify in
     * the generator template / _nvml.pyx. */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerStateInfo_v1_t *)malloc((sizeof(nvmlVgpuSchedulerStateInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18789
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18790
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             self._owner = None
 */
      /* Build and raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
       * via the generated fast-call sequence. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18790, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSt};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18790, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18790, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18789
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18791
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* Fetch val.ctypes.data (the array's buffer address as a Python int),
     * convert it to intptr_t, and copy one struct's worth of bytes from it.
     * NOTE(review): assumes the ndarray buffer holds at least
     * sizeof(nvmlVgpuSchedulerStateInfo_v1_t) bytes — not checked here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18791, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18791, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18791, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerStateInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18792
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* self._owner = None — the copied buffer is now independent of val. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18793
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18794
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* self._readonly = not val.flags.writeable */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18794, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18794, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18794, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18787
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18796
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    /* Fallback: treat key as an attribute name. */
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18796, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18786
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerStateInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18798
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."""
*/

/* Python wrapper */
/* Python-visible wrapper for the `scheduler_params` property getter: casts
 * self to the extension-type struct and delegates to the pf-level
 * implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_params` getter: returns
 * VgpuSchedulerParams.from_ptr(<address of self._ptr[0].schedulerParams>,
 * self._readonly, self) — a view object over the embedded sub-struct, with
 * `self` passed as the owner to keep this object alive.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18801
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."""
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_params.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Assemble the three call arguments: boxed field address, boxed readonly
   * flag, and self; then fast-call the class method `from_ptr`. */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).schedulerParams))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18801, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18798
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.scheduler_params.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18803
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-visible wrapper for the `scheduler_params` property setter: casts
 * self to the extension-type struct and forwards val to the pf-level
 * implementation. Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_params` setter: raises ValueError when the
 * instance is read-only; otherwise type-checks val as VgpuSchedulerParams and
 * memcpys one nvmlVgpuSchedulerParams_t from val's buffer (obtained via the
 * vtable call val_._get_ptr()) into self._ptr[0].schedulerParams.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18805
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18806
 *     def scheduler_params(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 */
    /* Build and raise ValueError for the read-only case. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerStateInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18806, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18806, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18805
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18807
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 */
  /* Typed assignment `val_ = val`: accepts None or VgpuSchedulerParams,
   * raises TypeError (via __Pyx_TypeTest) for anything else. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams))))) __PYX_ERR(0, 18807, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18808
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* NOTE(review): if val is None, this vtable dispatch dereferences a
   * Py_None-cast pointer — presumably callers never pass None here; verify
   * against the .pyx-level contract. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18808, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).schedulerParams)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuSchedulerParams_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":18803
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.scheduler_params.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18810
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* Python-visible wrapper for the `version` property getter: casts self to the
 * extension-type struct and delegates to the pf-level implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` getter: boxes the struct's `version` field
 * (unsigned int) as a Python int. Returns a new reference, or NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18813
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18810
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18815
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` slot entry for the `version` property: casts self to the
 * extension-struct type and delegates to the generated implementation.
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
 * presumably __Pyx_KwValues_VARARGS is a macro that never evaluates its
 * arguments (expands to NULL) — confirm against Cython's utility code. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `version` property setter.  Raises ValueError
 * when the wrapper is read-only; otherwise converts `val` to a C unsigned int
 * and stores it into the wrapped struct's `version` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18817
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  /* Read-only guard: refuse mutation of instances created as read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18818
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerStateInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18818, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18818, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18817
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":18819
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Python int -> C unsigned int; OverflowError/TypeError propagate from
   * the conversion helper (sentinel (unsigned int)-1 + PyErr_Occurred). */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18819, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18815
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18821
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: IN: Engine whose software scheduler state info is fetched. One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
*/

/* Python wrapper */
/* CPython `__get__` slot entry for the `engine_id` property: casts self to
 * the extension-struct type and delegates to the generated implementation.
 * NOTE(review): __Pyx_KwValues_VARARGS presumably ignores its arguments
 * (__pyx_args/__pyx_nargs are not in scope here) — confirm in Cython. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `engine_id` property getter.  Boxes the C
 * `unsigned int` field `engineId` of the wrapped NVML struct as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18824
 *     def engine_id(self):
 *         """int: IN: Engine whose software scheduler state info is fetched. One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
 *         return self._ptr[0].engineId             # <<<<<<<<<<<<<<
 * 
 *     @engine_id.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).engineId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18824, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18821
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: IN: Engine whose software scheduler state info is fetched. One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.engine_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18826
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` slot entry for the `engine_id` property: casts self and
 * delegates to the generated implementation.  Returns 0 on success, -1 with
 * an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `engine_id` property setter.  Raises
 * ValueError for read-only instances; otherwise converts `val` to a C
 * unsigned int and stores it into the struct's `engineId` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18828
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].engineId = val
*/
  /* Read-only guard: refuse mutation of instances created as read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18829
 *     def engine_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].engineId = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerStateInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18829, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18829, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18828
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].engineId = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":18830
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].engineId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Python int -> C unsigned int; conversion errors propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18830, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).engineId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18826
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.engine_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18832
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: OUT: Scheduler policy."""
*/

/* Python wrapper */
/* CPython `__get__` slot entry for the `scheduler_policy` property: casts
 * self and delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `scheduler_policy` property getter.  Boxes
 * the C `unsigned int` field `schedulerPolicy` of the wrapped NVML struct
 * as a Python int.  Returns a new reference, or NULL on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18835
 *     def scheduler_policy(self):
 *         """int: OUT: Scheduler policy."""
 *         return self._ptr[0].schedulerPolicy             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_policy.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).schedulerPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18835, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18832
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: OUT: Scheduler policy."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.scheduler_policy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18837
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` slot entry for the `scheduler_policy` property: casts
 * self and delegates to the generated implementation.  Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `scheduler_policy` property setter.  Raises
 * ValueError for read-only instances; otherwise converts `val` to a C
 * unsigned int and stores it into the struct's `schedulerPolicy` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18839
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
*/
  /* Read-only guard: refuse mutation of instances created as read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18840
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].schedulerPolicy = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerStateInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18840, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18840, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18839
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":18841
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  /* Python int -> C unsigned int; conversion errors propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18841, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).schedulerPolicy = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18837
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.scheduler_policy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18843
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."""
*/

/* Python wrapper */
/* CPython `__get__` slot entry for the `arr_mode` property: casts self and
 * delegates to the generated implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `arr_mode` property getter.  Boxes the C
 * `unsigned int` field `arrMode` of the wrapped NVML struct as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18846
 *     def arr_mode(self):
 *         """int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."""
 *         return self._ptr[0].arrMode             # <<<<<<<<<<<<<<
 * 
 *     @arr_mode.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the C unsigned int as a Python int (new reference). */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).arrMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18843
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.arr_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18848
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython `__set__` slot entry for the `arr_mode` property: casts self and
 * delegates to the generated implementation.  Returns 0 on success, -1 with
 * an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the `arr_mode` property setter.  Raises ValueError
 * for read-only instances; otherwise converts `val` to a C unsigned int and
 * stores it into the struct's `arrMode` field.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18850
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].arrMode = val
*/
  /* Read-only guard: refuse mutation of instances created as read-only. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18851
 *     def arr_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].arrMode = val
 * 
*/
    /* Build and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerStateInfo_v1_i};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18851, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18851, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18850
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].arrMode = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":18852
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerStateInfo_v1 instance is read-only")
 *         self._ptr[0].arrMode = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* Python int -> C unsigned int; conversion errors propagate. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18852, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).arrMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":18848
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.arr_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18854
 *         self._ptr[0].arrMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the static method
 * VgpuSchedulerStateInfo_v1.from_data(data).  Unpacks exactly one
 * positional-or-keyword argument `data` and delegates to the generated
 * implementation.  Returns the new wrapper object, or NULL with an
 * exception set on bad arguments.
 *
 * FIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`.  With GCC/Clang, unlikely(x) expands to
 * __builtin_expect(!!(x), 0), which yields only 0 or 1, so the `< 0`
 * comparison could never be true and a negative (error) result from
 * __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison now sits
 * inside the hint: `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_12from_data, "VgpuSchedulerStateInfo_v1.from_data(data)\n\nCreate an VgpuSchedulerStateInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_state_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Check the whole comparison, not the branch hint, for a negative
     * (error) keyword count — see FIX note in the header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18854, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18854, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 18854, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 18854, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18854, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18854, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated body of the static method
 * VgpuSchedulerStateInfo_v1.from_data(data): looks up the module-global
 * `vgpu_scheduler_state_info_v1_dtype` and forwards to the module-level
 * __from_data helper together with the wrapper extension type.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":18861
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_state_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_state_info_v1_dtype", vgpu_scheduler_state_info_v1_dtype, VgpuSchedulerStateInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* Fetch the expected NumPy dtype object from the module namespace. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_state_info_v1_dty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18861, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Delegate validation and wrapping to the shared __from_data helper. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_state_info_v1_dty, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18861, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18854
 *         self._ptr[0].arrMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18863
 *         return __from_data(data, "vgpu_scheduler_state_info_v1_dtype", vgpu_scheduler_state_info_v1_dtype, VgpuSchedulerStateInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/*
 * NOTE(review): Cython-generated wrapper for the static method
 * VgpuSchedulerStateInfo_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks positional/keyword arguments under either the fastcall or the
 * tuple calling convention (CYTHON_METH_FASTCALL), converts them to C
 * values (intptr_t / int / PyObject*) and forwards to
 * __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr.
 * Hand-edited only to fix a misplaced parenthesis in the keyword-count
 * error check (marked BUGFIX below).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr, "VgpuSchedulerStateInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerStateInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the '< 0' comparison must sit inside unlikely().  unlikely(x)
     * expands to __builtin_expect(!!(x), 0), which yields only 0 or 1, so the
     * previous "unlikely(__pyx_kwds_len) < 0" was always false and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL went undetected.  Compare
     * the correctly parenthesised check in the __reduce_cython__ wrapper. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18863, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positional slots first, then let
       * __Pyx_ParseKeywords fill/validate the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18863, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18863, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18863, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 18863, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":18864
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given pointer.
 * 
*/
      /* Apply the default for 'owner' and verify the one required argument
       * ('ptr', slot 0) was provided. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 18863, __pyx_L3_error) }
      }
    } else {
      /* Positional-only fast path (no keyword arguments). */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18863, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18863, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18863, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* Convert Python-level values to the C signature:
     * ptr -> intptr_t, readonly -> int (truthiness), owner stays PyObject*. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18864, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18864, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 18863, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":18863
 *         return __from_data(data, "vgpu_scheduler_state_info_v1_dtype", vgpu_scheduler_state_info_v1_dtype, VgpuSchedulerStateInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerStateInfo_v1.from_ptr (argument unpacking is
 * done by the Python wrapper above).  Builds a new VgpuSchedulerStateInfo_v1
 * instance around the C struct at address `ptr`:
 *   - owner is None:  malloc a private nvmlVgpuSchedulerStateInfo_v1_t,
 *     memcpy the data from `ptr`, and mark the object as owning the copy
 *     (_owned = 1; presumably freed by the type's dealloc — not visible here).
 *   - owner given:    wrap `ptr` directly without copying (_owned = 0) and
 *     hold a reference to `owner` in obj._owner so the backing memory is
 *     expected to stay alive for the object's lifetime.
 * Raises ValueError for a null pointer and MemoryError on allocation failure.
 * `readonly` is stored verbatim in obj._readonly.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":18872
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)
*/
  /* Guard: reject a null pointer before any allocation happens. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18873
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)
 *         if owner is None:
*/
    /* Build and raise ValueError("ptr must not be null (0)") via the
     * vectorcall-style fastcall helper. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18873, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18873, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18872
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)
*/
  }

  /* "cuda/bindings/_nvml.pyx":18874
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
*/
  /* Allocate the instance via tp_new directly (no __init__ call). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18874, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":18875
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if obj._ptr == NULL:
*/
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18876
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
*/
    /* No owner: take a private, heap-allocated copy of the struct. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerStateInfo_v1_t *)malloc((sizeof(nvmlVgpuSchedulerStateInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18877
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18878
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             obj._owner = None
*/
      /* malloc failed: raise MemoryError.  Note the name is resolved as a
       * module global (allows shadowing of the builtin). */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18878, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSt};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18878, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 18878, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18877
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":18879
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct into the freshly allocated buffer. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerStateInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18880
 *                 raise MemoryError("Error allocating VgpuSchedulerStateInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18881
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18875
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerStateInfo_v1 obj = VgpuSchedulerStateInfo_v1.__new__(VgpuSchedulerStateInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerStateInfo_v1_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":18883
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /*else*/ {
    /* Owner provided: wrap the caller's memory without copying and keep a
     * reference to the owner so the pointer is not the object's to free. */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerStateInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18884
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":18885
 *             obj._ptr = <nvmlVgpuSchedulerStateInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":18886
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":18887
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return the new instance; the local reference is released at __pyx_L0. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18863
 *         return __from_data(data, "vgpu_scheduler_state_info_v1_dtype", vgpu_scheduler_state_info_v1_dtype, VgpuSchedulerStateInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/*
 * Cython-generated wrapper for VgpuSchedulerStateInfo_v1.__reduce_cython__().
 * Takes no arguments: any positional or keyword argument is rejected before
 * delegating to the implementation function (which unconditionally raises).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16__reduce_cython__, "VgpuSchedulerStateInfo_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: reject positionals, then reject keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerStateInfo_v1.__reduce_cython__: always raises
 * TypeError because the instance wraps a raw C pointer (self._ptr) that cannot
 * be meaningfully pickled.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* raise TypeError with a pre-interned message string; this path never
   * returns normally. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/*
 * NOTE(review): Cython-generated wrapper for
 * VgpuSchedulerStateInfo_v1.__setstate_cython__(self, __pyx_state).
 * Unpacks the single required argument and forwards to the implementation
 * (which unconditionally raises TypeError).  Hand-edited only to fix a
 * misplaced parenthesis in the keyword-count error check (marked BUGFIX).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_18__setstate_cython__, "VgpuSchedulerStateInfo_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the '< 0' comparison must sit inside unlikely().  unlikely(x)
     * yields only 0 or 1, so the previous "unlikely(__pyx_kwds_len) < 0" was
     * always false and a negative (error) return from
     * __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect the positional slot, then let
       * __Pyx_ParseKeywords fill/validate the rest. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerStateInfo_v1.__setstate_cython__: always
 * raises TypeError because the instance wraps a raw C pointer (self._ptr)
 * that cannot be restored from pickled state.  The __pyx_state argument is
 * accepted but never inspected.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* raise TypeError with a pre-interned message string; this path never
   * returns normally. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerStateInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18890
 * 
 * 
 * cdef _get_vgpu_scheduler_log_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLogInfo_v1_t pod = nvmlVgpuSchedulerLogInfo_v1_t()
 *     return _numpy.dtype({
*/

/*
 * Generated body of the module-level cdef function
 * _get_vgpu_scheduler_log_info_v1_dtype_offsets() (_nvml.pyx:18890).
 *
 * Builds and returns a structured numpy.dtype describing the C struct
 * nvmlVgpuSchedulerLogInfo_v1_t, via the dict form
 *   {'names': [...], 'formats': [...], 'offsets': [...], 'itemsize': N}.
 * Field byte offsets are computed from a stack instance `pod` by pointer
 * arithmetic (&pod.field - &pod); itemsize is sizeof the struct.
 * Returns a new reference on success, NULL with a Python exception set on
 * failure (all temporaries are released at __pyx_L1_error).
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_info_v1_dtype_offsets(void) {
  nvmlVgpuSchedulerLogInfo_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerLogInfo_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  size_t __pyx_t_14;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_log_info_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":18891
 * 
 * cdef _get_vgpu_scheduler_log_info_v1_dtype_offsets():
 *     cdef nvmlVgpuSchedulerLogInfo_v1_t pod = nvmlVgpuSchedulerLogInfo_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 */
  /* NOTE(review): __pyx_t_1 carries no explicit initialization here.  This
   * unit is compiled as C++ (-std=c++14), where the Cython expression
   * `nvmlVgpuSchedulerLogInfo_v1_t()` maps to default construction --
   * presumably value-initialization for this struct; confirm against the
   * struct's definition.  Harmless either way: only field *addresses* of
   * `pod` are taken below, its contents are never read. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":18892
 * cdef _get_vgpu_scheduler_log_info_v1_dtype_offsets():
 *     cdef nvmlVgpuSchedulerLogInfo_v1_t pod = nvmlVgpuSchedulerLogInfo_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 */
  /* Look up the callable `_numpy.dtype` into __pyx_t_5. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":18893
 *     cdef nvmlVgpuSchedulerLogInfo_v1_t pod = nvmlVgpuSchedulerLogInfo_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 *         'offsets': [
 */
  /* Build the argument dict (__pyx_t_4) and its 'names' list of the seven
   * Python-side field names (interned unicode objects). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_engine_id) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_scheduler_policy) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_arr_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_arr_mode) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_scheduler_params) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_entries_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_entries_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 5, __pyx_mstate_global->__pyx_n_u_entries_count) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_log_entries);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_log_entries);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 6, __pyx_mstate_global->__pyx_n_u_log_entries) != (0)) __PYX_ERR(0, 18893, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 18893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":18894
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* Build the 'formats' list: five _numpy.uint32 scalars plus the two
   * module-level nested dtypes (vgpu_scheduler_params_dtype /
   * vgpu_scheduler_log_entry_dtype) resolved from module globals. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_13 = PyList_New(7); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 4, __pyx_t_6) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_13, 6, __pyx_t_11) != (0)) __PYX_ERR(0, 18894, __pyx_L1_error);
  /* References were transferred into the list; forget the temps so the
   * error path does not double-decref them. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_6 = 0;
  __pyx_t_12 = 0;
  __pyx_t_11 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_13) < (0)) __PYX_ERR(0, 18893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;

  /* "cuda/bindings/_nvml.pyx":18896
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 */
  /* Compute each field's byte offset as the address delta from the start
   * of `pod`, boxed as a Python int. */
  __pyx_t_13 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);

  /* "cuda/bindings/_nvml.pyx":18897
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.engineId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":18898
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 */
  __pyx_t_12 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerPolicy)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 18898, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);

  /* "cuda/bindings/_nvml.pyx":18899
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.arrMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18899, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":18900
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerParams)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":18901
 *             (<intptr_t>&(pod.arrMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.entriesCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18901, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":18902
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.entriesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerLogInfo_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.logEntries)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 18902, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":18895
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'arr_mode', 'scheduler_params', 'entries_count', 'log_entries'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_params_dtype, _numpy.uint32, vgpu_scheduler_log_entry_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 */
  /* Assemble the seven offset ints into the 'offsets' list (same
   * transfer-then-forget pattern as 'formats' above). */
  __pyx_t_7 = PyList_New(7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18895, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_13);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_13) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_11) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_12);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_12) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_6) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_10) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 5, __pyx_t_9) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 6, __pyx_t_8) != (0)) __PYX_ERR(0, 18895, __pyx_L1_error);
  __pyx_t_13 = 0;
  __pyx_t_11 = 0;
  __pyx_t_12 = 0;
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 18893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":18904
 *             (<intptr_t>&(pod.logEntries)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerLogInfo_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  /* 'itemsize' pins the dtype to the full C struct size, including any
   * trailing padding the offsets alone would not capture. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerLogInfo_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18904, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 18893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Invoke numpy.dtype(<dict>) via the vectorcall fast path; if the
   * callable is a bound method, unpack it so self is passed positionally. */
  __pyx_t_14 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_14 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_14, (2-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18892, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18890
 * 
 * 
 * cdef _get_vgpu_scheduler_log_info_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLogInfo_v1_t pod = nvmlVgpuSchedulerLogInfo_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  /* Error path: release every temporary that may still hold a reference,
   * attach traceback info, and return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_log_info_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18921
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/*
 * tp_init entry point for VgpuSchedulerLogInfo_v1.__init__.  The Cython
 * signature is "def __init__(self)", so this wrapper rejects any positional
 * or keyword arguments before delegating to the typed implementation.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positional args; the safe variant also handles a failed size query. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self: reject both positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerLogInfo_v1.__init__ (_nvml.pyx:18921).
 * Allocates a zero-initialized nvmlVgpuSchedulerLogInfo_v1_t with calloc
 * and stores it in self._ptr; raises MemoryError if allocation fails.
 * On success marks the instance as the owner of the buffer
 * (_owner = None, _owned = True, _readonly = False), which directs
 * __dealloc__ below to free it.  Returns 0 on success, -1 on error.
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":18922
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 */
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerLogInfo_v1_t *)calloc(1, (sizeof(nvmlVgpuSchedulerLogInfo_v1_t))));

  /* "cuda/bindings/_nvml.pyx":18923
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":18924
 *         self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build MemoryError("Error allocating VgpuSchedulerLogInfo_v1") via the
     * vectorcall fast path and raise it.  MemoryError is looked up as a
     * module global first (it may be shadowed), falling back to builtins. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18924, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerLo_2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18924, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 18924, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18923
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":18925
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Replace the previous _owner reference with None (incref new,
   * decref old -- standard Cython attribute-store sequence). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":18926
 *             raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  /* This instance owns the calloc'd buffer; __dealloc__ will free it. */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":18927
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":18921
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18929
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerLogInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/*
 * Implementation of VgpuSchedulerLogInfo_v1.__dealloc__ (_nvml.pyx:18929).
 * Frees the wrapped nvmlVgpuSchedulerLogInfo_v1_t buffer, but only when
 * this instance owns it (_owned) and the pointer is non-NULL -- views over
 * externally owned memory are left untouched.  self._ptr is cleared before
 * free() so the object never holds a dangling pointer.
 */
static void __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  nvmlVgpuSchedulerLogInfo_v1_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlVgpuSchedulerLogInfo_v1_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":18931
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerLogInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18932
 *         cdef nvmlVgpuSchedulerLogInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    /* Stash the pointer locally so _ptr can be nulled before the free. */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":18933
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":18934
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":18931
 *     def __dealloc__(self):
 *         cdef nvmlVgpuSchedulerLogInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":18929
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerLogInfo_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":18936
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerLogInfo_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/*
 * tp_repr entry point for VgpuSchedulerLogInfo_v1.__repr__; casts self to
 * the concrete extension-struct type and forwards to the implementation.
 * (See the NOTE on the __dealloc__ wrapper above regarding the
 * __pyx_args/__pyx_nargs identifiers consumed by __Pyx_KwValues_VARARGS.)
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of VgpuSchedulerLogInfo_v1.__repr__ (_nvml.pyx:18936).
 * Evaluates the f-string
 *   f"<{__name__}.VgpuSchedulerLogInfo_v1 object at {hex(id(self))}>"
 * by formatting the module __name__, computing hex(id(self)), and joining
 * the five fragments (literal "<", name, literal " .VgpuSchedulerLogInfo_v1
 * object at ", hex string, literal ">") into one unicode object.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":18937
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerLogInfo_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* Format the module's __name__ with an empty format spec (str()). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* hex(id(self)) -> unicode address string. */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join literal and computed fragments; the length/max-char hints let the
   * helper preallocate the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerLogInfo_v1_object;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 35 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18937, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18936
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerLogInfo_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18939
 *         return f"<{__name__}.VgpuSchedulerLogInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/*
 * Property getter entry point for VgpuSchedulerLogInfo_v1.ptr; casts self
 * to the concrete extension-struct type and forwards to the implementation.
 * (See the NOTE on the __dealloc__ wrapper above regarding the
 * __pyx_args/__pyx_nargs identifiers consumed by __Pyx_KwValues_VARARGS.)
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter (pyx line 18942):
 *     return <intptr_t>(self._ptr)
 * Converts the wrapped C struct pointer to a Python int (owned reference)
 * and returns it; returns NULL with an exception set on allocation failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18942
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Pointer address boxed via PyLong_FromSsize_t (intptr_t fits Py_ssize_t here). */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18942, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18939
 *         return f"<{__name__}.VgpuSchedulerLogInfo_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18944
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level fast accessor for the wrapped struct pointer (cdef _get_ptr,
 * pyx line 18944).  Cannot fail: plain cast of self->_ptr to intptr_t,
 * no Python objects or refcounting involved. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":18945
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
 */
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18944
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18947
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerLogInfo_v1.__int__: casts self to the
 * extension-type struct and delegates to the __pyx_pf_... implementation.
 * Generated by Cython; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __int__ (pyx line 18947):
 *     return <intptr_t>(self._ptr)
 * Identical result to the `ptr` property getter: the wrapped struct pointer
 * boxed as a Python int, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":18948
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18947
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18950
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLogInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerLogInfo_v1):
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerLogInfo_v1.__eq__(self, other):
 * casts self to the extension-type struct and delegates both arguments to
 * the __pyx_pf_... implementation.  Generated by Cython; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __eq__ (pyx lines 18950-18955).  Semantics:
 *   - if `other` is not a VgpuSchedulerLogInfo_v1 instance, return False;
 *   - otherwise compare the pointed-to structs byte-wise with memcmp over
 *     sizeof(nvmlVgpuSchedulerLogInfo_v1_t) and return the Python bool.
 * Returns NULL with an exception set on error.
 * NOTE(review): the memcmp dereferences both _ptr values; presumably callers
 * never compare instances with a NULL _ptr -- confirm against the pyx class
 * invariants. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":18952
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerLogInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerLogInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":18953
 *         cdef VgpuSchedulerLogInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerLogInfo_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLogInfo_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":18952
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerLogInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerLogInfo_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":18954
 *         if not isinstance(other, VgpuSchedulerLogInfo_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLogInfo_v1_t)) == 0)
 * 
 */
  /* Redundant TypeTest after the isinstance check above (also admits None):
   * generated cast for the typed cdef assignment `other_ = other`. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1))))) __PYX_ERR(0, 18954, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":18955
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLogInfo_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerLogInfo_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18955, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18950
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLogInfo_v1 other_
 *         if not isinstance(other, VgpuSchedulerLogInfo_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18957
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLogInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
*/

/* Python wrapper */
/* Python-level wrapper for VgpuSchedulerLogInfo_v1.__setitem__(self, key, val):
 * casts self to the extension-type struct and delegates to the __pyx_pf_...
 * implementation.  Returns 0 on success, -1 on error (mp_ass_subscript
 * convention).  Generated by Cython; do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setitem__ (pyx lines 18957-18967).  Semantics:
 *   - if key == 0 and val is a numpy.ndarray: malloc a fresh
 *     nvmlVgpuSchedulerLogInfo_v1_t, raise MemoryError if allocation fails,
 *     memcpy the array's data (via val.ctypes.data) into it, then mark the
 *     instance as owning the buffer (_owner = None, _owned = True) and set
 *     _readonly from `not val.flags.writeable`;
 *   - otherwise fall through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE(review): on the ndarray path the previous self->_ptr value is
 * overwritten without a free here; presumably deallocation of a previously
 * owned buffer is handled elsewhere (e.g. __dealloc__) -- confirm in the
 * pyx source before changing this generated code. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":18958
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: `key == 0` first, then dynamic lookup of
   * numpy.ndarray and an isinstance() check on val. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18958, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 18958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":18959
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerLogInfo_v1_t *)malloc((sizeof(nvmlVgpuSchedulerLogInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18960
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":18961
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             self._owner = None
 */
      /* MemoryError is looked up as a module global (it may be shadowed),
       * called via the vectorcall fast path, then raised. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18961, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerLo_2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18961, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 18961, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":18960
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":18962
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data is the array's base address as a Python int; it is
     * unboxed to intptr_t and used as the memcpy source. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18962, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18962, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 18962, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerLogInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":18963
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":18964
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":18965
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18965, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18965, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18965, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":18958
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":18967
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 18967, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":18957
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerLogInfo_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18969
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."""
*/

/* Python wrapper */
/* Python-level getter wrapper for VgpuSchedulerLogInfo_v1.scheduler_params:
 * casts self to the extension-type struct and delegates to the __pyx_pf_...
 * implementation.  Generated by Cython; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_params` property getter (pyx line 18972):
 *     return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * Builds a view object over the embedded schedulerParams field by calling the
 * classmethod VgpuSchedulerParams.from_ptr(address, readonly, owner) via the
 * vectorcall method fast path; passing `self` as owner keeps this instance
 * alive while the view exists.  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18972
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."""
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_params.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  __Pyx_INCREF(__pyx_t_2);
  /* Address of the embedded field, boxed as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).schedulerParams))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18972, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18972, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18972, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18969
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.scheduler_params.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18974
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level setter wrapper for VgpuSchedulerLogInfo_v1.scheduler_params:
 * casts self to the extension-type struct and delegates to the __pyx_pf_...
 * implementation.  Returns 0 on success, -1 on error.  Generated by Cython;
 * do not hand-edit. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_params` property setter
 * (pyx lines 18976-18979).  Semantics:
 *   - raise ValueError if this instance is read-only;
 *   - cast val to VgpuSchedulerParams (TypeTest; None also passes the cast
 *     but would fail on the later vtable dereference);
 *   - memcpy one nvmlVgpuSchedulerParams_t from val's C pointer (obtained via
 *     its cdef _get_ptr vtable slot) into self._ptr[0].schedulerParams.
 * Returns 0 on success, -1 with an exception set on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18976
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18977
 *     def scheduler_params(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 */
    /* Builtin ValueError is called directly (no module-global lookup). */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18977, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18977, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18976
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18978
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams))))) __PYX_ERR(0, 18978, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18979
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Direct vtable call to VgpuSchedulerParams._get_ptr (cdef method). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18979, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).schedulerParams)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuSchedulerParams_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":18974
 *         return VgpuSchedulerParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.scheduler_params.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18981
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def log_entries(self):
 *         """VgpuSchedulerLogEntry: OUT: Structure to store the state and logs of a software runlist."""
*/

/* Python wrapper */
/* Python-level getter wrapper for VgpuSchedulerLogInfo_v1.log_entries:
 * casts self to the extension-type struct and delegates to the __pyx_pf_...
 * implementation.  Generated by Cython; do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `log_entries` property getter (pyx line 18984):
 *     return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)
 * Builds a view over the embedded logEntries array via the classmethod
 * VgpuSchedulerLogEntry.from_ptr(address, 200, readonly); the literal 200 is
 * the array length emitted by the binding generator.  Unlike the
 * scheduler_params getter, `self` is not passed as an owner here.
 * Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18984
 *     def log_entries(self):
 *         """VgpuSchedulerLogEntry: OUT: Structure to store the state and logs of a software runlist."""
 *         return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @log_entries.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).logEntries))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_200, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18984, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18981
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def log_entries(self):
 *         """VgpuSchedulerLogEntry: OUT: Structure to store the state and logs of a software runlist."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.log_entries.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18986
 *         return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)
 * 
 *     @log_entries.setter             # <<<<<<<<<<<<<<
 *     def log_entries(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `log_entries` property setter of
 * cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.  Adapts CPython's untyped
 * descriptor `__set__` slot signature (PyObject *self, PyObject *value) to
 * the typed implementation function; returns 0 on success, -1 on error,
 * per the setter-slot contract. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are resolved inside Cython's
   * __Pyx_KwValues_VARARGS macro machinery, not via parameters here —
   * presumably a no-op for this call shape; confirm against Cython 3.2
   * utility code before touching. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* Downcast to the extension-type struct and delegate to the impl. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `log_entries` property setter
 * (cuda/bindings/_nvml.pyx:18986-18993).  Python-level behavior:
 *   1. raise ValueError if this wrapper is read-only;
 *   2. cast `val` to VgpuSchedulerLogEntry (Cython cast also accepts None;
 *      None would then fail at the len() call below, not at memcpy);
 *   3. raise ValueError unless len(val) == 200;
 *   4. memcpy 200 nvmlVgpuSchedulerLogEntry_t elements from val's backing
 *      storage into self._ptr[0].logEntries.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":18988
 *     @log_entries.setter
 *     def log_entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 */
  /* Reject writes when the wrapper was constructed over read-only memory. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":18989
 *     def log_entries(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:
 */
    /* Vectorcall ValueError(msg); offset-by-one arg layout per
     * __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18989, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18989, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18988
 *     @log_entries.setter
 *     def log_entries(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":18990
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 200:
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 */
  /* Typed cdef assignment: TypeTest raises TypeError on a non-matching,
   * non-None object.  val_ holds an owned reference until function exit. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry))))) __PYX_ERR(0, 18990, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":18991
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 */
  /* 0xC8 == 200, the fixed length of the logEntries array. */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18991, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 0xC8);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":18992
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 * 
 */
    /* Build the f-string message: static prefix + decimal-formatted len. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18992, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 18992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_200_for_field_lo, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 18992, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18992, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 18992, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":18991
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         cdef VgpuSchedulerLogEntry val_ = val
 *         if len(val) != 200:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 */
  }

  /* "cuda/bindings/_nvml.pyx":18993
 *         if len(val) != 200:
 *             raise ValueError(f"Expected length 200 for field log_entries, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Bulk-copy the 200-element array from the source wrapper's backing
   * pointer (vtable call _get_ptr) into this struct's logEntries field. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 18993, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).logEntries)), ((void *)__pyx_t_8), ((sizeof(nvmlVgpuSchedulerLogEntry_t)) * 0xC8)));

  /* "cuda/bindings/_nvml.pyx":18986
 *         return VgpuSchedulerLogEntry.from_ptr(<intptr_t>&(self._ptr[0].logEntries), 200, self._readonly)
 * 
 *     @log_entries.setter             # <<<<<<<<<<<<<<
 *     def log_entries(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  /* Error path: release any live temporaries, record traceback, return -1. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.log_entries.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  /* Drop the owned reference taken by the cdef cast above (NULL-safe). */
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":18995
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper for the `version` property getter: adapts the descriptor
 * `__get__` slot (PyObject *self) to the typed implementation below.
 * Returns a new reference on success, NULL with an exception on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property getter
 * (cuda/bindings/_nvml.pyx:18995-18998): boxes the C `unsigned int`
 * field self._ptr[0].version into a new Python int reference. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":18998
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Conversion may only fail on allocation error; no range issues for
   * an unsigned source value. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":18995
 *         memcpy(<void *>&(self._ptr[0].logEntries), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerLogEntry_t) * 200)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19000
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `version` property setter: descriptor `__set__`
 * slot adapter; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `version` property setter
 * (cuda/bindings/_nvml.pyx:19000-19004): raises ValueError on a read-only
 * wrapper, otherwise converts `val` to unsigned int (OverflowError /
 * TypeError possible from the conversion helper) and stores it into
 * self._ptr[0].version.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19002
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  /* Reject writes when the wrapper was constructed over read-only memory. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19003
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Vectorcall ValueError(msg) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19003, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19003, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19002
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19004
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned)-1 is a valid value, so PyErr_Occurred() disambiguates
   * a real conversion failure from a legitimate UINT_MAX result. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19004, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19000
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19006
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: IN: Engine whose software runlist log entries are fetched. One of One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
*/

/* Python wrapper for the `engine_id` property getter: descriptor
 * `__get__` slot adapter; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `engine_id` property getter
 * (cuda/bindings/_nvml.pyx:19006-19009): boxes the C `unsigned int`
 * field self._ptr[0].engineId into a new Python int reference. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19009
 *     def engine_id(self):
 *         """int: IN: Engine whose software runlist log entries are fetched. One of One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
 *         return self._ptr[0].engineId             # <<<<<<<<<<<<<<
 * 
 *     @engine_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned C field; failure here means allocation error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).engineId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19009, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19006
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: IN: Engine whose software runlist log entries are fetched. One of One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.engine_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19011
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `engine_id` property setter: descriptor
 * `__set__` slot adapter; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `engine_id` property setter
 * (cuda/bindings/_nvml.pyx:19011-19015): raises ValueError on a read-only
 * wrapper, otherwise converts `val` to unsigned int and stores it into
 * self._ptr[0].engineId.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19013
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].engineId = val
 */
  /* Reject writes when the wrapper was constructed over read-only memory. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19014
 *     def engine_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].engineId = val
 * 
 */
    /* Vectorcall ValueError(msg) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19014, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19014, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19013
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].engineId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19015
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].engineId = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned)-1 may be a legitimate value, hence the PyErr_Occurred check. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19015, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).engineId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19011
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.engine_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19017
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: OUT: Scheduler policy."""
*/

/* Python wrapper for the `scheduler_policy` property getter: descriptor
 * `__get__` slot adapter; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_policy` property getter
 * (cuda/bindings/_nvml.pyx:19017-19020): boxes the C `unsigned int`
 * field self._ptr[0].schedulerPolicy into a new Python int reference. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19020
 *     def scheduler_policy(self):
 *         """int: OUT: Scheduler policy."""
 *         return self._ptr[0].schedulerPolicy             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_policy.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned C field; failure here means allocation error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).schedulerPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19017
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: OUT: Scheduler policy."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.scheduler_policy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19022
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `scheduler_policy` property setter: descriptor
 * `__set__` slot adapter; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_policy` property setter
 * (cuda/bindings/_nvml.pyx:19022-19026): raises ValueError on a read-only
 * wrapper, otherwise converts `val` to unsigned int and stores it into
 * self._ptr[0].schedulerPolicy.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19024
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  /* Reject writes when the wrapper was constructed over read-only memory. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19025
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].schedulerPolicy = val
 * 
 */
    /* Vectorcall ValueError(msg) and raise it. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19025, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19025, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19024
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19026
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned)-1 may be a legitimate value, hence the PyErr_Occurred check. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19026, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).schedulerPolicy = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19022
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.scheduler_policy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19028
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."""
*/

/* Python wrapper for the `arr_mode` property getter: descriptor
 * `__get__` slot adapter; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `arr_mode` property getter
 * (cuda/bindings/_nvml.pyx:19028-19031): boxes the C `unsigned int`
 * field self._ptr[0].arrMode into a new Python int reference. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19031
 *     def arr_mode(self):
 *         """int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."""
 *         return self._ptr[0].arrMode             # <<<<<<<<<<<<<<
 * 
 *     @arr_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the unsigned C field; failure here means allocation error. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).arrMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19031, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19028
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def arr_mode(self):
 *         """int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.arr_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19033
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper for the `arr_mode` property setter: descriptor
 * `__set__` slot adapter; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.arr_mode.__set__ (generated from
 * _nvml.pyx:19033-19037): raises ValueError when the instance is read-only,
 * otherwise converts `val` to unsigned int and stores it in the wrapped
 * C struct field `arrMode`.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19035
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].arrMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19036
 *     def arr_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].arrMode = val
 * 
 */
    /* vectorcall-style construction of ValueError(msg), then raise it */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19036, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19036, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19035
 *     @arr_mode.setter
 *     def arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].arrMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19037
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].arrMode = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* convert the Python value to unsigned int; (unsigned)-1 doubles as the
   * error sentinel, so PyErr_Occurred() disambiguates a legitimate value */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19037, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).arrMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19033
 *         return self._ptr[0].arrMode
 * 
 *     @arr_mode.setter             # <<<<<<<<<<<<<<
 *     def arr_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.arr_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19039
 *         self._ptr[0].arrMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def entries_count(self):
 *         """int: OUT: Count of log entries fetched."""
*/

/* Python wrapper */
/* descriptor-style __get__ wrapper for VgpuSchedulerLogInfo_v1.entries_count:
 * casts the raw PyObject* self to the extension-type struct and delegates
 * to the typed implementation; returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* delegate to the implementation with self downcast to the concrete struct */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.entries_count.__get__ (generated
 * from _nvml.pyx:19039-19042): reads the unsigned int `entriesCount` field of
 * the wrapped C struct and boxes it as a Python int.  Returns a new reference,
 * or NULL with an exception set on conversion failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19042
 *     def entries_count(self):
 *         """int: OUT: Count of log entries fetched."""
 *         return self._ptr[0].entriesCount             # <<<<<<<<<<<<<<
 * 
 *     @entries_count.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* box the C unsigned int as a Python int */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).entriesCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19042, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19039
 *         self._ptr[0].arrMode = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def entries_count(self):
 *         """int: OUT: Count of log entries fetched."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.entries_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19044
 *         return self._ptr[0].entriesCount
 * 
 *     @entries_count.setter             # <<<<<<<<<<<<<<
 *     def entries_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* descriptor-style __set__ wrapper for VgpuSchedulerLogInfo_v1.entries_count:
 * casts the raw PyObject* self to the extension-type struct and delegates
 * to the typed implementation; returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* delegate to the implementation with self downcast to the concrete struct */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.entries_count.__set__ (generated
 * from _nvml.pyx:19044-19048): raises ValueError when the instance is
 * read-only, otherwise converts `val` to unsigned int and stores it in the
 * wrapped C struct field `entriesCount`.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19046
 *     @entries_count.setter
 *     def entries_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].entriesCount = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19047
 *     def entries_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].entriesCount = val
 * 
 */
    /* vectorcall-style construction of ValueError(msg), then raise it */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerLogInfo_v1_ins};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19047, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19047, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19046
 *     @entries_count.setter
 *     def entries_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].entriesCount = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19048
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerLogInfo_v1 instance is read-only")
 *         self._ptr[0].entriesCount = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* convert the Python value to unsigned int; (unsigned)-1 doubles as the
   * error sentinel, so PyErr_Occurred() disambiguates a legitimate value */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19048, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).entriesCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19044
 *         return self._ptr[0].entriesCount
 * 
 *     @entries_count.setter             # <<<<<<<<<<<<<<
 *     def entries_count(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.entries_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19050
 *         self._ptr[0].entriesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * VgpuSchedulerLogInfo_v1.from_data(data): parses the single positional or
 * keyword argument `data` and delegates to the typed implementation.
 * Returns a new reference, or NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_12from_data, "VgpuSchedulerLogInfo_v1.from_data(data)\n\nCreate an VgpuSchedulerLogInfo_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_log_info_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0) which yields 0/1, so the previous form
     * `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19050, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* keyword (or mixed) call: collect positionals, then parse keywords */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19050, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 19050, __pyx_L3_error)
      /* ensure the required argument was supplied one way or the other */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 19050, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* fast path: exactly one positional argument, no keywords */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19050, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19050, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.from_data (generated from
 * _nvml.pyx:19050-19057): looks up the module-level dtype object
 * `vgpu_scheduler_log_info_v1_dtype` and forwards to the shared helper
 * __from_data together with the wrapping extension type.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":19057
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_log_info_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_log_info_v1_dtype", vgpu_scheduler_log_info_v1_dtype, VgpuSchedulerLogInfo_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* fetch the dtype object from the module namespace */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_info_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19057, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_info_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19057, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19050
 *         self._ptr[0].entriesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19059
 *         return __from_data(data, "vgpu_scheduler_log_info_v1_dtype", vgpu_scheduler_log_info_v1_dtype, VgpuSchedulerLogInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS wrapper for the static method
 * VgpuSchedulerLogInfo_v1.from_ptr(ptr, readonly=False, owner=None):
 * parses up to three positional/keyword arguments, converts `ptr` to
 * intptr_t and `readonly` to a C bool, defaults `owner` to None, and
 * delegates to the typed implementation.  Returns a new reference, or
 * NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_14from_ptr, "VgpuSchedulerLogInfo_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerLogInfo_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x)` expands to
     * __builtin_expect(!!(x), 0) which yields 0/1, so the previous form
     * `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19059, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* keyword (or mixed) call: collect positionals, then parse keywords */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19059, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19059, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19059, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 19059, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":19060
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given pointer.
 * 
 */
      /* apply default owner=None, then verify the required `ptr` was given */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 19059, __pyx_L3_error) }
      }
    } else {
      /* positional-only fast path: 1 to 3 positional arguments */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19059, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19059, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19059, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* convert ptr -> intptr_t; (intptr_t)-1 doubles as the error sentinel,
     * so PyErr_Occurred() disambiguates a legitimate -1 value */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19060, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19060, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 19059, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":19059
 *         return __from_data(data, "vgpu_scheduler_log_info_v1_dtype", vgpu_scheduler_log_info_v1_dtype, VgpuSchedulerLogInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.from_ptr (generated from
 * _nvml.pyx:19059-19083): rejects NULL pointers, then either copies the
 * pointed-to struct into a freshly malloc'd buffer (owner is None: the new
 * object owns and will free the copy) or aliases the caller's memory while
 * holding a reference to `owner` to keep it alive.  Returns a new
 * VgpuSchedulerLogInfo_v1 reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":19068
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19069
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)
 *         if owner is None:
 */
    /* vectorcall-style construction of ValueError(msg), then raise it */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19069, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19069, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19068
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":19070
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 */
  /* allocate the wrapper via tp_new, bypassing __init__ */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19070, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":19071
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19072
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 */
    /* no owner: deep-copy the struct so the wrapper has its own lifetime */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerLogInfo_v1_t *)malloc((sizeof(nvmlVgpuSchedulerLogInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":19073
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19074
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             obj._owner = None
 */
      /* MemoryError is looked up by name in the module namespace, hence the
       * bound-method unpacking below before the vectorcall */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19074, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerLo_2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19074, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 19074, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19073
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":19075
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerLogInfo_v1_t))));

    /* "cuda/bindings/_nvml.pyx":19076
 *                 raise MemoryError("Error allocating VgpuSchedulerLogInfo_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19077
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>ptr
 */
    /* _owned = True: the dealloc path will free() the copied buffer */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19071
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerLogInfo_v1 obj = VgpuSchedulerLogInfo_v1.__new__(VgpuSchedulerLogInfo_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>malloc(sizeof(nvmlVgpuSchedulerLogInfo_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":19079
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* owner provided: alias the caller's memory and keep `owner` alive */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerLogInfo_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":19080
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":19081
 *             obj._ptr = <nvmlVgpuSchedulerLogInfo_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":19082
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":19083
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19059
 *         return __from_data(data, "vgpu_scheduler_log_info_v1_dtype", vgpu_scheduler_log_info_v1_dtype, VgpuSchedulerLogInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* CPython entry point for VgpuSchedulerLogInfo_v1.__reduce_cython__.
 * Verifies that no positional or keyword arguments were supplied and then
 * delegates to the implementation function
 * (__pyx_pf_..._16__reduce_cython__).  The FASTCALL vs. tuple-based calling
 * convention is selected at compile time via CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16__reduce_cython__, "VgpuSchedulerLogInfo_v1.__reduce_cython__(self)");
/* Method-table entry binding the wrapper above (plus its docstring) under the
 * name "__reduce_cython__". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; recover the argument count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__(self) takes no extra arguments: reject positionals... */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  /* ...and reject any keywords. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.__reduce_cython__.
 * The instance wraps a raw struct pointer that cannot be serialized, so this
 * unconditionally raises TypeError ("self._ptr cannot be converted to a
 * Python object for pickling") and always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with the interned message string, then jump to the
   * error label — this function never returns a value. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for VgpuSchedulerLogInfo_v1.__setstate_cython__(self,
 * __pyx_state).  Unpacks exactly one argument (positional or the keyword
 * "__pyx_state"), then delegates to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_18__setstate_cython__, "VgpuSchedulerLogInfo_v1.__setstate_cython__(self, __pyx_state)");
/* Method-table entry binding the wrapper above under "__setstate_cython__". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; recover the argument count from it. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only __pyx_kwds_len here, so "< 0" is
     * applied to the 0/1 result of the branch-hint macro and this check can
     * never fire (compare the correctly parenthesized form used in the
     * zero-argument wrappers).  Harmless in practice — the kwargs count
     * cannot be negative on this path — but a known codegen quirk; confirm
     * against upstream Cython if this file is ever regenerated. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: stage positionals into values[], then
       * let __Pyx_ParseKeywords fill the rest and detect duplicates. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Any slot still empty after keyword parsing is a missing required
       * argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Common fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any staged references and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerLogInfo_v1.__setstate_cython__.
 * Mirrors __reduce_cython__: unpickling is unsupported because the instance
 * wraps a raw struct pointer, so this unconditionally raises TypeError and
 * always returns NULL.  The __pyx_state argument is received but never used. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with the interned message string and jump to the error
   * label — this function never returns a value. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerLogInfo_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19086
 * 
 * 
 * cdef _get_vgpu_scheduler_state_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerState_v1_t pod = nvmlVgpuSchedulerState_v1_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing the in-memory
 * layout of nvmlVgpuSchedulerState_v1_t.  The dict passed to _numpy.dtype
 * carries four keys: 'names' (the Python-level field names), 'formats'
 * (numpy.uint32 for the first four fields plus the nested
 * vgpu_scheduler_set_params_dtype), 'offsets' (byte offsets computed from
 * the addresses of a stack instance's members), and 'itemsize'
 * (sizeof the C struct).  Returns the new dtype object, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_state_v1_dtype_offsets(void) {
  nvmlVgpuSchedulerState_v1_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlVgpuSchedulerState_v1_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  size_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_vgpu_scheduler_state_v1_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":19087
 * 
 * cdef _get_vgpu_scheduler_state_v1_dtype_offsets():
 *     cdef nvmlVgpuSchedulerState_v1_t pod = nvmlVgpuSchedulerState_v1_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'enable_arr_mode', 'scheduler_params'],
 */
  /* NOTE(review): __pyx_t_1 is never assigned before this copy within the
   * visible chunk.  Only the *addresses* of pod's members are used below
   * (for offset arithmetic), so the indeterminate contents are never read,
   * but confirm against the unabridged generated file that the
   * value-initialization call was not dropped. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":19088
 * cdef _get_vgpu_scheduler_state_v1_dtype_offsets():
 *     cdef nvmlVgpuSchedulerState_v1_t pod = nvmlVgpuSchedulerState_v1_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'enable_arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_set_params_dtype],
 */
  /* Look up _numpy.dtype once; __pyx_t_5 holds the callable. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19088, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19088, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":19089
 *     cdef nvmlVgpuSchedulerState_v1_t pod = nvmlVgpuSchedulerState_v1_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'enable_arr_mode', 'scheduler_params'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_set_params_dtype],
 *         'offsets': [
 */
  /* __pyx_t_4 becomes the spec dict (presized for its 4 keys); __pyx_t_6 is
   * the 'names' list of interned field-name strings. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19089, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19089, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 19089, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_engine_id);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_engine_id) != (0)) __PYX_ERR(0, 19089, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_policy);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_scheduler_policy) != (0)) __PYX_ERR(0, 19089, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_enable_arr_mode);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_enable_arr_mode);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 3, __pyx_mstate_global->__pyx_n_u_enable_arr_mode) != (0)) __PYX_ERR(0, 19089, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_scheduler_params);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 4, __pyx_mstate_global->__pyx_n_u_scheduler_params) != (0)) __PYX_ERR(0, 19089, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 19089, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":19090
 *     return _numpy.dtype({
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'enable_arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_set_params_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 */
  /* 'formats' list: _numpy.uint32 is looked up afresh for each of the four
   * scalar fields (t_7..t_10); t_6 is the module-level nested dtype for the
   * schedulerParams member. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_set_params_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_11 = PyList_New(5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 19090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 19090, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 19090, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 2, __pyx_t_9) != (0)) __PYX_ERR(0, 19090, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 3, __pyx_t_10) != (0)) __PYX_ERR(0, 19090, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 4, __pyx_t_6) != (0)) __PYX_ERR(0, 19090, __pyx_L1_error);
  /* Ownership of t_6..t_10 moved into the list; clear the temporaries. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_11) < (0)) __PYX_ERR(0, 19089, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":19092
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_set_params_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 */
  /* Each offset is member-address minus struct base address (an offsetof
   * equivalent computed on the stack instance). */
  __pyx_t_11 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 19092, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);

  /* "cuda/bindings/_nvml.pyx":19093
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.enableARRMode)) - (<intptr_t>&pod),
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.engineId)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":19094
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.enableARRMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 */
  __pyx_t_10 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerPolicy)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 19094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);

  /* "cuda/bindings/_nvml.pyx":19095
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.enableARRMode)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.enableARRMode)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 19095, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":19096
 *             (<intptr_t>&(pod.schedulerPolicy)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.enableARRMode)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerState_v1_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.schedulerParams)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 19096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":19091
 *         'names': ['version', 'engine_id', 'scheduler_policy', 'enable_arr_mode', 'scheduler_params'],
 *         'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, vgpu_scheduler_set_params_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.engineId)) - (<intptr_t>&pod),
 */
  /* Assemble the 'offsets' list from the five ints created above. */
  __pyx_t_7 = PyList_New(5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19091, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_11);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_11) != (0)) __PYX_ERR(0, 19091, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 19091, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_10);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 19091, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 3, __pyx_t_9) != (0)) __PYX_ERR(0, 19091, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 4, __pyx_t_8) != (0)) __PYX_ERR(0, 19091, __pyx_L1_error);
  __pyx_t_11 = 0;
  __pyx_t_6 = 0;
  __pyx_t_10 = 0;
  __pyx_t_9 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 19089, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":19098
 *             (<intptr_t>&(pod.schedulerParams)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlVgpuSchedulerState_v1_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerState_v1_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19098, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 19089, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec).  If the callable is a bound method, unpack it so
   * self becomes the first vectorcall argument (__pyx_t_12 selects the arg
   * window). */
  __pyx_t_12 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_12 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19088, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19086
 * 
 * 
 * cdef _get_vgpu_scheduler_state_v1_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerState_v1_t pod = nvmlVgpuSchedulerState_v1_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release every temporary that may still hold a reference. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_vgpu_scheduler_state_v1_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19115
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerState_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerState_v1_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init slot for VgpuSchedulerState_v1.  Rejects any positional or keyword
 * arguments (the Cython __init__ takes only self), then delegates to the
 * implementation below.  Returns 0 on success, -1 with an exception set on
 * failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* tp_init always receives an args tuple; count its entries. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__(self) takes no arguments: reject positionals and keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.__init__.
 * Allocates a zero-filled nvmlVgpuSchedulerState_v1_t with calloc and stores
 * it in self._ptr; raises MemoryError if the allocation fails.  On success
 * the instance owns the allocation (_owned = True, freed by __dealloc__),
 * has no external owner (_owner = None) and is writable (_readonly = False).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":19116
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerState_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerState_v1_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerState_v1")
 */
  /* calloc zero-initializes the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlVgpuSchedulerState_v1_t *)calloc(1, (sizeof(nvmlVgpuSchedulerState_v1_t))));

  /* "cuda/bindings/_nvml.pyx":19117
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerState_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerState_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19118
 *         self._ptr = <nvmlVgpuSchedulerState_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerState_v1_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerState_v1")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Build and raise MemoryError("Error allocating VgpuSchedulerState_v1").
     * MemoryError is resolved as a module global; the bound-method unpack
     * below is standard Cython vectorcall setup. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19118, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSt_2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19118, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19118, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19117
 *     def __init__(self):
 *         self._ptr = <nvmlVgpuSchedulerState_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerState_v1_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":19119
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the old _owner reference for None (incref new, decref old). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":19120
 *             raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":19121
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":19115
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlVgpuSchedulerState_v1_t *>calloc(1, sizeof(nvmlVgpuSchedulerState_v1_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19123
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlVgpuSchedulerState_v1_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for VgpuSchedulerState_v1.__dealloc__: sets up
 * refnanny bookkeeping and forwards to the implementation.  __dealloc__
 * cannot fail, so there is no return value. */
static void __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args and __pyx_nargs are not parameters of this
   * function, so this line only compiles if __Pyx_KwValues_VARARGS is a
   * macro that discards its arguments unevaluated — confirm against the
   * macro definition earlier in the generated file if this ever fails to
   * build. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of VgpuSchedulerState_v1.__dealloc__
 * ("cuda/bindings/_nvml.pyx":19123).
 *
 * Releases the heap buffer backing this wrapper, but only when this
 * instance owns it (_owned) and the pointer is non-NULL.  The pointer
 * field is cleared BEFORE free() so no stale value remains observable
 * during teardown, matching the original generated statement order. */
static void __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  /* Equivalent to: if self._owned and self._ptr != NULL */
  if (__pyx_v_self->_owned && (__pyx_v_self->_ptr != NULL)) {
    nvmlVgpuSchedulerState_v1_t *__pyx_v_ptr = __pyx_v_self->_ptr;
    __pyx_v_self->_ptr = NULL;   /* clear first ... */
    free(__pyx_v_ptr);           /* ... then release */
  }
}

/* "cuda/bindings/_nvml.pyx":19130
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerState_v1 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot entry for VgpuSchedulerState_v1.__repr__: casts self to the
 * concrete struct type and forwards to the implementation, returning its
 * new reference (or NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.__repr__: builds the f-string
 * f"<{__name__}.VgpuSchedulerState_v1 object at {hex(id(self))}>" by
 * formatting the module's __name__ and hex(id(self)) and joining five
 * unicode fragments.  Returns a new unicode reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];  /* fragment array passed to the unicode join */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":19131
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.VgpuSchedulerState_v1 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t1 = __name__ (module global), then format it as str */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t1 = hex(id(self)), coerced to unicode */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + __name__ + ".VgpuSchedulerState_v1 object at " + hex + ">" */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_VgpuSchedulerState_v1_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 33 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19130
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.VgpuSchedulerState_v1 object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19133
 *         return f"<{__name__}.VgpuSchedulerState_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter-slot entry for the `ptr` property: casts self and forwards to the
 * implementation; returns a new int reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `ptr` property getter: returns the wrapped struct
 * pointer's address as a Python int (built via PyLong_FromSsize_t on the
 * intptr_t cast of self._ptr). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19136
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19133
 *         return f"<{__name__}.VgpuSchedulerState_v1 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19138
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Implementation of `cdef intptr_t _get_ptr(self)`
 * ("cuda/bindings/_nvml.pyx":19138).
 *
 * C-level accessor: exposes the wrapped struct pointer as an integer
 * address, with no Python-object traffic.  Cannot fail. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":19141
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot entry for VgpuSchedulerState_v1.__int__: casts self and
 * forwards to the implementation; returns a new int reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.__int__: identical contract to
 * the `ptr` getter — returns the wrapped pointer's address as a Python
 * int via PyLong_FromSsize_t. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":19142
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19141
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19144
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerState_v1 other_
 *         if not isinstance(other, VgpuSchedulerState_v1):
*/

/* Python wrapper */
/* Rich-compare entry for VgpuSchedulerState_v1.__eq__: casts self and
 * forwards self/other to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.__eq__:
 * returns False if `other` is not a VgpuSchedulerState_v1; otherwise
 * compares the two underlying nvmlVgpuSchedulerState_v1_t structs
 * byte-for-byte with memcmp and returns the resulting bool. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":19146
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerState_v1 other_
 *         if not isinstance(other, VgpuSchedulerState_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":19147
 *         cdef VgpuSchedulerState_v1 other_
 *         if not isinstance(other, VgpuSchedulerState_v1):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerState_v1_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":19146
 *     def __eq__(self, other):
 *         cdef VgpuSchedulerState_v1 other_
 *         if not isinstance(other, VgpuSchedulerState_v1):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":19148
 *         if not isinstance(other, VgpuSchedulerState_v1):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerState_v1_t)) == 0)
 * 
 */
  /* Downcast: the TypeTest re-checks the declared cdef type (None or exact). */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1))))) __PYX_ERR(0, 19148, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":19149
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerState_v1_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlVgpuSchedulerState_v1_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19144
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerState_v1 other_
 *         if not isinstance(other, VgpuSchedulerState_v1):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19151
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlVgpuSchedulerState_v1_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
*/

/* Python wrapper */
/* mp_ass_subscript entry for VgpuSchedulerState_v1.__setitem__: casts self
 * and forwards key/val to the implementation; returns 0 on success, -1 on
 * error with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.__setitem__:
 *   obj[0] = ndarray  -> allocate a fresh struct buffer and memcpy the
 *                        array's bytes into it (takes ownership, read-only
 *                        iff the array is not writeable);
 *   anything else     -> setattr(self, key, val).
 *
 * BUGFIX(review): the original generated code overwrote self->_ptr with a
 * fresh malloc without releasing the buffer that __init__ calloc'd and
 * marked _owned, leaking sizeof(nvmlVgpuSchedulerState_v1_t) bytes on
 * every `obj[0] = ndarray` assignment.  The previously-owned buffer is now
 * freed first.  (The root fix belongs in the generating .pyx, line 19153.)
 */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":19152
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if self._ptr == NULL:
 */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 19152, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 19152, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* BUGFIX(review): release the buffer we currently own before
     * replacing it, so repeated `obj[0] = ndarray` assignments (and the
     * initial calloc from __init__) do not leak. */
    if (__pyx_v_self->_owned && __pyx_v_self->_ptr != NULL) {
      free(__pyx_v_self->_ptr);
      __pyx_v_self->_ptr = NULL;
    }

    /* "cuda/bindings/_nvml.pyx":19153
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 */
    __pyx_v_self->_ptr = ((nvmlVgpuSchedulerState_v1_t *)malloc((sizeof(nvmlVgpuSchedulerState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":19154
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerState_v1_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19155
 *             self._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerState_v1_t))
 *             self._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19155, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSt_2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19155, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 19155, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19154 (end of NULL check) */
    }

    /* "cuda/bindings/_nvml.pyx":19156
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerState_v1_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19156, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19156, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19156, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlVgpuSchedulerState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":19157
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlVgpuSchedulerState_v1_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19158 -- self._owned = True */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19159
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 19159, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":19161
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 19161, __pyx_L1_error)
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19163
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerSetParams: IN: vGPU Scheduler Parameters."""
*/

/* Python wrapper */
/* Getter-slot entry for the `scheduler_params` property: casts self and
 * forwards to the implementation; returns a new reference or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the `scheduler_params` property getter: wraps the
 * embedded schedulerParams field by calling
 * VgpuSchedulerSetParams.from_ptr(address, self._readonly, self) — the
 * returned view borrows this object's memory, with `self` passed as the
 * owner argument to keep it alive. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19166
 *     def scheduler_params(self):
 *         """VgpuSchedulerSetParams: IN: vGPU Scheduler Parameters."""
 *         return VgpuSchedulerSetParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @scheduler_params.setter
 */
  __Pyx_XDECREF(__pyx_r);
  /* Args: (type-as-self, field address as int, readonly flag, owner). */
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).schedulerParams))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19166, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19163
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_params(self):
 *         """VgpuSchedulerSetParams: IN: vGPU Scheduler Parameters."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.scheduler_params.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19168
 *         return VgpuSchedulerSetParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 * 
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Setter-slot entry for the `scheduler_params` property: casts self and
 * forwards the new value to the implementation; returns 0 on success,
 * -1 on error with an exception set. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the scheduler_params setter.  Mirrors the Cython
 * source shown in the interleaved comments: rejects writes on read-only
 * instances, type-checks `val` as a VgpuSchedulerSetParams wrapper, then
 * memcpy's the wrapped nvmlVgpuSchedulerSetParams_t payload into this
 * struct's embedded `schedulerParams` field. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19170
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         cdef VgpuSchedulerSetParams val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19171
 *     def scheduler_params(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerSetParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerSetParams_t) * 1)
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerState_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19171, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19171, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19170
 *     @scheduler_params.setter
 *     def scheduler_params(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         cdef VgpuSchedulerSetParams val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19172
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         cdef VgpuSchedulerSetParams val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerSetParams_t) * 1)
 *
 */
  /* TypeError (via __Pyx_TypeTest) unless val is None or a VgpuSchedulerSetParams.
   * NOTE(review): None passes this test; the subsequent _get_ptr vtable call
   * on a NULL-equivalent object would then be the failure point — behavior for
   * None is defined by the generated Cython None-check conventions, not here. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams))))) __PYX_ERR(0, 19172, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19173
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         cdef VgpuSchedulerSetParams val_ = val
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerSetParams_t) * 1)             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  /* Fetch the source wrapper's raw struct pointer through its vtable, then
   * copy one whole nvmlVgpuSchedulerSetParams_t into our embedded field. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19173, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).schedulerParams)), ((void *)__pyx_t_4), ((sizeof(nvmlVgpuSchedulerSetParams_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":19168
 *         return VgpuSchedulerSetParams.from_ptr(<intptr_t>&(self._ptr[0].schedulerParams), self._readonly, self)
 *
 *     @scheduler_params.setter             # <<<<<<<<<<<<<<
 *     def scheduler_params(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.scheduler_params.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19175
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerSetParams_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
*/

/* Python wrapper */
/* Descriptor-protocol __get__ wrapper for VgpuSchedulerState_v1.version:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `version` getter: boxes the C `unsigned int`
 * field self._ptr[0].version into a Python int and returns it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19178
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 *
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19175
 *         memcpy(<void *>&(self._ptr[0].schedulerParams), <void *>(val_._get_ptr()), sizeof(nvmlVgpuSchedulerSetParams_t) * 1)
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN: The version number of this struct."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19180
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol __set__ wrapper for VgpuSchedulerState_v1.version:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `version` setter: raises ValueError when the
 * wrapper is read-only, otherwise converts `val` to unsigned int (OverflowError/
 * TypeError surfaced by __Pyx_PyLong_As_unsigned_int) and stores it into
 * self._ptr[0].version. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19182
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19183
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 *
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerState_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19183, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19183, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19182
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19184
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  /* (unsigned int)-1 is the error sentinel; PyErr_Occurred disambiguates a
   * genuine UINT_MAX value from a conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19184, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19180
 *         return self._ptr[0].version
 *
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19186
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: IN: One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
*/

/* Python wrapper */
/* Descriptor-protocol __get__ wrapper for VgpuSchedulerState_v1.engine_id:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `engine_id` getter: boxes the C `unsigned int`
 * field self._ptr[0].engineId into a Python int and returns it. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19189
 *     def engine_id(self):
 *         """int: IN: One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
 *         return self._ptr[0].engineId             # <<<<<<<<<<<<<<
 *
 *     @engine_id.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).engineId); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19186
 *         self._ptr[0].version = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def engine_id(self):
 *         """int: IN: One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.engine_id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19191
 *         return self._ptr[0].engineId
 * 
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol __set__ wrapper for VgpuSchedulerState_v1.engine_id:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `engine_id` setter: raises ValueError when the
 * wrapper is read-only, otherwise converts `val` to unsigned int and stores it
 * into self._ptr[0].engineId. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19193
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].engineId = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19194
 *     def engine_id(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].engineId = val
 *
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerState_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19194, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19194, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19193
 *     @engine_id.setter
 *     def engine_id(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].engineId = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19195
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].engineId = val             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  /* (unsigned int)-1 is the error sentinel; PyErr_Occurred disambiguates a
   * genuine UINT_MAX value from a conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19195, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).engineId = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19191
 *         return self._ptr[0].engineId
 *
 *     @engine_id.setter             # <<<<<<<<<<<<<<
 *     def engine_id(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.engine_id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19197
 *         self._ptr[0].engineId = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: IN: Scheduler policy."""
*/

/* Python wrapper */
/* Descriptor-protocol __get__ wrapper for VgpuSchedulerState_v1.scheduler_policy:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `scheduler_policy` getter: boxes the C
 * `unsigned int` field self._ptr[0].schedulerPolicy into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19200
 *     def scheduler_policy(self):
 *         """int: IN: Scheduler policy."""
 *         return self._ptr[0].schedulerPolicy             # <<<<<<<<<<<<<<
 *
 *     @scheduler_policy.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).schedulerPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19197
 *         self._ptr[0].engineId = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self):
 *         """int: IN: Scheduler policy."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.scheduler_policy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19202
 *         return self._ptr[0].schedulerPolicy
 * 
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol __set__ wrapper for VgpuSchedulerState_v1.scheduler_policy:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `scheduler_policy` setter: raises ValueError
 * when the wrapper is read-only, otherwise converts `val` to unsigned int and
 * stores it into self._ptr[0].schedulerPolicy. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19204
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19205
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].schedulerPolicy = val
 *
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerState_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19205, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19205, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19204
 *     @scheduler_policy.setter
 *     def scheduler_policy(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19206
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].schedulerPolicy = val             # <<<<<<<<<<<<<<
 *
 *     @property
 */
  /* (unsigned int)-1 is the error sentinel; PyErr_Occurred disambiguates a
   * genuine UINT_MAX value from a conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19206, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).schedulerPolicy = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19202
 *         return self._ptr[0].schedulerPolicy
 *
 *     @scheduler_policy.setter             # <<<<<<<<<<<<<<
 *     def scheduler_policy(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.scheduler_policy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19208
 *         self._ptr[0].schedulerPolicy = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def enable_arr_mode(self):
 *         """int: IN: Adaptive Round Robin scheduler."""
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `enable_arr_mode` getter: boxes the C
 * `unsigned int` field self._ptr[0].enableARRMode into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19211
 *     def enable_arr_mode(self):
 *         """int: IN: Adaptive Round Robin scheduler."""
 *         return self._ptr[0].enableARRMode             # <<<<<<<<<<<<<<
 *
 *     @enable_arr_mode.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).enableARRMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19211, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19208
 *         self._ptr[0].schedulerPolicy = val
 *
 *     @property             # <<<<<<<<<<<<<<
 *     def enable_arr_mode(self):
 *         """int: IN: Adaptive Round Robin scheduler."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.enable_arr_mode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19213
 *         return self._ptr[0].enableARRMode
 * 
 *     @enable_arr_mode.setter             # <<<<<<<<<<<<<<
 *     def enable_arr_mode(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Descriptor-protocol __set__ wrapper for VgpuSchedulerState_v1.enable_arr_mode:
 * downcasts the receiver and delegates to the typed implementation.
 * Returns 0 on success, -1 on error. */
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed implementation of the `enable_arr_mode` setter: raises ValueError
 * when the wrapper is read-only, otherwise converts `val` to unsigned int and
 * stores it into self._ptr[0].enableARRMode. */
static int __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19215
 *     @enable_arr_mode.setter
 *     def enable_arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].enableARRMode = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19216
 *     def enable_arr_mode(self, val):
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].enableARRMode = val
 *
 */
    /* Construct and raise ValueError via the vectorcall fast path. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_VgpuSchedulerState_v1_insta};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19216, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19216, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19215
 *     @enable_arr_mode.setter
 *     def enable_arr_mode(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].enableARRMode = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19217
 *         if self._readonly:
 *             raise ValueError("This VgpuSchedulerState_v1 instance is read-only")
 *         self._ptr[0].enableARRMode = val             # <<<<<<<<<<<<<<
 *
 *     @staticmethod
 */
  /* (unsigned int)-1 is the error sentinel; PyErr_Occurred disambiguates a
   * genuine UINT_MAX value from a conversion failure. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19217, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).enableARRMode = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19213
 *         return self._ptr[0].enableARRMode
 *
 *     @enable_arr_mode.setter             # <<<<<<<<<<<<<<
 *     def enable_arr_mode(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.enable_arr_mode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19219
 *         self._ptr[0].enableARRMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Cython-generated argument-unpacking wrapper for the staticmethod
 * VgpuSchedulerState_v1.from_data(data).  Accepts exactly one argument,
 * positionally or by keyword, then forwards it to the implementation
 * __pyx_pf_..._12from_data.  The only hand edit below is the keyword-count
 * error check: the original `unlikely(__pyx_kwds_len) < 0` is a dead
 * comparison (unlikely() normalizes its operand to 0/1), so a negative
 * error return from __Pyx_NumKwargs_FASTCALL went undetected; the
 * correctly parenthesized form matches other wrappers in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_12from_data, "VgpuSchedulerState_v1.from_data(data)\n\nCreate an VgpuSchedulerState_v1 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_state_v1_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: was `unlikely(__pyx_kwds_len) < 0`, which is always false; the
     * negative-count (error) case must be checked inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19219, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19219, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 19219, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 19219, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19219, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19219, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.from_data(data): fetches the
 * module-level `vgpu_scheduler_state_v1_dtype` global, then delegates to the
 * shared __from_data() helper together with the dtype's name and the
 * VgpuSchedulerState_v1 extension type, returning the new wrapper instance.
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":19226
 *             data (_numpy.ndarray): a single-element array of dtype `vgpu_scheduler_state_v1_dtype` holding the data.
 *         """
 *         return __from_data(data, "vgpu_scheduler_state_v1_dtype", vgpu_scheduler_state_v1_dtype, VgpuSchedulerState_v1)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up the dtype object from module globals (may raise NameError). */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_state_v1_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19226, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_state_v1_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19226, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19219
 *         self._ptr[0].enableARRMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19228
 *         return __from_data(data, "vgpu_scheduler_state_v1_dtype", vgpu_scheduler_state_v1_dtype, VgpuSchedulerState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Cython-generated argument-unpacking wrapper for the staticmethod
 * VgpuSchedulerState_v1.from_ptr(ptr, readonly=False, owner=None).
 * Unpacks one required and two optional arguments (positional or keyword),
 * converts `ptr` to intptr_t and `readonly` to a C int, defaults `owner`
 * to None, then forwards to __pyx_pf_..._14from_ptr.  The only hand edit
 * is the keyword-count error check: `unlikely(__pyx_kwds_len) < 0` was a
 * dead comparison (unlikely() normalizes to 0/1), so a negative error
 * return from __Pyx_NumKwargs_FASTCALL went undetected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_14from_ptr, "VgpuSchedulerState_v1.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an VgpuSchedulerState_v1 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: was `unlikely(__pyx_kwds_len) < 0`, which is always false; the
     * negative-count (error) case must be checked inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19228, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19228, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19228, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19228, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 19228, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":19229
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 19228, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19228, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19228, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19228, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19229, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19229, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 19228, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":19228
 *         return __from_data(data, "vgpu_scheduler_state_v1_dtype", vgpu_scheduler_state_v1_dtype, VgpuSchedulerState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of VgpuSchedulerState_v1.from_ptr(ptr, readonly, owner).
 * Raises ValueError for a null pointer.  With owner=None it mallocs a
 * private nvmlVgpuSchedulerState_v1_t and memcpy's the caller's data into
 * it (obj owns and will free the copy: _owned=True); otherwise it aliases
 * the caller's memory and holds a reference to `owner` to keep it alive
 * (_owned=False).  Returns the new instance, or NULL with an exception
 * set (ValueError, MemoryError, or a failure from tp_new). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":19237
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19238
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)
 *         if owner is None:
 */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19238, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19238, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19237
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)
 */
  }

  /* "cuda/bindings/_nvml.pyx":19239
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19239, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":19240
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19241
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 */
    /* no-owner path: take a private copy so the source pointer need not
     * outlive this object */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerState_v1_t *)malloc((sizeof(nvmlVgpuSchedulerState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":19242
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerState_v1_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19243
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerState_v1_t))
 *             obj._owner = None
 */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19243, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_VgpuSchedulerSt_2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19243, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 19243, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19242
 *         if owner is None:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerState_v1_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":19244
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerState_v1_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlVgpuSchedulerState_v1_t))));

    /* "cuda/bindings/_nvml.pyx":19245
 *                 raise MemoryError("Error allocating VgpuSchedulerState_v1")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerState_v1_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19246
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlVgpuSchedulerState_v1_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19240
 *             raise ValueError("ptr must not be null (0)")
 *         cdef VgpuSchedulerState_v1 obj = VgpuSchedulerState_v1.__new__(VgpuSchedulerState_v1)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>malloc(sizeof(nvmlVgpuSchedulerState_v1_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":19248
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* owner-provided path: alias the caller's memory; `owner` keeps it alive */
    __pyx_v_obj->_ptr = ((nvmlVgpuSchedulerState_v1_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":19249
 *         else:
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":19250
 *             obj._ptr = <nvmlVgpuSchedulerState_v1_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":19251
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":19252
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19228
 *         return __from_data(data, "vgpu_scheduler_state_v1_dtype", vgpu_scheduler_state_v1_dtype, VgpuSchedulerState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
/* Cython-generated wrapper for VgpuSchedulerState_v1.__reduce_cython__(self):
 * rejects any positional or keyword arguments, then delegates to the
 * implementation, which unconditionally raises TypeError (instances hold a
 * raw C pointer and are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16__reduce_cython__, "VgpuSchedulerState_v1.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  /* NOTE: this is the correctly-parenthesized form of the negative-count
   * check used by Cython's argument unpacking. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __reduce_cython__: always raises TypeError because the
 * wrapped `self._ptr` C pointer cannot be serialized.  Never returns a
 * value; always returns NULL with the exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* Cython-generated argument-unpacking wrapper for
 * VgpuSchedulerState_v1.__setstate_cython__(self, __pyx_state).  Unpacks
 * the single `__pyx_state` argument and forwards to the implementation,
 * which always raises TypeError (instances are not picklable).  The only
 * hand edit is the keyword-count error check: `unlikely(__pyx_kwds_len) < 0`
 * was a dead comparison (unlikely() normalizes to 0/1); the corrected form
 * matches the check in __reduce_cython__'s wrapper above. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_18__setstate_cython__, "VgpuSchedulerState_v1.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: was `unlikely(__pyx_kwds_len) < 0`, which is always false; the
     * negative-count (error) case must be checked inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: always raises TypeError because
 * the wrapped `self._ptr` C pointer cannot be restored from pickled state.
 * The `__pyx_state` argument is ignored; always returns NULL with the
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.VgpuSchedulerState_v1.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19255
 * 
 * 
 * cdef _get_grid_licensable_features_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicensableFeatures_t pod = nvmlGridLicensableFeatures_t()
 *     return _numpy.dtype({
*/

/* Builds and returns a numpy structured dtype describing the C layout of
 * nvmlGridLicensableFeatures_t: field names, per-field numpy formats,
 * byte offsets (derived from field addresses of a stack instance), and
 * total itemsize (sizeof the struct). Returns NULL with an exception set
 * on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_grid_licensable_features_dtype_offsets(void) {
  nvmlGridLicensableFeatures_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlGridLicensableFeatures_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_grid_licensable_features_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":19256
 * 
 * cdef _get_grid_licensable_features_dtype_offsets():
 *     cdef nvmlGridLicensableFeatures_t pod = nvmlGridLicensableFeatures_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['is_grid_license_supported', 'licensable_features_count', 'grid_licensable_features'],
 */
  /* NOTE(review): __pyx_t_1 is copied here without any visible prior
   * assignment (Cython's lowering of the C++ value-initialization above).
   * Only the *addresses* of pod's fields are used below, never their
   * values, so the indeterminate contents are harmless — but confirm this
   * matches intended Cython 3.2.2 codegen for POD default construction. */
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":19257
 * cdef _get_grid_licensable_features_dtype_offsets():
 *     cdef nvmlGridLicensableFeatures_t pod = nvmlGridLicensableFeatures_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['is_grid_license_supported', 'licensable_features_count', 'grid_licensable_features'],
 *         'formats': [_numpy.int32, _numpy.uint32, grid_licensable_feature_dtype],
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype: t_4 = module global "numpy", t_5 = its "dtype"
   * attribute (the callable invoked at the end of this function). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":19258
 *     cdef nvmlGridLicensableFeatures_t pod = nvmlGridLicensableFeatures_t()
 *     return _numpy.dtype({
 *         'names': ['is_grid_license_supported', 'licensable_features_count', 'grid_licensable_features'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.int32, _numpy.uint32, grid_licensable_feature_dtype],
 *         'offsets': [
 */
  /* t_4 = the 4-slot spec dict; t_6 = the 'names' list of interned
   * field-name strings (each INCREF'd, ownership given to the list). */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_grid_license_supported);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_grid_license_supported);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_is_grid_license_supported) != (0)) __PYX_ERR(0, 19258, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_licensable_features_count);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_licensable_features_count);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_licensable_features_count) != (0)) __PYX_ERR(0, 19258, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_grid_licensable_features);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_grid_licensable_features);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_grid_licensable_features) != (0)) __PYX_ERR(0, 19258, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 19258, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":19259
 *     return _numpy.dtype({
 *         'names': ['is_grid_license_supported', 'licensable_features_count', 'grid_licensable_features'],
 *         'formats': [_numpy.int32, _numpy.uint32, grid_licensable_feature_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.isGridLicenseSupported)) - (<intptr_t>&pod),
 */
  /* Build the 'formats' list: numpy.int32, numpy.uint32, and the
   * module-level grid_licensable_feature_dtype (a nested dtype). */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 19259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_grid_licensable_feature_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = PyList_New(3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 19259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 19259, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 19259, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 19259, __pyx_L1_error);
  /* References t_7/t_8/t_6 were transferred into the list above. */
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_9) < (0)) __PYX_ERR(0, 19258, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":19261
 *         'formats': [_numpy.int32, _numpy.uint32, grid_licensable_feature_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.isGridLicenseSupported)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.licensableFeaturesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gridLicensableFeatures)) - (<intptr_t>&pod),
 */
  /* Each offset is field-address minus struct-address, i.e. offsetof. */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isGridLicenseSupported)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 19261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":19262
 *         'offsets': [
 *             (<intptr_t>&(pod.isGridLicenseSupported)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licensableFeaturesCount)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.gridLicensableFeatures)) - (<intptr_t>&pod),
 *         ],
 */
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.licensableFeaturesCount)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19262, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":19263
 *             (<intptr_t>&(pod.isGridLicenseSupported)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licensableFeaturesCount)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.gridLicensableFeatures)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlGridLicensableFeatures_t),
 */
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.gridLicensableFeatures)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 19263, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":19260
 *         'names': ['is_grid_license_supported', 'licensable_features_count', 'grid_licensable_features'],
 *         'formats': [_numpy.int32, _numpy.uint32, grid_licensable_feature_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isGridLicenseSupported)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.licensableFeaturesCount)) - (<intptr_t>&pod),
 */
  /* Collect the three offset ints into the 'offsets' list. */
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 19260, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 19260, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 19260, __pyx_L1_error);
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 19258, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":19265
 *             (<intptr_t>&(pod.gridLicensableFeatures)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlGridLicensableFeatures_t),             # <<<<<<<<<<<<<<
 *     })
 * 
 */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlGridLicensableFeatures_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19265, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 19258, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Vectorcall numpy.dtype(spec). If t_5 turns out to be a bound method,
   * unpack it so self becomes the first positional argument (t_10 = 0). */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19257, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  /* Success: return the freshly constructed dtype object. */
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19255
 * 
 * 
 * cdef _get_grid_licensable_features_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicensableFeatures_t pod = nvmlGridLicensableFeatures_t()
 *     return _numpy.dtype({
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop every temporary that may still hold a reference. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_grid_licensable_features_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19282
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGridLicensableFeatures_t *>calloc(1, sizeof(nvmlGridLicensableFeatures_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* Python-level wrapper for GridLicensableFeatures.__init__.
 * Rejects any positional or keyword arguments (the Cython __init__ takes
 * only self), then delegates to the implementation function.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  /* Count positionals; the unchecked macro is used only when the build
   * asserts tuple accesses are safe. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ accepts exactly zero arguments and no keywords. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__init__.
 * Allocates a zero-initialized nvmlGridLicensableFeatures_t via calloc and
 * stores it in self._ptr; raises MemoryError if allocation fails. On
 * success marks the instance as owning the allocation (_owned = True),
 * with no owner object (_owner = None) and writable (_readonly = False).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":19283
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlGridLicensableFeatures_t *>calloc(1, sizeof(nvmlGridLicensableFeatures_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GridLicensableFeatures")
 */
  /* calloc zero-fills the struct; freed in __dealloc__ when _owned. */
  __pyx_v_self->_ptr = ((nvmlGridLicensableFeatures_t *)calloc(1, (sizeof(nvmlGridLicensableFeatures_t))));

  /* "cuda/bindings/_nvml.pyx":19284
 *     def __init__(self):
 *         self._ptr = <nvmlGridLicensableFeatures_t *>calloc(1, sizeof(nvmlGridLicensableFeatures_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GridLicensableFeatures")
 *         self._owner = None
 */
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19285
 *         self._ptr = <nvmlGridLicensableFeatures_t *>calloc(1, sizeof(nvmlGridLicensableFeatures_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GridLicensableFeatures")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
 */
    /* Look up MemoryError as a module global and vectorcall it with the
     * message string, unpacking a bound method if necessary. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19285, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GridLicensableF};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19285, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    /* Raise the constructed exception instance and unwind. */
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19285, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19284
 *     def __init__(self):
 *         self._ptr = <nvmlGridLicensableFeatures_t *>calloc(1, sizeof(nvmlGridLicensableFeatures_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating GridLicensableFeatures")
 *         self._owner = None
 */
  }

  /* "cuda/bindings/_nvml.pyx":19286
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating GridLicensableFeatures")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
 */
  /* Swap the previous _owner reference for None (INCREF new before
   * DECREF old, the standard safe-assignment ordering). */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":19287
 *             raise MemoryError("Error allocating GridLicensableFeatures")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
 */
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":19288
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
 */
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":19282
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlGridLicensableFeatures_t *>calloc(1, sizeof(nvmlGridLicensableFeatures_t))
 *         if self._ptr == NULL:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19290
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGridLicensableFeatures_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-level wrapper for GridLicensableFeatures.__dealloc__; simply
 * forwards to the implementation function. */
static void __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared in this
   * function; this compiles only because __Pyx_KwValues_VARARGS expands
   * without evaluating its arguments — confirm against the macro
   * definition in the generated prelude. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of GridLicensableFeatures.__dealloc__.
 * Frees the calloc'd struct only when this instance owns it (_owned) and
 * the pointer is non-NULL; _ptr is cleared before free() so the instance
 * never holds a dangling pointer. Pointers borrowed from another owner
 * (_owned false) are left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  nvmlGridLicensableFeatures_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlGridLicensableFeatures_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":19292
 *     def __dealloc__(self):
 *         cdef nvmlGridLicensableFeatures_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  /* Short-circuit `and`: if _owned is false the pointer test is skipped. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19293
 *         cdef nvmlGridLicensableFeatures_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
 */
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":19294
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
 */
    /* Clear before freeing: guards against double-free via the field. */
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":19295
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
 */
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":19292
 *     def __dealloc__(self):
 *         cdef nvmlGridLicensableFeatures_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
 */
  }

  /* "cuda/bindings/_nvml.pyx":19290
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlGridLicensableFeatures_t *ptr
 *         if self._owned and self._ptr != NULL:
 */

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":19297
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GridLicensableFeatures object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr-level wrapper for GridLicensableFeatures.__repr__; forwards to
 * the implementation and returns its result (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared here; this
   * compiles only because __Pyx_KwValues_VARARGS discards its arguments
   * during macro expansion — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__repr__.
 * Builds the f-string "<{__name__}.GridLicensableFeatures object at
 * {hex(id(self))}>" by joining five fragments (two formatted values and
 * three constant pieces) with precomputed total length and max char.
 * Returns a new unicode object, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":19298
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.GridLicensableFeatures object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str(__name__) via format-with-empty-spec. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join: "<" + name + ".GridLicensableFeatures object at " + hex + ">". */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_GridLicensableFeatures_object_a;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 34 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19297
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.GridLicensableFeatures object at {hex(id(self))}>"
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19300
 *         return f"<{__name__}.GridLicensableFeatures object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Getter wrapper for the GridLicensableFeatures.ptr property; forwards to
 * the implementation and returns its result (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared here; this
   * compiles only because __Pyx_KwValues_VARARGS discards its arguments
   * during macro expansion — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the GridLicensableFeatures.ptr property getter.
 * Returns self._ptr reinterpreted as a Python int (the raw address),
 * or NULL with an exception set if the int allocation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19303
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int via the signed intptr_t path. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19300
 *         return f"<{__name__}.GridLicensableFeatures object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19305
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* C-level accessor for GridLicensableFeatures._get_ptr: returns the
 * wrapped nvmlGridLicensableFeatures_t pointer as an intptr_t. Pure
 * read-only cast — no Python objects, no error paths. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_22GridLicensableFeatures__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  return (intptr_t)__pyx_v_self->_ptr;
}

/* "cuda/bindings/_nvml.pyx":19308
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int-level wrapper for GridLicensableFeatures.__int__; forwards to
 * the implementation and returns its result (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared here; this
   * compiles only because __Pyx_KwValues_VARARGS discards its arguments
   * during macro expansion — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__int__.
 * Returns self._ptr as a Python int — same value as the .ptr property —
 * or NULL with an exception set if the int allocation fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":19309
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box the pointer value as a Python int via the signed intptr_t path. */
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19308
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19311
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GridLicensableFeatures other_
 *         if not isinstance(other, GridLicensableFeatures):
*/

/* Python wrapper */
/* Richcompare-level wrapper for GridLicensableFeatures.__eq__; forwards
 * self and other to the implementation and returns its result. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared here; this
   * compiles only because __Pyx_KwValues_VARARGS discards its arguments
   * during macro expansion — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__eq__ (pyx line 19311).
 * Returns Py_False when `other` is not a GridLicensableFeatures instance;
 * otherwise compares the two wrapped nvmlGridLicensableFeatures_t structs
 * bytewise with memcmp and returns the resulting bool.
 * Returns NULL with an exception set on error (e.g. failed type cast). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":19313
 *     def __eq__(self, other):
 *         cdef GridLicensableFeatures other_
 *         if not isinstance(other, GridLicensableFeatures):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":19314
 *         cdef GridLicensableFeatures other_
 *         if not isinstance(other, GridLicensableFeatures):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicensableFeatures_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":19313
 *     def __eq__(self, other):
 *         cdef GridLicensableFeatures other_
 *         if not isinstance(other, GridLicensableFeatures):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":19315
 *         if not isinstance(other, GridLicensableFeatures):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicensableFeatures_t)) == 0)
 * 
 */
  /* Exact-type re-check with TypeTest (accepts None) before the cast below. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures))))) __PYX_ERR(0, 19315, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":19316
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicensableFeatures_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  /* Bytewise struct comparison; equality means identical raw contents. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlGridLicensableFeatures_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19311
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef GridLicensableFeatures other_
 *         if not isinstance(other, GridLicensableFeatures):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19318
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicensableFeatures_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
*/

/* Python wrapper */
/* CPython-facing wrapper for GridLicensableFeatures.__setitem__.
 * Casts `self` and forwards (key, val) to the typed implementation.
 * Returns 0 on success, -1 with an exception set on failure.
 * Generated by Cython 3.2.2; do not hand-edit the call protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__setitem__ (pyx line 19318).
 * Two behaviors, keyed on the arguments:
 *   - obj[0] = ndarray: mallocs a fresh nvmlGridLicensableFeatures_t,
 *     memcpys the ndarray's buffer (via val.ctypes.data) into it, and marks
 *     the instance as owning the allocation (_owned = True, _owner = None);
 *     _readonly mirrors `not val.flags.writeable`.  Raises MemoryError if
 *     malloc fails.
 *   - anything else: falls through to setattr(self, key, val).
 * Returns 0 on success, -1 with an exception set on failure.
 * NOTE(review): on the ndarray path the previous self._ptr is overwritten
 * without being freed here — presumably dealloc/ownership handling lives
 * elsewhere in the generated type; confirm before touching. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":19319
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: only check isinstance when key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 19319, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 19319, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19320
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 */
    __pyx_v_self->_ptr = ((nvmlGridLicensableFeatures_t *)malloc((sizeof(nvmlGridLicensableFeatures_t))));

    /* "cuda/bindings/_nvml.pyx":19321
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicensableFeatures_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19322
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicensableFeatures")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicensableFeatures_t))
 *             self._owner = None
 */
      /* Look up MemoryError by name and call it via the fastcall protocol. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19322, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GridLicensableF};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19322, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 19322, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19321
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicensableFeatures_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":19323
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicensableFeatures_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data yields the ndarray's raw buffer address as an int. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19323, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19323, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19323, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlGridLicensableFeatures_t))));

    /* "cuda/bindings/_nvml.pyx":19324
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicensableFeatures_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19325
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlGridLicensableFeatures_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19326
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19326, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19326, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 19326, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":19319
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":19328
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 19328, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":19318
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlGridLicensableFeatures_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19330
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def grid_licensable_features(self):
 *         """GridLicensableFeature: """
*/

/* Python wrapper */
/* CPython-facing getter wrapper for the `grid_licensable_features`
 * property; forwards to the typed __get__ implementation.
 * Generated by Cython 3.2.2; do not hand-edit the call protocol. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for the `grid_licensable_features` property (pyx line 19330).
 * Returns GridLicensableFeature.from_ptr(<address of the embedded
 * gridLicensableFeatures array>, 3, self._readonly) — i.e. a view object
 * over the 3-element array inside the wrapped struct, inheriting this
 * instance's read-only flag.  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19333
 *     def grid_licensable_features(self):
 *         """GridLicensableFeature: """
 *         return GridLicensableFeature.from_ptr(<intptr_t>&(self._ptr[0].gridLicensableFeatures), 3, self._readonly)             # <<<<<<<<<<<<<<
 * 
 *     @grid_licensable_features.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
  __Pyx_INCREF(__pyx_t_2);
  /* Pass the raw field address to from_ptr as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).gridLicensableFeatures))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_mstate_global->__pyx_int_3, __pyx_t_4};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19333, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19330
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def grid_licensable_features(self):
 *         """GridLicensableFeature: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.grid_licensable_features.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19335
 *         return GridLicensableFeature.from_ptr(<intptr_t>&(self._ptr[0].gridLicensableFeatures), 3, self._readonly)
 * 
 *     @grid_licensable_features.setter             # <<<<<<<<<<<<<<
 *     def grid_licensable_features(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing setter wrapper for the `grid_licensable_features`
 * property; forwards `val` to the typed __set__ implementation.
 * Returns 0 on success, -1 with an exception set on failure.
 * Generated by Cython 3.2.2; do not hand-edit the call protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for the `grid_licensable_features` property (pyx line 19335).
 * Raises ValueError if this instance is read-only; casts `val` to
 * GridLicensableFeature; raises ValueError unless len(val) == 3; then
 * memcpys 3 * sizeof(nvmlGridLicensableFeature_t) bytes from
 * val._get_ptr() into the embedded gridLicensableFeatures array.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  intptr_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19337
 *     @grid_licensable_features.setter
 *     def grid_licensable_features(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         cdef GridLicensableFeature val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19338
 *     def grid_licensable_features(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicensableFeatures instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef GridLicensableFeature val_ = val
 *         if len(val) != 3:
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicensableFeatures_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19338, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19338, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19337
 *     @grid_licensable_features.setter
 *     def grid_licensable_features(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         cdef GridLicensableFeature val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19339
 *         if self._readonly:
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         cdef GridLicensableFeature val_ = val             # <<<<<<<<<<<<<<
 *         if len(val) != 3:
 *             raise ValueError(f"Expected length 3 for field grid_licensable_features, got {len(val)}")
 */
  /* Type-checked cast (accepts None) before touching val_'s fields. */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature))))) __PYX_ERR(0, 19339, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19340
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         cdef GridLicensableFeature val_ = val
 *         if len(val) != 3:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 3 for field grid_licensable_features, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].gridLicensableFeatures), <void *>(val_._get_ptr()), sizeof(nvmlGridLicensableFeature_t) * 3)
 */
  __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 19340, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 3);
  if (unlikely(__pyx_t_5)) {

    /* "cuda/bindings/_nvml.pyx":19341
 *         cdef GridLicensableFeature val_ = val
 *         if len(val) != 3:
 *             raise ValueError(f"Expected length 3 for field grid_licensable_features, got {len(val)}")             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].gridLicensableFeatures), <void *>(val_._get_ptr()), sizeof(nvmlGridLicensableFeature_t) * 3)
 * 
 */
    /* Build the f-string message by formatting len(val) and concatenating. */
    __pyx_t_2 = NULL;
    __pyx_t_4 = PyObject_Length(__pyx_v_val); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 19341, __pyx_L1_error)
    __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_t_4, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19341, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Expected_length_3_for_field_grid, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19341, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19341, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19341, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19340
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         cdef GridLicensableFeature val_ = val
 *         if len(val) != 3:             # <<<<<<<<<<<<<<
 *             raise ValueError(f"Expected length 3 for field grid_licensable_features, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].gridLicensableFeatures), <void *>(val_._get_ptr()), sizeof(nvmlGridLicensableFeature_t) * 3)
 */
  }

  /* "cuda/bindings/_nvml.pyx":19342
 *         if len(val) != 3:
 *             raise ValueError(f"Expected length 3 for field grid_licensable_features, got {len(val)}")
 *         memcpy(<void *>&(self._ptr[0].gridLicensableFeatures), <void *>(val_._get_ptr()), sizeof(nvmlGridLicensableFeature_t) * 3)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* _get_ptr is dispatched through the extension type's vtable. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19342, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).gridLicensableFeatures)), ((void *)__pyx_t_8), ((sizeof(nvmlGridLicensableFeature_t)) * 3)));

  /* "cuda/bindings/_nvml.pyx":19335
 *         return GridLicensableFeature.from_ptr(<intptr_t>&(self._ptr[0].gridLicensableFeatures), 3, self._readonly)
 * 
 *     @grid_licensable_features.setter             # <<<<<<<<<<<<<<
 *     def grid_licensable_features(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.grid_licensable_features.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19344
 *         memcpy(<void *>&(self._ptr[0].gridLicensableFeatures), <void *>(val_._get_ptr()), sizeof(nvmlGridLicensableFeature_t) * 3)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_grid_license_supported(self):
 *         """int: """
*/

/* Python wrapper */
/* CPython-facing getter wrapper for the `is_grid_license_supported`
 * property; forwards to the typed __get__ implementation.
 * Generated by Cython 3.2.2; do not hand-edit the call protocol. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for the `is_grid_license_supported` property (pyx line 19344).
 * Returns self._ptr[0].isGridLicenseSupported converted to a Python int.
 * Returns NULL with an exception set if the conversion fails. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19347
 *     def is_grid_license_supported(self):
 *         """int: """
 *         return self._ptr[0].isGridLicenseSupported             # <<<<<<<<<<<<<<
 * 
 *     @is_grid_license_supported.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_ptr[0]).isGridLicenseSupported); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19344
 *         memcpy(<void *>&(self._ptr[0].gridLicensableFeatures), <void *>(val_._get_ptr()), sizeof(nvmlGridLicensableFeature_t) * 3)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_grid_license_supported(self):
 *         """int: """
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.is_grid_license_supported.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19349
 *         return self._ptr[0].isGridLicenseSupported
 * 
 *     @is_grid_license_supported.setter             # <<<<<<<<<<<<<<
 *     def is_grid_license_supported(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* CPython-facing setter wrapper for the `is_grid_license_supported`
 * property; forwards `val` to the typed __set__ implementation.
 * Returns 0 on success, -1 with an exception set on failure.
 * Generated by Cython 3.2.2; do not hand-edit the call protocol. */
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter for the `is_grid_license_supported` property (pyx line 19349).
 * Raises ValueError if this instance is read-only; otherwise converts
 * `val` to a C int and stores it in self._ptr[0].isGridLicenseSupported.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19351
 *     @is_grid_license_supported.setter
 *     def is_grid_license_supported(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         self._ptr[0].isGridLicenseSupported = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19352
 *     def is_grid_license_supported(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicensableFeatures instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isGridLicenseSupported = val
 * 
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicensableFeatures_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19352, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19352, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19351
 *     @is_grid_license_supported.setter
 *     def is_grid_license_supported(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         self._ptr[0].isGridLicenseSupported = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19353
 *         if self._readonly:
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         self._ptr[0].isGridLicenseSupported = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Overflow/type errors from the int conversion propagate as exceptions. */
  __pyx_t_4 = __Pyx_PyLong_As_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19353, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isGridLicenseSupported = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19349
 *         return self._ptr[0].isGridLicenseSupported
 * 
 *     @is_grid_license_supported.setter             # <<<<<<<<<<<<<<
 *     def is_grid_license_supported(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.is_grid_license_supported.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19355
 *         self._ptr[0].isGridLicenseSupported = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def licensable_features_count(self):
 *         """int: """
*/

/* Python wrapper */
/* Python-level wrapper for the `licensable_features_count` property getter.
   Casts the generic PyObject* self to the extension-type struct and delegates
   to the implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this getter;
     __Pyx_KwValues_VARARGS presumably expands to a constant (e.g. NULL) so the
     names never reach the compiler -- confirm against the macro definition
     earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter implementation: returns self._ptr[0].licensableFeaturesCount boxed
   as a Python int (new reference), or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19358
 *     def licensable_features_count(self):
 *         """int: """
 *         return self._ptr[0].licensableFeaturesCount             # <<<<<<<<<<<<<<
 * 
 *     @licensable_features_count.setter
*/
  __Pyx_XDECREF(__pyx_r);
  /* Box the C `unsigned int` field as a Python int; only failure mode is
     allocation failure inside the conversion helper. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).licensableFeaturesCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19358, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19355
 *         self._ptr[0].isGridLicenseSupported = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def licensable_features_count(self):
 *         """int: """
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.licensable_features_count.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19360
 *         return self._ptr[0].licensableFeaturesCount
 * 
 *     @licensable_features_count.setter             # <<<<<<<<<<<<<<
 *     def licensable_features_count(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* Python-level wrapper for the `licensable_features_count` property setter.
   Casts self to the extension-type struct and delegates; returns 0 on success,
   -1 with an exception set on failure (descriptor-protocol convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters here; the
     __Pyx_KwValues_VARARGS macro presumably discards its arguments -- confirm
     against the macro definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Setter implementation: raises ValueError if the instance is read-only,
   otherwise converts `val` to unsigned int and stores it into
   self._ptr[0].licensableFeaturesCount. Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19362
 *     @licensable_features_count.setter
 *     def licensable_features_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         self._ptr[0].licensableFeaturesCount = val
*/
  /* Guard: instances created read-only (see from_ptr) reject mutation. */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19363
 *     def licensable_features_count(self, val):
 *         if self._readonly:
 *             raise ValueError("This GridLicensableFeatures instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].licensableFeaturesCount = val
 * 
*/
    /* Vectorcall ValueError(msg): offset 1 with the ARGUMENTS_OFFSET flag
       signals there is no bound "self" slot in use. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_GridLicensableFeatures_inst};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19363, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19363, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19362
 *     @licensable_features_count.setter
 *     def licensable_features_count(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         self._ptr[0].licensableFeaturesCount = val
*/
  }

  /* "cuda/bindings/_nvml.pyx":19364
 *         if self._readonly:
 *             raise ValueError("This GridLicensableFeatures instance is read-only")
 *         self._ptr[0].licensableFeaturesCount = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  /* Convert and store; conversion raises (e.g. OverflowError/TypeError) on
     values that do not fit an unsigned int. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19364, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).licensableFeaturesCount = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19360
 *         return self._ptr[0].licensableFeaturesCount
 * 
 *     @licensable_features_count.setter             # <<<<<<<<<<<<<<
 *     def licensable_features_count(self, val):
 *         if self._readonly:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.licensable_features_count.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19366
 *         self._ptr[0].licensableFeaturesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicensableFeatures instance wrapping the given NumPy array.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_12from_data, "GridLicensableFeatures.from_data(data)\n\nCreate an GridLicensableFeatures instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `grid_licensable_features_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_12from_data};
/* Argument-unpacking wrapper for the static method `from_data(data)`.
   Accepts exactly one argument, positionally or by keyword `data`, then
   delegates to the implementation. Returns a new reference or NULL.

   FIX(review): the keyword-count error check read
       if (unlikely(__pyx_kwds_len) < 0)
   Under GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0), whose
   value is 0 or 1, so `unlikely(len) < 0` was always false and the negative-
   length error path was dead code. Reparenthesized to
       if (unlikely(__pyx_kwds_len < 0))
   matching the correctly-parenthesized form used elsewhere in this file
   (e.g. the __reduce_cython__ wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19366, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19366, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 19366, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 19366, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19366, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19366, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `from_data(data)`: looks up the module-level
   `grid_licensable_features_dtype` object and delegates to the shared
   __from_data helper, which wraps the given NumPy array in a
   GridLicensableFeatures instance. Returns a new reference or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":19373
 *             data (_numpy.ndarray): a single-element array of dtype `grid_licensable_features_dtype` holding the data.
 *         """
 *         return __from_data(data, "grid_licensable_features_dtype", grid_licensable_features_dtype, GridLicensableFeatures)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
*/
  __Pyx_XDECREF(__pyx_r);
  /* The interned name doubles as both the lookup key and the dtype-name
     string passed through to __from_data for error messages. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_grid_licensable_features_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_grid_licensable_features_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19366
 *         self._ptr[0].licensableFeaturesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicensableFeatures instance wrapping the given NumPy array.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19375
 *         return __from_data(data, "grid_licensable_features_dtype", grid_licensable_features_dtype, GridLicensableFeatures)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicensableFeatures instance wrapping the given pointer.
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_14from_ptr, "GridLicensableFeatures.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an GridLicensableFeatures instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_14from_ptr};
/* Argument-unpacking wrapper for the static method
   `from_ptr(intptr_t ptr, bint readonly=False, object owner=None)`.
   Unpacks 1-3 positional/keyword arguments, applies defaults
   (readonly=False, owner=None), converts `ptr` to intptr_t and
   `readonly` via truth testing, then delegates to the implementation.

   FIX(review): the keyword-count error check read
       if (unlikely(__pyx_kwds_len) < 0)
   Under GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0), whose
   value is 0 or 1, so the comparison was always false and the negative-length
   error path was dead code. Reparenthesized to
       if (unlikely(__pyx_kwds_len < 0))
   matching the correctly-parenthesized form used elsewhere in this file
   (e.g. the __reduce_cython__ wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19375, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional arguments (fallthrough fills lower slots), then
         merge keyword arguments on top. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19375, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19375, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19375, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 19375, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":19376
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an GridLicensableFeatures instance wrapping the given pointer.
 * 
*/
      /* Default for `owner`; `ptr` (slot 0) is required and checked below. */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 19375, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19375, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19375, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19375, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t -- assumes
       intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19376, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19376, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 19375, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":19375
 *         return __from_data(data, "grid_licensable_features_dtype", grid_licensable_features_dtype, GridLicensableFeatures)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicensableFeatures instance wrapping the given pointer.
*/

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `from_ptr(ptr, readonly, owner)`:
   - rejects a null pointer with ValueError;
   - when owner is None: malloc's a private nvmlGridLicensableFeatures_t,
     copies the pointed-to data into it, and marks the instance as owning
     the allocation (_owned = True);
   - otherwise: wraps the caller's pointer directly and keeps a reference to
     `owner` so the underlying storage stays alive (_owned = False);
   - records the readonly flag and returns the new instance (new reference),
     or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":19384
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)
*/
  /* Null-pointer guard: raise before allocating anything. */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19385
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)
 *         if owner is None:
*/
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19385, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19385, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19384
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)
*/
  }

  /* "cuda/bindings/_nvml.pyx":19386
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
*/
  /* Allocate the instance via tp_new directly (bypasses __init__). */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeatures(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19386, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":19387
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if obj._ptr == NULL:
*/
  /* No owner supplied -> take a private deep copy of the struct. */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19388
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
*/
    __pyx_v_obj->_ptr = ((nvmlGridLicensableFeatures_t *)malloc((sizeof(nvmlGridLicensableFeatures_t))));

    /* "cuda/bindings/_nvml.pyx":19389
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicensableFeatures_t))
*/
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19390
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicensableFeatures")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicensableFeatures_t))
 *             obj._owner = None
*/
      /* NOTE(review): "MemoryError" is resolved as a module-global name here
         (rather than the PyExc_MemoryError C constant) because the .pyx code
         references it by name -- so a module-level shadow would be honored. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19390, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_GridLicensableF};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19390, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 19390, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19389
 *         if owner is None:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicensableFeatures_t))
*/
    }

    /* "cuda/bindings/_nvml.pyx":19391
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicensableFeatures_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
*/
    /* Copy the caller's struct into the private allocation. */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlGridLicensableFeatures_t))));

    /* "cuda/bindings/_nvml.pyx":19392
 *                 raise MemoryError("Error allocating GridLicensableFeatures")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicensableFeatures_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
*/
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19393
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlGridLicensableFeatures_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>ptr
*/
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19387
 *             raise ValueError("ptr must not be null (0)")
 *         cdef GridLicensableFeatures obj = GridLicensableFeatures.__new__(GridLicensableFeatures)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>malloc(sizeof(nvmlGridLicensableFeatures_t))
 *             if obj._ptr == NULL:
*/
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":19395
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
*/
  /* Owner supplied -> borrow the pointer; holding `owner` keeps it valid. */
  /*else*/ {
    __pyx_v_obj->_ptr = ((nvmlGridLicensableFeatures_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":19396
 *         else:
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
*/
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":19397
 *             obj._ptr = <nvmlGridLicensableFeatures_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
*/
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":19398
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
*/
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":19399
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19375
 *         return __from_data(data, "grid_licensable_features_dtype", grid_licensable_features_dtype, GridLicensableFeatures)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicensableFeatures instance wrapping the given pointer.
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_16__reduce_cython__, "GridLicensableFeatures.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_16__reduce_cython__};
/* Wrapper for __reduce_cython__(self): rejects any positional or keyword
   arguments, then delegates to the implementation (which always raises
   TypeError -- instances holding raw pointers are not picklable). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Zero-argument method: any positional or keyword argument is an error. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__reduce_cython__.
 * The wrapped struct holds a raw pointer that cannot be pickled, so this
 * unconditionally raises TypeError and always returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  /* Raise TypeError with a cached message string; control always jumps to
   * the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
/* CPython entry point for GridLicensableFeatures.__setstate_cython__.
 * Unpacks exactly one argument (__pyx_state), accepted positionally or by
 * keyword, then delegates to the implementation function
 * (..._18__setstate_cython__). Returns a new reference, or NULL on error.
 *
 * Fix vs. generated code: the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`. Since unlikely(x) expands to
 * __builtin_expect(!!(x), 0) and yields only 0 or 1, that comparison was
 * always false and a negative (error) count from __Pyx_NumKwargs_FASTCALL
 * would have been silently ignored. The parenthesization now matches the
 * sibling wrappers in this file (`unlikely(__pyx_kwds_len < 0)`), and the
 * kwds presence test carries the same `unlikely` hint as its siblings. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_18__setstate_cython__, "GridLicensableFeatures.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_18__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  /* In the tuple-based (non-fastcall) ABI the positional count must be
   * recovered from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: capture any positional value, then
       * let __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      /* Verify every required argument slot was actually populated. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any captured argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of GridLicensableFeatures.__setstate_cython__.
 * The wrapped struct holds a raw pointer that cannot be unpickled, so this
 * ignores __pyx_state, unconditionally raises TypeError, and returns NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22GridLicensableFeatures_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  /* Raise TypeError with a cached message string; control always jumps to
   * the error label below. */
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.GridLicensableFeatures.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19402
 * 
 * 
 * cdef _get_nv_link_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvLinkInfo_v2_t pod = nvmlNvLinkInfo_v2_t()
 *     return _numpy.dtype({
*/

/* Builds the NumPy structured dtype describing nvmlNvLinkInfo_v2_t:
 * field names ['version', 'is_nvle_enabled', 'firmware_info'], their NumPy
 * formats, byte offsets computed from a stack instance via pointer
 * arithmetic, and the total itemsize. Returns a new reference to the dtype
 * object, or NULL with an exception set on failure.
 *
 * Fix vs. generated code: `__pyx_t_1` was copied into `pod` without ever
 * being assigned, so an indeterminate temporary was read. The
 * value-initializing construction `nvmlNvLinkInfo_v2_t()` described by the
 * section banner for _nvml.pyx:19403 is restored below. (The field values
 * themselves are never read — only field addresses are — but copying an
 * indeterminate object is still undefined/unspecified behavior.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml__get_nv_link_info_v2_dtype_offsets(void) {
  nvmlNvLinkInfo_v2_t __pyx_v_pod;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlNvLinkInfo_v2_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_get_nv_link_info_v2_dtype_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":19403
 * 
 * cdef _get_nv_link_info_v2_dtype_offsets():
 *     cdef nvmlNvLinkInfo_v2_t pod = nvmlNvLinkInfo_v2_t()             # <<<<<<<<<<<<<<
 *     return _numpy.dtype({
 *         'names': ['version', 'is_nvle_enabled', 'firmware_info'],
*/
  __pyx_t_1 = nvmlNvLinkInfo_v2_t();
  __pyx_v_pod = __pyx_t_1;

  /* "cuda/bindings/_nvml.pyx":19404
 * cdef _get_nv_link_info_v2_dtype_offsets():
 *     cdef nvmlNvLinkInfo_v2_t pod = nvmlNvLinkInfo_v2_t()
 *     return _numpy.dtype({             # <<<<<<<<<<<<<<
 *         'names': ['version', 'is_nvle_enabled', 'firmware_info'],
 *         'formats': [_numpy.uint32, _numpy.uint32, nvlink_firmware_info_dtype],
*/
  __Pyx_XDECREF(__pyx_r);
  /* Look up _numpy.dtype (t_5 holds the bound callable). */
  __pyx_t_3 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":19405
 *     cdef nvmlNvLinkInfo_v2_t pod = nvmlNvLinkInfo_v2_t()
 *     return _numpy.dtype({
 *         'names': ['version', 'is_nvle_enabled', 'firmware_info'],             # <<<<<<<<<<<<<<
 *         'formats': [_numpy.uint32, _numpy.uint32, nvlink_firmware_info_dtype],
 *         'offsets': [
*/
  /* t_4 = the spec dict; t_6 = the 'names' list of interned field names. */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_version);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_version);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_mstate_global->__pyx_n_u_version) != (0)) __PYX_ERR(0, 19405, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_is_nvle_enabled);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_is_nvle_enabled);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 1, __pyx_mstate_global->__pyx_n_u_is_nvle_enabled) != (0)) __PYX_ERR(0, 19405, __pyx_L1_error);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_n_u_firmware_info);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_n_u_firmware_info);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 2, __pyx_mstate_global->__pyx_n_u_firmware_info) != (0)) __PYX_ERR(0, 19405, __pyx_L1_error);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_names, __pyx_t_6) < (0)) __PYX_ERR(0, 19405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":19406
 *     return _numpy.dtype({
 *         'names': ['version', 'is_nvle_enabled', 'firmware_info'],
 *         'formats': [_numpy.uint32, _numpy.uint32, nvlink_firmware_info_dtype],             # <<<<<<<<<<<<<<
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
*/
  /* 'formats': uint32, uint32, and the nested firmware-info dtype. */
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 19406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_info_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_9 = PyList_New(3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 19406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 19406, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 1, __pyx_t_8) != (0)) __PYX_ERR(0, 19406, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 19406, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_6 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_formats, __pyx_t_9) < (0)) __PYX_ERR(0, 19405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":19408
 *         'formats': [_numpy.uint32, _numpy.uint32, nvlink_firmware_info_dtype],
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.isNvleEnabled)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.firmwareInfo)) - (<intptr_t>&pod),
*/
  /* Field offsets derived from member addresses within the stack instance. */
  __pyx_t_9 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.version)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 19408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":19409
 *         'offsets': [
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isNvleEnabled)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.firmwareInfo)) - (<intptr_t>&pod),
 *         ],
*/
  __pyx_t_6 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.isNvleEnabled)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19409, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);

  /* "cuda/bindings/_nvml.pyx":19410
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isNvleEnabled)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.firmwareInfo)) - (<intptr_t>&pod),             # <<<<<<<<<<<<<<
 *         ],
 *         'itemsize': sizeof(nvmlNvLinkInfo_v2_t),
*/
  __pyx_t_8 = PyLong_FromSsize_t((((intptr_t)(&__pyx_v_pod.firmwareInfo)) - ((intptr_t)(&__pyx_v_pod)))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 19410, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);

  /* "cuda/bindings/_nvml.pyx":19407
 *         'names': ['version', 'is_nvle_enabled', 'firmware_info'],
 *         'formats': [_numpy.uint32, _numpy.uint32, nvlink_firmware_info_dtype],
 *         'offsets': [             # <<<<<<<<<<<<<<
 *             (<intptr_t>&(pod.version)) - (<intptr_t>&pod),
 *             (<intptr_t>&(pod.isNvleEnabled)) - (<intptr_t>&pod),
*/
  __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 19407, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 19407, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 19407, __pyx_L1_error);
  __pyx_t_9 = 0;
  __pyx_t_6 = 0;
  __pyx_t_8 = 0;
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_offsets, __pyx_t_7) < (0)) __PYX_ERR(0, 19405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;

  /* "cuda/bindings/_nvml.pyx":19412
 *             (<intptr_t>&(pod.firmwareInfo)) - (<intptr_t>&pod),
 *         ],
 *         'itemsize': sizeof(nvmlNvLinkInfo_v2_t),             # <<<<<<<<<<<<<<
 *     })
 * 
*/
  /* 'itemsize': total struct size, so padding is preserved in the dtype. */
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(nvmlNvLinkInfo_v2_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 19412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7) < (0)) __PYX_ERR(0, 19405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* Call _numpy.dtype(spec); unpack bound methods when possible to use the
   * vectorcall fast path. */
  __pyx_t_10 = 1;
  #if CYTHON_UNPACK_METHODS
  if (unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
    assert(__pyx_t_3);
    PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx__function);
    __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
    __pyx_t_10 = 0;
  }
  #endif
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4};
    __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19404, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19402
 * 
 * 
 * cdef _get_nv_link_info_v2_dtype_offsets():             # <<<<<<<<<<<<<<
 *     cdef nvmlNvLinkInfo_v2_t pod = nvmlNvLinkInfo_v2_t()
 *     return _numpy.dtype({
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("cuda.bindings._nvml._get_nv_link_info_v2_dtype_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19429
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvLinkInfo_v2_t *>calloc(1, sizeof(nvmlNvLinkInfo_v2_t))
 *         if self._ptr == NULL:
*/

/* Python wrapper */
/* tp_init slot entry point for NvLinkInfo_v2.__init__.
 * Uses the classic (args tuple, kwds dict) calling convention; rejects any
 * positional or keyword arguments, then delegates to the implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* __init__ takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, __pyx_nargs); return -1; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return -1;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__init__", __pyx_kwds); return -1;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2___init__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvLinkInfo_v2.__init__.
 * Allocates a zero-initialized nvmlNvLinkInfo_v2_t on the heap (calloc) and
 * marks the instance as owning it (_owned = True, _owner = None,
 * _readonly = False). Raises MemoryError if the allocation fails.
 * Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2___init__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/_nvml.pyx":19430
 * 
 *     def __init__(self):
 *         self._ptr = <nvmlNvLinkInfo_v2_t *>calloc(1, sizeof(nvmlNvLinkInfo_v2_t))             # <<<<<<<<<<<<<<
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvLinkInfo_v2")
*/
  /* calloc zero-fills the struct, so all fields start at 0. */
  __pyx_v_self->_ptr = ((nvmlNvLinkInfo_v2_t *)calloc(1, (sizeof(nvmlNvLinkInfo_v2_t))));

  /* "cuda/bindings/_nvml.pyx":19431
 *     def __init__(self):
 *         self._ptr = <nvmlNvLinkInfo_v2_t *>calloc(1, sizeof(nvmlNvLinkInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvLinkInfo_v2")
 *         self._owner = None
*/
  __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19432
 *         self._ptr = <nvmlNvLinkInfo_v2_t *>calloc(1, sizeof(nvmlNvLinkInfo_v2_t))
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvLinkInfo_v2")             # <<<<<<<<<<<<<<
 *         self._owner = None
 *         self._owned = True
*/
    /* Build and raise MemoryError("Error allocating NvLinkInfo_v2").
     * MemoryError is looked up as a module global (it may be shadowed),
     * and bound methods are unpacked for the vectorcall fast path. */
    __pyx_t_3 = NULL;
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19432, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    #if CYTHON_UNPACK_METHODS
    if (unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
      assert(__pyx_t_3);
      PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx__function);
      __Pyx_DECREF_SET(__pyx_t_4, __pyx__function);
      __pyx_t_5 = 0;
    }
    #endif
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvLinkInfo_v2};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19432, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19432, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19431
 *     def __init__(self):
 *         self._ptr = <nvmlNvLinkInfo_v2_t *>calloc(1, sizeof(nvmlNvLinkInfo_v2_t))
 *         if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError("Error allocating NvLinkInfo_v2")
 *         self._owner = None
*/
  }

  /* "cuda/bindings/_nvml.pyx":19433
 *         if self._ptr == NULL:
 *             raise MemoryError("Error allocating NvLinkInfo_v2")
 *         self._owner = None             # <<<<<<<<<<<<<<
 *         self._owned = True
 *         self._readonly = False
*/
  /* Swap the _owner slot to None with refnanny-tracked ref counting. */
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_GOTREF(__pyx_v_self->_owner);
  __Pyx_DECREF(__pyx_v_self->_owner);
  __pyx_v_self->_owner = Py_None;

  /* "cuda/bindings/_nvml.pyx":19434
 *             raise MemoryError("Error allocating NvLinkInfo_v2")
 *         self._owner = None
 *         self._owned = True             # <<<<<<<<<<<<<<
 *         self._readonly = False
 * 
*/
  __pyx_v_self->_owned = 1;

  /* "cuda/bindings/_nvml.pyx":19435
 *         self._owner = None
 *         self._owned = True
 *         self._readonly = False             # <<<<<<<<<<<<<<
 * 
 *     def __dealloc__(self):
*/
  __pyx_v_self->_readonly = 0;

  /* "cuda/bindings/_nvml.pyx":19429
 *         bint _readonly
 * 
 *     def __init__(self):             # <<<<<<<<<<<<<<
 *         self._ptr = <nvmlNvLinkInfo_v2_t *>calloc(1, sizeof(nvmlNvLinkInfo_v2_t))
 *         if self._ptr == NULL:
*/

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19437
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvLinkInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

/* Python wrapper */
/* tp_dealloc-path entry point for NvLinkInfo_v2.__dealloc__; forwards to the
 * implementation. No return value and no error reporting (dealloc cannot
 * fail).
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
 * this only compiles if __Pyx_KwValues_VARARGS is a macro that discards its
 * arguments — confirm against the Cython utility code. */
static void __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of NvLinkInfo_v2.__dealloc__.
 * Frees the wrapped struct only when this instance owns it (_owned) and the
 * pointer is non-NULL; the pointer is cleared before free() so a re-entrant
 * look at self._ptr never sees a dangling value. Borrowed (non-owned)
 * pointers are left untouched. */
static void __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_2__dealloc__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  nvmlNvLinkInfo_v2_t *__pyx_v_ptr;
  int __pyx_t_1;
  int __pyx_t_2;
  nvmlNvLinkInfo_v2_t *__pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":19439
 *     def __dealloc__(self):
 *         cdef nvmlNvLinkInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  /* Short-circuit `and`: only test _ptr when _owned is true. */
  if (__pyx_v_self->_owned) {
  } else {
    __pyx_t_1 = __pyx_v_self->_owned;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->_ptr != NULL);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19440
 *         cdef nvmlNvLinkInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr             # <<<<<<<<<<<<<<
 *             self._ptr = NULL
 *             free(ptr)
*/
    __pyx_t_3 = __pyx_v_self->_ptr;
    __pyx_v_ptr = __pyx_t_3;

    /* "cuda/bindings/_nvml.pyx":19441
 *         if self._owned and self._ptr != NULL:
 *             ptr = self._ptr
 *             self._ptr = NULL             # <<<<<<<<<<<<<<
 *             free(ptr)
 * 
*/
    __pyx_v_self->_ptr = NULL;

    /* "cuda/bindings/_nvml.pyx":19442
 *             ptr = self._ptr
 *             self._ptr = NULL
 *             free(ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __repr__(self):
*/
    free(__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":19439
 *     def __dealloc__(self):
 *         cdef nvmlNvLinkInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:             # <<<<<<<<<<<<<<
 *             ptr = self._ptr
 *             self._ptr = NULL
*/
  }

  /* "cuda/bindings/_nvml.pyx":19437
 *         self._readonly = False
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         cdef nvmlNvLinkInfo_v2_t *ptr
 *         if self._owned and self._ptr != NULL:
*/

  /* function exit code */
}

/* "cuda/bindings/_nvml.pyx":19444
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvLinkInfo_v2 object at {hex(id(self))}>"
 * 
*/

/* Python wrapper */
/* tp_repr slot entry point for NvLinkInfo_v2.__repr__; forwards to the
 * implementation and returns a new reference (NULL on error).
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
 * this only compiles if __Pyx_KwValues_VARARGS is a macro that discards its
 * arguments — confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_4__repr__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvLinkInfo_v2.__repr__.
 * Builds the f-string "<{__name__}.NvLinkInfo_v2 object at {hex(id(self))}>"
 * by formatting the module __name__, applying hex(id(self)), and joining the
 * five pieces with Cython's unicode-join fast path. Returns a new unicode
 * reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_4__repr__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4[5];
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/_nvml.pyx":19445
 * 
 *     def __repr__(self):
 *         return f"<{__name__}.NvLinkInfo_v2 object at {hex(id(self))}>"             # <<<<<<<<<<<<<<
 * 
 *     @property
*/
  __Pyx_XDECREF(__pyx_r);
  /* t_2 = str-formatted module __name__. */
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_t_1, __pyx_mstate_global->__pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* t_1 = str(hex(id(self))). */
  __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyNumber_Hex(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyUnicode_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Join the literal and interpolated pieces; the length/max-char hints let
   * the join preallocate the result buffer. */
  __pyx_t_4[0] = __pyx_mstate_global->__pyx_kp_u__6;
  __pyx_t_4[1] = __pyx_t_2;
  __pyx_t_4[2] = __pyx_mstate_global->__pyx_kp_u_NvLinkInfo_v2_object_at;
  __pyx_t_4[3] = __pyx_t_1;
  __pyx_t_4[4] = __pyx_mstate_global->__pyx_kp_u__3;
  __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_4, 5, 1 * 2 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2) + 25 + __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1), 127 | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) | __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1));
  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19444
 *             free(ptr)
 * 
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return f"<{__name__}.NvLinkInfo_v2 object at {hex(id(self))}>"
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19447
 *         return f"<{__name__}.NvLinkInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

/* Python wrapper */
/* Property-getter slot entry point for NvLinkInfo_v2.ptr; forwards to the
 * implementation and returns a new reference (NULL on error).
 * NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
 * this only compiles if __Pyx_KwValues_VARARGS is a macro that discards its
 * arguments — confirm against the Cython utility code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3ptr_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3ptr___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3ptr___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19450
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     cdef intptr_t _get_ptr(self):
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19447
 *         return f"<{__name__}.NvLinkInfo_v2 object at {hex(id(self))}>"
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def ptr(self):
 *         """Get the pointer address to the data as Python :class:`int`."""
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.ptr.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19452
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

static intptr_t __pyx_f_4cuda_8bindings_5_nvml_13NvLinkInfo_v2__get_ptr(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  intptr_t __pyx_r;

  /* "cuda/bindings/_nvml.pyx":19453
 * 
 *     cdef intptr_t _get_ptr(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __int__(self):
*/
  __pyx_r = ((intptr_t)__pyx_v_self->_ptr);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19452
 *         return <intptr_t>(self._ptr)
 * 
 *     cdef intptr_t _get_ptr(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19455
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
*/

/* Python wrapper */
/* nb_int slot entry for NvLinkInfo_v2: forwards to the typed
 * implementation below after casting self. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_6__int__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `int(NvLinkInfo_v2)` (_nvml.pyx:19455-19456):
 * boxes the wrapped struct pointer's address as a Python int — the same
 * value the `ptr` property returns. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_6__int__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/_nvml.pyx":19456
 * 
 *     def __int__(self):
 *         return <intptr_t>(self._ptr)             # <<<<<<<<<<<<<<
 * 
 *     def __eq__(self, other):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyLong_FromSsize_t(((intptr_t)__pyx_v_self->_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19455
 *         return <intptr_t>(self._ptr)
 * 
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return <intptr_t>(self._ptr)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19458
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvLinkInfo_v2 other_
 *         if not isinstance(other, NvLinkInfo_v2):
*/

/* Python wrapper */
/* tp_richcompare-backed `__eq__` entry for NvLinkInfo_v2. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_9__eq__(PyObject *__pyx_v_self, PyObject *__pyx_v_other) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_8__eq__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_other));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvLinkInfo_v2.__eq__` (_nvml.pyx:19458-19463):
 * returns False for non-NvLinkInfo_v2 operands; otherwise compares the
 * two underlying nvmlNvLinkInfo_v2_t structs bytewise with memcmp. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_8__eq__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_other) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_other_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__eq__", 0);

  /* "cuda/bindings/_nvml.pyx":19460
 *     def __eq__(self, other):
 *         cdef NvLinkInfo_v2 other_
 *         if not isinstance(other, NvLinkInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_other, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2); 
  __pyx_t_2 = (!__pyx_t_1);
  if (__pyx_t_2) {

    /* "cuda/bindings/_nvml.pyx":19461
 *         cdef NvLinkInfo_v2 other_
 *         if not isinstance(other, NvLinkInfo_v2):
 *             return False             # <<<<<<<<<<<<<<
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvLinkInfo_v2_t)) == 0)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_False);
    __pyx_r = Py_False;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":19460
 *     def __eq__(self, other):
 *         cdef NvLinkInfo_v2 other_
 *         if not isinstance(other, NvLinkInfo_v2):             # <<<<<<<<<<<<<<
 *             return False
 *         other_ = other
 */
  }

  /* "cuda/bindings/_nvml.pyx":19462
 *         if not isinstance(other, NvLinkInfo_v2):
 *             return False
 *         other_ = other             # <<<<<<<<<<<<<<
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvLinkInfo_v2_t)) == 0)
 * 
 */
  /* Re-checks the type (TypeTest) because the cdef assignment needs a
   * typed reference; the isinstance branch above already ruled out
   * foreign types, so this normally cannot fail. */
  __pyx_t_3 = __pyx_v_other;
  __Pyx_INCREF(__pyx_t_3);
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2))))) __PYX_ERR(0, 19462, __pyx_L1_error)
  __pyx_v_other_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":19463
 *             return False
 *         other_ = other
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvLinkInfo_v2_t)) == 0)             # <<<<<<<<<<<<<<
 * 
 *     def __setitem__(self, key, val):
 */
  __Pyx_XDECREF(__pyx_r);
  /* Bytewise struct comparison; both _ptr values are dereferenced here,
   * so equality on instances with NULL _ptr would be undefined behavior. */
  __pyx_t_3 = __Pyx_PyBool_FromLong((memcmp(((void *)((intptr_t)__pyx_v_self->_ptr)), ((void *)((intptr_t)__pyx_v_other_->_ptr)), (sizeof(nvmlNvLinkInfo_v2_t))) == 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19463, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19458
 *         return <intptr_t>(self._ptr)
 * 
 *     def __eq__(self, other):             # <<<<<<<<<<<<<<
 *         cdef NvLinkInfo_v2 other_
 *         if not isinstance(other, NvLinkInfo_v2):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_other_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19465
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvLinkInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
*/

/* Python wrapper */
/* mp_ass_subscript-backed `__setitem__` entry for NvLinkInfo_v2. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_11__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_10__setitem__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvLinkInfo_v2.__setitem__` (_nvml.pyx:19465-19475).
 * Two modes:
 *   - obj[0] = <numpy.ndarray>: malloc a fresh nvmlNvLinkInfo_v2_t, copy
 *     sizeof(nvmlNvLinkInfo_v2_t) bytes from the array's data pointer
 *     (val.ctypes.data), take ownership (_owned=True, _owner=None) and
 *     mirror the array's writeable flag into _readonly.
 *   - any other key: delegate to setattr(self, key, val) so the
 *     property setters do the work.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_10__setitem__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  intptr_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "cuda/bindings/_nvml.pyx":19466
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if self._ptr == NULL:
 */
  /* Short-circuit `and`: the ndarray isinstance check only runs if key == 0. */
  __pyx_t_2 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_key, __pyx_mstate_global->__pyx_int_0, 0, 0)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 19466, __pyx_L1_error)
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19466, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19466, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_2 = PyObject_IsInstance(__pyx_v_val, __pyx_t_4); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 19466, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19467
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 */
    /* NOTE(review): a previously owned self->_ptr is overwritten here
     * without free() — calling `obj[0] = arr` on an instance that
     * already owns a buffer would leak it; confirm against the .pyx
     * source's ownership model. */
    __pyx_v_self->_ptr = ((nvmlNvLinkInfo_v2_t *)malloc((sizeof(nvmlNvLinkInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":19468
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvLinkInfo_v2_t))
 */
    __pyx_t_1 = (__pyx_v_self->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19469
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvLinkInfo_v2_t))
 *             self._owner = None
 */
      /* Looks up MemoryError as a module global (it may be shadowed),
       * then calls it with the message via the vectorcall fast path. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19469, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_6 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvLinkInfo_v2};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19469, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(0, 19469, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19468
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if self._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvLinkInfo_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":19470
 *             if self._ptr == NULL:
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvLinkInfo_v2_t))             # <<<<<<<<<<<<<<
 *             self._owner = None
 *             self._owned = True
 */
    /* val.ctypes.data -> integer address of the ndarray's buffer; the
     * copy trusts the caller to supply at least sizeof(nvmlNvLinkInfo_v2_t)
     * bytes — no size validation is performed here. */
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_ctypes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19470, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19470, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_7 = PyLong_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_7 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19470, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    (void)(memcpy(((void *)__pyx_v_self->_ptr), ((void *)((intptr_t)__pyx_t_7)), (sizeof(nvmlNvLinkInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":19471
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvLinkInfo_v2_t))
 *             self._owner = None             # <<<<<<<<<<<<<<
 *             self._owned = True
 *             self._readonly = not val.flags.writeable
 */
    /* The copy is self-contained, so no owner object needs keeping alive. */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_self->_owner);
    __Pyx_DECREF(__pyx_v_self->_owner);
    __pyx_v_self->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19472
 *             memcpy(<void*>self._ptr, <void*><intptr_t>val.ctypes.data, sizeof(nvmlNvLinkInfo_v2_t))
 *             self._owner = None
 *             self._owned = True             # <<<<<<<<<<<<<<
 *             self._readonly = not val.flags.writeable
 *         else:
 */
    __pyx_v_self->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19473
 *             self._owner = None
 *             self._owned = True
 *             self._readonly = not val.flags.writeable             # <<<<<<<<<<<<<<
 *         else:
 *             setattr(self, key, val)
 */
    /* A read-only source ndarray makes this instance read-only too. */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_val, __pyx_mstate_global->__pyx_n_u_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19473, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_writeable); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19473, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 19473, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_v_self->_readonly = (!__pyx_t_1);

    /* "cuda/bindings/_nvml.pyx":19466
 * 
 *     def __setitem__(self, key, val):
 *         if key == 0 and isinstance(val, _numpy.ndarray):             # <<<<<<<<<<<<<<
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if self._ptr == NULL:
 */
    goto __pyx_L3;
  }

  /* "cuda/bindings/_nvml.pyx":19475
 *             self._readonly = not val.flags.writeable
 *         else:
 *             setattr(self, key, val)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_8 = PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_val); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 19475, __pyx_L1_error)
  }
  __pyx_L3:;

  /* "cuda/bindings/_nvml.pyx":19465
 *         return (memcmp(<void *><intptr_t>(self._ptr), <void *><intptr_t>(other_._ptr), sizeof(nvmlNvLinkInfo_v2_t)) == 0)
 * 
 *     def __setitem__(self, key, val):             # <<<<<<<<<<<<<<
 *         if key == 0 and isinstance(val, _numpy.ndarray):
 *             self._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19477
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_info(self):
 *         """NvlinkFirmwareInfo: OUT - NVLINK Firmware info."""
*/

/* Python wrapper */
/* getset-table entry for the `firmware_info` property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvLinkInfo_v2.firmware_info.__get__` (_nvml.pyx:19480):
 * builds a NvlinkFirmwareInfo view via
 * NvlinkFirmwareInfo.from_ptr(<address of self._ptr[0].firmwareInfo>,
 * self._readonly, self) — passing self as the third argument presumably
 * keeps this instance alive while the view exists (owner semantics live
 * in from_ptr, which is outside this chunk). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19480
 *     def firmware_info(self):
 *         """NvlinkFirmwareInfo: OUT - NVLINK Firmware info."""
 *         return NvlinkFirmwareInfo.from_ptr(<intptr_t>&(self._ptr[0].firmwareInfo), self._readonly, self)             # <<<<<<<<<<<<<<
 * 
 *     @firmware_info.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo);
  __Pyx_INCREF(__pyx_t_2);
  /* Address of the embedded firmwareInfo member, boxed as a Python int. */
  __pyx_t_3 = PyLong_FromSsize_t(((intptr_t)(&(__pyx_v_self->_ptr[0]).firmwareInfo))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_self->_readonly); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 0;
  {
    PyObject *__pyx_callargs[4] = {__pyx_t_2, __pyx_t_3, __pyx_t_4, ((PyObject *)__pyx_v_self)};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_5, (4-__pyx_t_5) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19480, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19477
 *             setattr(self, key, val)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def firmware_info(self):
 *         """NvlinkFirmwareInfo: OUT - NVLINK Firmware info."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.firmware_info.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19482
 *         return NvlinkFirmwareInfo.from_ptr(<intptr_t>&(self._ptr[0].firmwareInfo), self._readonly, self)
 * 
 *     @firmware_info.setter             # <<<<<<<<<<<<<<
 *     def firmware_info(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* getset-table entry for the `firmware_info` property setter. */
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvLinkInfo_v2.firmware_info.__set__`
 * (_nvml.pyx:19484-19487): rejects writes on read-only instances with
 * ValueError, type-checks val as NvlinkFirmwareInfo, then copies one
 * nvmlNvlinkFirmwareInfo_t from val's buffer into the embedded
 * firmwareInfo member.  Returns 0 on success, -1 on error. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *__pyx_v_val_ = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19484
 *     @firmware_info.setter
 *     def firmware_info(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         cdef NvlinkFirmwareInfo val_ = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19485
 *     def firmware_info(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         cdef NvlinkFirmwareInfo val_ = val
 *         memcpy(<void *>&(self._ptr[0].firmwareInfo), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareInfo_t) * 1)
 */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvLinkInfo_v2_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19485, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19485, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19484
 *     @firmware_info.setter
 *     def firmware_info(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         cdef NvlinkFirmwareInfo val_ = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19486
 *         if self._readonly:
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         cdef NvlinkFirmwareInfo val_ = val             # <<<<<<<<<<<<<<
 *         memcpy(<void *>&(self._ptr[0].firmwareInfo), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareInfo_t) * 1)
 * 
 */
  /* TypeTest raises TypeError if val is not NvlinkFirmwareInfo (None passes
   * the test here; _get_ptr below would then be called on a NULL object). */
  __pyx_t_1 = __pyx_v_val;
  __Pyx_INCREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo))))) __PYX_ERR(0, 19486, __pyx_L1_error)
  __pyx_v_val_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19487
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         cdef NvlinkFirmwareInfo val_ = val
 *         memcpy(<void *>&(self._ptr[0].firmwareInfo), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareInfo_t) * 1)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* Fetch the source address through the vtable, then copy the struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)__pyx_v_val_->__pyx_vtab)->_get_ptr(__pyx_v_val_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19487, __pyx_L1_error)
  (void)(memcpy(((void *)(&(__pyx_v_self->_ptr[0]).firmwareInfo)), ((void *)__pyx_t_4), ((sizeof(nvmlNvlinkFirmwareInfo_t)) * 1)));

  /* "cuda/bindings/_nvml.pyx":19482
 *         return NvlinkFirmwareInfo.from_ptr(<intptr_t>&(self._ptr[0].firmwareInfo), self._readonly, self)
 * 
 *     @firmware_info.setter             # <<<<<<<<<<<<<<
 *     def firmware_info(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.firmware_info.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_val_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19489
 *         memcpy(<void *>&(self._ptr[0].firmwareInfo), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareInfo_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN - the API version number."""
*/

/* Python wrapper */
/* getset-table entry for the `version` property getter. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of `NvLinkInfo_v2.version.__get__` (_nvml.pyx:19492):
 * reads the struct's unsigned-int `version` field and boxes it as a
 * Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19492
 *     def version(self):
 *         """int: IN - the API version number."""
 *         return self._ptr[0].version             # <<<<<<<<<<<<<<
 * 
 *     @version.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19489
 *         memcpy(<void *>&(self._ptr[0].firmwareInfo), <void *>(val_._get_ptr()), sizeof(nvmlNvlinkFirmwareInfo_t) * 1)
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def version(self):
 *         """int: IN - the API version number."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.version.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19494
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* getset-table entry for the `version` property setter; forwards to the
 * typed implementation (defined below, outside this chunk). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvLinkInfo_v2.version property setter
 * (_nvml.pyx:19495-19498): raises ValueError if the instance is
 * read-only, otherwise converts `val` to unsigned int and stores it in
 * the wrapped nvmlNvLinkInfo_v2_t struct's `version` field.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19496
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19497
 *     def version(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].version = val
 * 
 */
    /* Vectorcall ValueError(msg): slot 0 is reserved for a bound-method
     * `self`; __pyx_t_3 == 1 skips it for this plain callable. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvLinkInfo_v2_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19497, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19497, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19496
 *     @version.setter
 *     def version(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         self._ptr[0].version = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19498
 *         if self._readonly:
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         self._ptr[0].version = val             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates
   * a real conversion error from a genuine -1 result. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19498, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).version = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19494
 *         return self._ptr[0].version
 * 
 *     @version.setter             # <<<<<<<<<<<<<<
 *     def version(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.version.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19500
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_nvle_enabled(self):
 *         """int: OUT - NVLINK encryption enablement."""
*/

/* Python wrapper */
/* getattr slot for NvLinkInfo_v2.is_nvle_enabled: casts `self` to the
 * extension struct type and forwards to the __get__ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; presumably
   * __Pyx_KwValues_VARARGS does not evaluate its arguments — TODO confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled___get__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvLinkInfo_v2.is_nvle_enabled property getter
 * (_nvml.pyx:19501-19503): reads the `isNvleEnabled` field of the wrapped
 * nvmlNvLinkInfo_v2_t struct and returns it as a Python int.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled___get__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/_nvml.pyx":19503
 *     def is_nvle_enabled(self):
 *         """int: OUT - NVLINK encryption enablement."""
 *         return self._ptr[0].isNvleEnabled             # <<<<<<<<<<<<<<
 * 
 *     @is_nvle_enabled.setter
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_ptr[0]).isNvleEnabled); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19503, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19500
 *         self._ptr[0].version = val
 * 
 *     @property             # <<<<<<<<<<<<<<
 *     def is_nvle_enabled(self):
 *         """int: OUT - NVLINK encryption enablement."""
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.is_nvle_enabled.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19505
 *         return self._ptr[0].isNvleEnabled
 * 
 *     @is_nvle_enabled.setter             # <<<<<<<<<<<<<<
 *     def is_nvle_enabled(self, val):
 *         if self._readonly:
*/

/* Python wrapper */
/* setattr slot for NvLinkInfo_v2.is_nvle_enabled: casts `self` to the
 * extension struct type and forwards to the __set__ implementation below;
 * returns 0 on success, -1 on error (standard setattr-slot convention). */
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
static int __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared here; presumably
   * __Pyx_KwValues_VARARGS does not evaluate its arguments — TODO confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_2__set__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self), ((PyObject *)__pyx_v_val));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the NvLinkInfo_v2.is_nvle_enabled property setter
 * (_nvml.pyx:19506-19509): raises ValueError if the instance is
 * read-only, otherwise converts `val` to unsigned int and stores it in
 * the wrapped nvmlNvLinkInfo_v2_t struct's `isNvleEnabled` field.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_2__set__(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, PyObject *__pyx_v_val) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/_nvml.pyx":19507
 *     @is_nvle_enabled.setter
 *     def is_nvle_enabled(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         self._ptr[0].isNvleEnabled = val
 */
  if (unlikely(__pyx_v_self->_readonly)) {

    /* "cuda/bindings/_nvml.pyx":19508
 *     def is_nvle_enabled(self, val):
 *         if self._readonly:
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")             # <<<<<<<<<<<<<<
 *         self._ptr[0].isNvleEnabled = val
 * 
 */
    /* Vectorcall ValueError(msg): slot 0 is reserved for a bound-method
     * `self`; __pyx_t_3 == 1 skips it for this plain callable. */
    __pyx_t_2 = NULL;
    __pyx_t_3 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_mstate_global->__pyx_kp_u_This_NvLinkInfo_v2_instance_is_r};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19508, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 19508, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19507
 *     @is_nvle_enabled.setter
 *     def is_nvle_enabled(self, val):
 *         if self._readonly:             # <<<<<<<<<<<<<<
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         self._ptr[0].isNvleEnabled = val
 */
  }

  /* "cuda/bindings/_nvml.pyx":19509
 *         if self._readonly:
 *             raise ValueError("This NvLinkInfo_v2 instance is read-only")
 *         self._ptr[0].isNvleEnabled = val             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  /* (unsigned int)-1 is a legal value, so PyErr_Occurred() disambiguates
   * a real conversion error from a genuine -1 result. */
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_int(__pyx_v_val); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19509, __pyx_L1_error)
  (__pyx_v_self->_ptr[0]).isNvleEnabled = __pyx_t_4;

  /* "cuda/bindings/_nvml.pyx":19505
 *         return self._ptr[0].isNvleEnabled
 * 
 *     @is_nvle_enabled.setter             # <<<<<<<<<<<<<<
 *     def is_nvle_enabled(self, val):
 *         if self._readonly:
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.is_nvle_enabled.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19511
 *         self._ptr[0].isNvleEnabled = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvLinkInfo_v2 instance wrapping the given NumPy array.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method NvLinkInfo_v2.from_data(data):
 * unpacks exactly one positional-or-keyword argument `data` (FASTCALL or
 * tuple/dict protocol depending on CYTHON_METH_FASTCALL) and forwards to
 * the implementation. Returns a new reference, or NULL with an exception
 * set on bad arguments or failure in the implementation.
 *
 * FIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`, which under the GCC/Clang definition
 * `unlikely(x) == __builtin_expect(!!(x), 0)` evaluates to `(0 or 1) < 0`
 * and is therefore always false, silencing the error path. Reparenthesized
 * to `unlikely(__pyx_kwds_len < 0)`, matching the correct form used
 * elsewhere in this file (e.g. the __reduce_cython__ wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_12from_data, "NvLinkInfo_v2.from_data(data)\n\nCreate an NvLinkInfo_v2 instance wrapping the given NumPy array.\n\nArgs:\n    data (_numpy.ndarray): a single-element array of dtype `nv_link_info_v2_dtype` holding the data.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13from_data = {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_12from_data};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13from_data(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_data = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_data (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_data,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from the kwargs protocol; the
     * comparison must be inside unlikely() or it is constant-false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19511, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19511, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_data", 0) < (0)) __PYX_ERR(0, 19511, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, i); __PYX_ERR(0, 19511, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19511, __pyx_L3_error)
    }
    __pyx_v_data = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_data", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19511, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_12from_data(__pyx_v_data);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method NvLinkInfo_v2.from_data(data)
 * (_nvml.pyx:19511-19518): looks up the module-level `nv_link_info_v2_dtype`
 * object and delegates to the shared __from_data helper with the
 * NvLinkInfo_v2 type. Returns a new reference, or NULL with an exception
 * set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_12from_data(PyObject *__pyx_v_data) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_data", 0);

  /* "cuda/bindings/_nvml.pyx":19518
 *             data (_numpy.ndarray): a single-element array of dtype `nv_link_info_v2_dtype` holding the data.
 *         """
 *         return __from_data(data, "nv_link_info_v2_dtype", nv_link_info_v2_dtype, NvLinkInfo_v2)             # <<<<<<<<<<<<<<
 * 
 *     @staticmethod
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_nv_link_info_v2_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___from_data(__pyx_v_data, __pyx_mstate_global->__pyx_n_u_nv_link_info_v2_dtype, __pyx_t_1, ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19511
 *         self._ptr[0].isNvleEnabled = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvLinkInfo_v2 instance wrapping the given NumPy array.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.from_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19520
 *         return __from_data(data, "nv_link_info_v2_dtype", nv_link_info_v2_dtype, NvLinkInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvLinkInfo_v2 instance wrapping the given pointer.
*/

/* Python wrapper */
/* Argument-parsing wrapper for the static method
 * NvLinkInfo_v2.from_ptr(ptr, readonly=False, owner=None): unpacks 1-3
 * positional-or-keyword arguments, converts `ptr` to intptr_t and
 * `readonly` to a C bool, defaults `owner` to None, and forwards to the
 * implementation. Returns a new reference, or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`, which under the GCC/Clang definition
 * `unlikely(x) == __builtin_expect(!!(x), 0)` evaluates to `(0 or 1) < 0`
 * and is therefore always false, silencing the error path. Reparenthesized
 * to `unlikely(__pyx_kwds_len < 0)`, matching the correct form used
 * elsewhere in this file (e.g. the __reduce_cython__ wrapper). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_14from_ptr, "NvLinkInfo_v2.from_ptr(intptr_t ptr, bool readonly=False, owner=None)\n\nCreate an NvLinkInfo_v2 instance wrapping the given pointer.\n\nArgs:\n    ptr (intptr_t): pointer address as Python :class:`int` to the data.\n    owner (object): The Python object that owns the pointer. If not provided, data will be copied.\n    readonly (bool): whether the data is read-only (to the user). default is `False`.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15from_ptr = {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_14from_ptr};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15from_ptr(CYTHON_UNUSED PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_ptr;
  int __pyx_v_readonly;
  PyObject *__pyx_v_owner = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("from_ptr (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_readonly,&__pyx_mstate_global->__pyx_n_u_owner,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from the kwargs protocol; the
     * comparison must be inside unlikely() or it is constant-false. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19520, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19520, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19520, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19520, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "from_ptr", 0) < (0)) __PYX_ERR(0, 19520, __pyx_L3_error)

      /* "cuda/bindings/_nvml.pyx":19521
 * 
 *     @staticmethod
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):             # <<<<<<<<<<<<<<
 *         """Create an NvLinkInfo_v2 instance wrapping the given pointer.
 * 
 */
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, i); __PYX_ERR(0, 19520, __pyx_L3_error) }
      }
    } else {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19520, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19520, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19520, __pyx_L3_error)
        break;
        default: goto __pyx_L5_argtuple_error;
      }
      if (!values[2]) values[2] = __Pyx_NewRef(((PyObject *)Py_None));
    }
    /* NOTE(review): ptr is parsed with PyLong_AsSsize_t and compared to
     * (intptr_t)-1; presumably Py_ssize_t and intptr_t have the same width
     * on all supported targets — TODO confirm. */
    __pyx_v_ptr = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_ptr == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19521, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_readonly = __Pyx_PyObject_IsTrue(values[1]); if (unlikely((__pyx_v_readonly == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19521, __pyx_L3_error)
    } else {
      __pyx_v_readonly = ((int)0);
    }
    __pyx_v_owner = values[2];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("from_ptr", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 19520, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_14from_ptr(__pyx_v_ptr, __pyx_v_readonly, __pyx_v_owner);

  /* "cuda/bindings/_nvml.pyx":19520
 *         return __from_data(data, "nv_link_info_v2_dtype", nv_link_info_v2_dtype, NvLinkInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvLinkInfo_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of the static method NvLinkInfo_v2.from_ptr(ptr, readonly,
 * owner) (_nvml.pyx:19520-19544): rejects a null pointer, allocates a new
 * NvLinkInfo_v2 instance, then either
 *   - owner is None: malloc()s a private nvmlNvLinkInfo_v2_t, memcpy()s the
 *     pointee into it and marks the instance as owning it (_owned = True), or
 *   - owner given: aliases the caller's pointer directly and keeps a
 *     reference to `owner` so the memory outlives the instance
 *     (_owned = False).
 * Finally stores the readonly flag and returns the new instance (new
 * reference), or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_14from_ptr(intptr_t __pyx_v_ptr, int __pyx_v_readonly, PyObject *__pyx_v_owner) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_obj = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("from_ptr", 0);

  /* "cuda/bindings/_nvml.pyx":19529
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)
 */
  __pyx_t_1 = (__pyx_v_ptr == 0);
  if (unlikely(__pyx_t_1)) {

    /* "cuda/bindings/_nvml.pyx":19530
 *         """
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")             # <<<<<<<<<<<<<<
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)
 *         if owner is None:
 */
    /* Vectorcall ValueError(msg): slot 0 is reserved for a bound-method
     * `self`; __pyx_t_4 == 1 skips it for this plain callable. */
    __pyx_t_3 = NULL;
    __pyx_t_4 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_ptr_must_not_be_null_0};
      __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19530, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
    }
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 19530, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19529
 *             readonly (bool): whether the data is read-only (to the user). default is `False`.
 *         """
 *         if ptr == 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)
 */
  }

  /* "cuda/bindings/_nvml.pyx":19531
 *         if ptr == 0:
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)             # <<<<<<<<<<<<<<
 *         if owner is None:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 */
  __pyx_t_2 = ((PyObject *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvLinkInfo_v2(((PyTypeObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2), __pyx_mstate_global->__pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19531, __pyx_L1_error)
  __Pyx_GOTREF((PyObject *)__pyx_t_2);
  __pyx_v_obj = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":19532
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if obj._ptr == NULL:
 */
  __pyx_t_1 = (__pyx_v_owner == Py_None);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":19533
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)
 *         if owner is None:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))             # <<<<<<<<<<<<<<
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 */
    /* No owner: take a private copy so the instance does not depend on the
     * caller keeping the pointee alive. */
    __pyx_v_obj->_ptr = ((nvmlNvLinkInfo_v2_t *)malloc((sizeof(nvmlNvLinkInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":19534
 *         if owner is None:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvLinkInfo_v2_t))
 */
    __pyx_t_1 = (__pyx_v_obj->_ptr == NULL);
    if (unlikely(__pyx_t_1)) {

      /* "cuda/bindings/_nvml.pyx":19535
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")             # <<<<<<<<<<<<<<
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvLinkInfo_v2_t))
 *             obj._owner = None
 */
      /* MemoryError is resolved through the module globals here (unlike the
       * direct PyExc_ValueError use above) because the pyx source referenced
       * the name, which may be shadowed at module level. */
      __pyx_t_3 = NULL;
      __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19535, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_4 = 1;
      #if CYTHON_UNPACK_METHODS
      if (unlikely(PyMethod_Check(__pyx_t_5))) {
        __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
        assert(__pyx_t_3);
        PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_3);
        __Pyx_INCREF(__pyx__function);
        __Pyx_DECREF_SET(__pyx_t_5, __pyx__function);
        __pyx_t_4 = 0;
      }
      #endif
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Error_allocating_NvLinkInfo_v2};
        __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19535, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
      }
      __Pyx_Raise(__pyx_t_2, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __PYX_ERR(0, 19535, __pyx_L1_error)

      /* "cuda/bindings/_nvml.pyx":19534
 *         if owner is None:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if obj._ptr == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvLinkInfo_v2_t))
 */
    }

    /* "cuda/bindings/_nvml.pyx":19536
 *             if obj._ptr == NULL:
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvLinkInfo_v2_t))             # <<<<<<<<<<<<<<
 *             obj._owner = None
 *             obj._owned = True
 */
    (void)(memcpy(((void *)__pyx_v_obj->_ptr), ((void *)__pyx_v_ptr), (sizeof(nvmlNvLinkInfo_v2_t))));

    /* "cuda/bindings/_nvml.pyx":19537
 *                 raise MemoryError("Error allocating NvLinkInfo_v2")
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvLinkInfo_v2_t))
 *             obj._owner = None             # <<<<<<<<<<<<<<
 *             obj._owned = True
 *         else:
 */
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = Py_None;

    /* "cuda/bindings/_nvml.pyx":19538
 *             memcpy(<void*>(obj._ptr), <void*>ptr, sizeof(nvmlNvLinkInfo_v2_t))
 *             obj._owner = None
 *             obj._owned = True             # <<<<<<<<<<<<<<
 *         else:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>ptr
 */
    __pyx_v_obj->_owned = 1;

    /* "cuda/bindings/_nvml.pyx":19532
 *             raise ValueError("ptr must not be null (0)")
 *         cdef NvLinkInfo_v2 obj = NvLinkInfo_v2.__new__(NvLinkInfo_v2)
 *         if owner is None:             # <<<<<<<<<<<<<<
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>malloc(sizeof(nvmlNvLinkInfo_v2_t))
 *             if obj._ptr == NULL:
 */
    goto __pyx_L4;
  }

  /* "cuda/bindings/_nvml.pyx":19540
 *             obj._owned = True
 *         else:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>ptr             # <<<<<<<<<<<<<<
 *             obj._owner = owner
 *             obj._owned = False
 */
  /*else*/ {
    /* Owner given: alias the caller's memory; `_owner` keeps it alive. */
    __pyx_v_obj->_ptr = ((nvmlNvLinkInfo_v2_t *)__pyx_v_ptr);

    /* "cuda/bindings/_nvml.pyx":19541
 *         else:
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>ptr
 *             obj._owner = owner             # <<<<<<<<<<<<<<
 *             obj._owned = False
 *         obj._readonly = readonly
 */
    __Pyx_INCREF(__pyx_v_owner);
    __Pyx_GIVEREF(__pyx_v_owner);
    __Pyx_GOTREF(__pyx_v_obj->_owner);
    __Pyx_DECREF(__pyx_v_obj->_owner);
    __pyx_v_obj->_owner = __pyx_v_owner;

    /* "cuda/bindings/_nvml.pyx":19542
 *             obj._ptr = <nvmlNvLinkInfo_v2_t *>ptr
 *             obj._owner = owner
 *             obj._owned = False             # <<<<<<<<<<<<<<
 *         obj._readonly = readonly
 *         return obj
 */
    __pyx_v_obj->_owned = 0;
  }
  __pyx_L4:;

  /* "cuda/bindings/_nvml.pyx":19543
 *             obj._owner = owner
 *             obj._owned = False
 *         obj._readonly = readonly             # <<<<<<<<<<<<<<
 *         return obj
 * 
 */
  __pyx_v_obj->_readonly = __pyx_v_readonly;

  /* "cuda/bindings/_nvml.pyx":19544
 *             obj._owned = False
 *         obj._readonly = readonly
 *         return obj             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_obj);
  __pyx_r = ((PyObject *)__pyx_v_obj);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19520
 *         return __from_data(data, "nv_link_info_v2_dtype", nv_link_info_v2_dtype, NvLinkInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvLinkInfo_v2 instance wrapping the given pointer.
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.from_ptr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_16__reduce_cython__, "NvLinkInfo_v2.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_17__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_16__reduce_cython__};
/* Python wrapper for NvLinkInfo_v2.__reduce_cython__ (METH_FASTCALL entry
 * point).  Rejects any positional or keyword arguments, then delegates to the
 * implementation function.  NOTE: this file is machine-generated by Cython
 * 3.2.2 -- prefer fixing the .pyx source and regenerating over hand edits. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_17__reduce_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  /* On non-FASTCALL builds the argument count must be recovered from the
   * args tuple before validation. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* __reduce_cython__ takes no arguments: any positional or keyword input is
   * an error reported via the standard Cython helpers. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_16__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvLinkInfo_v2.__reduce_cython__: unconditionally raises
 * TypeError because the wrapped C pointer (self._ptr) cannot be pickled.
 * Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_16__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 2, __pyx_L1_error)

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_18__setstate_cython__, "NvLinkInfo_v2.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_19__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_18__setstate_cython__};
/* Python wrapper for NvLinkInfo_v2.__setstate_cython__ (METH_FASTCALL entry
 * point).  Unpacks the single `__pyx_state` argument (positional or keyword)
 * and delegates to the implementation function.
 *
 * Fix (review): the keyword-count error check was written as
 * `if (unlikely(__pyx_kwds_len) < 0)`.  When `unlikely(x)` expands to
 * `__builtin_expect(!!(x), 0)` the expression evaluates to 0 or 1, so the
 * `< 0` comparison was always false and a negative (error) return from
 * __Pyx_NumKwargs_FASTCALL -- with a live exception set -- was silently
 * ignored.  The comparison now sits inside unlikely(), matching the correct
 * form used elsewhere in this file (e.g. the __reduce_cython__ wrapper).
 * NOTE: this file is machine-generated by Cython 3.2.2; the same fix should
 * be applied at the generator level so regeneration does not reintroduce it. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_19__setstate_cython__(PyObject *__pyx_v_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIXED: comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(1, 3, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(1, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_18__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_self), __pyx_v___pyx_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of NvLinkInfo_v2.__setstate_cython__: unconditionally raises
 * TypeError (the wrapped C pointer cannot be unpickled).  The state argument
 * is accepted but ignored.  Always returns NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_18__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"             # <<<<<<<<<<<<<<
*/
  __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_TypeError))), __pyx_mstate_global->__pyx_kp_u_self__ptr_cannot_be_converted_to, 0, 0);
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.NvLinkInfo_v2.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19548
 * 
 * 
 * cpdef init_v2():             # <<<<<<<<<<<<<<
 *     """Initialize NVML, but don't initialize any GPUs yet.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5init_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level implementation of cpdef init_v2(): calls nvmlInit_v2 with the GIL
 * released, then re-raises any NVML error via check_status().  Returns None
 * on success, NULL with an exception set on failure.  The sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals that the underlying NVML
 * symbol could not be loaded (exception already set by the cy_nvml layer). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_init_v2(CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("init_v2", 0);

  /* "cuda/bindings/_nvml.pyx":19553
 *     .. seealso:: `nvmlInit_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlInit_v2()
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19554
 *     """
 *     with nogil:
 *         __status__ = nvmlInit_v2()             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlInit_v2(); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19554, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19553
 *     .. seealso:: `nvmlInit_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlInit_v2()
 *     check_status(__status__)
*/
      /* The GIL must be re-acquired on both the normal and the error path
       * before control leaves the nogil region. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19555
 *     with nogil:
 *         __status__ = nvmlInit_v2()
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19555, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19548
 * 
 * 
 * cpdef init_v2():             # <<<<<<<<<<<<<<
 *     """Initialize NVML, but don't initialize any GPUs yet.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.init_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5init_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_4init_v2, "init_v2()\n\nInitialize NVML, but don't initialize any GPUs yet.\n\n.. seealso:: `nvmlInit_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_5init_v2 = {"init_v2", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_5init_v2, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_4init_v2};
/* METH_NOARGS Python wrapper for init_v2(); forwards to the pf function.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this
 * function -- presumably __Pyx_KwValues_VARARGS is a macro that ignores its
 * arguments here, which is why this compiles; confirm against the macro
 * definition before touching this line. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_5init_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("init_v2 (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_4init_v2(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of init_v2(): dispatches to the C implementation with
 * skip_dispatch=1 (no vtable lookup needed from the Python entry point). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_4init_v2(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("init_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_init_v2(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.init_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19558
 * 
 * 
 * cpdef init_with_flags(unsigned int flags):             # <<<<<<<<<<<<<<
 *     """nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values modifying the behaviour of nvmlInit(). Other than the "flags" parameter it is completely similar to ``nvmlInit_v2``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7init_with_flags(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef init_with_flags(flags): calls
 * nvmlInitWithFlags(flags) with the GIL released, then raises on NVML error
 * via check_status().  Returns None on success, NULL with an exception set on
 * failure.  _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the symbol-loading-failed
 * sentinel from the cy_nvml layer (exception already set under the GIL). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_init_with_flags(unsigned int __pyx_v_flags, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("init_with_flags", 0);

  /* "cuda/bindings/_nvml.pyx":19566
 *     .. seealso:: `nvmlInitWithFlags`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlInitWithFlags(flags)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19567
 *     """
 *     with nogil:
 *         __status__ = nvmlInitWithFlags(flags)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlInitWithFlags(__pyx_v_flags); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19567, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19566
 *     .. seealso:: `nvmlInitWithFlags`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlInitWithFlags(flags)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error exit of the
       * nogil block. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19568
 *     with nogil:
 *         __status__ = nvmlInitWithFlags(flags)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19568, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19558
 * 
 * 
 * cpdef init_with_flags(unsigned int flags):             # <<<<<<<<<<<<<<
 *     """nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values modifying the behaviour of nvmlInit(). Other than the "flags" parameter it is completely similar to ``nvmlInit_v2``.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.init_with_flags", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7init_with_flags(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_6init_with_flags, "init_with_flags(unsigned int flags)\n\nnvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values modifying the behaviour of nvmlInit(). Other than the \"flags\" parameter it is completely similar to ``nvmlInit_v2``.\n\nArgs:\n    flags (unsigned int): behaviour modifier flags.\n\n.. seealso:: `nvmlInitWithFlags`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_7init_with_flags = {"init_with_flags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7init_with_flags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6init_with_flags};
/* METH_FASTCALL Python wrapper for init_with_flags(flags): unpacks the single
 * `flags` argument (positional or keyword), converts it to unsigned int, and
 * delegates to the pf function.
 *
 * Fix (review): the keyword-count error check was written as
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With `unlikely(x)` expanding to
 * `__builtin_expect(!!(x), 0)` the result is 0 or 1, so the `< 0` test was
 * always false and a negative (error) return from __Pyx_NumKwargs_FASTCALL
 * was silently ignored with an exception left pending.  The comparison now
 * sits inside unlikely(), matching the correct form used elsewhere in this
 * file.  NOTE: this file is generated by Cython 3.2.2; mirror the fix in the
 * generator so regeneration does not reintroduce it. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_7init_with_flags(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_flags;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("init_with_flags (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_flags,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIXED: comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19558, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19558, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "init_with_flags", 0) < (0)) __PYX_ERR(0, 19558, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("init_with_flags", 1, 1, 1, i); __PYX_ERR(0, 19558, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19558, __pyx_L3_error)
    }
    __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19558, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("init_with_flags", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19558, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.init_with_flags", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_6init_with_flags(__pyx_self, __pyx_v_flags);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of init_with_flags(): forwards the converted flags value
 * to the C implementation with skip_dispatch=1. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_6init_with_flags(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("init_with_flags", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_init_with_flags(__pyx_v_flags, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.init_with_flags", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19571
 * 
 * 
 * cpdef shutdown():             # <<<<<<<<<<<<<<
 *     """Shut down NVML by releasing all GPU resources previously allocated with :func:`init_v2`.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9shutdown(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level implementation of cpdef shutdown(): calls nvmlShutdown() with the
 * GIL released, then raises on NVML error via check_status().  Returns None
 * on success, NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_shutdown(CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("shutdown", 0);

  /* "cuda/bindings/_nvml.pyx":19576
 *     .. seealso:: `nvmlShutdown`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlShutdown()
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19577
 *     """
 *     with nogil:
 *         __status__ = nvmlShutdown()             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlShutdown(); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19577, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19576
 *     .. seealso:: `nvmlShutdown`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlShutdown()
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error exit of the
       * nogil block. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19578
 *     with nogil:
 *         __status__ = nvmlShutdown()
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19578, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19571
 * 
 * 
 * cpdef shutdown():             # <<<<<<<<<<<<<<
 *     """Shut down NVML by releasing all GPU resources previously allocated with :func:`init_v2`.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.shutdown", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9shutdown(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_8shutdown, "shutdown()\n\nShut down NVML by releasing all GPU resources previously allocated with :func:`init_v2`.\n\n.. seealso:: `nvmlShutdown`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_9shutdown = {"shutdown", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_9shutdown, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_8shutdown};
/* METH_NOARGS Python wrapper for shutdown(); forwards to the pf function.
 * NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters here --
 * presumably __Pyx_KwValues_VARARGS ignores its arguments in this
 * configuration; confirm against the macro definition before editing. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_9shutdown(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("shutdown (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_8shutdown(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of shutdown(): dispatches to the C implementation with
 * skip_dispatch=1. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_8shutdown(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("shutdown", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_shutdown(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.shutdown", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19581
 * 
 * 
 * cpdef str error_string(int result):             # <<<<<<<<<<<<<<
 *     """Helper method for converting NVML error codes into readable strings.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11error_string(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef str error_string(result): converts an NVML
 * status code to its human-readable message.  Copies the C string returned by
 * nvmlErrorString into a bytes object, then decodes it (default UTF-8) to a
 * Python str.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_error_string(int __pyx_v_result, CYTHON_UNUSED int __pyx_skip_dispatch) {
  PyObject *__pyx_v__output_ = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  char const *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("error_string", 0);

  /* "cuda/bindings/_nvml.pyx":19590
 *     """
 *     cdef bytes _output_
 *     _output_ = nvmlErrorString(<_Return>result)             # <<<<<<<<<<<<<<
 *     return _output_.decode()
 * 
*/
  __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlErrorString(((__pyx_t_4cuda_8bindings_5_nvml__Return)__pyx_v_result)); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(0, 19590, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyBytes_FromString(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v__output_ = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "cuda/bindings/_nvml.pyx":19591
 *     cdef bytes _output_
 *     _output_ = nvmlErrorString(<_Return>result)
 *     return _output_.decode()             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __Pyx_decode_bytes(__pyx_v__output_, 0, PY_SSIZE_T_MAX, NULL, NULL, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = ((PyObject*)__pyx_t_2);
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19581
 * 
 * 
 * cpdef str error_string(int result):             # <<<<<<<<<<<<<<
 *     """Helper method for converting NVML error codes into readable strings.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.error_string", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v__output_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11error_string(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_10error_string, "error_string(int result) -> str\n\nHelper method for converting NVML error codes into readable strings.\n\nArgs:\n    result (Return): NVML error code to convert.\n\n.. seealso:: `nvmlErrorString`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_11error_string = {"error_string", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11error_string, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10error_string};
/* METH_FASTCALL Python wrapper for error_string(result): unpacks the single
 * `result` argument (positional or keyword), converts it to int, and
 * delegates to the pf function.
 *
 * Fix (review): the keyword-count error check was written as
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With `unlikely(x)` expanding to
 * `__builtin_expect(!!(x), 0)` the result is 0 or 1, so the `< 0` test was
 * always false and a negative (error) return from __Pyx_NumKwargs_FASTCALL
 * was silently ignored with an exception left pending.  The comparison now
 * sits inside unlikely(), matching the correct form used elsewhere in this
 * file.  NOTE: this file is generated by Cython 3.2.2; mirror the fix in the
 * generator so regeneration does not reintroduce it. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_11error_string(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  int __pyx_v_result;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("error_string (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_result,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIXED: comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19581, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19581, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "error_string", 0) < (0)) __PYX_ERR(0, 19581, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("error_string", 1, 1, 1, i); __PYX_ERR(0, 19581, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19581, __pyx_L3_error)
    }
    __pyx_v_result = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_result == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19581, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("error_string", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19581, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.error_string", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_10error_string(__pyx_self, __pyx_v_result);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body of `error_string(result)`: forwards to the C-level
 * cpdef implementation with __pyx_skip_dispatch=1 (skip Python re-dispatch)
 * and boxes the result.  NULL return means a Python exception is set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_10error_string(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_result) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("error_string", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C entry point; a NULL result signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_error_string(__pyx_v_result, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.error_string", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19594
 * 
 * 
 * cpdef str system_get_driver_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the system's graphics driver.
 * 
*/

/* C-level implementation of cpdef `system_get_driver_version()`.
 * Fills an 80-byte stack buffer via nvmlSystemGetDriverVersion with the GIL
 * released (PyEval_SaveThread/RestoreThread), raises via check_status on a
 * bad status, and returns the buffer as a Python str.  Returns NULL (0) with
 * a Python exception set on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13system_get_driver_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_driver_version(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_version[80];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_driver_version", 0);

  /* "cuda/bindings/_nvml.pyx":19599
 *     .. seealso:: `nvmlSystemGetDriverVersion`
 *     """
 *     cdef unsigned int length = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] version
 *     with nogil:
 */
  __pyx_v_length = 80;

  /* "cuda/bindings/_nvml.pyx":19601
 *     cdef unsigned int length = 80
 *     cdef char[80] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetDriverVersion(version, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19602
 *     cdef char[80] version
 *     with nogil:
 *         __status__ = nvmlSystemGetDriverVersion(version, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals the driver
         * shim failed to load; the exception check re-acquires the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverVersion(__pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19602, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19601
 *     cdef unsigned int length = 80
 *     cdef char[80] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetDriverVersion(version, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19603
 *     with nogil:
 *         __status__ = nvmlSystemGetDriverVersion(version, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(version)
 * 
 */
  /* check_status raises on non-success; its error return value is 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19603, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19604
 *         __status__ = nvmlSystemGetDriverVersion(version, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_version); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19594
 * 
 * 
 * cpdef str system_get_driver_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the system's graphics driver.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS CPython wrapper for `system_get_driver_version()`:
 * prototype, docstring, method table entry, and thunk to the pf body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13system_get_driver_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_12system_get_driver_version, "system_get_driver_version() -> str\n\nRetrieves the version of the system's graphics driver.\n\n.. seealso:: `nvmlSystemGetDriverVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_13system_get_driver_version = {"system_get_driver_version", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_13system_get_driver_version, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_12system_get_driver_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_13system_get_driver_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_driver_version (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this METH_NOARGS
   * wrapper; this only compiles if the __Pyx_KwValues_VARARGS macro discards
   * its arguments — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_12system_get_driver_version(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body of `system_get_driver_version()`: calls the C-level
 * cpdef implementation with __pyx_skip_dispatch=1; NULL means exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_12system_get_driver_version(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_driver_version", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_driver_version(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19594, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19607
 * 
 * 
 * cpdef str system_get_nvml_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the NVML library.
 * 
*/

/* C-level implementation of cpdef `system_get_nvml_version()`.
 * Same shape as system_get_driver_version: 80-byte stack buffer filled by
 * nvmlSystemGetNVMLVersion with the GIL released, status checked via
 * check_status, result returned as a Python str; NULL (0) on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15system_get_nvml_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_nvml_version(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_version[80];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_nvml_version", 0);

  /* "cuda/bindings/_nvml.pyx":19612
 *     .. seealso:: `nvmlSystemGetNVMLVersion`
 *     """
 *     cdef unsigned int length = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] version
 *     with nogil:
 */
  __pyx_v_length = 80;

  /* "cuda/bindings/_nvml.pyx":19614
 *     cdef unsigned int length = 80
 *     cdef char[80] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetNVMLVersion(version, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19615
 *     cdef char[80] version
 *     with nogil:
 *         __status__ = nvmlSystemGetNVMLVersion(version, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNVMLVersion(__pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19615, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19614
 *     cdef unsigned int length = 80
 *     cdef char[80] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetNVMLVersion(version, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19616
 *     with nogil:
 *         __status__ = nvmlSystemGetNVMLVersion(version, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(version)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19616, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19617
 *         __status__ = nvmlSystemGetNVMLVersion(version, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_version); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19617, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19607
 * 
 * 
 * cpdef str system_get_nvml_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the NVML library.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_nvml_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS CPython wrapper for `system_get_nvml_version()`:
 * prototype, docstring, method table entry, and thunk to the pf body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15system_get_nvml_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_14system_get_nvml_version, "system_get_nvml_version() -> str\n\nRetrieves the version of the NVML library.\n\n.. seealso:: `nvmlSystemGetNVMLVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_15system_get_nvml_version = {"system_get_nvml_version", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_15system_get_nvml_version, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_14system_get_nvml_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_15system_get_nvml_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_nvml_version (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; assumes the
   * __Pyx_KwValues_VARARGS macro ignores its arguments — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_14system_get_nvml_version(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body of `system_get_nvml_version()`: delegates to the
 * C-level cpdef implementation with __pyx_skip_dispatch=1. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_14system_get_nvml_version(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_nvml_version", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_nvml_version(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_nvml_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19620
 * 
 * 
 * cpdef int system_get_cuda_driver_version() except *:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the CUDA driver.
 * 
*/

/* C-level implementation of cpdef `system_get_cuda_driver_version() except *`.
 * Calls nvmlSystemGetCudaDriverVersion with the GIL released, checks the
 * status, and returns the version as an int.  Error return is -1 with a
 * Python exception set (see the exit path below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17system_get_cuda_driver_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version(CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_cuda_driver_version;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19629
 *     """
 *     cdef int cuda_driver_version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetCudaDriverVersion(&cuda_driver_version)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19630
 *     cdef int cuda_driver_version
 *     with nogil:
 *         __status__ = nvmlSystemGetCudaDriverVersion(&cuda_driver_version)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cuda_driver_version
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion((&__pyx_v_cuda_driver_version)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19630, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19629
 *     """
 *     cdef int cuda_driver_version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetCudaDriverVersion(&cuda_driver_version)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19631
 *     with nogil:
 *         __status__ = nvmlSystemGetCudaDriverVersion(&cuda_driver_version)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cuda_driver_version
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19631, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19632
 *         __status__ = nvmlSystemGetCudaDriverVersion(&cuda_driver_version)
 *     check_status(__status__)
 *     return cuda_driver_version             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_cuda_driver_version;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19620
 * 
 * 
 * cpdef int system_get_cuda_driver_version() except *:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the CUDA driver.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_cuda_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS CPython wrapper for `system_get_cuda_driver_version()`:
 * prototype, docstring, method table entry, and thunk to the pf body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17system_get_cuda_driver_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_16system_get_cuda_driver_version, "system_get_cuda_driver_version() -> int\n\nRetrieves the version of the CUDA driver.\n\nReturns:\n    int: Reference in which to return the version identifier.\n\n.. seealso:: `nvmlSystemGetCudaDriverVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_17system_get_cuda_driver_version = {"system_get_cuda_driver_version", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_17system_get_cuda_driver_version, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_16system_get_cuda_driver_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_17system_get_cuda_driver_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_cuda_driver_version (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; assumes the
   * __Pyx_KwValues_VARARGS macro ignores its arguments — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_16system_get_cuda_driver_version(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body of `system_get_cuda_driver_version()`: calls the
 * C-level impl (error signalled by -1 plus a pending exception, matching
 * its `except *` exit path that returns -1) and boxes the int result. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_16system_get_cuda_driver_version(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_cuda_driver_version", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version(1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19620, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_cuda_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19635
 * 
 * 
 * cpdef int system_get_cuda_driver_version_v2() except 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the CUDA driver from the shared library.
 * 
*/

/* C-level implementation of cpdef `system_get_cuda_driver_version_v2()
 * except 0`.  Calls nvmlSystemGetCudaDriverVersion_v2 with the GIL released,
 * checks the status, and returns the version as an int.  Error return is 0
 * with a Python exception set (see the exit path below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19system_get_cuda_driver_version_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static int __pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version_v2(CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_cuda_driver_version;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19644
 *     """
 *     cdef int cuda_driver_version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetCudaDriverVersion_v2(&cuda_driver_version)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19645
 *     cdef int cuda_driver_version
 *     with nogil:
 *         __status__ = nvmlSystemGetCudaDriverVersion_v2(&cuda_driver_version)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cuda_driver_version
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion_v2((&__pyx_v_cuda_driver_version)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19645, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19644
 *     """
 *     cdef int cuda_driver_version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetCudaDriverVersion_v2(&cuda_driver_version)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19646
 *     with nogil:
 *         __status__ = nvmlSystemGetCudaDriverVersion_v2(&cuda_driver_version)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cuda_driver_version
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19646, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19647
 *         __status__ = nvmlSystemGetCudaDriverVersion_v2(&cuda_driver_version)
 *     check_status(__status__)
 *     return cuda_driver_version             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_cuda_driver_version;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19635
 * 
 * 
 * cpdef int system_get_cuda_driver_version_v2() except 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the CUDA driver from the shared library.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_cuda_driver_version_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS CPython wrapper for `system_get_cuda_driver_version_v2()`:
 * prototype, docstring, method table entry, and thunk to the pf body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19system_get_cuda_driver_version_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_18system_get_cuda_driver_version_v2, "system_get_cuda_driver_version_v2() -> int\n\nRetrieves the version of the CUDA driver from the shared library.\n\nReturns:\n    int: Reference in which to return the version identifier.\n\n.. seealso:: `nvmlSystemGetCudaDriverVersion_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_19system_get_cuda_driver_version_v2 = {"system_get_cuda_driver_version_v2", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_19system_get_cuda_driver_version_v2, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_18system_get_cuda_driver_version_v2};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_19system_get_cuda_driver_version_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_cuda_driver_version_v2 (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are undeclared here; assumes the
   * __Pyx_KwValues_VARARGS macro ignores its arguments — confirm. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_18system_get_cuda_driver_version_v2(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body of `system_get_cuda_driver_version_v2()`: calls the
 * C-level impl (0 is the `except 0` error sentinel) and boxes the result. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_18system_get_cuda_driver_version_v2(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_cuda_driver_version_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version_v2(1); if (unlikely(__pyx_t_1 == ((int)0))) __PYX_ERR(0, 19635, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_cuda_driver_version_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19650
 * 
 * 
 * cpdef str system_get_process_name(unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Gets name of the process with provided process id.
 * 
*/

/* C-level implementation of cpdef `system_get_process_name(pid)`.
 * Fills a 1024-byte stack buffer via nvmlSystemGetProcessName with the GIL
 * released, checks the status, and returns the buffer as a Python str.
 * Returns NULL (0) with a Python exception set on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21system_get_process_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_process_name(unsigned int __pyx_v_pid, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_name[1024];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_process_name", 0);

  /* "cuda/bindings/_nvml.pyx":19658
 *     .. seealso:: `nvmlSystemGetProcessName`
 *     """
 *     cdef unsigned int length = 1024             # <<<<<<<<<<<<<<
 *     cdef char[1024] name
 *     with nogil:
 */
  __pyx_v_length = 0x400;

  /* "cuda/bindings/_nvml.pyx":19660
 *     cdef unsigned int length = 1024
 *     cdef char[1024] name
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetProcessName(pid, name, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19661
 *     cdef char[1024] name
 *     with nogil:
 *         __status__ = nvmlSystemGetProcessName(pid, name, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(name)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetProcessName(__pyx_v_pid, __pyx_v_name, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19661, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19660
 *     cdef unsigned int length = 1024
 *     cdef char[1024] name
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetProcessName(pid, name, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19662
 *     with nogil:
 *         __status__ = nvmlSystemGetProcessName(pid, name, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(name)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19662, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19663
 *         __status__ = nvmlSystemGetProcessName(pid, name, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(name)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_name); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19650
 * 
 * 
 * cpdef str system_get_process_name(unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Gets name of the process with provided process id.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_process_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21system_get_process_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_20system_get_process_name, "system_get_process_name(unsigned int pid) -> str\n\nGets name of the process with provided process id.\n\nArgs:\n    pid (unsigned int): The identifier of the process.\n\n.. seealso:: `nvmlSystemGetProcessName`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_21system_get_process_name = {"system_get_process_name", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21system_get_process_name, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20system_get_process_name};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_21system_get_process_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_pid;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_process_name (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pid,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19650, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19650, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_get_process_name", 0) < (0)) __PYX_ERR(0, 19650, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_get_process_name", 1, 1, 1, i); __PYX_ERR(0, 19650, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19650, __pyx_L3_error)
    }
    __pyx_v_pid = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_pid == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19650, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_get_process_name", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19650, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_process_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_20system_get_process_name(__pyx_self, __pyx_v_pid);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated impl body for the Python-level `system_get_process_name(pid)`.
 * Simply forwards to the C-level cpdef function (skip_dispatch=1 so the Python
 * wrapper is not re-entered) and returns its new reference, or NULL with a
 * traceback entry appended on failure. Do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_20system_get_process_name(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_pid) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_process_name", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C function; a NULL result means a Python exception
   * is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_process_name(__pyx_v_pid, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_process_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19666
 * 
 * 
 * cpdef object system_get_hic_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23system_get_hic_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level cpdef implementation of `system_get_hic_version()`.
 * Uses the standard two-call NVML size-query pattern:
 *   1) call nvmlSystemGetHicVersion(&count, NULL) to learn the entry count
 *      (the status is validated with check_status_size, which appears to
 *      tolerate the size-query return code);
 *   2) allocate a Python-side HwbcEntry buffer of that size and call again
 *      to fill it, this time validated with check_status.
 * Both NVML calls run with the GIL released. Returns the HwbcEntry object
 * (new reference) or NULL on error. Cython-generated; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_hic_version(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_hwbc_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v_hwbc_entries = 0;
  nvmlHwbcEntry_t *__pyx_v_hwbc_entries_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_hic_version", 0);

  /* "cuda/bindings/_nvml.pyx":19671
 *     .. seealso:: `nvmlSystemGetHicVersion`
 *     """
 *     cdef unsigned int[1] hwbc_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, NULL)
 */
  /* hwbc_count[0] = 0 via a temporary array copy (Cython's array-literal init). */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_hwbc_count[0]), __pyx_t_1, sizeof(__pyx_v_hwbc_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":19672
 *     """
 *     cdef unsigned int[1] hwbc_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, NULL)
 *     check_status_size(__status__)
 */
  /* First NVML call (size query) with the GIL released; any error raised by the
   * lazy-loading shim is detected after re-acquiring the GIL. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19673
 *     cdef unsigned int[1] hwbc_count = [0]
 *     with nogil:
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef HwbcEntry hwbc_entries = HwbcEntry(hwbc_count[0])
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetHicVersion(((unsigned int *)__pyx_v_hwbc_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19673, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":19672
 *     """
 *     cdef unsigned int[1] hwbc_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19674
 *     with nogil:
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef HwbcEntry hwbc_entries = HwbcEntry(hwbc_count[0])
 *     cdef nvmlHwbcEntry_t *hwbc_entries_ptr = <nvmlHwbcEntry_t *><intptr_t>(hwbc_entries._get_ptr())
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 19674, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19675
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, NULL)
 *     check_status_size(__status__)
 *     cdef HwbcEntry hwbc_entries = HwbcEntry(hwbc_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlHwbcEntry_t *hwbc_entries_ptr = <nvmlHwbcEntry_t *><intptr_t>(hwbc_entries._get_ptr())
 *     if hwbc_count[0] == 0:
 */
  /* Vectorcall HwbcEntry(hwbc_count[0]) to allocate the output buffer wrapper. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_hwbc_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19675, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19675, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_hwbc_entries = ((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":19676
 *     check_status_size(__status__)
 *     cdef HwbcEntry hwbc_entries = HwbcEntry(hwbc_count[0])
 *     cdef nvmlHwbcEntry_t *hwbc_entries_ptr = <nvmlHwbcEntry_t *><intptr_t>(hwbc_entries._get_ptr())             # <<<<<<<<<<<<<<
 *     if hwbc_count[0] == 0:
 *         return hwbc_entries
 */
  /* Raw pointer into the HwbcEntry-owned storage (an integer address round-trip). */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v_hwbc_entries->__pyx_vtab)->_get_ptr(__pyx_v_hwbc_entries); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19676, __pyx_L1_error)
  __pyx_v_hwbc_entries_ptr = ((nvmlHwbcEntry_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":19677
 *     cdef HwbcEntry hwbc_entries = HwbcEntry(hwbc_count[0])
 *     cdef nvmlHwbcEntry_t *hwbc_entries_ptr = <nvmlHwbcEntry_t *><intptr_t>(hwbc_entries._get_ptr())
 *     if hwbc_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return hwbc_entries
 *     with nogil:
 */
  /* No HICs present: skip the second NVML call and return the empty buffer. */
  __pyx_t_9 = ((__pyx_v_hwbc_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":19678
 *     cdef nvmlHwbcEntry_t *hwbc_entries_ptr = <nvmlHwbcEntry_t *><intptr_t>(hwbc_entries._get_ptr())
 *     if hwbc_count[0] == 0:
 *         return hwbc_entries             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, hwbc_entries_ptr)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_hwbc_entries);
    __pyx_r = ((PyObject *)__pyx_v_hwbc_entries);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":19677
 *     cdef HwbcEntry hwbc_entries = HwbcEntry(hwbc_count[0])
 *     cdef nvmlHwbcEntry_t *hwbc_entries_ptr = <nvmlHwbcEntry_t *><intptr_t>(hwbc_entries._get_ptr())
 *     if hwbc_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return hwbc_entries
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":19679
 *     if hwbc_count[0] == 0:
 *         return hwbc_entries
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, hwbc_entries_ptr)
 *     check_status(__status__)
 */
  /* Second NVML call: fill the allocated entries, again without the GIL. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19680
 *         return hwbc_entries
 *     with nogil:
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, hwbc_entries_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return hwbc_entries
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetHicVersion(((unsigned int *)__pyx_v_hwbc_count), __pyx_v_hwbc_entries_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19680, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":19679
 *     if hwbc_count[0] == 0:
 *         return hwbc_entries
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, hwbc_entries_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19681
 *     with nogil:
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, hwbc_entries_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return hwbc_entries
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 19681, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19682
 *         __status__ = nvmlSystemGetHicVersion(<unsigned int*>hwbc_count, hwbc_entries_ptr)
 *     check_status(__status__)
 *     return hwbc_entries             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_hwbc_entries);
  __pyx_r = ((PyObject *)__pyx_v_hwbc_entries);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19666
 * 
 * 
 * cpdef object system_get_hic_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_hic_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_hwbc_entries);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23system_get_hic_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_22system_get_hic_version, "system_get_hic_version()\n\nRetrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system.\n\n.. seealso:: `nvmlSystemGetHicVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_23system_get_hic_version = {"system_get_hic_version", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_23system_get_hic_version, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_22system_get_hic_version};
/* METH_NOARGS CPython entry point: no argument parsing needed, just forwards
 * to the impl function. Cython-generated; do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_23system_get_hic_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_hic_version (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_22system_get_hic_version(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body for the Python-level `system_get_hic_version()`: delegates to the
 * C-level cpdef function (skip_dispatch=1) and returns its new reference, or
 * NULL with a traceback entry on failure. Cython-generated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_22system_get_hic_version(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_hic_version", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_hic_version(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_hic_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19685
 * 
 * 
 * cpdef unsigned int unit_get_count() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of units in the system.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25unit_get_count(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level cpdef `unit_get_count() except? 0`: calls nvmlUnitGetCount with the
 * GIL released, validates the status via check_status, and returns the unit
 * count. Returns 0 on error with an exception set (0 is the ambiguous
 * `except?` sentinel, so callers must also check PyErr_Occurred()).
 * Cython-generated; do not edit by hand. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_unit_get_count(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_unit_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19694
 *     """
 *     cdef unsigned int unit_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetCount(&unit_count)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19695
 *     cdef unsigned int unit_count
 *     with nogil:
 *         __status__ = nvmlUnitGetCount(&unit_count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return unit_count
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetCount((&__pyx_v_unit_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19695, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19694
 *     """
 *     cdef unsigned int unit_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetCount(&unit_count)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19696
 *     with nogil:
 *         __status__ = nvmlUnitGetCount(&unit_count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return unit_count
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19696, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19697
 *         __status__ = nvmlUnitGetCount(&unit_count)
 *     check_status(__status__)
 *     return unit_count             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_unit_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19685
 * 
 * 
 * cpdef unsigned int unit_get_count() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of units in the system.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25unit_get_count(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_24unit_get_count, "unit_get_count() -> unsigned int\n\nRetrieves the number of units in the system.\n\nReturns:\n    unsigned int: Reference in which to return the number of units.\n\n.. seealso:: `nvmlUnitGetCount`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_25unit_get_count = {"unit_get_count", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_25unit_get_count, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_24unit_get_count};
/* METH_NOARGS CPython entry point for `unit_get_count`; forwards straight to
 * the impl function. Cython-generated; do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_25unit_get_count(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_count (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_24unit_get_count(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body for the Python-level `unit_get_count()`: calls the cpdef C
 * function, disambiguates the `except? 0` sentinel with PyErr_Occurred(),
 * and boxes the unsigned int result as a Python int. Cython-generated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_24unit_get_count(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_count", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is a legal count, so an exception check is required to tell error apart. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_count(1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 19685, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19700
 * 
 * 
 * cpdef intptr_t unit_get_handle_by_index(unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular unit, based on its ind_ex.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27unit_get_handle_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level cpdef `unit_get_handle_by_index(ind_ex) except? 0`: calls
 * nvmlUnitGetHandleByIndex with the GIL released, validates the status, and
 * returns the opaque unit handle reinterpreted as intptr_t. Returns 0 with an
 * exception set on error (ambiguous sentinel; callers also check
 * PyErr_Occurred()). Cython-generated; do not edit by hand. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_unit_get_handle_by_index(unsigned int __pyx_v_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_Unit __pyx_v_unit;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19712
 *     """
 *     cdef Unit unit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetHandleByIndex(ind_ex, &unit)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19713
 *     cdef Unit unit
 *     with nogil:
 *         __status__ = nvmlUnitGetHandleByIndex(ind_ex, &unit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>unit
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetHandleByIndex(__pyx_v_ind_ex, (&__pyx_v_unit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19713, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19712
 *     """
 *     cdef Unit unit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetHandleByIndex(ind_ex, &unit)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19714
 *     with nogil:
 *         __status__ = nvmlUnitGetHandleByIndex(ind_ex, &unit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>unit
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19714, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19715
 *         __status__ = nvmlUnitGetHandleByIndex(ind_ex, &unit)
 *     check_status(__status__)
 *     return <intptr_t>unit             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_unit);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19700
 * 
 * 
 * cpdef intptr_t unit_get_handle_by_index(unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular unit, based on its ind_ex.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_handle_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27unit_get_handle_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_26unit_get_handle_by_index, "unit_get_handle_by_index(unsigned int ind_ex) -> intptr_t\n\nAcquire the handle for a particular unit, based on its ind_ex.\n\nArgs:\n    ind_ex (unsigned int): The ind_ex of the target unit, >= 0 and < ``unitCount``.\n\nReturns:\n    intptr_t: Reference in which to return the unit handle.\n\n.. seealso:: `nvmlUnitGetHandleByIndex`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_27unit_get_handle_by_index = {"unit_get_handle_by_index", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27unit_get_handle_by_index, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26unit_get_handle_by_index};
/* FASTCALL|METH_KEYWORDS CPython entry point for `unit_get_handle_by_index`.
 * Parses exactly one positional-or-keyword argument (`ind_ex`), converts it
 * to unsigned int, and forwards to the impl function. All borrowed/owned
 * argument references in `values[]` are released on every exit path.
 * Cython-generated; hand-patched (see note at the kwds-length check). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_27unit_get_handle_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_handle_by_index (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the generated code read `if (unlikely(__pyx_kwds_len) < 0)`.
     * With GCC/Clang, `unlikely(x)` is `__builtin_expect(!!(x), 0)`, which
     * yields 0 or 1, so that comparison was always false and a negative
     * (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored.
     * The comparison belongs inside the macro argument. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19700, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19700, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_handle_by_index", 0) < (0)) __PYX_ERR(0, 19700, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_handle_by_index", 1, 1, 1, i); __PYX_ERR(0, 19700, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19700, __pyx_L3_error)
    }
    __pyx_v_ind_ex = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_ind_ex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19700, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_handle_by_index", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19700, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_handle_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_26unit_get_handle_by_index(__pyx_self, __pyx_v_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body for the Python-level `unit_get_handle_by_index(ind_ex)`: calls the
 * cpdef C function, disambiguates the `except? 0` sentinel with
 * PyErr_Occurred(), and boxes the intptr_t handle as a Python int.
 * Cython-generated. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_26unit_get_handle_by_index(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_ind_ex) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_handle_by_index", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_handle_by_index(__pyx_v_ind_ex, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 19700, __pyx_L1_error)
  /* intptr_t is boxed via the Py_ssize_t conversion (same width on supported
   * platforms — presumably guaranteed elsewhere in the build; note for review). */
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_handle_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19718
 * 
 * 
 * cpdef object unit_get_unit_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the static information associated with a unit.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29unit_get_unit_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level cpdef `unit_get_unit_info(unit)`: allocates a Python-side UnitInfo
 * wrapper, obtains its raw nvmlUnitInfo_t* storage, fills it via
 * nvmlUnitGetUnitInfo (GIL released, handle reinterpreted from intptr_t),
 * validates the status, and returns the UnitInfo object (new reference) or
 * NULL on error. Cython-generated; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_unit_info(intptr_t __pyx_v_unit, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *__pyx_v_info_py = 0;
  nvmlUnitInfo_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_unit_info", 0);

  /* "cuda/bindings/_nvml.pyx":19729
 *     .. seealso:: `nvmlUnitGetUnitInfo`
 *     """
 *     cdef UnitInfo info_py = UnitInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitInfo_t *info = <nvmlUnitInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
 */
  /* Vectorcall UnitInfo() with no arguments to allocate the output wrapper. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19729, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19730
 *     """
 *     cdef UnitInfo info_py = UnitInfo()
 *     cdef nvmlUnitInfo_t *info = <nvmlUnitInfo_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlUnitGetUnitInfo(<Unit>unit, info)
 */
  /* Raw pointer into UnitInfo-owned storage (integer address round-trip). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitInfo *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19730, __pyx_L1_error)
  __pyx_v_info = ((nvmlUnitInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":19731
 *     cdef UnitInfo info_py = UnitInfo()
 *     cdef nvmlUnitInfo_t *info = <nvmlUnitInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetUnitInfo(<Unit>unit, info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19732
 *     cdef nvmlUnitInfo_t *info = <nvmlUnitInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlUnitGetUnitInfo(<Unit>unit, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetUnitInfo(((__pyx_t_4cuda_8bindings_5_nvml_Unit)__pyx_v_unit), __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19732, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":19731
 *     cdef UnitInfo info_py = UnitInfo()
 *     cdef nvmlUnitInfo_t *info = <nvmlUnitInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetUnitInfo(<Unit>unit, info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19733
 *     with nogil:
 *         __status__ = nvmlUnitGetUnitInfo(<Unit>unit, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 19733, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19734
 *         __status__ = nvmlUnitGetUnitInfo(<Unit>unit, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19718
 * 
 * 
 * cpdef object unit_get_unit_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the static information associated with a unit.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_unit_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-visible wrapper for unit_get_unit_info.
 * The parameter list is selected at compile time: the METH_FASTCALL form
 * receives a C array of arguments plus a count, the fallback form a tuple. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29unit_get_unit_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring constant and method-table entry that expose unit_get_unit_info
 * on the extension module. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_28unit_get_unit_info, "unit_get_unit_info(intptr_t unit)\n\nRetrieves the static information associated with a unit.\n\nArgs:\n    unit (intptr_t): The identifier of the target unit.\n\nReturns:\n    nvmlUnitInfo_t: Reference in which to return the unit information.\n\n.. seealso:: `nvmlUnitGetUnitInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_29unit_get_unit_info = {"unit_get_unit_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29unit_get_unit_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28unit_get_unit_info};
/* Python-visible wrapper for unit_get_unit_info: unpacks the single `unit`
 * argument from positional/keyword arguments, converts it to intptr_t, and
 * forwards to __pyx_pf_..._28unit_get_unit_info. Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_29unit_get_unit_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned reference slots for parsed arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_unit_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which normalizes the value to 0/1 before
     * the comparison, so the `< 0` error check could never fire and a
     * failure from __Pyx_NumKwargs_FASTCALL was silently ignored. The whole
     * comparison belongs inside unlikely(), matching the __pyx_nargs check
     * above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19718, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19718, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_unit_info", 0) < (0)) __PYX_ERR(0, 19718, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_unit_info", 1, 1, 1, i); __PYX_ERR(0, 19718, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19718, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t, same width here). */
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19718, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_unit_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19718, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_unit_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_28unit_get_unit_info(__pyx_self, __pyx_v_unit);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for unit_get_unit_info: delegates to the cpdef C-level
 * function (skip_dispatch=1, i.e. no Python-level override lookup) and
 * returns its result as a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_28unit_get_unit_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_unit_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_unit_info(__pyx_v_unit, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19718, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_unit_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19737
 * 
 * 
 * cpdef object unit_get_led_state(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the LED state associated with this unit.
 * 
 */

/* Forward declaration of the Python-visible wrapper for unit_get_led_state
 * (fastcall vs. tuple calling convention chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31unit_get_led_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef unit_get_led_state: allocates a LedState Python
 * wrapper, calls nvmlUnitGetLedState into the wrapper's backing struct with
 * the GIL released, raises via check_status on failure, and returns the
 * wrapper object (new reference; NULL with exception set on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_led_state(intptr_t __pyx_v_unit, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *__pyx_v_state_py = 0;
  nvmlLedState_t *__pyx_v_state;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_led_state", 0);

  /* "cuda/bindings/_nvml.pyx":19748
 *     .. seealso:: `nvmlUnitGetLedState`
 *     """
 *     cdef LedState state_py = LedState()             # <<<<<<<<<<<<<<
 *     cdef nvmlLedState_t *state = <nvmlLedState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the LedState wrapper via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19748, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_state_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19749
 *     """
 *     cdef LedState state_py = LedState()
 *     cdef nvmlLedState_t *state = <nvmlLedState_t *><intptr_t>(state_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlUnitGetLedState(<Unit>unit, state)
 */
  /* Borrow the address of the wrapper's underlying nvmlLedState_t struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_LedState *)__pyx_v_state_py->__pyx_vtab)->_get_ptr(__pyx_v_state_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19749, __pyx_L1_error)
  __pyx_v_state = ((nvmlLedState_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":19750
 *     cdef LedState state_py = LedState()
 *     cdef nvmlLedState_t *state = <nvmlLedState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetLedState(<Unit>unit, state)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML call; both the normal-exit and
   * error paths below re-acquire it before leaving this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19751
 *     cdef nvmlLedState_t *state = <nvmlLedState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlUnitGetLedState(<Unit>unit, state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return state_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetLedState(((__pyx_t_4cuda_8bindings_5_nvml_Unit)__pyx_v_unit), __pyx_v_state); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19751, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":19750
 *     cdef LedState state_py = LedState()
 *     cdef nvmlLedState_t *state = <nvmlLedState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetLedState(<Unit>unit, state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19752
 *     with nogil:
 *         __status__ = nvmlUnitGetLedState(<Unit>unit, state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return state_py
 * 
 */
  /* check_status raises a Python exception for non-success NVML codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 19752, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19753
 *         __status__ = nvmlUnitGetLedState(<Unit>unit, state)
 *     check_status(__status__)
 *     return state_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_state_py);
  __pyx_r = ((PyObject *)__pyx_v_state_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19737
 * 
 * 
 * cpdef object unit_get_led_state(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the LED state associated with this unit.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_led_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_state_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring constant, and method-table entry exposing
 * unit_get_led_state on the extension module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31unit_get_led_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_30unit_get_led_state, "unit_get_led_state(intptr_t unit)\n\nRetrieves the LED state associated with this unit.\n\nArgs:\n    unit (intptr_t): The identifier of the target unit.\n\nReturns:\n    nvmlLedState_t: Reference in which to return the current LED state.\n\n.. seealso:: `nvmlUnitGetLedState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_31unit_get_led_state = {"unit_get_led_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31unit_get_led_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30unit_get_led_state};
/* Python-visible wrapper for unit_get_led_state: unpacks the single `unit`
 * argument, converts it to intptr_t, and forwards to
 * __pyx_pf_..._30unit_get_led_state. Returns a new reference or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_31unit_get_led_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned reference slots for parsed arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_led_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), normalizing the value to 0/1 before the
     * comparison, so the error check could never fire. The comparison
     * belongs inside unlikely(), matching the __pyx_nargs check above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19737, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19737, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_led_state", 0) < (0)) __PYX_ERR(0, 19737, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_led_state", 1, 1, 1, i); __PYX_ERR(0, 19737, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19737, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t, same width here). */
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19737, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_led_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19737, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_led_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_30unit_get_led_state(__pyx_self, __pyx_v_unit);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for unit_get_led_state: delegates to the cpdef C-level
 * function (skip_dispatch=1, i.e. no Python-level override lookup) and
 * returns its result as a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_30unit_get_led_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_led_state", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_led_state(__pyx_v_unit, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19737, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_led_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19756
 * 
 * 
 * cpdef object unit_get_psu_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the PSU stats for the unit.
 * 
 */

/* Forward declaration of the Python-visible wrapper for unit_get_psu_info
 * (fastcall vs. tuple calling convention chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_33unit_get_psu_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef unit_get_psu_info: allocates a PSUInfo Python
 * wrapper, calls nvmlUnitGetPsuInfo into the wrapper's backing struct with
 * the GIL released, raises via check_status on failure, and returns the
 * wrapper object (new reference; NULL with exception set on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_psu_info(intptr_t __pyx_v_unit, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *__pyx_v_psu_py = 0;
  nvmlPSUInfo_t *__pyx_v_psu;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_psu_info", 0);

  /* "cuda/bindings/_nvml.pyx":19767
 *     .. seealso:: `nvmlUnitGetPsuInfo`
 *     """
 *     cdef PSUInfo psu_py = PSUInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlPSUInfo_t *psu = <nvmlPSUInfo_t *><intptr_t>(psu_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the PSUInfo wrapper via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19767, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_psu_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19768
 *     """
 *     cdef PSUInfo psu_py = PSUInfo()
 *     cdef nvmlPSUInfo_t *psu = <nvmlPSUInfo_t *><intptr_t>(psu_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlUnitGetPsuInfo(<Unit>unit, psu)
 */
  /* Borrow the address of the wrapper's underlying nvmlPSUInfo_t struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PSUInfo *)__pyx_v_psu_py->__pyx_vtab)->_get_ptr(__pyx_v_psu_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19768, __pyx_L1_error)
  __pyx_v_psu = ((nvmlPSUInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":19769
 *     cdef PSUInfo psu_py = PSUInfo()
 *     cdef nvmlPSUInfo_t *psu = <nvmlPSUInfo_t *><intptr_t>(psu_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetPsuInfo(<Unit>unit, psu)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML call; both the normal-exit and
   * error paths below re-acquire it before leaving this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19770
 *     cdef nvmlPSUInfo_t *psu = <nvmlPSUInfo_t *><intptr_t>(psu_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlUnitGetPsuInfo(<Unit>unit, psu)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return psu_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetPsuInfo(((__pyx_t_4cuda_8bindings_5_nvml_Unit)__pyx_v_unit), __pyx_v_psu); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19770, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":19769
 *     cdef PSUInfo psu_py = PSUInfo()
 *     cdef nvmlPSUInfo_t *psu = <nvmlPSUInfo_t *><intptr_t>(psu_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetPsuInfo(<Unit>unit, psu)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19771
 *     with nogil:
 *         __status__ = nvmlUnitGetPsuInfo(<Unit>unit, psu)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return psu_py
 * 
 */
  /* check_status raises a Python exception for non-success NVML codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 19771, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19772
 *         __status__ = nvmlUnitGetPsuInfo(<Unit>unit, psu)
 *     check_status(__status__)
 *     return psu_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_psu_py);
  __pyx_r = ((PyObject *)__pyx_v_psu_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19756
 * 
 * 
 * cpdef object unit_get_psu_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the PSU stats for the unit.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_psu_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_psu_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring constant, and method-table entry exposing
 * unit_get_psu_info on the extension module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_33unit_get_psu_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_32unit_get_psu_info, "unit_get_psu_info(intptr_t unit)\n\nRetrieves the PSU stats for the unit.\n\nArgs:\n    unit (intptr_t): The identifier of the target unit.\n\nReturns:\n    nvmlPSUInfo_t: Reference in which to return the PSU information.\n\n.. seealso:: `nvmlUnitGetPsuInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_33unit_get_psu_info = {"unit_get_psu_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_33unit_get_psu_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_32unit_get_psu_info};
/* Python-visible wrapper for unit_get_psu_info: unpacks the single `unit`
 * argument, converts it to intptr_t, and forwards to
 * __pyx_pf_..._32unit_get_psu_info. Returns a new reference or NULL with an
 * exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_33unit_get_psu_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned reference slots for parsed arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_psu_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), normalizing the value to 0/1 before the
     * comparison, so the error check could never fire. The comparison
     * belongs inside unlikely(), matching the __pyx_nargs check above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19756, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19756, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_psu_info", 0) < (0)) __PYX_ERR(0, 19756, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_psu_info", 1, 1, 1, i); __PYX_ERR(0, 19756, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19756, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t, same width here). */
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19756, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_psu_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19756, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_psu_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_32unit_get_psu_info(__pyx_self, __pyx_v_unit);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for unit_get_psu_info: delegates to the cpdef C-level
 * function (skip_dispatch=1, i.e. no Python-level override lookup) and
 * returns its result as a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_32unit_get_psu_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_psu_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_psu_info(__pyx_v_unit, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_psu_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19775
 * 
 * 
 * cpdef unsigned int unit_get_temperature(intptr_t unit, unsigned int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the temperature readings for the unit, in degrees C.
 * 
 */

/* Forward declaration of the Python-visible wrapper for unit_get_temperature
 * (fastcall vs. tuple calling convention chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_35unit_get_temperature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef unit_get_temperature: calls nvmlUnitGetTemperature
 * with the GIL released, raises via check_status on a non-success code, and
 * returns the temperature reading. Error return is 0 with a Python exception
 * set (the cpdef is declared `except? 0`, so 0 is ambiguous and callers must
 * also check PyErr_Occurred). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_unit_get_temperature(intptr_t __pyx_v_unit, unsigned int __pyx_v_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_temp;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19788
 *     """
 *     cdef unsigned int temp
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetTemperature(<Unit>unit, type, &temp)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML call; both the normal-exit and
   * error paths below re-acquire it before leaving this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19789
 *     cdef unsigned int temp
 *     with nogil:
 *         __status__ = nvmlUnitGetTemperature(<Unit>unit, type, &temp)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return temp
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetTemperature(((__pyx_t_4cuda_8bindings_5_nvml_Unit)__pyx_v_unit), __pyx_v_type, (&__pyx_v_temp)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19789, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19788
 *     """
 *     cdef unsigned int temp
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetTemperature(<Unit>unit, type, &temp)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19790
 *     with nogil:
 *         __status__ = nvmlUnitGetTemperature(<Unit>unit, type, &temp)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return temp
 * 
 */
  /* check_status raises a Python exception for non-success NVML codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19790, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19791
 *         __status__ = nvmlUnitGetTemperature(<Unit>unit, type, &temp)
 *     check_status(__status__)
 *     return temp             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_temp;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19775
 * 
 * 
 * cpdef unsigned int unit_get_temperature(intptr_t unit, unsigned int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the temperature readings for the unit, in degrees C.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_temperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring constant, and method-table entry exposing
 * unit_get_temperature on the extension module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_35unit_get_temperature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_34unit_get_temperature, "unit_get_temperature(intptr_t unit, unsigned int type) -> unsigned int\n\nRetrieves the temperature readings for the unit, in degrees C.\n\nArgs:\n    unit (intptr_t): The identifier of the target unit.\n    type (unsigned int): The type of reading to take.\n\nReturns:\n    unsigned int: Reference in which to return the intake temperature.\n\n.. seealso:: `nvmlUnitGetTemperature`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_35unit_get_temperature = {"unit_get_temperature", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_35unit_get_temperature, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_34unit_get_temperature};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_35unit_get_temperature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  unsigned int __pyx_v_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_temperature (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,&__pyx_mstate_global->__pyx_n_u_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19775, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19775, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19775, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_temperature", 0) < (0)) __PYX_ERR(0, 19775, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_temperature", 1, 2, 2, i); __PYX_ERR(0, 19775, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19775, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19775, __pyx_L3_error)
    }
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19775, __pyx_L3_error)
    __pyx_v_type = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_type == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19775, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_temperature", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 19775, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_temperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_34unit_get_temperature(__pyx_self, __pyx_v_unit, __pyx_v_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for the Python-level unit_get_temperature: invokes the
 * cpdef C function directly (skip_dispatch=1) and boxes the unsigned int
 * result as a Python int. Error convention is "except? 0": a return of 0 is
 * only an error if a Python exception is also set, hence the PyErr_Occurred
 * check. (Generated by Cython — do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_34unit_get_temperature(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit, unsigned int __pyx_v_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_temperature", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_temperature(__pyx_v_unit, __pyx_v_type, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 19775, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19775, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_temperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19794
 * 
 * 
 * cpdef object unit_get_fan_speed_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the fan speed readings for the unit.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37unit_get_fan_speed_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef unit_get_fan_speed_info(unit):
 * 1. allocates a UnitFanSpeeds Python wrapper object,
 * 2. obtains the address of its embedded nvmlUnitFanSpeeds_t via _get_ptr(),
 * 3. calls nvmlUnitGetFanSpeedInfo with the GIL released,
 * 4. raises via check_status on a non-success NVML return code,
 * 5. returns the populated wrapper object.
 * (Generated by Cython — do not edit by hand.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_fan_speed_info(intptr_t __pyx_v_unit, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *__pyx_v_fan_speeds_py = 0;
  nvmlUnitFanSpeeds_t *__pyx_v_fan_speeds;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_fan_speed_info", 0);

  /* "cuda/bindings/_nvml.pyx":19805
 *     .. seealso:: `nvmlUnitGetFanSpeedInfo`
 *     """
 *     cdef UnitFanSpeeds fan_speeds_py = UnitFanSpeeds()             # <<<<<<<<<<<<<<
 *     cdef nvmlUnitFanSpeeds_t *fan_speeds = <nvmlUnitFanSpeeds_t *><intptr_t>(fan_speeds_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the UnitFanSpeeds wrapper with a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19805, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_fan_speeds_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19806
 *     """
 *     cdef UnitFanSpeeds fan_speeds_py = UnitFanSpeeds()
 *     cdef nvmlUnitFanSpeeds_t *fan_speeds = <nvmlUnitFanSpeeds_t *><intptr_t>(fan_speeds_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlUnitGetFanSpeedInfo(<Unit>unit, fan_speeds)
 */
  /* _get_ptr() returns the struct address as intptr_t; the wrapper object owns
   * that storage, so the pointer stays valid while fan_speeds_py is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanSpeeds *)__pyx_v_fan_speeds_py->__pyx_vtab)->_get_ptr(__pyx_v_fan_speeds_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19806, __pyx_L1_error)
  __pyx_v_fan_speeds = ((nvmlUnitFanSpeeds_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":19807
 *     cdef UnitFanSpeeds fan_speeds_py = UnitFanSpeeds()
 *     cdef nvmlUnitFanSpeeds_t *fan_speeds = <nvmlUnitFanSpeeds_t *><intptr_t>(fan_speeds_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetFanSpeedInfo(<Unit>unit, fan_speeds)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19808
 *     cdef nvmlUnitFanSpeeds_t *fan_speeds = <nvmlUnitFanSpeeds_t *><intptr_t>(fan_speeds_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlUnitGetFanSpeedInfo(<Unit>unit, fan_speeds)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fan_speeds_py
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a
         * sentinel from cy_nvml for a failed dynamic load of the NVML library;
         * confirm against cy_nvml before relying on this in new code. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetFanSpeedInfo(((__pyx_t_4cuda_8bindings_5_nvml_Unit)__pyx_v_unit), __pyx_v_fan_speeds); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19808, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":19807
 *     cdef UnitFanSpeeds fan_speeds_py = UnitFanSpeeds()
 *     cdef nvmlUnitFanSpeeds_t *fan_speeds = <nvmlUnitFanSpeeds_t *><intptr_t>(fan_speeds_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetFanSpeedInfo(<Unit>unit, fan_speeds)
 *     check_status(__status__)
 */
      /* The GIL is re-acquired on both the normal and the error exit. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19809
 *     with nogil:
 *         __status__ = nvmlUnitGetFanSpeedInfo(<Unit>unit, fan_speeds)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fan_speeds_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 19809, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19810
 *         __status__ = nvmlUnitGetFanSpeedInfo(<Unit>unit, fan_speeds)
 *     check_status(__status__)
 *     return fan_speeds_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_fan_speeds_py);
  __pyx_r = ((PyObject *)__pyx_v_fan_speeds_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19794
 * 
 * 
 * cpdef object unit_get_fan_speed_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the fan speed readings for the unit.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_fan_speed_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_fan_speeds_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Python-callable entry point for unit_get_fan_speed_info(unit): unpacks the
 * single intptr_t argument (positional or keyword) and forwards to the
 * __pyx_pf_ implementation shim. (Generated by Cython — do not edit.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37unit_get_fan_speed_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_36unit_get_fan_speed_info, "unit_get_fan_speed_info(intptr_t unit)\n\nRetrieves the fan speed readings for the unit.\n\nArgs:\n    unit (intptr_t): The identifier of the target unit.\n\nReturns:\n    nvmlUnitFanSpeeds_t: Reference in which to return the fan speed information.\n\n.. seealso:: `nvmlUnitGetFanSpeedInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_37unit_get_fan_speed_info = {"unit_get_fan_speed_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37unit_get_fan_speed_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_36unit_get_fan_speed_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_37unit_get_fan_speed_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_fan_speed_info (wrapper)", 0);
  /* Non-fastcall ABI: recover the positional-argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19794, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals already supplied, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19794, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_fan_speed_info", 0) < (0)) __PYX_ERR(0, 19794, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_fan_speed_info", 1, 1, 1, i); __PYX_ERR(0, 19794, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19794, __pyx_L3_error)
    }
    /* NOTE(review): PyLong_AsSsize_t fills an intptr_t — assumes matching widths. */
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19794, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_fan_speed_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19794, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release references collected into values[] before propagating. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_fan_speed_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_36unit_get_fan_speed_info(__pyx_self, __pyx_v_unit);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for the Python-level unit_get_fan_speed_info: calls the
 * cpdef C function directly (skip_dispatch=1); the result is already a Python
 * object (a UnitFanSpeeds instance), so no boxing is needed. NULL signals an
 * exception. (Generated by Cython — do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_36unit_get_fan_speed_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_fan_speed_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_fan_speed_info(__pyx_v_unit, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19794, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_fan_speed_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19813
 * 
 * 
 * cpdef unsigned int device_get_count_v2() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of compute devices in the system. A compute device is a single GPU.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_39device_get_count_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level body of the cpdef device_get_count_v2(): calls
 * nvmlDeviceGetCount_v2 with the GIL released, raises via check_status on a
 * non-success code, and returns the device count. Error convention is
 * "except? 0": callers must pair a 0 return with PyErr_Occurred().
 * (Generated by Cython — do not edit by hand.) */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_count_v2(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_device_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19822
 *     """
 *     cdef unsigned int device_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCount_v2(&device_count)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19823
 *     cdef unsigned int device_count
 *     with nogil:
 *         __status__ = nvmlDeviceGetCount_v2(&device_count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return device_count
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a
         * cy_nvml sentinel for a failed NVML library load — confirm there. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCount_v2((&__pyx_v_device_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19823, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19822
 *     """
 *     cdef unsigned int device_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCount_v2(&device_count)
 *     check_status(__status__)
 */
      /* The GIL is re-acquired on both the normal and the error exit. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19824
 *     with nogil:
 *         __status__ = nvmlDeviceGetCount_v2(&device_count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return device_count
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19824, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19825
 *         __status__ = nvmlDeviceGetCount_v2(&device_count)
 *     check_status(__status__)
 *     return device_count             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* device_count was filled by NVML; only reached when status was success. */
  __pyx_r = __pyx_v_device_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19813
 * 
 * 
 * cpdef unsigned int device_get_count_v2() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of compute devices in the system. A compute device is a single GPU.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_count_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS entry point for device_get_count_v2(): no argument parsing,
 * forwards straight to the __pyx_pf_ shim. (Generated by Cython.)
 * NOTE(review): __Pyx_KwValues_VARARGS is invoked with __pyx_args/__pyx_nargs,
 * which are not parameters of this METH_NOARGS signature — presumably the
 * macro expands without evaluating its arguments (e.g. to NULL); confirm
 * against the Cython utility-code definition before touching this. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_39device_get_count_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_38device_get_count_v2, "device_get_count_v2() -> unsigned int\n\nRetrieves the number of compute devices in the system. A compute device is a single GPU.\n\nReturns:\n    unsigned int: Reference in which to return the number of accessible devices.\n\n.. seealso:: `nvmlDeviceGetCount_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_39device_get_count_v2 = {"device_get_count_v2", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_39device_get_count_v2, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_38device_get_count_v2};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_39device_get_count_v2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_count_v2 (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_38device_get_count_v2(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for the Python-level device_get_count_v2: invokes the
 * cpdef C function (skip_dispatch=1) and boxes the unsigned int result.
 * Error convention is "except? 0": 0 only signals an error when a Python
 * exception is also set. (Generated by Cython — do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_38device_get_count_v2(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_count_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_count_v2(1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 19813, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_count_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19828
 * 
 * 
 * cpdef object device_get_attributes_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get attributes (engine counts etc.) for the given NVML device handle.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41device_get_attributes_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef device_get_attributes_v2(device):
 * 1. allocates a DeviceAttributes Python wrapper object,
 * 2. obtains the address of its embedded nvmlDeviceAttributes_t via _get_ptr(),
 * 3. calls nvmlDeviceGetAttributes_v2 with the GIL released,
 * 4. raises via check_status on a non-success NVML return code,
 * 5. returns the populated wrapper object.
 * (Generated by Cython — do not edit by hand.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_attributes_v2(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *__pyx_v_attributes_py = 0;
  nvmlDeviceAttributes_t *__pyx_v_attributes;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_attributes_v2", 0);

  /* "cuda/bindings/_nvml.pyx":19839
 *     .. seealso:: `nvmlDeviceGetAttributes_v2`
 *     """
 *     cdef DeviceAttributes attributes_py = DeviceAttributes()             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceAttributes_t *attributes = <nvmlDeviceAttributes_t *><intptr_t>(attributes_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the DeviceAttributes wrapper with a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19839, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_attributes_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":19840
 *     """
 *     cdef DeviceAttributes attributes_py = DeviceAttributes()
 *     cdef nvmlDeviceAttributes_t *attributes = <nvmlDeviceAttributes_t *><intptr_t>(attributes_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetAttributes_v2(<Device>device, attributes)
 */
  /* _get_ptr() returns the struct address as intptr_t; the wrapper object owns
   * that storage, so the pointer stays valid while attributes_py is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAttributes *)__pyx_v_attributes_py->__pyx_vtab)->_get_ptr(__pyx_v_attributes_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19840, __pyx_L1_error)
  __pyx_v_attributes = ((nvmlDeviceAttributes_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":19841
 *     cdef DeviceAttributes attributes_py = DeviceAttributes()
 *     cdef nvmlDeviceAttributes_t *attributes = <nvmlDeviceAttributes_t *><intptr_t>(attributes_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAttributes_v2(<Device>device, attributes)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19842
 *     cdef nvmlDeviceAttributes_t *attributes = <nvmlDeviceAttributes_t *><intptr_t>(attributes_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetAttributes_v2(<Device>device, attributes)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return attributes_py
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a
         * cy_nvml sentinel for a failed NVML library load — confirm there. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAttributes_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_attributes); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19842, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":19841
 *     cdef DeviceAttributes attributes_py = DeviceAttributes()
 *     cdef nvmlDeviceAttributes_t *attributes = <nvmlDeviceAttributes_t *><intptr_t>(attributes_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAttributes_v2(<Device>device, attributes)
 *     check_status(__status__)
 */
      /* The GIL is re-acquired on both the normal and the error exit. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19843
 *     with nogil:
 *         __status__ = nvmlDeviceGetAttributes_v2(<Device>device, attributes)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return attributes_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 19843, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19844
 *         __status__ = nvmlDeviceGetAttributes_v2(<Device>device, attributes)
 *     check_status(__status__)
 *     return attributes_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_attributes_py);
  __pyx_r = ((PyObject *)__pyx_v_attributes_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19828
 * 
 * 
 * cpdef object device_get_attributes_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get attributes (engine counts etc.) for the given NVML device handle.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_attributes_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_attributes_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Python-callable entry point for device_get_attributes_v2(device): unpacks
 * the single intptr_t argument (positional or keyword) and forwards to the
 * __pyx_pf_ implementation shim. (Generated by Cython — do not edit.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41device_get_attributes_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_40device_get_attributes_v2, "device_get_attributes_v2(intptr_t device)\n\nGet attributes (engine counts etc.) for the given NVML device handle.\n\nArgs:\n    device (intptr_t): NVML device handle.\n\nReturns:\n    nvmlDeviceAttributes_t: Device attributes.\n\n.. seealso:: `nvmlDeviceGetAttributes_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_41device_get_attributes_v2 = {"device_get_attributes_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41device_get_attributes_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_40device_get_attributes_v2};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_41device_get_attributes_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_attributes_v2 (wrapper)", 0);
  /* Non-fastcall ABI: recover the positional-argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19828, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals already supplied, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19828, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_attributes_v2", 0) < (0)) __PYX_ERR(0, 19828, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_attributes_v2", 1, 1, 1, i); __PYX_ERR(0, 19828, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19828, __pyx_L3_error)
    }
    /* NOTE(review): PyLong_AsSsize_t fills an intptr_t — assumes matching widths. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19828, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_attributes_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19828, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release references collected into values[] before propagating. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_attributes_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_40device_get_attributes_v2(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level implementation slot for `device_get_attributes_v2`.
 * Delegates to the cpdef C implementation with skip_dispatch=1 (bypasses
 * the Python-override lookup) and returns the resulting object, or NULL
 * after recording a traceback on failure.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_40device_get_attributes_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_attributes_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef body; a NULL result signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_attributes_v2(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19828, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Transfer ownership of the reference to the return slot. */
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_attributes_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19847
 * 
 * 
 * cpdef intptr_t device_get_handle_by_index_v2(unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its ind_ex.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_43device_get_handle_by_index_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C body of cpdef `device_get_handle_by_index_v2(unsigned int ind_ex)`.
 * Releases the GIL, calls nvmlDeviceGetHandleByIndex_v2, re-acquires the
 * GIL, raises via check_status() on non-success, and returns the device
 * handle cast to intptr_t.  Per the `except? 0` declaration, 0 is the
 * (ambiguous) error sentinel — callers must also check PyErr_Occurred().
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_index_v2(unsigned int __pyx_v_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_device;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19859
 *     """
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByIndex_v2(ind_ex, &device)
 *     check_status(__status__)
 */
  /* `with nogil:` — GIL is dropped around the NVML call below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19860
 *     cdef Device device
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByIndex_v2(ind_ex, &device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>device
 */
        /* The sentinel return value indicates the NVML symbol failed to load;
           __Pyx_ErrOccurredWithGIL() confirms a pending Python exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByIndex_v2(__pyx_v_ind_ex, (&__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19860, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19859
 *     """
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByIndex_v2(ind_ex, &device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path still re-acquires the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19861
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByIndex_v2(ind_ex, &device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>device
 * 
 */
  /* check_status() returns 1 after raising a Python exception for a
     non-success nvmlReturn_t. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19861, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19862
 *         __status__ = nvmlDeviceGetHandleByIndex_v2(ind_ex, &device)
 *     check_status(__status__)
 *     return <intptr_t>device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19847
 * 
 * 
 * cpdef intptr_t device_get_handle_by_index_v2(unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its ind_ex.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_index_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_43device_get_handle_by_index_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_42device_get_handle_by_index_v2, "device_get_handle_by_index_v2(unsigned int ind_ex) -> intptr_t\n\nAcquire the handle for a particular device, based on its ind_ex.\n\nArgs:\n    ind_ex (unsigned int): The ind_ex of the target GPU, >= 0 and < ``accessibleDevices``.\n\nReturns:\n    intptr_t: Reference in which to return the device handle.\n\n.. seealso:: `nvmlDeviceGetHandleByIndex_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_43device_get_handle_by_index_v2 = {"device_get_handle_by_index_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_43device_get_handle_by_index_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_42device_get_handle_by_index_v2};
/*
 * METH_FASTCALL|METH_KEYWORDS entry point for `device_get_handle_by_index_v2`.
 * Unpacks exactly one positional-or-keyword argument (`ind_ex`), converts it
 * to unsigned int, and dispatches to the _pf_ implementation function.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_43device_get_handle_by_index_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_handle_by_index_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): parenthesization applies `< 0` to the result of
       unlikely() (always 0/1), so this check is always false — looks like
       an upstream Cython codegen quirk; confirm against Cython 3.2.2. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19847, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19847, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_handle_by_index_v2", 0) < (0)) __PYX_ERR(0, 19847, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_handle_by_index_v2", 1, 1, 1, i); __PYX_ERR(0, 19847, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19847, __pyx_L3_error)
    }
    __pyx_v_ind_ex = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_ind_ex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19847, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_handle_by_index_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19847, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_index_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_42device_get_handle_by_index_v2(__pyx_self, __pyx_v_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level implementation slot for `device_get_handle_by_index_v2`.
 * Calls the cpdef C body (skip_dispatch=1) and boxes the intptr_t handle
 * as a Python int.  Because the cpdef is declared `except? 0`, a return
 * of 0 is only an error if PyErr_Occurred() — hence the combined check.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_42device_get_handle_by_index_v2(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_ind_ex) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_index_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_index_v2(__pyx_v_ind_ex, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 19847, __pyx_L1_error)
  /* Box the handle; intptr_t is passed through PyLong_FromSsize_t here. */
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_index_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19865
 * 
 * 
 * cpdef intptr_t device_get_handle_by_serial(serial) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its board serial number.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_45device_get_handle_by_serial(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C body of cpdef `device_get_handle_by_serial(serial)`.
 * Validates that `serial` is a str, encodes it to a bytes object (default
 * encoding), takes a char* into that bytes buffer, and calls
 * nvmlDeviceGetHandleBySerial with the GIL released.  The bytes temporary
 * (_temp_serial_) stays alive until function exit, keeping the char*
 * valid across the call.  Returns the handle as intptr_t; 0 is the
 * `except? 0` error sentinel.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_serial(PyObject *__pyx_v_serial, CYTHON_UNUSED int __pyx_skip_dispatch) {
  PyObject *__pyx_v__temp_serial_ = 0;
  char *__pyx_v__serial_;
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_device;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  char *__pyx_t_6;
  nvmlReturn_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_serial", 0);

  /* "cuda/bindings/_nvml.pyx":19876
 *     .. seealso:: `nvmlDeviceGetHandleBySerial`
 *     """
 *     if not isinstance(serial, str):             # <<<<<<<<<<<<<<
 *         raise TypeError("serial must be a Python str")
 *     cdef bytes _temp_serial_ = (<str>serial).encode()
 */
  __pyx_t_1 = PyUnicode_Check(__pyx_v_serial); 
  __pyx_t_2 = (!__pyx_t_1);
  if (unlikely(__pyx_t_2)) {

    /* "cuda/bindings/_nvml.pyx":19877
 *     """
 *     if not isinstance(serial, str):
 *         raise TypeError("serial must be a Python str")             # <<<<<<<<<<<<<<
 *     cdef bytes _temp_serial_ = (<str>serial).encode()
 *     cdef char* _serial_ = _temp_serial_
 */
    /* Construct and raise TypeError via a vectorcall to the type object. */
    __pyx_t_4 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_serial_must_be_a_Python_str};
      __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19877, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(0, 19877, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19876
 *     .. seealso:: `nvmlDeviceGetHandleBySerial`
 *     """
 *     if not isinstance(serial, str):             # <<<<<<<<<<<<<<
 *         raise TypeError("serial must be a Python str")
 *     cdef bytes _temp_serial_ = (<str>serial).encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":19878
 *     if not isinstance(serial, str):
 *         raise TypeError("serial must be a Python str")
 *     cdef bytes _temp_serial_ = (<str>serial).encode()             # <<<<<<<<<<<<<<
 *     cdef char* _serial_ = _temp_serial_
 *     cdef Device device
 */
  if (unlikely(__pyx_v_serial == Py_None)) {
    PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "encode");
    __PYX_ERR(0, 19878, __pyx_L1_error)
  }
  /* str.encode() with default (UTF-8) encoding → owned bytes temporary. */
  __pyx_t_3 = PyUnicode_AsEncodedString(((PyObject*)__pyx_v_serial), NULL, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19878, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v__temp_serial_ = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":19879
 *         raise TypeError("serial must be a Python str")
 *     cdef bytes _temp_serial_ = (<str>serial).encode()
 *     cdef char* _serial_ = _temp_serial_             # <<<<<<<<<<<<<<
 *     cdef Device device
 *     with nogil:
 */
  /* Borrowed pointer into _temp_serial_'s buffer — valid while it lives. */
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v__temp_serial_); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 19879, __pyx_L1_error)
  __pyx_v__serial_ = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":19881
 *     cdef char* _serial_ = _temp_serial_
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleBySerial(<const char*>_serial_, &device)
 *     check_status(__status__)
 */
  /* `with nogil:` — GIL is dropped around the NVML call below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19882
 *     cdef Device device
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleBySerial(<const char*>_serial_, &device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>device
 */
        /* Sentinel status means the NVML symbol failed to load dynamically. */
        __pyx_t_7 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleBySerial(((char const *)__pyx_v__serial_), (&__pyx_v_device)); if (unlikely(__pyx_t_7 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19882, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_7;
      }

      /* "cuda/bindings/_nvml.pyx":19881
 *     cdef char* _serial_ = _temp_serial_
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleBySerial(<const char*>_serial_, &device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        /* Error path still re-acquires the GIL before propagating. */
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19883
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleBySerial(<const char*>_serial_, &device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>device
 * 
 */
  /* check_status() raises the mapped Python exception on non-success. */
  __pyx_t_8 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_8 == ((int)1))) __PYX_ERR(0, 19883, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19884
 *         __status__ = nvmlDeviceGetHandleBySerial(<const char*>_serial_, &device)
 *     check_status(__status__)
 *     return <intptr_t>device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19865
 * 
 * 
 * cpdef intptr_t device_get_handle_by_serial(serial) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its board serial number.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_serial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Release the bytes temporary only after the NVML call has returned. */
  __Pyx_XDECREF(__pyx_v__temp_serial_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_45device_get_handle_by_serial(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_44device_get_handle_by_serial, "device_get_handle_by_serial(serial) -> intptr_t\n\nAcquire the handle for a particular device, based on its board serial number.\n\nArgs:\n    serial (str): The board serial number of the target GPU.\n\nReturns:\n    intptr_t: Reference in which to return the device handle.\n\n.. seealso:: `nvmlDeviceGetHandleBySerial`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_45device_get_handle_by_serial = {"device_get_handle_by_serial", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_45device_get_handle_by_serial, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_44device_get_handle_by_serial};
/*
 * METH_FASTCALL|METH_KEYWORDS entry point for `device_get_handle_by_serial`.
 * Unpacks exactly one positional-or-keyword argument (`serial`) as a raw
 * PyObject* (type checking happens in the C body) and dispatches to the
 * _pf_ implementation function.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_45device_get_handle_by_serial(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_serial = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_handle_by_serial (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_serial,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(x) < 0` is always false (unlikely yields 0/1);
       appears to be an upstream Cython codegen quirk — verify upstream. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19865, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19865, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_handle_by_serial", 0) < (0)) __PYX_ERR(0, 19865, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_handle_by_serial", 1, 1, 1, i); __PYX_ERR(0, 19865, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19865, __pyx_L3_error)
    }
    __pyx_v_serial = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_handle_by_serial", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19865, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_serial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_44device_get_handle_by_serial(__pyx_self, __pyx_v_serial);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level implementation slot for `device_get_handle_by_serial`.
 * Calls the cpdef C body (skip_dispatch=1) and boxes the intptr_t handle
 * as a Python int; 0 plus a pending exception is the error sentinel
 * (`except? 0`).
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_44device_get_handle_by_serial(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_serial) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_serial", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_serial(__pyx_v_serial, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 19865, __pyx_L1_error)
  /* Box the handle; intptr_t is passed through PyLong_FromSsize_t here. */
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_serial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19887
 * 
 * 
 * cpdef intptr_t device_get_handle_by_uuid(uuid) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its globally unique immutable UUID (in ASCII format) associated with each device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_47device_get_handle_by_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C body of cpdef `device_get_handle_by_uuid(uuid)`.
 * Mirrors device_get_handle_by_serial: validates `uuid` is a str, encodes
 * it to a bytes temporary, takes a char* into that buffer, and calls
 * nvmlDeviceGetHandleByUUID with the GIL released.  The bytes temporary
 * (_temp_uuid_) is kept alive until function exit so the char* stays
 * valid across the call.  Returns the handle as intptr_t; 0 is the
 * `except? 0` error sentinel.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuid(PyObject *__pyx_v_uuid, CYTHON_UNUSED int __pyx_skip_dispatch) {
  PyObject *__pyx_v__temp_uuid_ = 0;
  char *__pyx_v__uuid_;
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_device;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  char *__pyx_t_6;
  nvmlReturn_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_uuid", 0);

  /* "cuda/bindings/_nvml.pyx":19898
 *     .. seealso:: `nvmlDeviceGetHandleByUUID`
 *     """
 *     if not isinstance(uuid, str):             # <<<<<<<<<<<<<<
 *         raise TypeError("uuid must be a Python str")
 *     cdef bytes _temp_uuid_ = (<str>uuid).encode()
 */
  __pyx_t_1 = PyUnicode_Check(__pyx_v_uuid); 
  __pyx_t_2 = (!__pyx_t_1);
  if (unlikely(__pyx_t_2)) {

    /* "cuda/bindings/_nvml.pyx":19899
 *     """
 *     if not isinstance(uuid, str):
 *         raise TypeError("uuid must be a Python str")             # <<<<<<<<<<<<<<
 *     cdef bytes _temp_uuid_ = (<str>uuid).encode()
 *     cdef char* _uuid_ = _temp_uuid_
 */
    /* Construct and raise TypeError via a vectorcall to the type object. */
    __pyx_t_4 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_uuid_must_be_a_Python_str};
      __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19899, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(0, 19899, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19898
 *     .. seealso:: `nvmlDeviceGetHandleByUUID`
 *     """
 *     if not isinstance(uuid, str):             # <<<<<<<<<<<<<<
 *         raise TypeError("uuid must be a Python str")
 *     cdef bytes _temp_uuid_ = (<str>uuid).encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":19900
 *     if not isinstance(uuid, str):
 *         raise TypeError("uuid must be a Python str")
 *     cdef bytes _temp_uuid_ = (<str>uuid).encode()             # <<<<<<<<<<<<<<
 *     cdef char* _uuid_ = _temp_uuid_
 *     cdef Device device
 */
  if (unlikely(__pyx_v_uuid == Py_None)) {
    PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "encode");
    __PYX_ERR(0, 19900, __pyx_L1_error)
  }
  /* str.encode() with default (UTF-8) encoding → owned bytes temporary. */
  __pyx_t_3 = PyUnicode_AsEncodedString(((PyObject*)__pyx_v_uuid), NULL, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19900, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v__temp_uuid_ = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":19901
 *         raise TypeError("uuid must be a Python str")
 *     cdef bytes _temp_uuid_ = (<str>uuid).encode()
 *     cdef char* _uuid_ = _temp_uuid_             # <<<<<<<<<<<<<<
 *     cdef Device device
 *     with nogil:
 */
  /* Borrowed pointer into _temp_uuid_'s buffer — valid while it lives. */
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v__temp_uuid_); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 19901, __pyx_L1_error)
  __pyx_v__uuid_ = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":19903
 *     cdef char* _uuid_ = _temp_uuid_
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByUUID(<const char*>_uuid_, &device)
 *     check_status(__status__)
 */
  /* `with nogil:` — GIL is dropped around the NVML call below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19904
 *     cdef Device device
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByUUID(<const char*>_uuid_, &device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>device
 */
        /* Sentinel status means the NVML symbol failed to load dynamically. */
        __pyx_t_7 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUID(((char const *)__pyx_v__uuid_), (&__pyx_v_device)); if (unlikely(__pyx_t_7 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19904, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_7;
      }

      /* "cuda/bindings/_nvml.pyx":19903
 *     cdef char* _uuid_ = _temp_uuid_
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByUUID(<const char*>_uuid_, &device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        /* Error path still re-acquires the GIL before propagating. */
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19905
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByUUID(<const char*>_uuid_, &device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>device
 * 
 */
  /* check_status() raises the mapped Python exception on non-success. */
  __pyx_t_8 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_8 == ((int)1))) __PYX_ERR(0, 19905, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19906
 *         __status__ = nvmlDeviceGetHandleByUUID(<const char*>_uuid_, &device)
 *     check_status(__status__)
 *     return <intptr_t>device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19887
 * 
 * 
 * cpdef intptr_t device_get_handle_by_uuid(uuid) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its globally unique immutable UUID (in ASCII format) associated with each device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Release the bytes temporary only after the NVML call has returned. */
  __Pyx_XDECREF(__pyx_v__temp_uuid_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_47device_get_handle_by_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_46device_get_handle_by_uuid, "device_get_handle_by_uuid(uuid) -> intptr_t\n\nAcquire the handle for a particular device, based on its globally unique immutable UUID (in ASCII format) associated with each device.\n\nArgs:\n    uuid (str): The UUID of the target GPU or MIG instance.\n\nReturns:\n    intptr_t: Reference in which to return the device handle or MIG device handle.\n\n.. seealso:: `nvmlDeviceGetHandleByUUID`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_47device_get_handle_by_uuid = {"device_get_handle_by_uuid", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_47device_get_handle_by_uuid, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_46device_get_handle_by_uuid};
/*
 * METH_FASTCALL|METH_KEYWORDS entry point for `device_get_handle_by_uuid`.
 * Unpacks exactly one positional-or-keyword argument (`uuid`) as a raw
 * PyObject* (type checking happens in the C body) and dispatches to the
 * _pf_ implementation function.
 * NOTE: Cython-generated code — do not hand-edit; regenerate from the .pyx.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_47device_get_handle_by_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_uuid = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_handle_by_uuid (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_uuid,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(x) < 0` is always false (unlikely yields 0/1);
       appears to be an upstream Cython codegen quirk — verify upstream. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19887, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19887, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_handle_by_uuid", 0) < (0)) __PYX_ERR(0, 19887, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_handle_by_uuid", 1, 1, 1, i); __PYX_ERR(0, 19887, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19887, __pyx_L3_error)
    }
    __pyx_v_uuid = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_handle_by_uuid", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19887, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_46device_get_handle_by_uuid(__pyx_self, __pyx_v_uuid);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated glue -- fix issues in cuda/bindings/_nvml.pyx
 * and regenerate rather than hand-editing logic here.
 * Python-level body of device_get_handle_by_uuid(uuid): delegates to the
 * C-level cpdef implementation (skip_dispatch=1) and boxes the returned
 * intptr_t device handle as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_46device_get_handle_by_uuid(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_uuid) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_uuid", 0);
  __Pyx_XDECREF(__pyx_r);
  /* cpdef is declared `except? 0`: a return of 0 signals an error only when a
     Python exception is also pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuid(__pyx_v_uuid, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 19887, __pyx_L1_error)
  /* Box via PyLong_FromSsize_t; assumes Py_ssize_t can represent intptr_t
     (true on the flat-address platforms this targets -- TODO confirm). */
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19909
 * 
 * 
 * cpdef intptr_t device_get_handle_by_uuidv(intptr_t uuid) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. See ``nvmlUUID_v1_t`` for more information on the UUID struct. The caller must set the appropriate version prior to calling this API.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_49device_get_handle_by_uuidv(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* NOTE(review): Cython-generated from cuda/bindings/_nvml.pyx:19909; edit the
 * .pyx source and regenerate rather than hand-editing this logic.
 * C-level body of cpdef device_get_handle_by_uuidv(uuid): treats `uuid` as a
 * pointer to an nvmlUUID_t, calls nvmlDeviceGetHandleByUUIDV with the GIL
 * released, raises through check_status() on a bad NVML status, and returns
 * the device handle cast to intptr_t (error sentinel 0, per `except? 0`). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuidv(intptr_t __pyx_v_uuid, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_device;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19921
 *     """
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByUUIDV(<const nvmlUUID_t*>uuid, &device)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19922
 *     cdef Device device
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByUUIDV(<const nvmlUUID_t*>uuid, &device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>device
 */
        /* NOTE(review): the internal-loading-error sentinel combined with a
           pending Python exception appears to signal that the lazy NVML symbol
           loader failed -- confirm against the cy_nvml shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUIDV(((nvmlUUID_t const *)__pyx_v_uuid), (&__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19922, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19921
 *     """
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByUUIDV(<const nvmlUUID_t*>uuid, &device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil block: reacquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19923
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByUUIDV(<const nvmlUUID_t*>uuid, &device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>device
 * 
 */
  /* check_status raises a Python exception (returning 1) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19923, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19924
 *         __status__ = nvmlDeviceGetHandleByUUIDV(<const nvmlUUID_t*>uuid, &device)
 *     check_status(__status__)
 *     return <intptr_t>device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19909
 * 
 * 
 * cpdef intptr_t device_get_handle_by_uuidv(intptr_t uuid) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. See ``nvmlUUID_v1_t`` for more information on the UUID struct. The caller must set the appropriate version prior to calling this API.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_uuidv", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Method-table plumbing for device_get_handle_by_uuidv: forward declaration of
 * the fastcall wrapper, its docstring, and the PyMethodDef entry that the
 * module-init code registers on the extension module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_49device_get_handle_by_uuidv(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_48device_get_handle_by_uuidv, "device_get_handle_by_uuidv(intptr_t uuid) -> intptr_t\n\nAcquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. See ``nvmlUUID_v1_t`` for more information on the UUID struct. The caller must set the appropriate version prior to calling this API.\n\nArgs:\n    uuid (intptr_t): The UUID of the target GPU or MIG instance.\n\nReturns:\n    intptr_t: Reference in which to return the device handle or MIG device handle.\n\n.. seealso:: `nvmlDeviceGetHandleByUUIDV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_49device_get_handle_by_uuidv = {"device_get_handle_by_uuidv", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_49device_get_handle_by_uuidv, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_48device_get_handle_by_uuidv};
/* Fastcall wrapper for device_get_handle_by_uuidv: unpacks the single
 * positional-or-keyword argument `uuid`, converts it to intptr_t, and
 * dispatches to the pf-level implementation.
 * NOTE(review): Cython-generated; keep in sync with cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_49device_get_handle_by_uuidv(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_uuid;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_handle_by_uuidv (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_uuid,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`.  With the usual Cython
       definition `#define unlikely(x) __builtin_expect(!!(x), 0)` the `!!`
       collapses the value to 0/1, so `< 0` could never be true and a failing
       __Pyx_NumKwargs_FASTCALL was silently ignored.  Comparing inside the
       macro is correct under either plausible definition of unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19909, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19909, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_handle_by_uuidv", 0) < (0)) __PYX_ERR(0, 19909, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_handle_by_uuidv", 1, 1, 1, i); __PYX_ERR(0, 19909, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19909, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with a pending exception means
       conversion failure. */
    __pyx_v_uuid = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_uuid == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19909, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_handle_by_uuidv", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19909, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_uuidv", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_48device_get_handle_by_uuidv(__pyx_self, __pyx_v_uuid);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated glue; regenerate from the .pyx rather than
 * hand-editing.  Python-level body of device_get_handle_by_uuidv(uuid):
 * forwards to the C-level cpdef implementation (skip_dispatch=1) and boxes
 * the resulting intptr_t handle as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_48device_get_handle_by_uuidv(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_uuid) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_uuidv", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` contract: 0 is an error only with an exception pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuidv(__pyx_v_uuid, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 19909, __pyx_L1_error)
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19909, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_uuidv", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19927
 * 
 * 
 * cpdef intptr_t device_get_handle_by_pci_bus_id_v2(pci_bus_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its PCI bus id.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_51device_get_handle_by_pci_bus_id_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* NOTE(review): Cython-generated from cuda/bindings/_nvml.pyx:19927.
 * C-level body of cpdef device_get_handle_by_pci_bus_id_v2(pci_bus_id):
 * requires a Python str, encodes it to bytes (default UTF-8), passes the
 * resulting C string to nvmlDeviceGetHandleByPciBusId_v2 with the GIL
 * released, raises via check_status() on failure, and returns the handle as
 * intptr_t (error sentinel 0, per `except? 0`). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_pci_bus_id_v2(PyObject *__pyx_v_pci_bus_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  PyObject *__pyx_v__temp_pci_bus_id_ = 0;
  char *__pyx_v__pci_bus_id_;
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_device;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  char *__pyx_t_6;
  nvmlReturn_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_pci_bus_id_v2", 0);

  /* "cuda/bindings/_nvml.pyx":19938
 *     .. seealso:: `nvmlDeviceGetHandleByPciBusId_v2`
 *     """
 *     if not isinstance(pci_bus_id, str):             # <<<<<<<<<<<<<<
 *         raise TypeError("pci_bus_id must be a Python str")
 *     cdef bytes _temp_pci_bus_id_ = (<str>pci_bus_id).encode()
 */
  __pyx_t_1 = PyUnicode_Check(__pyx_v_pci_bus_id); 
  __pyx_t_2 = (!__pyx_t_1);
  if (unlikely(__pyx_t_2)) {

    /* "cuda/bindings/_nvml.pyx":19939
 *     """
 *     if not isinstance(pci_bus_id, str):
 *         raise TypeError("pci_bus_id must be a Python str")             # <<<<<<<<<<<<<<
 *     cdef bytes _temp_pci_bus_id_ = (<str>pci_bus_id).encode()
 *     cdef char* _pci_bus_id_ = _temp_pci_bus_id_
 */
    /* Build and raise TypeError via the vectorcall fast path. */
    __pyx_t_4 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_pci_bus_id_must_be_a_Python_str};
      __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_TypeError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19939, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
    }
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(0, 19939, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":19938
 *     .. seealso:: `nvmlDeviceGetHandleByPciBusId_v2`
 *     """
 *     if not isinstance(pci_bus_id, str):             # <<<<<<<<<<<<<<
 *         raise TypeError("pci_bus_id must be a Python str")
 *     cdef bytes _temp_pci_bus_id_ = (<str>pci_bus_id).encode()
 */
  }

  /* "cuda/bindings/_nvml.pyx":19940
 *     if not isinstance(pci_bus_id, str):
 *         raise TypeError("pci_bus_id must be a Python str")
 *     cdef bytes _temp_pci_bus_id_ = (<str>pci_bus_id).encode()             # <<<<<<<<<<<<<<
 *     cdef char* _pci_bus_id_ = _temp_pci_bus_id_
 *     cdef Device device
 */
  if (unlikely(__pyx_v_pci_bus_id == Py_None)) {
    PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "encode");
    __PYX_ERR(0, 19940, __pyx_L1_error)
  }
  /* .encode() with default arguments (UTF-8). */
  __pyx_t_3 = PyUnicode_AsEncodedString(((PyObject*)__pyx_v_pci_bus_id), NULL, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19940, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v__temp_pci_bus_id_ = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":19941
 *         raise TypeError("pci_bus_id must be a Python str")
 *     cdef bytes _temp_pci_bus_id_ = (<str>pci_bus_id).encode()
 *     cdef char* _pci_bus_id_ = _temp_pci_bus_id_             # <<<<<<<<<<<<<<
 *     cdef Device device
 *     with nogil:
 */
  /* Borrowed pointer into _temp_pci_bus_id_; kept alive until after the call. */
  __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v__temp_pci_bus_id_); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 19941, __pyx_L1_error)
  __pyx_v__pci_bus_id_ = __pyx_t_6;

  /* "cuda/bindings/_nvml.pyx":19943
 *     cdef char* _pci_bus_id_ = _temp_pci_bus_id_
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByPciBusId_v2(<const char*>_pci_bus_id_, &device)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19944
 *     cdef Device device
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByPciBusId_v2(<const char*>_pci_bus_id_, &device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>device
 */
        __pyx_t_7 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByPciBusId_v2(((char const *)__pyx_v__pci_bus_id_), (&__pyx_v_device)); if (unlikely(__pyx_t_7 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19944, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_7;
      }

      /* "cuda/bindings/_nvml.pyx":19943
 *     cdef char* _pci_bus_id_ = _temp_pci_bus_id_
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHandleByPciBusId_v2(<const char*>_pci_bus_id_, &device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        /* Error inside the nogil block: reacquire the GIL before unwinding. */
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19945
 *     with nogil:
 *         __status__ = nvmlDeviceGetHandleByPciBusId_v2(<const char*>_pci_bus_id_, &device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>device
 * 
 */
  __pyx_t_8 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_8 == ((int)1))) __PYX_ERR(0, 19945, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19946
 *         __status__ = nvmlDeviceGetHandleByPciBusId_v2(<const char*>_pci_bus_id_, &device)
 *     check_status(__status__)
 *     return <intptr_t>device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19927
 * 
 * 
 * cpdef intptr_t device_get_handle_by_pci_bus_id_v2(pci_bus_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its PCI bus id.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_pci_bus_id_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v__temp_pci_bus_id_);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Method-table plumbing for device_get_handle_by_pci_bus_id_v2: forward
 * declaration of the fastcall wrapper, its docstring, and the PyMethodDef
 * entry registered on the extension module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_51device_get_handle_by_pci_bus_id_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_50device_get_handle_by_pci_bus_id_v2, "device_get_handle_by_pci_bus_id_v2(pci_bus_id) -> intptr_t\n\nAcquire the handle for a particular device, based on its PCI bus id.\n\nArgs:\n    pci_bus_id (str): The PCI bus id of the target GPU Accept the following formats (all numbers in hexadecimal): domain:bus:device.function in format x:x:x.x domain:bus:device in format x:x:x bus:device.function in format x:x.x.\n\nReturns:\n    intptr_t: Reference in which to return the device handle.\n\n.. seealso:: `nvmlDeviceGetHandleByPciBusId_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_51device_get_handle_by_pci_bus_id_v2 = {"device_get_handle_by_pci_bus_id_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_51device_get_handle_by_pci_bus_id_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_50device_get_handle_by_pci_bus_id_v2};
/* Fastcall wrapper for device_get_handle_by_pci_bus_id_v2: unpacks the single
 * positional-or-keyword argument `pci_bus_id` (kept as a PyObject*) and
 * dispatches to the pf-level implementation.
 * NOTE(review): Cython-generated; keep in sync with cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_51device_get_handle_by_pci_bus_id_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_pci_bus_id = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_handle_by_pci_bus_id_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pci_bus_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`.  With the usual Cython
       definition `#define unlikely(x) __builtin_expect(!!(x), 0)` the `!!`
       collapses the value to 0/1, so `< 0` could never be true and a failing
       __Pyx_NumKwargs_FASTCALL was silently ignored.  Comparing inside the
       macro is correct under either plausible definition of unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19927, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19927, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_handle_by_pci_bus_id_v2", 0) < (0)) __PYX_ERR(0, 19927, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_handle_by_pci_bus_id_v2", 1, 1, 1, i); __PYX_ERR(0, 19927, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19927, __pyx_L3_error)
    }
    __pyx_v_pci_bus_id = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_handle_by_pci_bus_id_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19927, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_pci_bus_id_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_50device_get_handle_by_pci_bus_id_v2(__pyx_self, __pyx_v_pci_bus_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Cython-generated glue; regenerate from the .pyx rather than
 * hand-editing.  Python-level body of device_get_handle_by_pci_bus_id_v2:
 * forwards the pci_bus_id object to the C-level cpdef implementation
 * (skip_dispatch=1) and boxes the resulting intptr_t handle as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_50device_get_handle_by_pci_bus_id_v2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_pci_bus_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_handle_by_pci_bus_id_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` contract: 0 is an error only with an exception pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_pci_bus_id_v2(__pyx_v_pci_bus_id, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 19927, __pyx_L1_error)
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_handle_by_pci_bus_id_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19949
 * 
 * 
 * cpdef str device_get_name(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the name of this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_53device_get_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* NOTE(review): Cython-generated from cuda/bindings/_nvml.pyx:19949.
 * C-level body of cpdef device_get_name(device): calls nvmlDeviceGetName into
 * a fixed 96-byte stack buffer with the GIL released, raises via
 * check_status() on a bad NVML status, and decodes the NUL-terminated result
 * with PyUnicode_FromString (assumes the driver returns UTF-8/ASCII --
 * TODO confirm against the NVML docs). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_name(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_name[96];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_name", 0);

  /* "cuda/bindings/_nvml.pyx":19957
 *     .. seealso:: `nvmlDeviceGetName`
 *     """
 *     cdef unsigned int length = 96             # <<<<<<<<<<<<<<
 *     cdef char[96] name
 *     with nogil:
 */
  __pyx_v_length = 96;

  /* "cuda/bindings/_nvml.pyx":19959
 *     cdef unsigned int length = 96
 *     cdef char[96] name
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetName(<Device>device, name, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19960
 *     cdef char[96] name
 *     with nogil:
 *         __status__ = nvmlDeviceGetName(<Device>device, name, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(name)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetName(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_name, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19960, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19959
 *     cdef unsigned int length = 96
 *     cdef char[96] name
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetName(<Device>device, name, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil block: reacquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19961
 *     with nogil:
 *         __status__ = nvmlDeviceGetName(<Device>device, name, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(name)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19961, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19962
 *         __status__ = nvmlDeviceGetName(<Device>device, name, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(name)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_name); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19962, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19949
 * 
 * 
 * cpdef str device_get_name(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the name of this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Method-table plumbing for device_get_name: forward declaration of the
 * fastcall wrapper, its docstring, and the PyMethodDef entry registered on
 * the extension module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_53device_get_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_52device_get_name, "device_get_name(intptr_t device) -> str\n\nRetrieves the name of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetName`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_53device_get_name = {"device_get_name", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_53device_get_name, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_52device_get_name};
/* Fastcall wrapper for device_get_name: unpacks the single
 * positional-or-keyword argument `device`, converts it to intptr_t, and
 * dispatches to the pf-level implementation.
 * NOTE(review): Cython-generated; keep in sync with cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_53device_get_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_name (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`.  With the usual Cython
       definition `#define unlikely(x) __builtin_expect(!!(x), 0)` the `!!`
       collapses the value to 0/1, so `< 0` could never be true and a failing
       __Pyx_NumKwargs_FASTCALL was silently ignored.  Comparing inside the
       macro is correct under either plausible definition of unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19949, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19949, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_name", 0) < (0)) __PYX_ERR(0, 19949, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_name", 1, 1, 1, i); __PYX_ERR(0, 19949, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19949, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with a pending exception means
       conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19949, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_name", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19949, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_52device_get_name(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_name(device)`.
 * Delegates to the C-level cpdef implementation with skip_dispatch=1
 * (presumably to avoid re-dispatching through the Python wrapper — standard
 * Cython cpdef pattern) and returns the resulting object, or NULL with a
 * traceback attached on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_52device_get_name(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_name", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception from the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_name(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19965
 * 
 * 
 * cpdef int device_get_brand(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the brand of this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_55device_get_brand(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_brand(device) -> int`.
 * Releases the GIL, calls nvmlDeviceGetBrand for the given device handle,
 * re-acquires the GIL, raises via check_status() on a non-success status,
 * and returns the brand enum value as int. Error sentinel is -1 (the cpdef
 * is declared `except? -1`, so callers must also check PyErr_Occurred). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_brand(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__BrandType __pyx_v_type;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19977
 *     """
 *     cdef _BrandType type
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBrand(<Device>device, &type)
 *     check_status(__status__)
 */
  /* GIL is released around the NVML call; the save/restore pair below must
   * bracket it exactly, including on the error path (__pyx_L4_error). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19978
 *     cdef _BrandType type
 *     with nogil:
 *         __status__ = nvmlDeviceGetBrand(<Device>device, &type)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>type
 */
        /* The special INTERNAL_LOADING_ERROR status means the NVML symbol
         * could not be loaded; in that case an exception is already set. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBrand(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_type)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19978, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19977
 *     """
 *     cdef _BrandType type
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBrand(<Device>device, &type)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19979
 *     with nogil:
 *         __status__ = nvmlDeviceGetBrand(<Device>device, &type)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>type
 * 
 */
  /* check_status raises an NVML error exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19979, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19980
 *         __status__ = nvmlDeviceGetBrand(<Device>device, &type)
 *     check_status(__status__)
 *     return <int>type             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_type);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19965
 * 
 * 
 * cpdef int device_get_brand(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the brand of this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_brand", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper for `device_get_brand(device)`.
 * Parses positional/keyword arguments (exactly one: `device`, converted via
 * PyLong_AsSsize_t to intptr_t), then forwards to the pf body. All borrowed/
 * new references held in `values[]` are released on both success and error
 * paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_55device_get_brand(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_54device_get_brand, "device_get_brand(intptr_t device) -> int\n\nRetrieves the brand of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the product brand type.\n\n.. seealso:: `nvmlDeviceGetBrand`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_55device_get_brand = {"device_get_brand", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_55device_get_brand, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_54device_get_brand};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_55device_get_brand(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_brand (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which compares the 0/1
     * result of unlikely() against 0 and is therefore always false, making
     * this error check dead code. The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19965, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19965, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_brand", 0) < (0)) __PYX_ERR(0, 19965, __pyx_L3_error)
      /* Verify every required argument was supplied either positionally or
       * by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_brand", 1, 1, 1, i); __PYX_ERR(0, 19965, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19965, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19965, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_brand", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19965, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_brand", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_54device_get_brand(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_brand(device)`.
 * Calls the C implementation (skip_dispatch=1), distinguishes the -1 error
 * sentinel from a legitimate -1 value via PyErr_Occurred (the cpdef is
 * `except? -1`), and boxes the int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_54device_get_brand(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_brand", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_brand(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19965, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19965, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_brand", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":19983
 * 
 * 
 * cpdef unsigned int device_get_index(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the NVML index of this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_57device_get_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_index(device) -> unsigned int`.
 * Releases the GIL, calls nvmlDeviceGetIndex, re-acquires the GIL, raises via
 * check_status() on failure, and returns the device's NVML index. Error
 * sentinel is 0 (`except? 0`), so index 0 requires a PyErr_Occurred check in
 * callers. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_index(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_ind_ex;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":19995
 *     """
 *     cdef unsigned int ind_ex
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetIndex(<Device>device, &ind_ex)
 *     check_status(__status__)
 */
  /* GIL released around the NVML call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":19996
 *     cdef unsigned int ind_ex
 *     with nogil:
 *         __status__ = nvmlDeviceGetIndex(<Device>device, &ind_ex)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return ind_ex
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIndex(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_ind_ex)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19996, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":19995
 *     """
 *     cdef unsigned int ind_ex
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetIndex(<Device>device, &ind_ex)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":19997
 *     with nogil:
 *         __status__ = nvmlDeviceGetIndex(<Device>device, &ind_ex)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return ind_ex
 * 
 */
  /* Raises an NVML error exception when status is not success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 19997, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":19998
 *         __status__ = nvmlDeviceGetIndex(<Device>device, &ind_ex)
 *     check_status(__status__)
 *     return ind_ex             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_ind_ex;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":19983
 * 
 * 
 * cpdef unsigned int device_get_index(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the NVML index of this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper for `device_get_index(device)`.
 * Parses exactly one argument (`device`, via PyLong_AsSsize_t), then forwards
 * to the pf body; argument references in values[] are released on all paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_57device_get_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_56device_get_index, "device_get_index(intptr_t device) -> unsigned int\n\nRetrieves the NVML index of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the NVML index of the device.\n\n.. seealso:: `nvmlDeviceGetIndex`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_57device_get_index = {"device_get_index", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_57device_get_index, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_56device_get_index};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_57device_get_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_index (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() yields 0/1, so
     * the comparison was always false and the error check was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19983, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19983, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_index", 0) < (0)) __PYX_ERR(0, 19983, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_index", 1, 1, 1, i); __PYX_ERR(0, 19983, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19983, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 19983, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_index", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19983, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_56device_get_index(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_index(device)`.
 * Calls the C implementation (skip_dispatch=1), distinguishes the 0 error
 * sentinel from a real index 0 via PyErr_Occurred (`except? 0`), and boxes
 * the unsigned int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_56device_get_index(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_index", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_index(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 19983, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19983, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20001
 * 
 * 
 * cpdef str device_get_serial(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the globally unique board serial number associated with this device's board.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_59device_get_serial(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_serial(device) -> str`.
 * Fills a 30-byte stack buffer (presumably NVML_DEVICE_SERIAL_BUFFER_SIZE ==
 * 30 — confirm against nvml.h) via nvmlDeviceGetSerial with the GIL released,
 * raises via check_status() on failure, and decodes the NUL-terminated C
 * string into a Python str. Returns NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_serial(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_serial[30];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_serial", 0);

  /* "cuda/bindings/_nvml.pyx":20009
 *     .. seealso:: `nvmlDeviceGetSerial`
 *     """
 *     cdef unsigned int length = 30             # <<<<<<<<<<<<<<
 *     cdef char[30] serial
 *     with nogil:
 */
  /* Buffer capacity passed to NVML; must match the declared array size. */
  __pyx_v_length = 30;

  /* "cuda/bindings/_nvml.pyx":20011
 *     cdef unsigned int length = 30
 *     cdef char[30] serial
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSerial(<Device>device, serial, length)
 *     check_status(__status__)
 */
  /* GIL released around the NVML call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20012
 *     cdef char[30] serial
 *     with nogil:
 *         __status__ = nvmlDeviceGetSerial(<Device>device, serial, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(serial)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSerial(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_serial, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20012, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20011
 *     cdef unsigned int length = 30
 *     cdef char[30] serial
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSerial(<Device>device, serial, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20013
 *     with nogil:
 *         __status__ = nvmlDeviceGetSerial(<Device>device, serial, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(serial)
 * 
 */
  /* Raises an NVML error exception when status is not success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20013, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20014
 *         __status__ = nvmlDeviceGetSerial(<Device>device, serial, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(serial)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* Decode the NUL-terminated buffer (UTF-8 per PyUnicode_FromString). */
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_serial); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20014, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20001
 * 
 * 
 * cpdef str device_get_serial(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the globally unique board serial number associated with this device's board.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_serial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper for `device_get_serial(device)`.
 * Parses exactly one argument (`device`, via PyLong_AsSsize_t), then forwards
 * to the pf body; argument references in values[] are released on all paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_59device_get_serial(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_58device_get_serial, "device_get_serial(intptr_t device) -> str\n\nRetrieves the globally unique board serial number associated with this device's board.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetSerial`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_59device_get_serial = {"device_get_serial", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_59device_get_serial, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_58device_get_serial};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_59device_get_serial(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_serial (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() yields 0/1, so
     * the comparison was always false and the error check was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20001, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20001, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_serial", 0) < (0)) __PYX_ERR(0, 20001, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_serial", 1, 1, 1, i); __PYX_ERR(0, 20001, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20001, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20001, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_serial", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20001, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_serial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_58device_get_serial(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_serial(device)`.
 * Delegates to the C-level cpdef implementation (skip_dispatch=1) and returns
 * the resulting str object, or NULL with a traceback attached on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_58device_get_serial(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_serial", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception from the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_serial(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_serial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20017
 * 
 * 
 * cpdef unsigned int device_get_module_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get a unique identifier for the device module on the baseboard.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_61device_get_module_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_module_id(device) -> unsigned int`.
 * Releases the GIL, calls nvmlDeviceGetModuleId, re-acquires the GIL, raises
 * via check_status() on failure, and returns the module identifier. Error
 * sentinel is 0 (`except? 0`), so a result of 0 requires a PyErr_Occurred
 * check in callers. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_module_id(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_module_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20029
 *     """
 *     cdef unsigned int module_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetModuleId(<Device>device, &module_id)
 *     check_status(__status__)
 */
  /* GIL released around the NVML call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20030
 *     cdef unsigned int module_id
 *     with nogil:
 *         __status__ = nvmlDeviceGetModuleId(<Device>device, &module_id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return module_id
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetModuleId(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_module_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20030, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20029
 *     """
 *     cdef unsigned int module_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetModuleId(<Device>device, &module_id)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20031
 *     with nogil:
 *         __status__ = nvmlDeviceGetModuleId(<Device>device, &module_id)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return module_id
 * 
 */
  /* Raises an NVML error exception when status is not success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20031, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20032
 *         __status__ = nvmlDeviceGetModuleId(<Device>device, &module_id)
 *     check_status(__status__)
 *     return module_id             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_module_id;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20017
 * 
 * 
 * cpdef unsigned int device_get_module_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get a unique identifier for the device module on the baseboard.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_module_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_61device_get_module_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_60device_get_module_id, "device_get_module_id(intptr_t device) -> unsigned int\n\nGet a unique identifier for the device module on the baseboard.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Unique identifier for the GPU module.\n\n.. seealso:: `nvmlDeviceGetModuleId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_61device_get_module_id = {"device_get_module_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_61device_get_module_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_60device_get_module_id};
/* Cython-generated CPython wrapper for device_get_module_id(): unpacks the
 * single `device` argument (positional or keyword), converts it to intptr_t,
 * and dispatches to the argument-checked shim __pyx_pf_..._60device_get_module_id. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_61device_get_module_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_module_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`, which compares the 0/1 result of
     * the unlikely() branch hint and can never be true, leaving the negative-
     * length error check dead.  Parenthesize the comparison itself, matching
     * the `unlikely(__pyx_nargs < 0)` check above. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20017, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20017, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_module_id", 0) < (0)) __PYX_ERR(0, 20017, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_module_id", 1, 1, 1, i); __PYX_ERR(0, 20017, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20017, __pyx_L3_error)
    }
    /* -1 doubles as the error sentinel; PyErr_Occurred() disambiguates a real -1. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20017, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_module_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20017, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_module_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_60device_get_module_id(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated shim for device_get_module_id(): calls the cpdef C
 * implementation and boxes the `unsigned int` result as a Python int.
 * The cpdef is declared `except? 0` (see the .pyx banner above), so 0 is an
 * ambiguous error sentinel — hence the PyErr_Occurred() check after the call. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_60device_get_module_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_module_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* skip_dispatch=1: call the C implementation directly, no Python override lookup. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_module_id(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20017, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_module_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20035
 * 
 * 
 * cpdef object device_get_c2c_mode_info_v(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the Device's C2C Mode information.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_63device_get_c2c_mode_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated cpdef implementation of device_get_c2c_mode_info_v():
 * allocates a Python-owned C2cModeInfo_v1 wrapper object, fills the
 * nvmlC2cModeInfo_v1_t struct it owns via nvmlDeviceGetC2cModeInfoV (driver
 * call made with the GIL released), raises on a bad NVML status via
 * check_status(), and returns the wrapper. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_c2c_mode_info_v(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *__pyx_v_c2c_mode_info_py = 0;
  nvmlC2cModeInfo_v1_t *__pyx_v_c2c_mode_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_c2c_mode_info_v", 0);

  /* "cuda/bindings/_nvml.pyx":20046
 *     .. seealso:: `nvmlDeviceGetC2cModeInfoV`
 *     """
 *     cdef C2cModeInfo_v1 c2c_mode_info_py = C2cModeInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlC2cModeInfo_v1_t *c2c_mode_info = <nvmlC2cModeInfo_v1_t *><intptr_t>(c2c_mode_info_py._get_ptr())
 *     with nogil:
*/
  /* Vectorcall C2cModeInfo_v1() with zero arguments to construct the result object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20046, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_c2c_mode_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20047
 *     """
 *     cdef C2cModeInfo_v1 c2c_mode_info_py = C2cModeInfo_v1()
 *     cdef nvmlC2cModeInfo_v1_t *c2c_mode_info = <nvmlC2cModeInfo_v1_t *><intptr_t>(c2c_mode_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetC2cModeInfoV(<Device>device, c2c_mode_info)
*/
  /* _get_ptr() yields the address (as intptr_t) of the struct owned by the
   * wrapper object, so NVML writes directly into Python-managed storage. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)__pyx_v_c2c_mode_info_py->__pyx_vtab)->_get_ptr(__pyx_v_c2c_mode_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20047, __pyx_L1_error)
  __pyx_v_c2c_mode_info = ((nvmlC2cModeInfo_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20048
 *     cdef C2cModeInfo_v1 c2c_mode_info_py = C2cModeInfo_v1()
 *     cdef nvmlC2cModeInfo_v1_t *c2c_mode_info = <nvmlC2cModeInfo_v1_t *><intptr_t>(c2c_mode_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetC2cModeInfoV(<Device>device, c2c_mode_info)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20049
 *     cdef nvmlC2cModeInfo_v1_t *c2c_mode_info = <nvmlC2cModeInfo_v1_t *><intptr_t>(c2c_mode_info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetC2cModeInfoV(<Device>device, c2c_mode_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return c2c_mode_info_py
*/
        /* The sentinel return value signals a library-loading failure inside the
         * binding layer; __Pyx_ErrOccurredWithGIL re-acquires the GIL to check. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetC2cModeInfoV(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_c2c_mode_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20049, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20048
 *     cdef C2cModeInfo_v1 c2c_mode_info_py = C2cModeInfo_v1()
 *     cdef nvmlC2cModeInfo_v1_t *c2c_mode_info = <nvmlC2cModeInfo_v1_t *><intptr_t>(c2c_mode_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetC2cModeInfoV(<Device>device, c2c_mode_info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil region: re-acquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20050
 *     with nogil:
 *         __status__ = nvmlDeviceGetC2cModeInfoV(<Device>device, c2c_mode_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return c2c_mode_info_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20050, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20051
 *         __status__ = nvmlDeviceGetC2cModeInfoV(<Device>device, c2c_mode_info)
 *     check_status(__status__)
 *     return c2c_mode_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_c2c_mode_info_py);
  __pyx_r = ((PyObject *)__pyx_v_c2c_mode_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20035
 * 
 * 
 * cpdef object device_get_c2c_mode_info_v(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the Device's C2C Mode information.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_c2c_mode_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_c2c_mode_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_63device_get_c2c_mode_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_62device_get_c2c_mode_info_v, "device_get_c2c_mode_info_v(intptr_t device)\n\nRetrieves the Device's C2C Mode information.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlC2cModeInfo_v1_t: Output struct containing the device's C2C Mode info.\n\n.. seealso:: `nvmlDeviceGetC2cModeInfoV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_63device_get_c2c_mode_info_v = {"device_get_c2c_mode_info_v", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_63device_get_c2c_mode_info_v, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_62device_get_c2c_mode_info_v};
/* Cython-generated CPython wrapper for device_get_c2c_mode_info_v(): unpacks
 * the single `device` argument (positional or keyword) and dispatches to the
 * shim __pyx_pf_..._62device_get_c2c_mode_info_v. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_63device_get_c2c_mode_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_c2c_mode_info_v (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0` — the comparison sat outside the
     * unlikely() branch hint, so it tested the hint's 0/1 result and the error
     * check was dead code.  Parenthesize the comparison itself. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20035, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20035, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_c2c_mode_info_v", 0) < (0)) __PYX_ERR(0, 20035, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_c2c_mode_info_v", 1, 1, 1, i); __PYX_ERR(0, 20035, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20035, __pyx_L3_error)
    }
    /* -1 doubles as the error sentinel; PyErr_Occurred() disambiguates a real -1. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20035, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_c2c_mode_info_v", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20035, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_c2c_mode_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_62device_get_c2c_mode_info_v(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated shim for device_get_c2c_mode_info_v(): forwards to the
 * cpdef C implementation (skip_dispatch=1, no Python override lookup) and
 * returns the resulting object directly. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_62device_get_c2c_mode_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_c2c_mode_info_v", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_c2c_mode_info_v(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20035, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_c2c_mode_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20054
 * 
 * 
 * cpdef object device_get_memory_affinity(intptr_t device, unsigned int node_set_size, unsigned int scope):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to node_set_size) of bitmasks with the ideal memory affinity within node or socket for the device. For example, if NUMA node 0, 1 are ideal within the socket for the device and node_set_size == 1, result[0] = 0x3.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_65device_get_memory_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated cpdef implementation of device_get_memory_affinity():
 * allocates a `node_set_size`-element C-contiguous cython.view.array of
 * unsigned long (format "L"), lets nvmlDeviceGetMemoryAffinity fill it with
 * NUMA-node bitmasks (GIL released around the driver call), checks the NVML
 * status, and returns the array.  A zero-sized request returns an empty slice
 * of a 1-element array, since view.array cannot be constructed with shape 0. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_affinity(intptr_t __pyx_v_device, unsigned int __pyx_v_node_set_size, unsigned int __pyx_v_scope, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_array_obj *__pyx_v_node_set = 0;
  unsigned long *__pyx_v_node_set_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  nvmlReturn_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_memory_affinity", 0);

  /* "cuda/bindings/_nvml.pyx":20064
 *     .. seealso:: `nvmlDeviceGetMemoryAffinity`
 *     """
 *     if node_set_size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
*/
  __pyx_t_1 = (__pyx_v_node_set_size == 0);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":20065
 *     """
 *     if node_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)
*/
    /* Empty result: build a 1-element array, return its [:0] slice. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned long))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20065, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_3, NULL};
      __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20065, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20065, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20065, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_L, __pyx_t_6, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20065, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_6, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20065, __pyx_L1_error)
      __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20065, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_2);
    }
    __pyx_t_6 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_2), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20065, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_6;
    __pyx_t_6 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":20064
 *     .. seealso:: `nvmlDeviceGetMemoryAffinity`
 *     """
 *     if node_set_size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":20066
 *     if node_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)
 *     with nogil:
*/
  /* Allocate the output buffer: node_set_size unsigned longs, C-contiguous. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_node_set_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20066, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20066, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20066, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned long))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20066, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20066, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_3, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20066, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20066, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_L, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20066, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20066, __pyx_L1_error)
    __pyx_t_6 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20066, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_6);
  }
  __pyx_v_node_set = ((struct __pyx_array_obj *)__pyx_t_6);
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":20067
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryAffinity(<Device>device, node_set_size, node_set_ptr, <nvmlAffinityScope_t>scope)
*/
  __pyx_v_node_set_ptr = ((unsigned long *)__pyx_v_node_set->data);

  /* "cuda/bindings/_nvml.pyx":20068
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryAffinity(<Device>device, node_set_size, node_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20069
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryAffinity(<Device>device, node_set_size, node_set_ptr, <nvmlAffinityScope_t>scope)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return node_set
*/
        /* The sentinel return value signals a library-loading failure inside the
         * binding layer; __Pyx_ErrOccurredWithGIL re-acquires the GIL to check. */
        __pyx_t_8 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryAffinity(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_node_set_size, __pyx_v_node_set_ptr, ((nvmlAffinityScope_t)__pyx_v_scope)); if (unlikely(__pyx_t_8 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20069, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_8;
      }

      /* "cuda/bindings/_nvml.pyx":20068
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryAffinity(<Device>device, node_set_size, node_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        /* Error inside the nogil region: re-acquire the GIL before unwinding. */
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20070
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryAffinity(<Device>device, node_set_size, node_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return node_set
 * 
*/
  __pyx_t_9 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_9 == ((int)1))) __PYX_ERR(0, 20070, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20071
 *         __status__ = nvmlDeviceGetMemoryAffinity(<Device>device, node_set_size, node_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)
 *     return node_set             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_node_set);
  __pyx_r = ((PyObject *)__pyx_v_node_set);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20054
 * 
 * 
 * cpdef object device_get_memory_affinity(intptr_t device, unsigned int node_set_size, unsigned int scope):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to node_set_size) of bitmasks with the ideal memory affinity within node or socket for the device. For example, if NUMA node 0, 1 are ideal within the socket for the device and node_set_size == 1, result[0] = 0x3.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_node_set);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_65device_get_memory_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* FIX: the docstring previously described `scope` with the text for the output
 * nodeSet bitmask array; per the NVML API reference, `scope` selects the
 * affinity scope (nvmlAffinityScope_t). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_64device_get_memory_affinity, "device_get_memory_affinity(intptr_t device, unsigned int node_set_size, unsigned int scope)\n\nRetrieves an array of unsigned ints (sized to node_set_size) of bitmasks with the ideal memory affinity within node or socket for the device. For example, if NUMA node 0, 1 are ideal within the socket for the device and node_set_size == 1, result[0] = 0x3.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    node_set_size (unsigned int): The size of the nodeSet array that is safe to access.\n    scope (unsigned int): Scope that changes the default behavior (`nvmlAffinityScope_t`).\n\n.. seealso:: `nvmlDeviceGetMemoryAffinity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_65device_get_memory_affinity = {"device_get_memory_affinity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_65device_get_memory_affinity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_64device_get_memory_affinity};
/* Cython-generated CPython wrapper for device_get_memory_affinity(): unpacks
 * `device`, `node_set_size`, and `scope` (positional or keyword) and
 * dispatches to the shim __pyx_pf_..._64device_get_memory_affinity. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_65device_get_memory_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_node_set_size;
  unsigned int __pyx_v_scope;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_memory_affinity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_node_set_size,&__pyx_mstate_global->__pyx_n_u_scope,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0` — the comparison sat outside the
     * unlikely() branch hint, so it tested the hint's 0/1 result and the error
     * check was dead code.  Parenthesize the comparison itself. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20054, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20054, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20054, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20054, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_memory_affinity", 0) < (0)) __PYX_ERR(0, 20054, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_memory_affinity", 1, 3, 3, i); __PYX_ERR(0, 20054, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20054, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20054, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20054, __pyx_L3_error)
    }
    /* Sentinel values (-1) double as error markers; PyErr_Occurred() disambiguates. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20054, __pyx_L3_error)
    __pyx_v_node_set_size = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_node_set_size == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20054, __pyx_L3_error)
    __pyx_v_scope = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_scope == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20054, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_memory_affinity", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 20054, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_64device_get_memory_affinity(__pyx_self, __pyx_v_device, __pyx_v_node_set_size, __pyx_v_scope);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python def-level body for device_get_memory_affinity (Cython-generated).
 * Thin shim: forwards the already-converted C arguments (device handle,
 * node_set_size, scope) to the cpdef C entry point with
 * __pyx_skip_dispatch=1 so the cpdef does not re-dispatch back through
 * this Python wrapper. Returns a new reference, or NULL with an exception
 * set (traceback annotated with the .pyx location). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_64device_get_memory_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_node_set_size, unsigned int __pyx_v_scope) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_memory_affinity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C (cpdef) implementation; yields a new reference or NULL. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_affinity(__pyx_v_device, __pyx_v_node_set_size, __pyx_v_scope, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20054, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20074
 * 
 * 
 * cpdef object device_get_cpu_affinity_within_scope(intptr_t device, unsigned int cpu_set_size, unsigned int scope):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity within node or socket for the device. For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_67device_get_cpu_affinity_within_scope(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C (cpdef) implementation of device_get_cpu_affinity_within_scope.
 * Allocates a C-contiguous cython.view.array of `cpu_set_size` unsigned
 * longs (format "L"), releases the GIL, calls
 * nvmlDeviceGetCpuAffinityWithinScope to fill it, re-acquires the GIL,
 * raises on a non-success NVML status via check_status, and returns the
 * array. A cpu_set_size of 0 short-circuits to an empty slice of a
 * 1-element array (view.array cannot be created with shape (0,)).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity_within_scope(intptr_t __pyx_v_device, unsigned int __pyx_v_cpu_set_size, unsigned int __pyx_v_scope, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_array_obj *__pyx_v_cpu_set = 0;
  unsigned long *__pyx_v_cpu_set_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  nvmlReturn_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cpu_affinity_within_scope", 0);

  /* "cuda/bindings/_nvml.pyx":20084
 *     .. seealso:: `nvmlDeviceGetCpuAffinityWithinScope`
 *     """
 *     if cpu_set_size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
*/
  __pyx_t_1 = (__pyx_v_cpu_set_size == 0);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":20085
 *     """
 *     if cpu_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
*/
    /* Empty-result fast path: build a 1-element array and slice it to [:0]
     * so callers always get a (possibly empty) array of the same format. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned long))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_3, NULL};
      __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20085, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20085, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20085, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_L, __pyx_t_6, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20085, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_6, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20085, __pyx_L1_error)
      __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20085, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_2);
    }
    __pyx_t_6 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_2), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20085, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_6;
    __pyx_t_6 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":20084
 *     .. seealso:: `nvmlDeviceGetCpuAffinityWithinScope`
 *     """
 *     if cpu_set_size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":20086
 *     if cpu_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:
*/
  /* Allocate the output buffer: view.array(shape=(cpu_set_size,),
   * itemsize=sizeof(unsigned long), format="L", mode="c"), built via the
   * vectorcall keyword-argument builder. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_cpu_set_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20086, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned long))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20086, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_3, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20086, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20086, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_L, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20086, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20086, __pyx_L1_error)
    __pyx_t_6 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20086, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_6);
  }
  __pyx_v_cpu_set = ((struct __pyx_array_obj *)__pyx_t_6);
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":20087
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCpuAffinityWithinScope(<Device>device, cpu_set_size, cpu_set_ptr, <nvmlAffinityScope_t>scope)
*/
  /* Raw pointer into the array's backing storage for the NVML call. */
  __pyx_v_cpu_set_ptr = ((unsigned long *)__pyx_v_cpu_set->data);

  /* "cuda/bindings/_nvml.pyx":20088
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCpuAffinityWithinScope(<Device>device, cpu_set_size, cpu_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)
*/
  /* Release the GIL around the driver call; both exit paths (normal and
   * error) re-acquire it before continuing. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20089
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetCpuAffinityWithinScope(<Device>device, cpu_set_size, cpu_set_ptr, <nvmlAffinityScope_t>scope)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpu_set
*/
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR marks a failure to load the
         * NVML symbol; in that case a Python exception is already pending. */
        __pyx_t_8 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinityWithinScope(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_cpu_set_size, __pyx_v_cpu_set_ptr, ((nvmlAffinityScope_t)__pyx_v_scope)); if (unlikely(__pyx_t_8 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20089, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_8;
      }

      /* "cuda/bindings/_nvml.pyx":20088
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCpuAffinityWithinScope(<Device>device, cpu_set_size, cpu_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20090
 *     with nogil:
 *         __status__ = nvmlDeviceGetCpuAffinityWithinScope(<Device>device, cpu_set_size, cpu_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpu_set
 * 
*/
  /* check_status raises (returns 1) when the NVML status is not success. */
  __pyx_t_9 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_9 == ((int)1))) __PYX_ERR(0, 20090, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20091
 *         __status__ = nvmlDeviceGetCpuAffinityWithinScope(<Device>device, cpu_set_size, cpu_set_ptr, <nvmlAffinityScope_t>scope)
 *     check_status(__status__)
 *     return cpu_set             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_cpu_set);
  __pyx_r = ((PyObject *)__pyx_v_cpu_set);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20074
 * 
 * 
 * cpdef object device_get_cpu_affinity_within_scope(intptr_t device, unsigned int cpu_set_size, unsigned int scope):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity within node or socket for the device. For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cpu_affinity_within_scope", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_cpu_set);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_67device_get_cpu_affinity_within_scope(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_66device_get_cpu_affinity_within_scope, "device_get_cpu_affinity_within_scope(intptr_t device, unsigned int cpu_set_size, unsigned int scope)\n\nRetrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity within node or socket for the device. For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    cpu_set_size (unsigned int): The size of the cpuSet array that is safe to access.\n    scope (unsigned int): Array reference in which to return a bitmask of CPUs, 64 CPUs per unsigned long on 64-bit machines, 32 on 32-bit machines.\n\n.. seealso:: `nvmlDeviceGetCpuAffinityWithinScope`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_67device_get_cpu_affinity_within_scope = {"device_get_cpu_affinity_within_scope", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_67device_get_cpu_affinity_within_scope, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_66device_get_cpu_affinity_within_scope};
/* Python-callable fastcall wrapper for device_get_cpu_affinity_within_scope.
 * Parses exactly three positional/keyword arguments (device, cpu_set_size,
 * scope), converts them to C (intptr_t / unsigned int / unsigned int), and
 * forwards to the def-level body. All borrowed/owned references collected in
 * values[] are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_67device_get_cpu_affinity_within_scope(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_cpu_set_size;
  unsigned int __pyx_v_scope;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_cpu_affinity_within_scope (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_cpu_set_size,&__pyx_mstate_global->__pyx_n_u_scope,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20074, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first (switch
       * falls through from highest to lowest index), then fill the rest
       * from keywords and verify none are missing. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20074, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20074, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20074, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_cpu_affinity_within_scope", 0) < (0)) __PYX_ERR(0, 20074, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_cpu_affinity_within_scope", 1, 3, 3, i); __PYX_ERR(0, 20074, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20074, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20074, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20074, __pyx_L3_error)
    }
    /* Convert the collected Python objects to their C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20074, __pyx_L3_error)
    __pyx_v_cpu_set_size = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_cpu_set_size == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20074, __pyx_L3_error)
    __pyx_v_scope = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_scope == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20074, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_cpu_affinity_within_scope", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 20074, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cpu_affinity_within_scope", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_66device_get_cpu_affinity_within_scope(__pyx_self, __pyx_v_device, __pyx_v_cpu_set_size, __pyx_v_scope);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python def-level body for device_get_cpu_affinity_within_scope
 * (Cython-generated). Thin shim: forwards the already-converted C arguments
 * to the cpdef C entry point with __pyx_skip_dispatch=1 so the cpdef does
 * not re-dispatch back through this Python wrapper. Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_66device_get_cpu_affinity_within_scope(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_cpu_set_size, unsigned int __pyx_v_scope) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cpu_affinity_within_scope", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C (cpdef) implementation; yields a new reference or NULL. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity_within_scope(__pyx_v_device, __pyx_v_cpu_set_size, __pyx_v_scope, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cpu_affinity_within_scope", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20094
 * 
 * 
 * cpdef object device_get_cpu_affinity(intptr_t device, unsigned int cpu_set_size):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity for the device For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3 This is equivalent to calling ``nvmlDeviceGetCpuAffinityWithinScope`` with ``NVML_AFFINITY_SCOPE_NODE``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_69device_get_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C (cpdef) implementation of device_get_cpu_affinity.
 * Same structure as device_get_cpu_affinity_within_scope, minus the scope
 * argument: allocates a C-contiguous cython.view.array of `cpu_set_size`
 * unsigned longs (format "L"), releases the GIL, calls
 * nvmlDeviceGetCpuAffinity to fill it, re-acquires the GIL, raises on a
 * non-success NVML status via check_status, and returns the array. A
 * cpu_set_size of 0 short-circuits to an empty slice of a 1-element array.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity(intptr_t __pyx_v_device, unsigned int __pyx_v_cpu_set_size, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_array_obj *__pyx_v_cpu_set = 0;
  unsigned long *__pyx_v_cpu_set_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  nvmlReturn_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cpu_affinity", 0);

  /* "cuda/bindings/_nvml.pyx":20103
 *     .. seealso:: `nvmlDeviceGetCpuAffinity`
 *     """
 *     if cpu_set_size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
*/
  __pyx_t_1 = (__pyx_v_cpu_set_size == 0);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":20104
 *     """
 *     if cpu_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
*/
    /* Empty-result fast path: build a 1-element array and slice it to [:0]
     * so callers always get a (possibly empty) array of the same format. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned long))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20104, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_3, NULL};
      __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20104, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20104, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20104, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_L, __pyx_t_6, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20104, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_6, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20104, __pyx_L1_error)
      __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20104, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_2);
    }
    __pyx_t_6 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_2), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20104, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_6;
    __pyx_t_6 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":20103
 *     .. seealso:: `nvmlDeviceGetCpuAffinity`
 *     """
 *     if cpu_set_size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":20105
 *     if cpu_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:
*/
  /* Allocate the output buffer: view.array(shape=(cpu_set_size,),
   * itemsize=sizeof(unsigned long), format="L", mode="c"), built via the
   * vectorcall keyword-argument builder. */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_cpu_set_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20105, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned long))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20105, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_3, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20105, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20105, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_L, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20105, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20105, __pyx_L1_error)
    __pyx_t_6 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20105, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_6);
  }
  __pyx_v_cpu_set = ((struct __pyx_array_obj *)__pyx_t_6);
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":20106
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCpuAffinity(<Device>device, cpu_set_size, cpu_set_ptr)
*/
  /* Raw pointer into the array's backing storage for the NVML call. */
  __pyx_v_cpu_set_ptr = ((unsigned long *)__pyx_v_cpu_set->data);

  /* "cuda/bindings/_nvml.pyx":20107
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCpuAffinity(<Device>device, cpu_set_size, cpu_set_ptr)
 *     check_status(__status__)
*/
  /* Release the GIL around the driver call; both exit paths (normal and
   * error) re-acquire it before continuing. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20108
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetCpuAffinity(<Device>device, cpu_set_size, cpu_set_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpu_set
*/
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR marks a failure to load the
         * NVML symbol; in that case a Python exception is already pending. */
        __pyx_t_8 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinity(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_cpu_set_size, __pyx_v_cpu_set_ptr); if (unlikely(__pyx_t_8 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20108, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_8;
      }

      /* "cuda/bindings/_nvml.pyx":20107
 *     cdef view.array cpu_set = view.array(shape=(cpu_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *cpu_set_ptr = <unsigned long *>(cpu_set.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCpuAffinity(<Device>device, cpu_set_size, cpu_set_ptr)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20109
 *     with nogil:
 *         __status__ = nvmlDeviceGetCpuAffinity(<Device>device, cpu_set_size, cpu_set_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpu_set
 * 
*/
  /* check_status raises (returns 1) when the NVML status is not success. */
  __pyx_t_9 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_9 == ((int)1))) __PYX_ERR(0, 20109, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20110
 *         __status__ = nvmlDeviceGetCpuAffinity(<Device>device, cpu_set_size, cpu_set_ptr)
 *     check_status(__status__)
 *     return cpu_set             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_cpu_set);
  __pyx_r = ((PyObject *)__pyx_v_cpu_set);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20094
 * 
 * 
 * cpdef object device_get_cpu_affinity(intptr_t device, unsigned int cpu_set_size):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity for the device For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3 This is equivalent to calling ``nvmlDeviceGetCpuAffinityWithinScope`` with ``NVML_AFFINITY_SCOPE_NODE``.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_cpu_set);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_69device_get_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_68device_get_cpu_affinity, "device_get_cpu_affinity(intptr_t device, unsigned int cpu_set_size)\n\nRetrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity for the device For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3 This is equivalent to calling ``nvmlDeviceGetCpuAffinityWithinScope`` with ``NVML_AFFINITY_SCOPE_NODE``.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    cpu_set_size (unsigned int): The size of the cpuSet array that is safe to access.\n\n.. seealso:: `nvmlDeviceGetCpuAffinity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_69device_get_cpu_affinity = {"device_get_cpu_affinity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_69device_get_cpu_affinity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_68device_get_cpu_affinity};
/*
 * Python-level entry point for device_get_cpu_affinity(device, cpu_set_size).
 * Unpacks the two arguments (positionally and/or by keyword), converts them
 * to intptr_t / unsigned int, and delegates to the __pyx_pf_ implementation.
 * Returns a new reference, or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_69device_get_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_cpu_set_size;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_cpu_affinity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_cpu_set_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`, which reduces to `(0|1) < 0`
     * and is always false, making this error check dead code.  The negative
     * length must be tested inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20094, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20094, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20094, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_cpu_affinity", 0) < (0)) __PYX_ERR(0, 20094, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_cpu_affinity", 1, 2, 2, i); __PYX_ERR(0, 20094, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20094, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20094, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20094, __pyx_L3_error)
    __pyx_v_cpu_set_size = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_cpu_set_size == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20094, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_cpu_affinity", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20094, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_68device_get_cpu_affinity(__pyx_self, __pyx_v_device, __pyx_v_cpu_set_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim behind the device_get_cpu_affinity Python wrapper:
 * forwards to the cpdef C function (final argument 1 skips Python-level
 * dispatch) and returns its result.  Returns NULL with an exception set
 * (after recording a traceback frame) on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_68device_get_cpu_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_cpu_set_size) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cpu_affinity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef implementation; it returns a new reference. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity(__pyx_v_device, __pyx_v_cpu_set_size, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20113
 * 
 * 
 * cpdef device_set_cpu_affinity(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Sets the ideal affinity for the calling thread and device using the guidelines given in :func:`device_get_cpu_affinity`. Note, this is a change as of version 8.0. Older versions set the affinity for a calling process and all children. Currently supports up to 1024 processors.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_71device_set_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of `cpdef device_set_cpu_affinity(intptr_t device)`.
 * Releases the GIL, calls the nvmlDeviceSetCpuAffinity shim, re-acquires
 * the GIL, then raises via check_status() on a non-success status code.
 * Returns None on success, NULL (0) with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_cpu_affinity(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_cpu_affinity", 0);

  /* "cuda/bindings/_nvml.pyx":20121
 *     .. seealso:: `nvmlDeviceSetCpuAffinity`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetCpuAffinity(<Device>device)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20122
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetCpuAffinity(<Device>device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals the shim failed to load the
         * symbol and raised; in that case re-take the GIL and propagate. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetCpuAffinity(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20122, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20121
 *     .. seealso:: `nvmlDeviceSetCpuAffinity`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetCpuAffinity(<Device>device)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20123
 *     with nogil:
 *         __status__ = nvmlDeviceSetCpuAffinity(<Device>device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20123, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20113
 * 
 * 
 * cpdef device_set_cpu_affinity(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Sets the ideal affinity for the calling thread and device using the guidelines given in :func:`device_get_cpu_affinity`. Note, this is a change as of version 8.0. Older versions set the affinity for a calling process and all children. Currently supports up to 1024 processors.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_71device_set_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_70device_set_cpu_affinity, "device_set_cpu_affinity(intptr_t device)\n\nSets the ideal affinity for the calling thread and device using the guidelines given in :func:`device_get_cpu_affinity`. Note, this is a change as of version 8.0. Older versions set the affinity for a calling process and all children. Currently supports up to 1024 processors.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceSetCpuAffinity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_71device_set_cpu_affinity = {"device_set_cpu_affinity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_71device_set_cpu_affinity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_70device_set_cpu_affinity};
/*
 * Python-level entry point for device_set_cpu_affinity(device).
 * Unpacks the single argument (positional or keyword), converts it to
 * intptr_t, and delegates to the __pyx_pf_ implementation.  Returns a new
 * reference, or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_71device_set_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_cpu_affinity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`, which reduces to `(0|1) < 0`
     * and is always false, making this error check dead code.  The negative
     * length must be tested inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20113, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20113, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_cpu_affinity", 0) < (0)) __PYX_ERR(0, 20113, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_cpu_affinity", 1, 1, 1, i); __PYX_ERR(0, 20113, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20113, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20113, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_cpu_affinity", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20113, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_70device_set_cpu_affinity(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim behind the device_set_cpu_affinity Python wrapper:
 * forwards to the cpdef C function (final argument 1 skips Python-level
 * dispatch) and returns its result (None on success).  Returns NULL with
 * an exception set (after recording a traceback frame) on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_70device_set_cpu_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_cpu_affinity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef implementation; it returns a new reference. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_cpu_affinity(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20126
 * 
 * 
 * cpdef device_clear_cpu_affinity(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Clear all affinity bindings for the calling thread. Note, this is a change as of version 8.0 as older versions cleared the affinity for a calling process and all children.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_73device_clear_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of `cpdef device_clear_cpu_affinity(intptr_t device)`.
 * Releases the GIL, calls the nvmlDeviceClearCpuAffinity shim, re-acquires
 * the GIL, then raises via check_status() on a non-success status code.
 * Returns None on success, NULL (0) with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_cpu_affinity(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_cpu_affinity", 0);

  /* "cuda/bindings/_nvml.pyx":20134
 *     .. seealso:: `nvmlDeviceClearCpuAffinity`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearCpuAffinity(<Device>device)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20135
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceClearCpuAffinity(<Device>device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals the shim failed to load the
         * symbol and raised; in that case re-take the GIL and propagate. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearCpuAffinity(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20135, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20134
 *     .. seealso:: `nvmlDeviceClearCpuAffinity`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearCpuAffinity(<Device>device)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20136
 *     with nogil:
 *         __status__ = nvmlDeviceClearCpuAffinity(<Device>device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20136, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20126
 * 
 * 
 * cpdef device_clear_cpu_affinity(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Clear all affinity bindings for the calling thread. Note, this is a change as of version 8.0 as older versions cleared the affinity for a calling process and all children.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_73device_clear_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_72device_clear_cpu_affinity, "device_clear_cpu_affinity(intptr_t device)\n\nClear all affinity bindings for the calling thread. Note, this is a change as of version 8.0 as older versions cleared the affinity for a calling process and all children.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceClearCpuAffinity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_73device_clear_cpu_affinity = {"device_clear_cpu_affinity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_73device_clear_cpu_affinity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_72device_clear_cpu_affinity};
/*
 * Python-level entry point for device_clear_cpu_affinity(device).
 * Unpacks the single argument (positional or keyword), converts it to
 * intptr_t, and delegates to the __pyx_pf_ implementation.  Returns a new
 * reference, or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_73device_clear_cpu_affinity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_clear_cpu_affinity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`, which reduces to `(0|1) < 0`
     * and is always false, making this error check dead code.  The negative
     * length must be tested inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20126, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20126, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_clear_cpu_affinity", 0) < (0)) __PYX_ERR(0, 20126, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_clear_cpu_affinity", 1, 1, 1, i); __PYX_ERR(0, 20126, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20126, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20126, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_clear_cpu_affinity", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20126, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_72device_clear_cpu_affinity(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim behind the device_clear_cpu_affinity Python wrapper:
 * forwards to the cpdef C function (final argument 1 skips Python-level
 * dispatch) and returns its result (None on success).  Returns NULL with
 * an exception set (after recording a traceback frame) on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_72device_clear_cpu_affinity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_cpu_affinity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef implementation; it returns a new reference. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_clear_cpu_affinity(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_cpu_affinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20139
 * 
 * 
 * cpdef unsigned int device_get_numa_node_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the NUMA node of the given GPU device. This only applies to platforms where the GPUs are NUMA nodes.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_75device_get_numa_node_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of `cpdef unsigned int device_get_numa_node_id(intptr_t device)
 * except? 0`.  Releases the GIL, calls the nvmlDeviceGetNumaNodeId shim to
 * fill `node`, re-acquires the GIL, raises via check_status() on a
 * non-success status code, and returns the NUMA node id.  On error it
 * returns 0 with a Python exception set ("except? 0" semantics: callers
 * must also check PyErr_Occurred() when the result is 0).
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_numa_node_id(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_node;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20151
 *     """
 *     cdef unsigned int node
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNumaNodeId(<Device>device, &node)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20152
 *     cdef unsigned int node
 *     with nogil:
 *         __status__ = nvmlDeviceGetNumaNodeId(<Device>device, &node)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return node
*/
        /* The sentinel return value signals the shim failed to load the
         * symbol and raised; in that case re-take the GIL and propagate. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumaNodeId(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_node)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20152, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20151
 *     """
 *     cdef unsigned int node
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNumaNodeId(<Device>device, &node)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20153
 *     with nogil:
 *         __status__ = nvmlDeviceGetNumaNodeId(<Device>device, &node)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return node
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20153, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20154
 *         __status__ = nvmlDeviceGetNumaNodeId(<Device>device, &node)
 *     check_status(__status__)
 *     return node             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_node;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20139
 * 
 * 
 * cpdef unsigned int device_get_numa_node_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the NUMA node of the given GPU device. This only applies to platforms where the GPUs are NUMA nodes.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_numa_node_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_75device_get_numa_node_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_74device_get_numa_node_id, "device_get_numa_node_id(intptr_t device) -> unsigned int\n\nGet the NUMA node of the given GPU device. This only applies to platforms where the GPUs are NUMA nodes.\n\nArgs:\n    device (intptr_t): The device handle.\n\nReturns:\n    unsigned int: NUMA node ID of the device.\n\n.. seealso:: `nvmlDeviceGetNumaNodeId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_75device_get_numa_node_id = {"device_get_numa_node_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_75device_get_numa_node_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_74device_get_numa_node_id};
/*
 * Python-level entry point for device_get_numa_node_id(device).
 * Unpacks the single argument (positional or keyword), converts it to
 * intptr_t, and delegates to the __pyx_pf_ implementation.  Returns a new
 * reference, or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_75device_get_numa_node_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_numa_node_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0`, which reduces to `(0|1) < 0`
     * and is always false, making this error check dead code.  The negative
     * length must be tested inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20139, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20139, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_numa_node_id", 0) < (0)) __PYX_ERR(0, 20139, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_numa_node_id", 1, 1, 1, i); __PYX_ERR(0, 20139, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20139, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20139, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_numa_node_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20139, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_numa_node_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_74device_get_numa_node_id(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim behind the device_get_numa_node_id Python wrapper:
 * calls the cpdef C function (final argument 1 skips Python-level
 * dispatch), boxes the unsigned int result into a Python int, and returns
 * it.  Because the cpdef is declared `except? 0`, a 0 result is only an
 * error if PyErr_Occurred() is also set — hence the two-part check below.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_74device_get_numa_node_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_numa_node_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel: only treat it as failure when an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_numa_node_id(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20139, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_numa_node_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20157
 * 
 * 
 * cpdef int device_get_topology_common_ancestor(intptr_t device1, intptr_t device2) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the common ancestor for two devices For all products. Supported on Linux only.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_77device_get_topology_common_ancestor(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_topology_common_ancestor` (except? -1):
 * releases the GIL, calls nvmlDeviceGetTopologyCommonAncestor through the
 * cy_nvml loader shim, raises on a bad NVML status via check_status(), and
 * returns the _GpuTopologyLevel enum value as an int.
 * NOTE(review): generated by Cython -- do not hand-edit logic. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_topology_common_ancestor(intptr_t __pyx_v_device1, intptr_t __pyx_v_device2, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__GpuTopologyLevel __pyx_v_path_info;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20170
 *     """
 *     cdef _GpuTopologyLevel path_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTopologyCommonAncestor(<Device>device1, <Device>device2, &path_info)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* release the GIL for the duration of the NVML call */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20171
 *     cdef _GpuTopologyLevel path_info
 *     with nogil:
 *         __status__ = nvmlDeviceGetTopologyCommonAncestor(<Device>device1, <Device>device2, &path_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>path_info
*/
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the shim's sentinel for a symbol-load
         * failure; only treat it as an error when a Python exception is actually pending */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyCommonAncestor(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device1), ((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device2), (&__pyx_v_path_info)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20171, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20170
 *     """
 *     cdef _GpuTopologyLevel path_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTopologyCommonAncestor(<Device>device1, <Device>device2, &path_info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error path inside the nogil region: reacquire the GIL before propagating */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20172
 *     with nogil:
 *         __status__ = nvmlDeviceGetTopologyCommonAncestor(<Device>device1, <Device>device2, &path_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>path_info
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20172, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20173
 *         __status__ = nvmlDeviceGetTopologyCommonAncestor(<Device>device1, <Device>device2, &path_info)
 *     check_status(__status__)
 *     return <int>path_info             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_path_info);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20157
 * 
 * 
 * cpdef int device_get_topology_common_ancestor(intptr_t device1, intptr_t device2) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the common ancestor for two devices For all products. Supported on Linux only.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_topology_common_ancestor", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_77device_get_topology_common_ancestor(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_76device_get_topology_common_ancestor, "device_get_topology_common_ancestor(intptr_t device1, intptr_t device2) -> int\n\nRetrieve the common ancestor for two devices For all products. Supported on Linux only.\n\nArgs:\n    device1 (intptr_t): The identifier of the first device.\n    device2 (intptr_t): The identifier of the second device.\n\nReturns:\n    int: A ``nvmlGpuTopologyLevel_t`` that gives the path type.\n\n.. seealso:: `nvmlDeviceGetTopologyCommonAncestor`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_77device_get_topology_common_ancestor = {"device_get_topology_common_ancestor", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_77device_get_topology_common_ancestor, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_76device_get_topology_common_ancestor};
/* Python wrapper for `device_get_topology_common_ancestor(device1, device2)`:
 * unpacks positional/keyword args into `values`, converts both to intptr_t,
 * and delegates to the pf_ implementation.
 * NOTE(review): generated by Cython; the only hand edit is the
 * parenthesization fix on the kwargs-length error check (see below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_77device_get_topology_common_ancestor(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device1;
  intptr_t __pyx_v_device2;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_topology_common_ancestor (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device1,&__pyx_mstate_global->__pyx_n_u_device2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` -- unlikely() yields 0/1, so that
     * comparison is always false and the error check was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20157, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20157, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20157, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_topology_common_ancestor", 0) < (0)) __PYX_ERR(0, 20157, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_topology_common_ancestor", 1, 2, 2, i); __PYX_ERR(0, 20157, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20157, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20157, __pyx_L3_error)
    }
    /* intptr_t handles are parsed via PyLong_AsSsize_t (same width on supported platforms) */
    __pyx_v_device1 = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device1 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20157, __pyx_L3_error)
    __pyx_v_device2 = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_device2 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20157, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_topology_common_ancestor", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20157, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_topology_common_ancestor", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_76device_get_topology_common_ancestor(__pyx_self, __pyx_v_device1, __pyx_v_device2);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_topology_common_ancestor`: delegates to the
 * cpdef C impl with skip_dispatch=1 and boxes the int result as a Python int.
 * NOTE(review): generated by Cython -- do not hand-edit logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_76device_get_topology_common_ancestor(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device1, intptr_t __pyx_v_device2) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_topology_common_ancestor", 0);
  __Pyx_XDECREF(__pyx_r);
  /* cpdef declared `except? -1`: -1 is an error only if an exception is pending */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_topology_common_ancestor(__pyx_v_device1, __pyx_v_device2, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20157, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_topology_common_ancestor", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20176
 * 
 * 
 * cpdef int device_get_p2p_status(intptr_t device1, intptr_t device2, int p2p_ind_ex) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the status for a given p2p capability index between a given pair of GPU.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_79device_get_p2p_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_p2p_status` (except? -1): releases the
 * GIL, calls nvmlDeviceGetP2PStatus through the cy_nvml loader shim, raises
 * on a bad NVML status via check_status(), and returns the _GpuP2PStatus
 * enum value as an int.  NOTE(review): generated by Cython. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_p2p_status(intptr_t __pyx_v_device1, intptr_t __pyx_v_device2, int __pyx_v_p2p_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__GpuP2PStatus __pyx_v_p2p_status;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20190
 *     """
 *     cdef _GpuP2PStatus p2p_status
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetP2PStatus(<Device>device1, <Device>device2, <_GpuP2PCapsIndex>p2p_ind_ex, &p2p_status)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* release the GIL for the duration of the NVML call */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20191
 *     cdef _GpuP2PStatus p2p_status
 *     with nogil:
 *         __status__ = nvmlDeviceGetP2PStatus(<Device>device1, <Device>device2, <_GpuP2PCapsIndex>p2p_ind_ex, &p2p_status)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>p2p_status
*/
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the shim's symbol-load-failure sentinel */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetP2PStatus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device1), ((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device2), ((__pyx_t_4cuda_8bindings_5_nvml__GpuP2PCapsIndex)__pyx_v_p2p_ind_ex), (&__pyx_v_p2p_status)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20191, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20190
 *     """
 *     cdef _GpuP2PStatus p2p_status
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetP2PStatus(<Device>device1, <Device>device2, <_GpuP2PCapsIndex>p2p_ind_ex, &p2p_status)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error path inside the nogil region: reacquire the GIL before propagating */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20192
 *     with nogil:
 *         __status__ = nvmlDeviceGetP2PStatus(<Device>device1, <Device>device2, <_GpuP2PCapsIndex>p2p_ind_ex, &p2p_status)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>p2p_status
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20192, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20193
 *         __status__ = nvmlDeviceGetP2PStatus(<Device>device1, <Device>device2, <_GpuP2PCapsIndex>p2p_ind_ex, &p2p_status)
 *     check_status(__status__)
 *     return <int>p2p_status             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_p2p_status);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20176
 * 
 * 
 * cpdef int device_get_p2p_status(intptr_t device1, intptr_t device2, int p2p_ind_ex) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the status for a given p2p capability index between a given pair of GPU.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_p2p_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_79device_get_p2p_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_78device_get_p2p_status, "device_get_p2p_status(intptr_t device1, intptr_t device2, int p2p_ind_ex) -> int\n\nRetrieve the status for a given p2p capability index between a given pair of GPU.\n\nArgs:\n    device1 (intptr_t): The first device.\n    device2 (intptr_t): The second device.\n    p2p_ind_ex (GpuP2PCapsIndex): p2p Capability Index being looked for between ``device1`` and ``device2``.\n\nReturns:\n    int: Reference in which to return the status of the ``p2p_ind_ex`` between ``device1`` and ``device2``.\n\n.. seealso:: `nvmlDeviceGetP2PStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_79device_get_p2p_status = {"device_get_p2p_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_79device_get_p2p_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_78device_get_p2p_status};
/* Python wrapper for `device_get_p2p_status(device1, device2, p2p_ind_ex)`:
 * unpacks positional/keyword args into `values`, converts them to
 * intptr_t/int, and delegates to the pf_ implementation.
 * NOTE(review): generated by Cython; the only hand edit is the
 * parenthesization fix on the kwargs-length error check (see below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_79device_get_p2p_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device1;
  intptr_t __pyx_v_device2;
  int __pyx_v_p2p_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_p2p_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device1,&__pyx_mstate_global->__pyx_n_u_device2,&__pyx_mstate_global->__pyx_n_u_p2p_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` -- unlikely() yields 0/1, so that
     * comparison is always false and the error check was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20176, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20176, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20176, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20176, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_p2p_status", 0) < (0)) __PYX_ERR(0, 20176, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_p2p_status", 1, 3, 3, i); __PYX_ERR(0, 20176, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20176, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20176, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20176, __pyx_L3_error)
    }
    /* intptr_t handles are parsed via PyLong_AsSsize_t (same width on supported platforms) */
    __pyx_v_device1 = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device1 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20176, __pyx_L3_error)
    __pyx_v_device2 = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_device2 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20176, __pyx_L3_error)
    __pyx_v_p2p_ind_ex = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_p2p_ind_ex == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20176, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_p2p_status", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 20176, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_p2p_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_78device_get_p2p_status(__pyx_self, __pyx_v_device1, __pyx_v_device2, __pyx_v_p2p_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_p2p_status`: delegates to the cpdef C impl
 * with skip_dispatch=1 and boxes the int result as a Python int.
 * NOTE(review): generated by Cython -- do not hand-edit logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_78device_get_p2p_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device1, intptr_t __pyx_v_device2, int __pyx_v_p2p_ind_ex) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_p2p_status", 0);
  __Pyx_XDECREF(__pyx_r);
  /* cpdef declared `except? -1`: -1 is an error only if an exception is pending */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_p2p_status(__pyx_v_device1, __pyx_v_device2, __pyx_v_p2p_ind_ex, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20176, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_p2p_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20196
 * 
 * 
 * cpdef str device_get_uuid(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, that augments the immutable, board serial identifier.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_81device_get_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_uuid`: fills a 96-byte stack buffer via
 * nvmlDeviceGetUUID (GIL released), raises on a bad NVML status via
 * check_status(), and decodes the buffer to a Python str.
 * NOTE(review): generated by Cython -- do not hand-edit logic. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_uuid(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_uuid[96];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_uuid", 0);

  /* "cuda/bindings/_nvml.pyx":20204
 *     .. seealso:: `nvmlDeviceGetUUID`
 *     """
 *     cdef unsigned int length = 96             # <<<<<<<<<<<<<<
 *     cdef char[96] uuid
 *     with nogil:
*/
  /* buffer capacity passed to NVML; must match the `uuid` array size above */
  __pyx_v_length = 96;

  /* "cuda/bindings/_nvml.pyx":20206
 *     cdef unsigned int length = 96
 *     cdef char[96] uuid
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetUUID(<Device>device, uuid, length)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* release the GIL for the duration of the NVML call */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20207
 *     cdef char[96] uuid
 *     with nogil:
 *         __status__ = nvmlDeviceGetUUID(<Device>device, uuid, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(uuid)
*/
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the shim's symbol-load-failure sentinel */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUUID(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_uuid, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20207, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20206
 *     cdef unsigned int length = 96
 *     cdef char[96] uuid
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetUUID(<Device>device, uuid, length)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error path inside the nogil region: reacquire the GIL before propagating */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20208
 *     with nogil:
 *         __status__ = nvmlDeviceGetUUID(<Device>device, uuid, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(uuid)
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20208, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20209
 *         __status__ = nvmlDeviceGetUUID(<Device>device, uuid, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(uuid)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  /* assumes NVML NUL-terminates `uuid` within 96 bytes on success --
   * TODO(review): confirm against the nvmlDeviceGetUUID documentation */
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_uuid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20196
 * 
 * 
 * cpdef str device_get_uuid(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, that augments the immutable, board serial identifier.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_81device_get_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_80device_get_uuid, "device_get_uuid(intptr_t device) -> str\n\nRetrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, that augments the immutable, board serial identifier.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetUUID`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_81device_get_uuid = {"device_get_uuid", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_81device_get_uuid, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_80device_get_uuid};
/* Python wrapper for `device_get_uuid(device)`: unpacks the single
 * positional/keyword argument, converts it to intptr_t, and delegates to the
 * pf_ implementation.
 * NOTE(review): generated by Cython; the only hand edit is the
 * parenthesization fix on the kwargs-length error check (see below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_81device_get_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_uuid (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` -- unlikely() yields 0/1, so that
     * comparison is always false and the error check was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20196, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20196, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_uuid", 0) < (0)) __PYX_ERR(0, 20196, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_uuid", 1, 1, 1, i); __PYX_ERR(0, 20196, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20196, __pyx_L3_error)
    }
    /* intptr_t handle is parsed via PyLong_AsSsize_t (same width on supported platforms) */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20196, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_uuid", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20196, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_80device_get_uuid(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_uuid`: delegates to the cpdef C impl with
 * skip_dispatch=1 and returns the resulting str (NULL return signals error).
 * NOTE(review): generated by Cython -- do not hand-edit logic. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_80device_get_uuid(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_uuid", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_uuid(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20212
 * 
 * 
 * cpdef unsigned int device_get_minor_number(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for each GPU will have the form /dev/nvidia[minor number].
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_83device_get_minor_number(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_minor_number.
 * Releases the GIL around the NVML call, raises on a bad status via
 * check_status, and returns the minor number.  Declared `except? 0` in
 * the .pyx: a return of 0 is only an error when PyErr_Occurred() is set. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_minor_number(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_minor_number;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20224
 *     """
 *     cdef unsigned int minor_number
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMinorNumber(<Device>device, &minor_number)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20225
 *     cdef unsigned int minor_number
 *     with nogil:
 *         __status__ = nvmlDeviceGetMinorNumber(<Device>device, &minor_number)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return minor_number
 */
        /* The sentinel status signals a driver-loading failure inside the
         * lazy-loading shim; only then is a Python exception checked (which
         * requires briefly re-acquiring the GIL inside the helper). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinorNumber(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_minor_number)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20225, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20224
 *     """
 *     cdef unsigned int minor_number
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMinorNumber(<Device>device, &minor_number)
 *     check_status(__status__)
 */
      /* Both paths restore the thread state saved above before leaving the
       * nogil section. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20226
 *     with nogil:
 *         __status__ = nvmlDeviceGetMinorNumber(<Device>device, &minor_number)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return minor_number
 * 
 */
  /* check_status raises NVMLError for any non-success status (returns 1 on
   * error per its `except 1` declaration). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20226, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20227
 *         __status__ = nvmlDeviceGetMinorNumber(<Device>device, &minor_number)
 *     check_status(__status__)
 *     return minor_number             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_minor_number;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20212
 * 
 * 
 * cpdef unsigned int device_get_minor_number(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for each GPU will have the form /dev/nvidia[minor number].
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_minor_number", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper: argument unpacking for device_get_minor_number.
 * Accepts one positional or keyword argument `device`, converts it to
 * intptr_t, and dispatches to the pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_83device_get_minor_number(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_82device_get_minor_number, "device_get_minor_number(intptr_t device) -> unsigned int\n\nRetrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for each GPU will have the form /dev/nvidia[minor number].\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the minor number for the device.\n\n.. seealso:: `nvmlDeviceGetMinorNumber`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_83device_get_minor_number = {"device_get_minor_number", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_83device_get_minor_number, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_82device_get_minor_number};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_83device_get_minor_number(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_minor_number (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parenthesis was misplaced as `unlikely(__pyx_kwds_len) < 0`,
     * which compares the 0/1 result of __builtin_expect with 0 and is always
     * false, making the negative-length error check dead code.  Restored the
     * intended check.  (Generated code; fix belongs upstream in Cython too.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20212, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20212, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_minor_number", 0) < (0)) __PYX_ERR(0, 20212, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_minor_number", 1, 1, 1, i); __PYX_ERR(0, 20212, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20212, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is read via PyLong_AsSsize_t; assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on the supported
     * platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20212, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_minor_number", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20212, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_minor_number", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_82device_get_minor_number(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level slow path for device_get_minor_number: calls the C-level
 * cpdef implementation and boxes the unsigned int result as a Python int.
 * Because the cpdef is `except? 0`, a 0 result only signals an error when
 * PyErr_Occurred() is also set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_82device_get_minor_number(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_minor_number", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_minor_number(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20212, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_minor_number", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20230
 * 
 * 
 * cpdef str device_get_board_part_number(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the the device board part number which is programmed into the board's InfoROM.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_85device_get_board_part_number(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_board_part_number.
 * Fills a fixed 80-byte stack buffer via NVML (GIL released), checks the
 * status, and returns the result as a Python str.
 * NOTE(review): PyUnicode_FromString requires NUL termination — assumes
 * nvmlDeviceGetBoardPartNumber NUL-terminates within the 80-byte buffer;
 * confirm against the NVML API contract. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_board_part_number(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_part_number[80];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_board_part_number", 0);

  /* "cuda/bindings/_nvml.pyx":20238
 *     .. seealso:: `nvmlDeviceGetBoardPartNumber`
 *     """
 *     cdef unsigned int length = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] part_number
 *     with nogil:
 */
  __pyx_v_length = 80;

  /* "cuda/bindings/_nvml.pyx":20240
 *     cdef unsigned int length = 80
 *     cdef char[80] part_number
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBoardPartNumber(<Device>device, part_number, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20241
 *     cdef char[80] part_number
 *     with nogil:
 *         __status__ = nvmlDeviceGetBoardPartNumber(<Device>device, part_number, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(part_number)
 */
        /* Sentinel status marks a lazy-loading failure in the cy_nvml shim;
         * only then is a pending Python exception checked. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardPartNumber(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_part_number, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20241, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20240
 *     cdef unsigned int length = 80
 *     cdef char[80] part_number
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBoardPartNumber(<Device>device, part_number, length)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20242
 *     with nogil:
 *         __status__ = nvmlDeviceGetBoardPartNumber(<Device>device, part_number, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(part_number)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20242, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20243
 *         __status__ = nvmlDeviceGetBoardPartNumber(<Device>device, part_number, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(part_number)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_part_number); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20243, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20230
 * 
 * 
 * cpdef str device_get_board_part_number(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the the device board part number which is programmed into the board's InfoROM.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_board_part_number", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: argument unpacking for device_get_board_part_number.
 * Accepts one positional or keyword argument `device`, converts it to
 * intptr_t, and dispatches to the pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_85device_get_board_part_number(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_84device_get_board_part_number, "device_get_board_part_number(intptr_t device) -> str\n\nRetrieves the the device board part number which is programmed into the board's InfoROM.\n\nArgs:\n    device (intptr_t): Identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetBoardPartNumber`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_85device_get_board_part_number = {"device_get_board_part_number", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_85device_get_board_part_number, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_84device_get_board_part_number};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_85device_get_board_part_number(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_board_part_number (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parenthesis was misplaced as `unlikely(__pyx_kwds_len) < 0`,
     * which is always false (0/1 < 0) and disabled the negative-length error
     * check.  Restored the intended condition. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20230, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20230, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_board_part_number", 0) < (0)) __PYX_ERR(0, 20230, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_board_part_number", 1, 1, 1, i); __PYX_ERR(0, 20230, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20230, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t read via PyLong_AsSsize_t; assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20230, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_board_part_number", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20230, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_board_part_number", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_84device_get_board_part_number(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level slow path for device_get_board_part_number: forwards to the
 * C-level cpdef implementation and returns the resulting Python str, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_84device_get_board_part_number(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_board_part_number", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return from the cpdef C function signals a pending Python error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_board_part_number(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_board_part_number", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20246
 * 
 * 
 * cpdef str device_get_inforom_version(intptr_t device, int object):             # <<<<<<<<<<<<<<
 *     """Retrieves the version information for the device's infoROM object.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_87device_get_inforom_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_inforom_version.
 * `object` selects which infoROM object's version to query (cast to
 * _InforomObject).  Fills a fixed 16-byte stack buffer via NVML with the
 * GIL released, checks the status, and returns the version as a Python str.
 * NOTE(review): PyUnicode_FromString requires NUL termination — assumes
 * nvmlDeviceGetInforomVersion NUL-terminates within 16 bytes; confirm
 * against the NVML API contract. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_version(intptr_t __pyx_v_device, int __pyx_v_object, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_version[16];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_inforom_version", 0);

  /* "cuda/bindings/_nvml.pyx":20255
 *     .. seealso:: `nvmlDeviceGetInforomVersion`
 *     """
 *     cdef unsigned int length = 16             # <<<<<<<<<<<<<<
 *     cdef char[16] version
 *     with nogil:
 */
  __pyx_v_length = 16;

  /* "cuda/bindings/_nvml.pyx":20257
 *     cdef unsigned int length = 16
 *     cdef char[16] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetInforomVersion(<Device>device, <_InforomObject>object, version, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20258
 *     cdef char[16] version
 *     with nogil:
 *         __status__ = nvmlDeviceGetInforomVersion(<Device>device, <_InforomObject>object, version, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)
 */
        /* Sentinel status marks a lazy-loading failure in the cy_nvml shim;
         * only then is a pending Python exception checked. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomVersion(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__InforomObject)__pyx_v_object), __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20258, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20257
 *     cdef unsigned int length = 16
 *     cdef char[16] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetInforomVersion(<Device>device, <_InforomObject>object, version, length)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20259
 *     with nogil:
 *         __status__ = nvmlDeviceGetInforomVersion(<Device>device, <_InforomObject>object, version, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(version)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20259, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20260
 *         __status__ = nvmlDeviceGetInforomVersion(<Device>device, <_InforomObject>object, version, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_version); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20260, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20246
 * 
 * 
 * cpdef str device_get_inforom_version(intptr_t device, int object):             # <<<<<<<<<<<<<<
 *     """Retrieves the version information for the device's infoROM object.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: argument unpacking for device_get_inforom_version.
 * Accepts `device` and `object` (positionally or by keyword), converts
 * them to intptr_t and int, and dispatches to the pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_87device_get_inforom_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_86device_get_inforom_version, "device_get_inforom_version(intptr_t device, int object) -> str\n\nRetrieves the version information for the device's infoROM object.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    object (InforomObject): The target infoROM object.\n\n.. seealso:: `nvmlDeviceGetInforomVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_87device_get_inforom_version = {"device_get_inforom_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_87device_get_inforom_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_86device_get_inforom_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_87device_get_inforom_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_object;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_inforom_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_object_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): parenthesis was misplaced as `unlikely(__pyx_kwds_len) < 0`,
     * which is always false (0/1 < 0) and disabled the negative-length error
     * check.  Restored the intended condition. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20246, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20246, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20246, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_inforom_version", 0) < (0)) __PYX_ERR(0, 20246, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_inforom_version", 1, 2, 2, i); __PYX_ERR(0, 20246, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20246, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20246, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t read via PyLong_AsSsize_t; assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20246, __pyx_L3_error)
    __pyx_v_object = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20246, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_inforom_version", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20246, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_86device_get_inforom_version(__pyx_self, __pyx_v_device, __pyx_v_object);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level slow path for device_get_inforom_version: forwards both
 * arguments to the C-level cpdef implementation and returns the resulting
 * Python str, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_86device_get_inforom_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_object) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_inforom_version", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return from the cpdef C function signals a pending Python error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_version(__pyx_v_device, __pyx_v_object, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20263
 * 
 * 
 * cpdef str device_get_inforom_image_version(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the global infoROM image version.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_89device_get_inforom_image_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_inforom_image_version.
 * Fills a fixed 16-byte stack buffer via NVML with the GIL released,
 * checks the status, and returns the global infoROM image version as a
 * Python str.
 * NOTE(review): PyUnicode_FromString requires NUL termination — assumes
 * nvmlDeviceGetInforomImageVersion NUL-terminates within 16 bytes;
 * confirm against the NVML API contract. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_image_version(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_version[16];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_inforom_image_version", 0);

  /* "cuda/bindings/_nvml.pyx":20271
 *     .. seealso:: `nvmlDeviceGetInforomImageVersion`
 *     """
 *     cdef unsigned int length = 16             # <<<<<<<<<<<<<<
 *     cdef char[16] version
 *     with nogil:
 */
  __pyx_v_length = 16;

  /* "cuda/bindings/_nvml.pyx":20273
 *     cdef unsigned int length = 16
 *     cdef char[16] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetInforomImageVersion(<Device>device, version, length)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20274
 *     cdef char[16] version
 *     with nogil:
 *         __status__ = nvmlDeviceGetInforomImageVersion(<Device>device, version, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)
 */
        /* Sentinel status marks a lazy-loading failure in the cy_nvml shim;
         * only then is a pending Python exception checked. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomImageVersion(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20274, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20273
 *     cdef unsigned int length = 16
 *     cdef char[16] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetInforomImageVersion(<Device>device, version, length)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20275
 *     with nogil:
 *         __status__ = nvmlDeviceGetInforomImageVersion(<Device>device, version, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(version)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20275, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20276
 *         __status__ = nvmlDeviceGetInforomImageVersion(<Device>device, version, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_version); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20276, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20263
 * 
 * 
 * cpdef str device_get_inforom_image_version(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the global infoROM image version.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_image_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_89device_get_inforom_image_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_88device_get_inforom_image_version, "device_get_inforom_image_version(intptr_t device) -> str\n\nRetrieves the global infoROM image version.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetInforomImageVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_89device_get_inforom_image_version = {"device_get_inforom_image_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_89device_get_inforom_image_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_88device_get_inforom_image_version};
/* Python-callable wrapper for device_get_inforom_image_version.
 * Unpacks exactly one argument, `device` (positional or keyword), converts it
 * to intptr_t and forwards to the C-level implementation function.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_89device_get_inforom_image_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_inforom_image_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(). With the GCC definition
     * `unlikely(x) == __builtin_expect(!!(x), 0)` the old form
     * `unlikely(__pyx_kwds_len) < 0` collapsed the value to 0/1 before
     * comparing, so the error branch was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20263, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20263, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_inforom_image_version", 0) < (0)) __PYX_ERR(0, 20263, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_inforom_image_version", 1, 1, 1, i); __PYX_ERR(0, 20263, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20263, __pyx_L3_error)
    }
    /* intptr_t is converted via PyLong_AsSsize_t -- assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on CPython's
     * supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20263, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_inforom_image_version", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20263, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_image_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_88device_get_inforom_image_version(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch shim: invoke the cpdef C implementation (skip_dispatch=1) and
 * hand its new reference back to the Python wrapper. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_88device_get_inforom_image_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  PyObject *call_result = NULL;
  int __pyx_lineno = 0;
  int __pyx_clineno = 0;
  const char *__pyx_filename = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_inforom_image_version", 0);
  __Pyx_XDECREF(__pyx_r);
  call_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_image_version(__pyx_v_device, 1);
  if (unlikely(!call_result)) __PYX_ERR(0, 20263, __pyx_L1_error)
  __Pyx_GOTREF(call_result);
  /* Transfer ownership of the reference into the return slot. */
  __pyx_r = call_result;
  call_result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record the traceback */
  __pyx_L1_error:;
  __Pyx_XDECREF(call_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_image_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20279
 * 
 * 
 * cpdef unsigned int device_get_inforom_configuration_checksum(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the checksum of the configuration stored in the device's infoROM.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_91device_get_inforom_configuration_checksum(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_inforom_configuration_checksum.
 * Releases the GIL, calls nvmlDeviceGetInforomConfigurationChecksum with the
 * device handle (the intptr_t is cast to the Device handle type -- presumably
 * an nvmlDevice_t), then re-acquires the GIL and raises via check_status()
 * on a non-success status.  Error convention is Cython's "except? 0": 0 is
 * returned on error, so callers must also consult PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_configuration_checksum(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_checksum;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20291
 *     """
 *     cdef unsigned int checksum
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetInforomConfigurationChecksum(<Device>device, &checksum)
 *     check_status(__status__)
 */
  /* "with nogil" block: drop the GIL around the NVML call so other Python
   * threads can run while the driver works. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20292
 *     cdef unsigned int checksum
 *     with nogil:
 *         __status__ = nvmlDeviceGetInforomConfigurationChecksum(<Device>device, &checksum)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return checksum
 */
        /* A return value equal to the internal-loading-error sentinel with a
         * Python exception pending means the NVML symbol itself failed to
         * load; jump to the error path (which restores the thread state). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomConfigurationChecksum(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_checksum)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20292, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20291
 *     """
 *     cdef unsigned int checksum
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetInforomConfigurationChecksum(<Device>device, &checksum)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* error exit: re-acquire the GIL before propagating */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20293
 *     with nogil:
 *         __status__ = nvmlDeviceGetInforomConfigurationChecksum(<Device>device, &checksum)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return checksum
 * 
 */
  /* check_status() raises a Python exception for non-success statuses and
   * signals that by returning 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20293, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20294
 *         __status__ = nvmlDeviceGetInforomConfigurationChecksum(<Device>device, &checksum)
 *     check_status(__status__)
 *     return checksum             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_checksum;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20279
 * 
 * 
 * cpdef unsigned int device_get_inforom_configuration_checksum(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the checksum of the configuration stored in the device's infoROM.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_configuration_checksum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Python-callable wrapper for device_get_inforom_configuration_checksum.
 * Unpacks exactly one argument, `device` (positional or keyword), converts it
 * to intptr_t and forwards to the C-level implementation function.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_91device_get_inforom_configuration_checksum(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_inforom_configuration_checksum (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(). With the GCC definition
     * `unlikely(x) == __builtin_expect(!!(x), 0)` the old form
     * `unlikely(__pyx_kwds_len) < 0` collapsed the value to 0/1 before
     * comparing, so the error branch was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20279, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20279, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_inforom_configuration_checksum", 0) < (0)) __PYX_ERR(0, 20279, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_inforom_configuration_checksum", 1, 1, 1, i); __PYX_ERR(0, 20279, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20279, __pyx_L3_error)
    }
    /* intptr_t is converted via PyLong_AsSsize_t -- assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on CPython's
     * supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20279, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_inforom_configuration_checksum", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20279, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_configuration_checksum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_90device_get_inforom_configuration_checksum(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch shim: call the cpdef C implementation (skip_dispatch=1) and box
 * the unsigned checksum into a Python int.  The implementation uses the
 * "except? 0" convention, so a 0 result is only an error when an exception
 * is pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_90device_get_inforom_configuration_checksum(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  unsigned int checksum_value;
  PyObject *py_checksum = NULL;
  int __pyx_lineno = 0;
  int __pyx_clineno = 0;
  const char *__pyx_filename = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_inforom_configuration_checksum", 0);
  __Pyx_XDECREF(__pyx_r);
  checksum_value = __pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_configuration_checksum(__pyx_v_device, 1);
  if (unlikely(checksum_value == 0U && PyErr_Occurred())) __PYX_ERR(0, 20279, __pyx_L1_error)
  py_checksum = __Pyx_PyLong_From_unsigned_int(checksum_value);
  if (unlikely(!py_checksum)) __PYX_ERR(0, 20279, __pyx_L1_error)
  __Pyx_GOTREF(py_checksum);
  /* Transfer ownership of the boxed value into the return slot. */
  __pyx_r = py_checksum;
  py_checksum = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record the traceback */
  __pyx_L1_error:;
  __Pyx_XDECREF(py_checksum);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_inforom_configuration_checksum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20297
 * 
 * 
 * cpdef device_validate_inforom(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Reads the infoROM from the flash and verifies the checksums.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_93device_validate_inforom(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_validate_inforom.
 * Releases the GIL, calls nvmlDeviceValidateInforom with the device handle
 * (the intptr_t is cast to the Device handle type -- presumably an
 * nvmlDevice_t), then re-acquires the GIL and raises via check_status() on a
 * non-success status.  Returns None on success, NULL (0) on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_validate_inforom(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_validate_inforom", 0);

  /* "cuda/bindings/_nvml.pyx":20305
 *     .. seealso:: `nvmlDeviceValidateInforom`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceValidateInforom(<Device>device)
 *     check_status(__status__)
 */
  /* "with nogil" block: drop the GIL around the NVML call so other Python
   * threads can run while the driver works. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20306
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceValidateInforom(<Device>device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* A return value equal to the internal-loading-error sentinel with a
         * Python exception pending means the NVML symbol itself failed to
         * load; jump to the error path (which restores the thread state). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceValidateInforom(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20306, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20305
 *     .. seealso:: `nvmlDeviceValidateInforom`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceValidateInforom(<Device>device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* error exit: re-acquire the GIL before propagating */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20307
 *     with nogil:
 *         __status__ = nvmlDeviceValidateInforom(<Device>device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status() raises a Python exception for non-success statuses and
   * signals that by returning 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20307, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20297
 * 
 * 
 * cpdef device_validate_inforom(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Reads the infoROM from the flash and verifies the checksums.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_validate_inforom", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_93device_validate_inforom(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_92device_validate_inforom, "device_validate_inforom(intptr_t device)\n\nReads the infoROM from the flash and verifies the checksums.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceValidateInforom`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_93device_validate_inforom = {"device_validate_inforom", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_93device_validate_inforom, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_92device_validate_inforom};
/* Python-callable wrapper for device_validate_inforom.
 * Unpacks exactly one argument, `device` (positional or keyword), converts it
 * to intptr_t and forwards to the C-level implementation function.
 * Returns a new reference (None on success), or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_93device_validate_inforom(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_validate_inforom (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(). With the GCC definition
     * `unlikely(x) == __builtin_expect(!!(x), 0)` the old form
     * `unlikely(__pyx_kwds_len) < 0` collapsed the value to 0/1 before
     * comparing, so the error branch was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20297, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20297, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_validate_inforom", 0) < (0)) __PYX_ERR(0, 20297, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_validate_inforom", 1, 1, 1, i); __PYX_ERR(0, 20297, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20297, __pyx_L3_error)
    }
    /* intptr_t is converted via PyLong_AsSsize_t -- assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on CPython's
     * supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20297, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_validate_inforom", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20297, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_validate_inforom", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_92device_validate_inforom(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch shim: invoke the cpdef C implementation (skip_dispatch=1) and
 * hand its new reference (None on success) back to the Python wrapper. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_92device_validate_inforom(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  PyObject *call_result = NULL;
  int __pyx_lineno = 0;
  int __pyx_clineno = 0;
  const char *__pyx_filename = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_validate_inforom", 0);
  __Pyx_XDECREF(__pyx_r);
  call_result = __pyx_f_4cuda_8bindings_5_nvml_device_validate_inforom(__pyx_v_device, 1);
  if (unlikely(!call_result)) __PYX_ERR(0, 20297, __pyx_L1_error)
  __Pyx_GOTREF(call_result);
  /* Transfer ownership of the reference into the return slot. */
  __pyx_r = call_result;
  call_result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record the traceback */
  __pyx_L1_error:;
  __Pyx_XDECREF(call_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_validate_inforom", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20310
 * 
 * 
 * cpdef unsigned long device_get_last_bbx_flush_time(intptr_t device, intptr_t timestamp) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_95device_get_last_bbx_flush_time(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_last_bbx_flush_time.
 * Releases the GIL and calls nvmlDeviceGetLastBBXFlushTime; `timestamp` is an
 * integer reinterpreted as an `unsigned long long *` out-pointer, so the
 * caller must pass the address of writable storage (writing through a bad
 * address is undefined behavior -- the caller is trusted here).  Raises via
 * check_status() on a non-success status and returns the flush duration in
 * microseconds.  Error convention is Cython's "except? 0": 0 is returned on
 * error, so callers must also consult PyErr_Occurred(). */
static unsigned long __pyx_f_4cuda_8bindings_5_nvml_device_get_last_bbx_flush_time(intptr_t __pyx_v_device, intptr_t __pyx_v_timestamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned long __pyx_v_duration_us;
  nvmlReturn_t __pyx_v___status__;
  unsigned long __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20323
 *     """
 *     cdef unsigned long duration_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetLastBBXFlushTime(<Device>device, <unsigned long long*>timestamp, &duration_us)
 *     check_status(__status__)
 */
  /* "with nogil" block: drop the GIL around the NVML call so other Python
   * threads can run while the driver works. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20324
 *     cdef unsigned long duration_us
 *     with nogil:
 *         __status__ = nvmlDeviceGetLastBBXFlushTime(<Device>device, <unsigned long long*>timestamp, &duration_us)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return duration_us
 */
        /* A return value equal to the internal-loading-error sentinel with a
         * Python exception pending means the NVML symbol itself failed to
         * load; jump to the error path (which restores the thread state). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetLastBBXFlushTime(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned PY_LONG_LONG *)__pyx_v_timestamp), (&__pyx_v_duration_us)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20324, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20323
 *     """
 *     cdef unsigned long duration_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetLastBBXFlushTime(<Device>device, <unsigned long long*>timestamp, &duration_us)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* error exit: re-acquire the GIL before propagating */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20325
 *     with nogil:
 *         __status__ = nvmlDeviceGetLastBBXFlushTime(<Device>device, <unsigned long long*>timestamp, &duration_us)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return duration_us
 * 
 */
  /* check_status() raises a Python exception for non-success statuses and
   * signals that by returning 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20325, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20326
 *         __status__ = nvmlDeviceGetLastBBXFlushTime(<Device>device, <unsigned long long*>timestamp, &duration_us)
 *     check_status(__status__)
 *     return duration_us             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_duration_us;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20310
 * 
 * 
 * cpdef unsigned long device_get_last_bbx_flush_time(intptr_t device, intptr_t timestamp) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_last_bbx_flush_time", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_95device_get_last_bbx_flush_time(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_94device_get_last_bbx_flush_time, "device_get_last_bbx_flush_time(intptr_t device, intptr_t timestamp) -> unsigned long\n\nRetrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    timestamp (intptr_t): The start timestamp of the last BBX Flush.\n\nReturns:\n    unsigned long: The duration (us) of the last BBX Flush.\n\n.. seealso:: `nvmlDeviceGetLastBBXFlushTime`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_95device_get_last_bbx_flush_time = {"device_get_last_bbx_flush_time", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_95device_get_last_bbx_flush_time, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_94device_get_last_bbx_flush_time};
/* Python-callable wrapper for device_get_last_bbx_flush_time.
 * Unpacks exactly two arguments, `device` and `timestamp` (positional or
 * keyword), converts them to intptr_t and forwards to the C-level
 * implementation.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_95device_get_last_bbx_flush_time(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_timestamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_last_bbx_flush_time (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_timestamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely(). With the GCC definition
     * `unlikely(x) == __builtin_expect(!!(x), 0)` the old form
     * `unlikely(__pyx_kwds_len) < 0` collapsed the value to 0/1 before
     * comparing, so the error branch was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20310, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20310, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20310, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_last_bbx_flush_time", 0) < (0)) __PYX_ERR(0, 20310, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_last_bbx_flush_time", 1, 2, 2, i); __PYX_ERR(0, 20310, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20310, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20310, __pyx_L3_error)
    }
    /* intptr_t is converted via PyLong_AsSsize_t -- assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on CPython's
     * supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20310, __pyx_L3_error)
    __pyx_v_timestamp = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_timestamp == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20310, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_last_bbx_flush_time", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20310, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_last_bbx_flush_time", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_94device_get_last_bbx_flush_time(__pyx_self, __pyx_v_device, __pyx_v_timestamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation slot for `device_get_last_bbx_flush_time`:
 * forwards to the C-level cpdef body (skip_dispatch=1) and boxes the
 * resulting unsigned long into a Python int.  Returns NULL with an
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_94device_get_last_bbx_flush_time(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_timestamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned long __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_last_bbx_flush_time", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 can be a legitimate return value, so an error is recognized only when
     PyErr_Occurred() is also set (Cython "except? 0" convention). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_last_bbx_flush_time(__pyx_v_device, __pyx_v_timestamp, 1); if (unlikely(__pyx_t_1 == ((unsigned long)0) && PyErr_Occurred())) __PYX_ERR(0, 20310, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_long(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20310, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_last_bbx_flush_time", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20329
 * 
 * 
 * cpdef int device_get_display_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the display mode for the device.
 * 
*/

/* Forward declaration of the Python wrapper (defined further below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_97device_get_display_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_display_mode`: releases the GIL, calls
 * nvmlDeviceGetDisplayMode, validates the status via check_status (which
 * raises on non-success), and returns the enable-state enum as an int.
 * Per the "except? -1" contract, -1 may be a valid value; callers must also
 * check PyErr_Occurred(). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_display_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_display;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20341
 *     """
 *     cdef _EnableState display
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDisplayMode(<Device>device, &display)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20342
 *     cdef _EnableState display
 *     with nogil:
 *         __status__ = nvmlDeviceGetDisplayMode(<Device>device, &display)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>display
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_display)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20342, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20341
 *     """
 *     cdef _EnableState display
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDisplayMode(<Device>device, &display)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20343
 *     with nogil:
 *         __status__ = nvmlDeviceGetDisplayMode(<Device>device, &display)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>display
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20343, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20344
 *         __status__ = nvmlDeviceGetDisplayMode(<Device>device, &display)
 *     check_status(__status__)
 *     return <int>display             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_display);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20329
 * 
 * 
 * cpdef int device_get_display_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the display mode for the device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_display_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_97device_get_display_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the `device_get_display_mode` Python
 * callable (registered via METH_FASTCALL|METH_KEYWORDS). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_96device_get_display_mode, "device_get_display_mode(intptr_t device) -> int\n\nRetrieves the display mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the display mode.\n\n.. seealso:: `nvmlDeviceGetDisplayMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_97device_get_display_mode = {"device_get_display_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_97device_get_display_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_96device_get_display_mode};
/* Argument-unpacking wrapper: parses exactly one positional-or-keyword
 * argument ("device"), converts it to intptr_t, then delegates to the
 * __pyx_pf_ implementation.  On any parse/conversion error, owned argument
 * references are dropped and NULL is returned with a traceback entry. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_97device_get_display_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_display_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20329, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20329, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_display_mode", 0) < (0)) __PYX_ERR(0, 20329, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_display_mode", 1, 1, 1, i); __PYX_ERR(0, 20329, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20329, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20329, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_display_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20329, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_display_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_96device_get_display_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation slot for `device_get_display_mode`: forwards
 * to the C-level cpdef body (skip_dispatch=1) and boxes the int result into
 * a Python int.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_96device_get_display_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_display_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 can be a legitimate value ("except? -1"), so also check PyErr_Occurred(). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_display_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20329, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_display_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20347
 * 
 * 
 * cpdef int device_get_display_active(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the display active state for the device.
 * 
*/

/* Forward declaration of the Python wrapper (defined further below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_99device_get_display_active(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_display_active`: releases the GIL, calls
 * nvmlDeviceGetDisplayActive, validates the status via check_status (which
 * raises on non-success), and returns the enable-state enum as an int.
 * Per the "except? -1" contract, -1 may be a valid value; callers must also
 * check PyErr_Occurred(). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_display_active(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_is_active;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20359
 *     """
 *     cdef _EnableState is_active
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDisplayActive(<Device>device, &is_active)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20360
 *     cdef _EnableState is_active
 *     with nogil:
 *         __status__ = nvmlDeviceGetDisplayActive(<Device>device, &is_active)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>is_active
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayActive(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_is_active)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20360, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20359
 *     """
 *     cdef _EnableState is_active
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDisplayActive(<Device>device, &is_active)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20361
 *     with nogil:
 *         __status__ = nvmlDeviceGetDisplayActive(<Device>device, &is_active)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>is_active
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20361, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20362
 *         __status__ = nvmlDeviceGetDisplayActive(<Device>device, &is_active)
 *     check_status(__status__)
 *     return <int>is_active             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_is_active);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20347
 * 
 * 
 * cpdef int device_get_display_active(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the display active state for the device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_display_active", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_99device_get_display_active(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the `device_get_display_active`
 * Python callable (registered via METH_FASTCALL|METH_KEYWORDS). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_98device_get_display_active, "device_get_display_active(intptr_t device) -> int\n\nRetrieves the display active state for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the display active state.\n\n.. seealso:: `nvmlDeviceGetDisplayActive`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_99device_get_display_active = {"device_get_display_active", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_99device_get_display_active, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_98device_get_display_active};
/* Argument-unpacking wrapper: parses exactly one positional-or-keyword
 * argument ("device"), converts it to intptr_t, then delegates to the
 * __pyx_pf_ implementation.  On any parse/conversion error, owned argument
 * references are dropped and NULL is returned with a traceback entry. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_99device_get_display_active(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_display_active (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20347, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20347, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_display_active", 0) < (0)) __PYX_ERR(0, 20347, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_display_active", 1, 1, 1, i); __PYX_ERR(0, 20347, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20347, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20347, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_display_active", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20347, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_display_active", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_98device_get_display_active(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation slot for `device_get_display_active`: forwards
 * to the C-level cpdef body (skip_dispatch=1) and boxes the int result into
 * a Python int.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_98device_get_display_active(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_display_active", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 can be a legitimate value ("except? -1"), so also check PyErr_Occurred(). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_display_active(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20347, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_display_active", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20365
 * 
 * 
 * cpdef int device_get_persistence_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the persistence mode associated with this device.
 * 
*/

/* Forward declaration of the Python wrapper (defined further below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_101device_get_persistence_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_persistence_mode`: releases the GIL,
 * calls nvmlDeviceGetPersistenceMode, validates the status via check_status
 * (which raises on non-success), and returns the enable-state enum as an
 * int.  Per the "except? -1" contract, -1 may be a valid value; callers
 * must also check PyErr_Occurred(). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_persistence_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20377
 *     """
 *     cdef _EnableState mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPersistenceMode(<Device>device, &mode)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20378
 *     cdef _EnableState mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetPersistenceMode(<Device>device, &mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>mode
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPersistenceMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20378, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20377
 *     """
 *     cdef _EnableState mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPersistenceMode(<Device>device, &mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20379
 *     with nogil:
 *         __status__ = nvmlDeviceGetPersistenceMode(<Device>device, &mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>mode
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20379, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20380
 *         __status__ = nvmlDeviceGetPersistenceMode(<Device>device, &mode)
 *     check_status(__status__)
 *     return <int>mode             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20365
 * 
 * 
 * cpdef int device_get_persistence_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the persistence mode associated with this device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_persistence_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_101device_get_persistence_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the `device_get_persistence_mode`
 * Python callable (registered via METH_FASTCALL|METH_KEYWORDS). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_100device_get_persistence_mode, "device_get_persistence_mode(intptr_t device) -> int\n\nRetrieves the persistence mode associated with this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the current driver persistence mode.\n\n.. seealso:: `nvmlDeviceGetPersistenceMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_101device_get_persistence_mode = {"device_get_persistence_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_101device_get_persistence_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_100device_get_persistence_mode};
/* Argument-unpacking wrapper: parses exactly one positional-or-keyword
 * argument ("device"), converts it to intptr_t, then delegates to the
 * __pyx_pf_ implementation.  On any parse/conversion error, owned argument
 * references are dropped and NULL is returned with a traceback entry. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_101device_get_persistence_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_persistence_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20365, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20365, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_persistence_mode", 0) < (0)) __PYX_ERR(0, 20365, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_persistence_mode", 1, 1, 1, i); __PYX_ERR(0, 20365, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20365, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20365, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_persistence_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20365, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_persistence_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_100device_get_persistence_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation slot for `device_get_persistence_mode`:
 * forwards to the C-level cpdef body (skip_dispatch=1) and boxes the int
 * result into a Python int.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_100device_get_persistence_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_persistence_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 can be a legitimate value ("except? -1"), so also check PyErr_Occurred(). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_persistence_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20365, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_persistence_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20383
 * 
 * 
 * cpdef object device_get_pci_info_ext(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves PCI attributes of this device.
 * 
*/

/* Forward declaration of the Python wrapper (defined further below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_103device_get_pci_info_ext(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_pci_info_ext`: allocates a PciInfoExt_v1
 * Python wrapper object, obtains the address of its embedded nvmlPciInfoExt_t
 * via _get_ptr(), stamps the versioned-struct header (struct size | version 1
 * in the top byte), fills the struct with nvmlDeviceGetPciInfoExt under a
 * released GIL, checks the status, and returns the wrapper object.  The
 * wrapper owns the struct storage, so no separate cleanup is needed. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_ext(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *__pyx_v_pci_py = 0;
  nvmlPciInfoExt_t *__pyx_v_pci;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pci_info_ext", 0);

  /* "cuda/bindings/_nvml.pyx":20394
 *     .. seealso:: `nvmlDeviceGetPciInfoExt`
 *     """
 *     cdef PciInfoExt_v1 pci_py = PciInfoExt_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfoExt_t *pci = <nvmlPciInfoExt_t *><intptr_t>(pci_py._get_ptr())
 *     pci.version = sizeof(nvmlPciInfoExt_v1_t) | (1 << 24)
*/
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20394, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pci_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20395
 *     """
 *     cdef PciInfoExt_v1 pci_py = PciInfoExt_v1()
 *     cdef nvmlPciInfoExt_t *pci = <nvmlPciInfoExt_t *><intptr_t>(pci_py._get_ptr())             # <<<<<<<<<<<<<<
 *     pci.version = sizeof(nvmlPciInfoExt_v1_t) | (1 << 24)
 *     with nogil:
*/
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)__pyx_v_pci_py->__pyx_vtab)->_get_ptr(__pyx_v_pci_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20395, __pyx_L1_error)
  __pyx_v_pci = ((nvmlPciInfoExt_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20396
 *     cdef PciInfoExt_v1 pci_py = PciInfoExt_v1()
 *     cdef nvmlPciInfoExt_t *pci = <nvmlPciInfoExt_t *><intptr_t>(pci_py._get_ptr())
 *     pci.version = sizeof(nvmlPciInfoExt_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPciInfoExt(<Device>device, pci)
*/
  __pyx_v_pci->version = ((sizeof(nvmlPciInfoExt_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":20397
 *     cdef nvmlPciInfoExt_t *pci = <nvmlPciInfoExt_t *><intptr_t>(pci_py._get_ptr())
 *     pci.version = sizeof(nvmlPciInfoExt_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPciInfoExt(<Device>device, pci)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20398
 *     pci.version = sizeof(nvmlPciInfoExt_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetPciInfoExt(<Device>device, pci)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pci_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfoExt(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_pci); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20398, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20397
 *     cdef nvmlPciInfoExt_t *pci = <nvmlPciInfoExt_t *><intptr_t>(pci_py._get_ptr())
 *     pci.version = sizeof(nvmlPciInfoExt_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPciInfoExt(<Device>device, pci)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20399
 *     with nogil:
 *         __status__ = nvmlDeviceGetPciInfoExt(<Device>device, pci)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pci_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20399, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20400
 *         __status__ = nvmlDeviceGetPciInfoExt(<Device>device, pci)
 *     check_status(__status__)
 *     return pci_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pci_py);
  __pyx_r = ((PyObject *)__pyx_v_pci_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20383
 * 
 * 
 * cpdef object device_get_pci_info_ext(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves PCI attributes of this device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pci_info_ext", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pci_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for `device_get_pci_info_ext` (registered via the
 * PyMethodDef below with METH_FASTCALL|METH_KEYWORDS).  Accepts exactly one
 * argument, `device`, positionally or by keyword, converts it to intptr_t,
 * and delegates to __pyx_pf_..._102device_get_pci_info_ext. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_103device_get_pci_info_ext(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_102device_get_pci_info_ext, "device_get_pci_info_ext(intptr_t device)\n\nRetrieves PCI attributes of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlPciInfoExt_v1_t: Reference in which to return the PCI info.\n\n.. seealso:: `nvmlDeviceGetPciInfoExt`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_103device_get_pci_info_ext = {"device_get_pci_info_ext", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_103device_get_pci_info_ext, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_102device_get_pci_info_ext};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_103device_get_pci_info_ext(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the unpacked arguments; every exit
   * path below releases them. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pci_info_ext (wrapper)", 0);
  /* For the tuple-based (non-FASTCALL) calling convention, recover the
   * positional-argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Argument unpacking: the slow path handles keywords, the fast path
   * requires exactly one positional argument. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only __pyx_kwds_len here, so the branch
     * hint applies to the value rather than the comparison — harmless, but
     * the hint is ineffective. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20383, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20383, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pci_info_ext", 0) < (0)) __PYX_ERR(0, 20383, __pyx_L3_error)
      /* Any slot still empty after keyword parsing means a required
       * argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pci_info_ext", 1, 1, 1, i); __PYX_ERR(0, 20383, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20383, __pyx_L3_error)
    }
    /* NOTE(review): the intptr_t argument is converted with PyLong_AsSsize_t,
     * which assumes intptr_t and Py_ssize_t have the same width — TODO confirm
     * this holds on all targeted platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20383, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pci_info_ext", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20383, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: drop the argument references and report NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pci_info_ext", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_102device_get_pci_info_ext(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `device_get_pci_info_ext`: a thin
 * trampoline that invokes the shared cpdef C function with
 * __pyx_skip_dispatch=1 (no Python override lookup) and returns its new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_102device_get_pci_info_ext(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pci_info_ext", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_ext(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20383, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pci_info_ext", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20403
 * 
 * 
 * cpdef object device_get_pci_info_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI attributes of this device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_105device_get_pci_info_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_pci_info_v3`:
 * 1. constructs a Python `PciInfo` wrapper object,
 * 2. obtains the address of its embedded nvmlPciInfo_t via _get_ptr()
 *    (returned as intptr_t and reinterpreted as a struct pointer),
 * 3. fills the struct by calling nvmlDeviceGetPciInfo_v3 with the GIL
 *    released,
 * 4. raises via check_status() on a non-success NVML status, and
 * 5. returns a new reference to the wrapper, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_v3(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_pci_py = 0;
  nvmlPciInfo_t *__pyx_v_pci;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pci_info_v3", 0);

  /* "cuda/bindings/_nvml.pyx":20414
 *     .. seealso:: `nvmlDeviceGetPciInfo_v3`
 *     """
 *     cdef PciInfo pci_py = PciInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the PciInfo type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20414, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pci_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20415
 *     """
 *     cdef PciInfo pci_py = PciInfo()
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPciInfo_v3(<Device>device, pci)
 */
  /* _get_ptr() (vtable call) yields the wrapper's internal buffer address;
   * the pointer stays valid as long as pci_py is alive, which this function
   * guarantees by holding a reference until return. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_pci_py->__pyx_vtab)->_get_ptr(__pyx_v_pci_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20415, __pyx_L1_error)
  __pyx_v_pci = ((nvmlPciInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20416
 *     cdef PciInfo pci_py = PciInfo()
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPciInfo_v3(<Device>device, pci)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20417
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetPciInfo_v3(<Device>device, pci)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pci_py
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is a sentinel status from the
         * cy_nvml dispatch layer; __Pyx_ErrOccurredWithGIL re-acquires the
         * GIL to check whether a Python exception (presumably a symbol
         * loading failure) was set — TODO confirm against cy_nvml. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfo_v3(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_pci); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20417, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20416
 *     cdef PciInfo pci_py = PciInfo()
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPciInfo_v3(<Device>device, pci)
 *     check_status(__status__)
 */
      /* Both the normal and the error path re-acquire the GIL here. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20418
 *     with nogil:
 *         __status__ = nvmlDeviceGetPciInfo_v3(<Device>device, pci)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pci_py
 * 
 */
  /* check_status returns 1 (with a Python exception set) on NVML failure. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20418, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20419
 *         __status__ = nvmlDeviceGetPciInfo_v3(<Device>device, pci)
 *     check_status(__status__)
 *     return pci_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pci_py);
  __pyx_r = ((PyObject *)__pyx_v_pci_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20403
 * 
 * 
 * cpdef object device_get_pci_info_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI attributes of this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pci_info_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pci_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for `device_get_pci_info_v3` (METH_FASTCALL|
 * METH_KEYWORDS).  Unpacks the single `device` argument (positional or
 * keyword), converts it to intptr_t, and delegates to
 * __pyx_pf_..._104device_get_pci_info_v3. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_105device_get_pci_info_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_104device_get_pci_info_v3, "device_get_pci_info_v3(intptr_t device)\n\nRetrieves the PCI attributes of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlPciInfo_t: Reference in which to return the PCI info.\n\n.. seealso:: `nvmlDeviceGetPciInfo_v3`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_105device_get_pci_info_v3 = {"device_get_pci_info_v3", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_105device_get_pci_info_v3, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_104device_get_pci_info_v3};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_105device_get_pci_info_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references to the unpacked arguments; released on every exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pci_info_v3 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Keyword-aware slow path vs. exactly-one-positional fast path. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20403, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20403, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pci_info_v3", 0) < (0)) __PYX_ERR(0, 20403, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pci_info_v3", 1, 1, 1, i); __PYX_ERR(0, 20403, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20403, __pyx_L3_error)
    }
    /* Python int -> intptr_t via PyLong_AsSsize_t (same-width assumption). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20403, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pci_info_v3", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20403, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pci_info_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_104device_get_pci_info_v3(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `device_get_pci_info_v3`: trampoline into
 * the shared cpdef C function with __pyx_skip_dispatch=1; returns a new
 * reference or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_104device_get_pci_info_v3(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pci_info_v3", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_v3(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pci_info_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20422
 * 
 * 
 * cpdef unsigned int device_get_max_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link generation possible with this device and system.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_107device_get_max_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_max_pcie_link_generation`.
 * Calls nvmlDeviceGetMaxPcieLinkGeneration with the GIL released, raises via
 * check_status() on failure, and returns the queried value.  Declared
 * `except? 0` in the .pyx: 0 is an ambiguous error sentinel, so callers must
 * also check PyErr_Occurred() when 0 is returned. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_generation(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_max_link_gen;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20434
 *     """
 *     cdef unsigned int max_link_gen
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxPcieLinkGeneration(<Device>device, &max_link_gen)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20435
 *     cdef unsigned int max_link_gen
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxPcieLinkGeneration(<Device>device, &max_link_gen)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return max_link_gen
 */
        /* The sentinel status marks a dispatch/loading failure; the error
         * details, if any, are on the Python error indicator (checked with
         * the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkGeneration(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_max_link_gen)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20435, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20434
 *     """
 *     cdef unsigned int max_link_gen
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxPcieLinkGeneration(<Device>device, &max_link_gen)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20436
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxPcieLinkGeneration(<Device>device, &max_link_gen)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return max_link_gen
 * 
 */
  /* check_status returns 1 (with an exception set) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20436, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20437
 *         __status__ = nvmlDeviceGetMaxPcieLinkGeneration(<Device>device, &max_link_gen)
 *     check_status(__status__)
 *     return max_link_gen             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_max_link_gen;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20422
 * 
 * 
 * cpdef unsigned int device_get_max_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link generation possible with this device and system.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for `device_get_max_pcie_link_generation`
 * (METH_FASTCALL|METH_KEYWORDS).  Unpacks the single `device` argument,
 * converts it to intptr_t, and delegates to
 * __pyx_pf_..._106device_get_max_pcie_link_generation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_107device_get_max_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_106device_get_max_pcie_link_generation, "device_get_max_pcie_link_generation(intptr_t device) -> unsigned int\n\nRetrieves the maximum PCIe link generation possible with this device and system.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the max PCIe link generation.\n\n.. seealso:: `nvmlDeviceGetMaxPcieLinkGeneration`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_107device_get_max_pcie_link_generation = {"device_get_max_pcie_link_generation", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_107device_get_max_pcie_link_generation, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_106device_get_max_pcie_link_generation};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_107device_get_max_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references to the unpacked arguments; released on every exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_max_pcie_link_generation (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Keyword-aware slow path vs. exactly-one-positional fast path. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20422, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20422, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_max_pcie_link_generation", 0) < (0)) __PYX_ERR(0, 20422, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_max_pcie_link_generation", 1, 1, 1, i); __PYX_ERR(0, 20422, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20422, __pyx_L3_error)
    }
    /* Python int -> intptr_t via PyLong_AsSsize_t (same-width assumption). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20422, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_max_pcie_link_generation", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20422, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_106device_get_max_pcie_link_generation(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `device_get_max_pcie_link_generation`:
 * invokes the cpdef C function (skip_dispatch=1) and boxes the unsigned int
 * result into a Python int.  Because the cpdef is declared `except? 0`, a
 * return of 0 is only an error if PyErr_Occurred() is also set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_106device_get_max_pcie_link_generation(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_max_pcie_link_generation", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_generation(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20422, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20440
 * 
 * 
 * cpdef unsigned int device_get_gpu_max_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link generation supported by this device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_109device_get_gpu_max_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_gpu_max_pcie_link_generation`.
 * Calls nvmlDeviceGetGpuMaxPcieLinkGeneration with the GIL released, raises
 * via check_status() on failure, and returns the queried value.  Declared
 * `except? 0`: a return of 0 requires a PyErr_Occurred() check at the
 * call site. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_max_pcie_link_generation(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_max_link_gen_device;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20452
 *     """
 *     cdef unsigned int max_link_gen_device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuMaxPcieLinkGeneration(<Device>device, &max_link_gen_device)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20453
 *     cdef unsigned int max_link_gen_device
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuMaxPcieLinkGeneration(<Device>device, &max_link_gen_device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return max_link_gen_device
 */
        /* The sentinel status marks a dispatch/loading failure; the Python
         * error indicator is checked with the GIL temporarily re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuMaxPcieLinkGeneration(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_max_link_gen_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20453, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20452
 *     """
 *     cdef unsigned int max_link_gen_device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuMaxPcieLinkGeneration(<Device>device, &max_link_gen_device)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20454
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuMaxPcieLinkGeneration(<Device>device, &max_link_gen_device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return max_link_gen_device
 * 
 */
  /* check_status returns 1 (with an exception set) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20454, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20455
 *         __status__ = nvmlDeviceGetGpuMaxPcieLinkGeneration(<Device>device, &max_link_gen_device)
 *     check_status(__status__)
 *     return max_link_gen_device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_max_link_gen_device;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20440
 * 
 * 
 * cpdef unsigned int device_get_gpu_max_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link generation supported by this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_max_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for `device_get_gpu_max_pcie_link_generation`
 * (METH_FASTCALL|METH_KEYWORDS).  Unpacks the single `device` argument,
 * converts it to intptr_t, and delegates to
 * __pyx_pf_..._108device_get_gpu_max_pcie_link_generation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_109device_get_gpu_max_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_108device_get_gpu_max_pcie_link_generation, "device_get_gpu_max_pcie_link_generation(intptr_t device) -> unsigned int\n\nRetrieves the maximum PCIe link generation supported by this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the max PCIe link generation.\n\n.. seealso:: `nvmlDeviceGetGpuMaxPcieLinkGeneration`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_109device_get_gpu_max_pcie_link_generation = {"device_get_gpu_max_pcie_link_generation", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_109device_get_gpu_max_pcie_link_generation, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_108device_get_gpu_max_pcie_link_generation};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_109device_get_gpu_max_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references to the unpacked arguments; released on every exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_max_pcie_link_generation (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Keyword-aware slow path vs. exactly-one-positional fast path. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20440, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20440, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_max_pcie_link_generation", 0) < (0)) __PYX_ERR(0, 20440, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_max_pcie_link_generation", 1, 1, 1, i); __PYX_ERR(0, 20440, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20440, __pyx_L3_error)
    }
    /* Python int -> intptr_t via PyLong_AsSsize_t (same-width assumption). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20440, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_max_pcie_link_generation", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20440, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_max_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_108device_get_gpu_max_pcie_link_generation(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_gpu_max_pcie_link_generation`:
 * delegates to the cpdef C implementation (skip_dispatch=1, so no Python
 * method-table lookup) and boxes the `unsigned int` result as a Python int.
 * Returns a new reference, or NULL with an exception set on failure.
 * Note: 0 is the cpdef's `except? 0` sentinel, so an extra PyErr_Occurred()
 * check disambiguates a legitimate 0 result from an error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_108device_get_gpu_max_pcie_link_generation(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_max_pcie_link_generation", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_max_pcie_link_generation(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20440, __pyx_L1_error)
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20440, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_max_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20458
 * 
 * 
 * cpdef unsigned int device_get_max_pcie_link_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link width possible with this device and system.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_111device_get_max_pcie_link_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `device_get_max_pcie_link_width(intptr_t device)`.
 * Releases the GIL, calls the cy_nvml shim for nvmlDeviceGetMaxPcieLinkWidth,
 * re-acquires the GIL, then raises via check_status() on a non-success NVML
 * status. Returns the queried link width; 0 is the `except? 0` error
 * sentinel (callers must also check PyErr_Occurred()).
 * The _NVMLRETURN_T_INTERNAL_LOADING_ERROR comparison detects a failure to
 * load the underlying NVML symbol while still inside the nogil section. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_width(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_max_link_width;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20470
 *     """
 *     cdef unsigned int max_link_width
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxPcieLinkWidth(<Device>device, &max_link_width)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20471
 *     cdef unsigned int max_link_width
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxPcieLinkWidth(<Device>device, &max_link_width)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return max_link_width
 */
        /* Driver call runs without the GIL; the error branch below re-acquires
         * it (via __Pyx_ErrOccurredWithGIL / __pyx_L4_error) before raising. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkWidth(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_max_link_width)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20471, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20470
 *     """
 *     cdef unsigned int max_link_width
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxPcieLinkWidth(<Device>device, &max_link_width)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20472
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxPcieLinkWidth(<Device>device, &max_link_width)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return max_link_width
 * 
 */
  /* check_status() raises a Python exception (returns 1) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20472, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20473
 *         __status__ = nvmlDeviceGetMaxPcieLinkWidth(<Device>device, &max_link_width)
 *     check_status(__status__)
 *     return max_link_width             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_max_link_width;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20458
 * 
 * 
 * cpdef unsigned int device_get_max_pcie_link_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link width possible with this device and system.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_pcie_link_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-callable wrapper; the signature differs
 * depending on whether the METH_FASTCALL calling convention is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_111device_get_max_pcie_link_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring for the Python-visible `device_get_max_pcie_link_width`. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_110device_get_max_pcie_link_width, "device_get_max_pcie_link_width(intptr_t device) -> unsigned int\n\nRetrieves the maximum PCIe link width possible with this device and system.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the max PCIe link generation.\n\n.. seealso:: `nvmlDeviceGetMaxPcieLinkWidth`");
/* Method-table entry registering the wrapper under its Python name. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_111device_get_max_pcie_link_width = {"device_get_max_pcie_link_width", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_111device_get_max_pcie_link_width, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_110device_get_max_pcie_link_width};
/* Python-callable wrapper for `device_get_max_pcie_link_width`.
 * Unpacks exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and forwards to the pf_110 body. Argument objects held in
 * `values[]` are released on both the error and normal exit paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_111device_get_max_pcie_link_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_max_pcie_link_width (wrapper)", 0);
  /* Non-fastcall builds receive an argument tuple; recover its length here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20458, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20458, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_max_pcie_link_width", 0) < (0)) __PYX_ERR(0, 20458, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_max_pcie_link_width", 1, 1, 1, i); __PYX_ERR(0, 20458, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20458, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 may signal a conversion error. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20458, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_max_pcie_link_width", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20458, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_pcie_link_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_110device_get_max_pcie_link_width(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_max_pcie_link_width`: delegates to the
 * cpdef C implementation (skip_dispatch=1) and boxes the result as a Python
 * int. Returns a new reference, or NULL with an exception set. The 0 return
 * is the `except? 0` sentinel, disambiguated via PyErr_Occurred(). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_110device_get_max_pcie_link_width(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_max_pcie_link_width", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_width(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20458, __pyx_L1_error)
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20458, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_pcie_link_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20476
 * 
 * 
 * cpdef unsigned int device_get_curr_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current PCIe link generation.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_113device_get_curr_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `device_get_curr_pcie_link_generation(intptr_t device)`.
 * Releases the GIL, calls the cy_nvml shim for
 * nvmlDeviceGetCurrPcieLinkGeneration, re-acquires the GIL, then raises via
 * check_status() on a non-success NVML status. Returns the current link
 * generation; 0 is the `except? 0` error sentinel. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_generation(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_curr_link_gen;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20488
 *     """
 *     cdef unsigned int curr_link_gen
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrPcieLinkGeneration(<Device>device, &curr_link_gen)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20489
 *     cdef unsigned int curr_link_gen
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrPcieLinkGeneration(<Device>device, &curr_link_gen)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return curr_link_gen
 */
        /* Driver call runs without the GIL; the sentinel comparison detects a
         * failure to load the underlying NVML symbol. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkGeneration(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_curr_link_gen)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20489, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20488
 *     """
 *     cdef unsigned int curr_link_gen
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrPcieLinkGeneration(<Device>device, &curr_link_gen)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20490
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrPcieLinkGeneration(<Device>device, &curr_link_gen)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return curr_link_gen
 * 
 */
  /* check_status() raises a Python exception (returns 1) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20490, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20491
 *         __status__ = nvmlDeviceGetCurrPcieLinkGeneration(<Device>device, &curr_link_gen)
 *     check_status(__status__)
 *     return curr_link_gen             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_curr_link_gen;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20476
 * 
 * 
 * cpdef unsigned int device_get_curr_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current PCIe link generation.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_curr_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-callable wrapper; the signature differs
 * depending on whether the METH_FASTCALL calling convention is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_113device_get_curr_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring for the Python-visible `device_get_curr_pcie_link_generation`. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_112device_get_curr_pcie_link_generation, "device_get_curr_pcie_link_generation(intptr_t device) -> unsigned int\n\nRetrieves the current PCIe link generation.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the current PCIe link generation.\n\n.. seealso:: `nvmlDeviceGetCurrPcieLinkGeneration`");
/* Method-table entry registering the wrapper under its Python name. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_113device_get_curr_pcie_link_generation = {"device_get_curr_pcie_link_generation", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_113device_get_curr_pcie_link_generation, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_112device_get_curr_pcie_link_generation};
/* Python-callable wrapper for `device_get_curr_pcie_link_generation`.
 * Unpacks exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and forwards to the pf_112 body. Argument objects held in
 * `values[]` are released on both the error and normal exit paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_113device_get_curr_pcie_link_generation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_curr_pcie_link_generation (wrapper)", 0);
  /* Non-fastcall builds receive an argument tuple; recover its length here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20476, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20476, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_curr_pcie_link_generation", 0) < (0)) __PYX_ERR(0, 20476, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_curr_pcie_link_generation", 1, 1, 1, i); __PYX_ERR(0, 20476, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20476, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 may signal a conversion error. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20476, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_curr_pcie_link_generation", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20476, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_curr_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_112device_get_curr_pcie_link_generation(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_curr_pcie_link_generation`: delegates to
 * the cpdef C implementation (skip_dispatch=1) and boxes the result as a
 * Python int. Returns a new reference, or NULL with an exception set. The 0
 * return is the `except? 0` sentinel, disambiguated via PyErr_Occurred(). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_112device_get_curr_pcie_link_generation(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_curr_pcie_link_generation", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_generation(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20476, __pyx_L1_error)
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_curr_pcie_link_generation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20494
 * 
 * 
 * cpdef unsigned int device_get_curr_pcie_link_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current PCIe link width.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_115device_get_curr_pcie_link_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `device_get_curr_pcie_link_width(intptr_t device)`.
 * Releases the GIL, calls the cy_nvml shim for nvmlDeviceGetCurrPcieLinkWidth,
 * re-acquires the GIL, then raises via check_status() on a non-success NVML
 * status. Returns the current link width; 0 is the `except? 0` error
 * sentinel. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_width(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_curr_link_width;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20506
 *     """
 *     cdef unsigned int curr_link_width
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrPcieLinkWidth(<Device>device, &curr_link_width)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20507
 *     cdef unsigned int curr_link_width
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrPcieLinkWidth(<Device>device, &curr_link_width)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return curr_link_width
 */
        /* Driver call runs without the GIL; the sentinel comparison detects a
         * failure to load the underlying NVML symbol. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkWidth(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_curr_link_width)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20507, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20506
 *     """
 *     cdef unsigned int curr_link_width
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrPcieLinkWidth(<Device>device, &curr_link_width)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20508
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrPcieLinkWidth(<Device>device, &curr_link_width)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return curr_link_width
 * 
 */
  /* check_status() raises a Python exception (returns 1) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20508, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20509
 *         __status__ = nvmlDeviceGetCurrPcieLinkWidth(<Device>device, &curr_link_width)
 *     check_status(__status__)
 *     return curr_link_width             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_curr_link_width;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20494
 * 
 * 
 * cpdef unsigned int device_get_curr_pcie_link_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current PCIe link width.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_curr_pcie_link_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-callable wrapper; the signature differs
 * depending on whether the METH_FASTCALL calling convention is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_115device_get_curr_pcie_link_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring for the Python-visible `device_get_curr_pcie_link_width`. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_114device_get_curr_pcie_link_width, "device_get_curr_pcie_link_width(intptr_t device) -> unsigned int\n\nRetrieves the current PCIe link width.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the current PCIe link generation.\n\n.. seealso:: `nvmlDeviceGetCurrPcieLinkWidth`");
/* Method-table entry registering the wrapper under its Python name. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_115device_get_curr_pcie_link_width = {"device_get_curr_pcie_link_width", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_115device_get_curr_pcie_link_width, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_114device_get_curr_pcie_link_width};
/* Python-callable wrapper for `device_get_curr_pcie_link_width`.
 * Unpacks exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and forwards to the pf_114 body. Argument objects held in
 * `values[]` are released on both the error and normal exit paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_115device_get_curr_pcie_link_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_curr_pcie_link_width (wrapper)", 0);
  /* Non-fastcall builds receive an argument tuple; recover its length here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20494, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20494, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_curr_pcie_link_width", 0) < (0)) __PYX_ERR(0, 20494, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_curr_pcie_link_width", 1, 1, 1, i); __PYX_ERR(0, 20494, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20494, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 may signal a conversion error. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20494, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_curr_pcie_link_width", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20494, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_curr_pcie_link_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_114device_get_curr_pcie_link_width(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_curr_pcie_link_width`: delegates to the
 * cpdef C implementation (skip_dispatch=1) and boxes the result as a Python
 * int. Returns a new reference, or NULL with an exception set. The 0 return
 * is the `except? 0` sentinel, disambiguated via PyErr_Occurred(). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_114device_get_curr_pcie_link_width(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_curr_pcie_link_width", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_width(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20494, __pyx_L1_error)
  /* Box the C unsigned int into a Python int (new reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_curr_pcie_link_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20512
 * 
 * 
 * cpdef unsigned int device_get_pcie_throughput(intptr_t device, int counter) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve PCIe utilization information. This function is querying a byte counter over a 20ms interval and thus is the PCIe throughput over that interval.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_117device_get_pcie_throughput(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `device_get_pcie_throughput(intptr_t device, int counter)`.
 * Releases the GIL, calls the cy_nvml shim for nvmlDeviceGetPcieThroughput
 * with `counter` cast to the _PcieUtilCounter enum, re-acquires the GIL,
 * then raises via check_status() on a non-success NVML status. Returns the
 * queried throughput value; 0 is the `except? 0` error sentinel. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_throughput(intptr_t __pyx_v_device, int __pyx_v_counter, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_value;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20525
 *     """
 *     cdef unsigned int value
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieThroughput(<Device>device, <_PcieUtilCounter>counter, &value)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20526
 *     cdef unsigned int value
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieThroughput(<Device>device, <_PcieUtilCounter>counter, &value)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return value
 */
        /* Driver call runs without the GIL; the sentinel comparison detects a
         * failure to load the underlying NVML symbol. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieThroughput(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__PcieUtilCounter)__pyx_v_counter), (&__pyx_v_value)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20526, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20525
 *     """
 *     cdef unsigned int value
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieThroughput(<Device>device, <_PcieUtilCounter>counter, &value)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20527
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieThroughput(<Device>device, <_PcieUtilCounter>counter, &value)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return value
 * 
 */
  /* check_status() raises a Python exception (returns 1) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20527, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20528
 *         __status__ = nvmlDeviceGetPcieThroughput(<Device>device, <_PcieUtilCounter>counter, &value)
 *     check_status(__status__)
 *     return value             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_value;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20512
 * 
 * 
 * cpdef unsigned int device_get_pcie_throughput(intptr_t device, int counter) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve PCIe utilization information. This function is querying a byte counter over a 20ms interval and thus is the PCIe throughput over that interval.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_throughput", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-callable wrapper; the signature differs
 * depending on whether the METH_FASTCALL calling convention is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_117device_get_pcie_throughput(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring for the Python-visible `device_get_pcie_throughput`. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_116device_get_pcie_throughput, "device_get_pcie_throughput(intptr_t device, int counter) -> unsigned int\n\nRetrieve PCIe utilization information. This function is querying a byte counter over a 20ms interval and thus is the PCIe throughput over that interval.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    counter (PcieUtilCounter): The specific counter that should be queried ``nvmlPcieUtilCounter_t``.\n\nReturns:\n    unsigned int: Reference in which to return throughput in KB/s.\n\n.. seealso:: `nvmlDeviceGetPcieThroughput`");
/* Method-table entry registering the wrapper under its Python name. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_117device_get_pcie_throughput = {"device_get_pcie_throughput", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_117device_get_pcie_throughput, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_116device_get_pcie_throughput};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_117device_get_pcie_throughput(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_counter;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pcie_throughput (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_counter,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20512, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20512, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20512, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pcie_throughput", 0) < (0)) __PYX_ERR(0, 20512, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pcie_throughput", 1, 2, 2, i); __PYX_ERR(0, 20512, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20512, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20512, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20512, __pyx_L3_error)
    __pyx_v_counter = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_counter == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20512, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pcie_throughput", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20512, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_throughput", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_116device_get_pcie_throughput(__pyx_self, __pyx_v_device, __pyx_v_counter);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim for `device_get_pcie_throughput`: calls the C-level
 * cpdef function (skip_dispatch=1 bypasses the Python-level override lookup)
 * and boxes the `unsigned int` result into a Python int. Because the cpdef is
 * declared `except? 0`, a return of 0 is only an error when a Python
 * exception is also pending (the PyErr_Occurred() check below).
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_116device_get_pcie_throughput(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_counter) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pcie_throughput", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_throughput(__pyx_v_device, __pyx_v_counter, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20512, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20512, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_throughput", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20531
 * 
 * 
 * cpdef unsigned int device_get_pcie_replay_counter(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the PCIe replay counter.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_119device_get_pcie_replay_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef `device_get_pcie_replay_counter(device)`.
 * Releases the GIL around the nvmlDeviceGetPcieReplayCounter call, then
 * raises via check_status() if the call failed. Declared `except? 0`:
 * on error it returns 0 with a Python exception set, so callers must
 * combine a 0 result with PyErr_Occurred() to detect failure.
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_replay_counter(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_value;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20543
 *     """
 *     cdef unsigned int value
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieReplayCounter(<Device>device, &value)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20544
 *     cdef unsigned int value
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieReplayCounter(<Device>device, &value)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return value
 */
        /* The INTERNAL_LOADING_ERROR sentinel means the NVML symbol may have
         * failed to load; it is only an error if a Python exception is pending
         * (checked with the GIL briefly re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieReplayCounter(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_value)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20544, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20543
 *     """
 *     cdef unsigned int value
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieReplayCounter(<Device>device, &value)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20545
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieReplayCounter(<Device>device, &value)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return value
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20545, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20546
 *         __status__ = nvmlDeviceGetPcieReplayCounter(<Device>device, &value)
 *     check_status(__status__)
 *     return value             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_value;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20531
 * 
 * 
 * cpdef unsigned int device_get_pcie_replay_counter(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the PCIe replay counter.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_replay_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/*
 * Cython-generated CPython entry point for `device_get_pcie_replay_counter(device)`.
 * Unpacks the single "device" argument (positional or keyword), converts it to
 * intptr_t, and forwards to the __pyx_pf_ implementation function.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_119device_get_pcie_replay_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_118device_get_pcie_replay_counter, "device_get_pcie_replay_counter(intptr_t device) -> unsigned int\n\nRetrieve the PCIe replay counter.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the counter's value.\n\n.. seealso:: `nvmlDeviceGetPcieReplayCounter`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_119device_get_pcie_replay_counter = {"device_get_pcie_replay_counter", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_119device_get_pcie_replay_counter, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_118device_get_pcie_replay_counter};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_119device_get_pcie_replay_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pcie_replay_counter (wrapper)", 0);
  /* Without fastcall, the positional count must be read from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Argument unpacking: exactly one argument, named "device". */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20531, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20531, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pcie_replay_counter", 0) < (0)) __PYX_ERR(0, 20531, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pcie_replay_counter", 1, 1, 1, i); __PYX_ERR(0, 20531, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20531, __pyx_L3_error)
    }
    /* Convert the Python int handle to a C intptr_t. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20531, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pcie_replay_counter", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20531, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error path: release argument refs, record traceback, return NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_replay_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_118device_get_pcie_replay_counter(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim for `device_get_pcie_replay_counter`: calls the C-level
 * cpdef function (skip_dispatch=1) and boxes the `unsigned int` result into a
 * Python int. Per the `except? 0` convention, a 0 result is an error only
 * when a Python exception is pending.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_118device_get_pcie_replay_counter(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pcie_replay_counter", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_replay_counter(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20531, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20531, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_replay_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20549
 * 
 * 
 * cpdef unsigned int device_get_clock_info(intptr_t device, int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current clock speeds for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_121device_get_clock_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef `device_get_clock_info(device, type)`.
 * Releases the GIL around the nvmlDeviceGetClockInfo call, raises via
 * check_status() on failure, and returns the clock value. Declared
 * `except? 0`: on error it returns 0 with a Python exception set.
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_clock_info(intptr_t __pyx_v_device, int __pyx_v_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_clock;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20562
 *     """
 *     cdef unsigned int clock
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20563
 *     cdef unsigned int clock
 *     with nogil:
 *         __status__ = nvmlDeviceGetClockInfo(<Device>device, <_ClockType>type, &clock)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clock
 */
        /* The INTERNAL_LOADING_ERROR sentinel is only fatal if a Python
         * exception is pending (checked with the GIL briefly re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__ClockType)__pyx_v_type), (&__pyx_v_clock)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20563, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20562
 *     """
 *     cdef unsigned int clock
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20564
 *     with nogil:
 *         __status__ = nvmlDeviceGetClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clock
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20564, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20565
 *         __status__ = nvmlDeviceGetClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)
 *     return clock             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_clock;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20549
 * 
 * 
 * cpdef unsigned int device_get_clock_info(intptr_t device, int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current clock speeds for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/*
 * Cython-generated CPython entry point for `device_get_clock_info(device, type)`.
 * Unpacks the two arguments (positional or keyword "device"/"type"), converts
 * them to C values, and forwards to the __pyx_pf_ implementation function.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_121device_get_clock_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_120device_get_clock_info, "device_get_clock_info(intptr_t device, int type) -> unsigned int\n\nRetrieves the current clock speeds for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    type (ClockType): Identify which clock domain to query.\n\nReturns:\n    unsigned int: Reference in which to return the clock speed in MHz.\n\n.. seealso:: `nvmlDeviceGetClockInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_121device_get_clock_info = {"device_get_clock_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_121device_get_clock_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_120device_get_clock_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_121device_get_clock_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_clock_info (wrapper)", 0);
  /* Without fastcall, the positional count must be read from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Argument unpacking: fill values[0..1] from positionals and/or keywords
   * ("device", "type"), rejecting wrong arity via __pyx_L5_argtuple_error. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20549, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20549, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_clock_info", 0) < (0)) __PYX_ERR(0, 20549, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_clock_info", 1, 2, 2, i); __PYX_ERR(0, 20549, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20549, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20549, __pyx_L3_error)
    }
    /* Convert Python ints to C intptr_t / int; -1 plus a pending exception marks failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20549, __pyx_L3_error)
    __pyx_v_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20549, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_clock_info", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20549, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error path: release argument refs, record traceback, return NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_120device_get_clock_info(__pyx_self, __pyx_v_device, __pyx_v_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation shim for `device_get_clock_info`: calls the C-level cpdef
 * function (skip_dispatch=1) and boxes the `unsigned int` result into a
 * Python int. Per the `except? 0` convention, a 0 result is an error only
 * when a Python exception is pending.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_120device_get_clock_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_clock_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_clock_info(__pyx_v_device, __pyx_v_type, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20549, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20568
 * 
 * 
 * cpdef unsigned int device_get_max_clock_info(intptr_t device, int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum clock speeds for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_123device_get_max_clock_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef `device_get_max_clock_info(device, type)`.
 * Releases the GIL around the nvmlDeviceGetMaxClockInfo call, raises via
 * check_status() on failure, and returns the clock value. Declared
 * `except? 0`: on error it returns 0 with a Python exception set.
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_clock_info(intptr_t __pyx_v_device, int __pyx_v_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_clock;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20581
 *     """
 *     cdef unsigned int clock
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20582
 *     cdef unsigned int clock
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxClockInfo(<Device>device, <_ClockType>type, &clock)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clock
 */
        /* The INTERNAL_LOADING_ERROR sentinel is only fatal if a Python
         * exception is pending (checked with the GIL briefly re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxClockInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__ClockType)__pyx_v_type), (&__pyx_v_clock)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20582, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20581
 *     """
 *     cdef unsigned int clock
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20583
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clock
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20583, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20584
 *         __status__ = nvmlDeviceGetMaxClockInfo(<Device>device, <_ClockType>type, &clock)
 *     check_status(__status__)
 *     return clock             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_clock;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20568
 * 
 * 
 * cpdef unsigned int device_get_max_clock_info(intptr_t device, int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum clock speeds for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_clock_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/*
 * Cython-generated CPython entry point for `device_get_max_clock_info(device, type)`.
 * Unpacks the two arguments (positional or keyword "device"/"type"), converts
 * them to C values, and forwards to the __pyx_pf_ implementation function.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_123device_get_max_clock_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_122device_get_max_clock_info, "device_get_max_clock_info(intptr_t device, int type) -> unsigned int\n\nRetrieves the maximum clock speeds for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    type (ClockType): Identify which clock domain to query.\n\nReturns:\n    unsigned int: Reference in which to return the clock speed in MHz.\n\n.. seealso:: `nvmlDeviceGetMaxClockInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_123device_get_max_clock_info = {"device_get_max_clock_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_123device_get_max_clock_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_122device_get_max_clock_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_123device_get_max_clock_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_max_clock_info (wrapper)", 0);
  /* Without fastcall, the positional count must be read from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* Argument unpacking: fill values[0..1] from positionals and/or keywords
   * ("device", "type"), rejecting wrong arity via __pyx_L5_argtuple_error. */
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20568, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20568, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20568, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_max_clock_info", 0) < (0)) __PYX_ERR(0, 20568, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_max_clock_info", 1, 2, 2, i); __PYX_ERR(0, 20568, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20568, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20568, __pyx_L3_error)
    }
    /* Convert Python ints to C intptr_t / int; -1 plus a pending exception marks failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20568, __pyx_L3_error)
    __pyx_v_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20568, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_max_clock_info", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20568, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error path: release argument refs, record traceback, return NULL. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_clock_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_122device_get_max_clock_info(__pyx_self, __pyx_v_device, __pyx_v_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation for `device_get_max_clock_info`.
 * Delegates to the C-level cpdef entry point (skip_dispatch=1) and boxes the
 * resulting `unsigned int` as a Python int.  Per Cython's `except? 0`
 * convention, a return value of 0 signals an error only when an exception is
 * also pending (hence the PyErr_Occurred() check). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_122device_get_max_clock_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_max_clock_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel: only treat it as failure with a live exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_max_clock_info(__pyx_v_device, __pyx_v_type, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20568, __pyx_L1_error)
  /* Box the C unsigned int into a new Python int (owned reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_clock_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20587
 * 
 * 
 * cpdef int device_get_gpc_clk_vf_offset(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPCCLK VF offset value.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_125device_get_gpc_clk_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_gpc_clk_vf_offset`
 * (generated from _nvml.pyx:20587).  Releases the GIL, calls
 * nvmlDeviceGetGpcClkVfOffset, re-acquires the GIL, raises on a bad status
 * via check_status(), and returns the queried VF offset.  `except? 0`
 * convention: a 0 return with a pending exception signals failure. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_vf_offset(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_offset;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20599
 *     """
 *     cdef int offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpcClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20600
 *     cdef int offset
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpcClkVfOffset(<Device>device, &offset)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return offset
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the exception sentinel
         * here; __Pyx_ErrOccurredWithGIL disambiguates (presumably it takes the
         * GIL to inspect the thread's error state — defined elsewhere in this file). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkVfOffset(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_offset)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20600, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20599
 *     """
 *     cdef int offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpcClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error exit from the nogil region: restore the GIL before raising */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20601
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpcClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return offset
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses
   * (returns 1 on error per its generated calling convention). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20601, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20602
 *         __status__ = nvmlDeviceGetGpcClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)
 *     return offset             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_offset;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20587
 * 
 * 
 * cpdef int device_get_gpc_clk_vf_offset(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPCCLK VF offset value.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpc_clk_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_125device_get_gpc_clk_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_124device_get_gpc_clk_vf_offset, "device_get_gpc_clk_vf_offset(intptr_t device) -> int\n\nRetrieve the GPCCLK VF offset value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: The retrieved GPCCLK VF offset value.\n\n.. seealso:: `nvmlDeviceGetGpcClkVfOffset`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_125device_get_gpc_clk_vf_offset = {"device_get_gpc_clk_vf_offset", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_125device_get_gpc_clk_vf_offset, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_124device_get_gpc_clk_vf_offset};
/* METH_FASTCALL wrapper for `device_get_gpc_clk_vf_offset`: unpacks the single
 * `device` argument (positional or keyword), converts it to intptr_t, and
 * delegates to the _pf_ implementation.  Owns one reference per `values[]`
 * slot; all exit paths release them. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_125device_get_gpc_clk_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpc_clk_vf_offset (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must be inside unlikely(); `unlikely(x) < 0` is
     * always false under the __builtin_expect(!!(x), 0) definition, which made
     * this negative-length error check dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20587, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect positionals, then merge
       * keywords into `values` and verify nothing required is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20587, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpc_clk_vf_offset", 0) < (0)) __PYX_ERR(0, 20587, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpc_clk_vf_offset", 1, 1, 1, i); __PYX_ERR(0, 20587, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20587, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20587, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpc_clk_vf_offset", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20587, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpc_clk_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_124device_get_gpc_clk_vf_offset(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation for `device_get_gpc_clk_vf_offset`.
 * Delegates to the C-level cpdef entry point (skip_dispatch=1) and boxes the
 * resulting `int` as a Python int.  `except? 0` convention: a 0 return is a
 * failure only when an exception is also pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_124device_get_gpc_clk_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpc_clk_vf_offset", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel: only treat it as failure with a live exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_vf_offset(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)0) && PyErr_Occurred())) __PYX_ERR(0, 20587, __pyx_L1_error)
  /* Box the C int into a new Python int (owned reference). */
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpc_clk_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20605
 * 
 * 
 * cpdef unsigned int device_get_clock(intptr_t device, int clock_type, int clock_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the clock speed for the clock specified by the clock type and clock ID.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_127device_get_clock(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_clock`
 * (generated from _nvml.pyx:20605).  Releases the GIL, calls
 * nvmlDeviceGetClock for the given clock type/ID pair, re-acquires the GIL,
 * raises on a bad status via check_status(), and returns the clock in MHz.
 * `except? 0` convention: a 0 return with a pending exception signals failure. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_clock(intptr_t __pyx_v_device, int __pyx_v_clock_type, int __pyx_v_clock_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_clock_m_hz;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20619
 *     """
 *     cdef unsigned int clock_m_hz
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClock(<Device>device, <_ClockType>clock_type, <_ClockId>clock_id, &clock_m_hz)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20620
 *     cdef unsigned int clock_m_hz
 *     with nogil:
 *         __status__ = nvmlDeviceGetClock(<Device>device, <_ClockType>clock_type, <_ClockId>clock_id, &clock_m_hz)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clock_m_hz
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the exception sentinel;
         * __Pyx_ErrOccurredWithGIL disambiguates a real pending exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClock(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__ClockType)__pyx_v_clock_type), ((__pyx_t_4cuda_8bindings_5_nvml__ClockId)__pyx_v_clock_id), (&__pyx_v_clock_m_hz)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20620, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20619
 *     """
 *     cdef unsigned int clock_m_hz
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClock(<Device>device, <_ClockType>clock_type, <_ClockId>clock_id, &clock_m_hz)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error exit from the nogil region: restore the GIL before raising */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20621
 *     with nogil:
 *         __status__ = nvmlDeviceGetClock(<Device>device, <_ClockType>clock_type, <_ClockId>clock_id, &clock_m_hz)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clock_m_hz
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20621, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20622
 *         __status__ = nvmlDeviceGetClock(<Device>device, <_ClockType>clock_type, <_ClockId>clock_id, &clock_m_hz)
 *     check_status(__status__)
 *     return clock_m_hz             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_clock_m_hz;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20605
 * 
 * 
 * cpdef unsigned int device_get_clock(intptr_t device, int clock_type, int clock_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the clock speed for the clock specified by the clock type and clock ID.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_127device_get_clock(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_126device_get_clock, "device_get_clock(intptr_t device, int clock_type, int clock_id) -> unsigned int\n\nRetrieves the clock speed for the clock specified by the clock type and clock ID.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    clock_type (ClockType): Identify which clock domain to query.\n    clock_id (ClockId): Identify which clock in the domain to query.\n\nReturns:\n    unsigned int: Reference in which to return the clock in MHz.\n\n.. seealso:: `nvmlDeviceGetClock`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_127device_get_clock = {"device_get_clock", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_127device_get_clock, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_126device_get_clock};
/* METH_FASTCALL wrapper for `device_get_clock`: unpacks `device`, `clock_type`
 * and `clock_id` (positional or keyword), converts them to C scalars, and
 * delegates to the _pf_ implementation.  Owns one reference per `values[]`
 * slot; all exit paths release them. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_127device_get_clock(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_clock_type;
  int __pyx_v_clock_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_clock (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_clock_type,&__pyx_mstate_global->__pyx_n_u_clock_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must be inside unlikely(); `unlikely(x) < 0` is
     * always false under the __builtin_expect(!!(x), 0) definition, which made
     * this negative-length error check dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20605, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect positionals, then merge
       * keywords into `values` and verify nothing required is missing. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20605, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20605, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20605, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_clock", 0) < (0)) __PYX_ERR(0, 20605, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_clock", 1, 3, 3, i); __PYX_ERR(0, 20605, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20605, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20605, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20605, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20605, __pyx_L3_error)
    __pyx_v_clock_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_clock_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20605, __pyx_L3_error)
    __pyx_v_clock_id = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_clock_id == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20605, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_clock", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 20605, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_126device_get_clock(__pyx_self, __pyx_v_device, __pyx_v_clock_type, __pyx_v_clock_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation for `device_get_clock`.
 * Delegates to the C-level cpdef entry point (skip_dispatch=1) and boxes the
 * resulting `unsigned int` (clock in MHz) as a Python int.  `except? 0`
 * convention: a 0 return is a failure only when an exception is pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_126device_get_clock(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_clock_type, int __pyx_v_clock_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_clock", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel: only treat it as failure with a live exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_clock(__pyx_v_device, __pyx_v_clock_type, __pyx_v_clock_id, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20605, __pyx_L1_error)
  /* Box the C unsigned int into a new Python int (owned reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20625
 * 
 * 
 * cpdef unsigned int device_get_max_customer_boost_clock(intptr_t device, int clock_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the customer defined maximum boost clock speed specified by the given clock type.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_129device_get_max_customer_boost_clock(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_max_customer_boost_clock`
 * (generated from _nvml.pyx:20625).  Releases the GIL, calls
 * nvmlDeviceGetMaxCustomerBoostClock for the given clock type, re-acquires
 * the GIL, raises on a bad status via check_status(), and returns the clock
 * in MHz.  `except? 0` convention: a 0 return with a pending exception
 * signals failure. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_customer_boost_clock(intptr_t __pyx_v_device, int __pyx_v_clock_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_clock_m_hz;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20638
 *     """
 *     cdef unsigned int clock_m_hz
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxCustomerBoostClock(<Device>device, <_ClockType>clock_type, &clock_m_hz)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20639
 *     cdef unsigned int clock_m_hz
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxCustomerBoostClock(<Device>device, <_ClockType>clock_type, &clock_m_hz)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clock_m_hz
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the exception sentinel;
         * __Pyx_ErrOccurredWithGIL disambiguates a real pending exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxCustomerBoostClock(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__ClockType)__pyx_v_clock_type), (&__pyx_v_clock_m_hz)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20639, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20638
 *     """
 *     cdef unsigned int clock_m_hz
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxCustomerBoostClock(<Device>device, <_ClockType>clock_type, &clock_m_hz)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error exit from the nogil region: restore the GIL before raising */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20640
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxCustomerBoostClock(<Device>device, <_ClockType>clock_type, &clock_m_hz)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clock_m_hz
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20640, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20641
 *         __status__ = nvmlDeviceGetMaxCustomerBoostClock(<Device>device, <_ClockType>clock_type, &clock_m_hz)
 *     check_status(__status__)
 *     return clock_m_hz             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_clock_m_hz;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20625
 * 
 * 
 * cpdef unsigned int device_get_max_customer_boost_clock(intptr_t device, int clock_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the customer defined maximum boost clock speed specified by the given clock type.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_customer_boost_clock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_129device_get_max_customer_boost_clock(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_128device_get_max_customer_boost_clock, "device_get_max_customer_boost_clock(intptr_t device, int clock_type) -> unsigned int\n\nRetrieves the customer defined maximum boost clock speed specified by the given clock type.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    clock_type (ClockType): Identify which clock domain to query.\n\nReturns:\n    unsigned int: Reference in which to return the clock in MHz.\n\n.. seealso:: `nvmlDeviceGetMaxCustomerBoostClock`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_129device_get_max_customer_boost_clock = {"device_get_max_customer_boost_clock", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_129device_get_max_customer_boost_clock, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_128device_get_max_customer_boost_clock};
/* METH_FASTCALL wrapper for `device_get_max_customer_boost_clock`: unpacks
 * `device` and `clock_type` (positional or keyword), converts them to C
 * scalars, and delegates to the _pf_ implementation.  Owns one reference per
 * `values[]` slot; all exit paths release them. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_129device_get_max_customer_boost_clock(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_clock_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_max_customer_boost_clock (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_clock_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: the comparison must be inside unlikely(); `unlikely(x) < 0` is
     * always false under the __builtin_expect(!!(x), 0) definition, which made
     * this negative-length error check dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20625, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect positionals, then merge
       * keywords into `values` and verify nothing required is missing. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20625, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20625, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_max_customer_boost_clock", 0) < (0)) __PYX_ERR(0, 20625, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_max_customer_boost_clock", 1, 2, 2, i); __PYX_ERR(0, 20625, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20625, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20625, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20625, __pyx_L3_error)
    __pyx_v_clock_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_clock_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20625, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_max_customer_boost_clock", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20625, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_customer_boost_clock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_128device_get_max_customer_boost_clock(__pyx_self, __pyx_v_device, __pyx_v_clock_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation for `device_get_max_customer_boost_clock`.
 * Delegates to the C-level cpdef entry point (skip_dispatch=1) and boxes the
 * resulting `unsigned int` (clock in MHz) as a Python int.  `except? 0`
 * convention: a 0 return is a failure only when an exception is pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_128device_get_max_customer_boost_clock(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_clock_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_max_customer_boost_clock", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel: only treat it as failure with a live exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_max_customer_boost_clock(__pyx_v_device, __pyx_v_clock_type, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20625, __pyx_L1_error)
  /* Box the C unsigned int into a new Python int (owned reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_customer_boost_clock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20644
 * 
 * 
 * cpdef object device_get_supported_memory_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the list of possible memory clocks that can be used as an argument for ``nvmlDeviceSetMemoryLockedClocks``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_131device_get_supported_memory_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_supported_memory_clocks(device).
 *
 * Two-phase NVML size query:
 *   1. Call nvmlDeviceGetSupportedMemoryClocks with a NULL output buffer to
 *      obtain the element count (GIL released around the driver call).
 *   2. Allocate a Cython view.array of `count` unsigned ints ("I", C-contiguous)
 *      and call again to fill it.
 * If the count is 0, a zero-length slice of a 1-element view.array is returned
 * instead (view.array cannot be constructed with shape (0,)).
 * Returns the filled view.array, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_memory_clocks(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_clocks_m_hz = 0;
  unsigned int *__pyx_v_clocks_m_hz_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_memory_clocks", 0);

  /* "cuda/bindings/_nvml.pyx":20652
 *     .. seealso:: `nvmlDeviceGetSupportedMemoryClocks`
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)
 */
  /* Initialize the in/out count to 0 before the size query. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":20653
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20654
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if count[0] == 0:
 */
        /* Phase 1: NULL buffer -> driver writes the required count into `count`.
         * The sentinel status only signals an error if an exception is pending. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedMemoryClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20654, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":20653
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 */
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20655
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 */
  /* check_status_size presumably tolerates the "insufficient size" status
   * expected from a size query -- TODO confirm against its definition. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 20655, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20656
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":20657
 *     check_status_size(__status__)
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 */
    /* Empty result fast path: build a 1-element view.array via the vectorcall
     * keyword-builder protocol, then slice it to length 0. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20657, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 20657, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20657, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20657, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20657, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20657, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20657, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    /* [:0] -- return an empty slice of the throwaway array. */
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 20657, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":20656
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":20658
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:
 */
  /* Phase 2 setup: allocate the real output buffer, shape=(count[0],). */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 20658, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 20658, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20658, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20658, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20658, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20658, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 20658, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_clocks_m_hz = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":20659
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, clocks_m_hz_ptr)
 */
  /* Raw pointer into the view.array's buffer, passed to the driver below. */
  __pyx_v_clocks_m_hz_ptr = ((unsigned int *)__pyx_v_clocks_m_hz->data);

  /* "cuda/bindings/_nvml.pyx":20660
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20661
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, clocks_m_hz_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clocks_m_hz
 */
        /* Phase 2: same call with the allocated buffer; the driver fills it. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedMemoryClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_count), __pyx_v_clocks_m_hz_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20661, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":20660
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20662
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clocks_m_hz
 * 
 */
  /* Strict status check this time: any NVML error raises. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 20662, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20663
 *         __status__ = nvmlDeviceGetSupportedMemoryClocks(<Device>device, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)
 *     return clocks_m_hz             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_clocks_m_hz);
  __pyx_r = ((PyObject *)__pyx_v_clocks_m_hz);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20644
 * 
 * 
 * cpdef object device_get_supported_memory_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the list of possible memory clocks that can be used as an argument for ``nvmlDeviceSetMemoryLockedClocks``.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_memory_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_clocks_m_hz);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_131device_get_supported_memory_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the Python-visible
 * device_get_supported_memory_clocks; the METH_FASTCALL wrapper below is the
 * entry point registered at module initialization. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_130device_get_supported_memory_clocks, "device_get_supported_memory_clocks(intptr_t device)\n\nRetrieves the list of possible memory clocks that can be used as an argument for ``nvmlDeviceSetMemoryLockedClocks``.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetSupportedMemoryClocks`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_131device_get_supported_memory_clocks = {"device_get_supported_memory_clocks", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_131device_get_supported_memory_clocks, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_130device_get_supported_memory_clocks};
/* METH_FASTCALL|METH_KEYWORDS wrapper for device_get_supported_memory_clocks.
 *
 * Accepts exactly one argument, `device`, positionally or by keyword,
 * converts it to intptr_t, and delegates to the pf_130 implementation.
 * All borrowed/owned references accumulated in `values` are released on
 * both the success and the error path.
 *
 * Fix: the keyword-length error check was written as
 * `if (unlikely(__pyx_kwds_len) < 0)`.  Since unlikely(x) expands to
 * `__builtin_expect(!!(x), 0)`, which only yields 0 or 1, that comparison
 * was always false and a negative length from __Pyx_NumKwargs_FASTCALL
 * could never be caught.  The comparison now sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_131device_get_supported_memory_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_supported_memory_clocks (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); see function comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20644, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present; merge positionals then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20644, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_supported_memory_clocks", 0) < (0)) __PYX_ERR(0, 20644, __pyx_L3_error)
      /* Verify every required argument ended up bound. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_supported_memory_clocks", 1, 1, 1, i); __PYX_ERR(0, 20644, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20644, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t conversion via PyLong_AsSsize_t assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20644, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_supported_memory_clocks", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20644, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_memory_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_130device_get_supported_memory_clocks(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_get_supported_memory_clocks(device): forwards
 * to the C-level cpdef implementation (skip_dispatch=1, no override lookup)
 * and passes its new reference through.  Returns NULL with an exception set
 * on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_130device_get_supported_memory_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_memory_clocks", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_memory_clocks(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_memory_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20666
 * 
 * 
 * cpdef object device_get_supported_graphics_clocks(intptr_t device, unsigned int memory_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Retrieves the list of possible graphics clocks that can be used as an argument for ``nvmlDeviceSetGpuLockedClocks``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_133device_get_supported_graphics_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of
 * device_get_supported_graphics_clocks(device, memory_clock_m_hz).
 *
 * Same two-phase NVML pattern as device_get_supported_memory_clocks above:
 *   1. Size query with a NULL buffer (GIL released) to obtain the count.
 *   2. Allocate a view.array of `count` unsigned ints and call again to fill it.
 * Returns an empty slice of a 1-element view.array when the count is 0, the
 * filled view.array otherwise, or NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_graphics_clocks(intptr_t __pyx_v_device, unsigned int __pyx_v_memory_clock_m_hz, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_clocks_m_hz = 0;
  unsigned int *__pyx_v_clocks_m_hz_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_graphics_clocks", 0);

  /* "cuda/bindings/_nvml.pyx":20675
 *     .. seealso:: `nvmlDeviceGetSupportedGraphicsClocks`
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)
 */
  /* Initialize the in/out count to 0 before the size query. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":20676
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20677
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if count[0] == 0:
 */
        /* Phase 1: NULL buffer -> driver writes the required count. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedGraphicsClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_memory_clock_m_hz, ((unsigned int *)__pyx_v_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20677, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":20676
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 */
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20678
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 */
  /* check_status_size presumably tolerates the "insufficient size" status
   * expected from a size query -- TODO confirm against its definition. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 20678, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20679
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":20680
 *     check_status_size(__status__)
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 */
    /* Empty result fast path: 1-element view.array sliced to length 0. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20680, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 20680, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20680, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20680, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20680, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20680, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20680, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    /* [:0] -- return an empty slice of the throwaway array. */
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 20680, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":20679
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":20681
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:
 */
  /* Phase 2 setup: allocate the real output buffer, shape=(count[0],). */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 20681, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 20681, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 20681, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 20681, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 20681, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 20681, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 20681, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_clocks_m_hz = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":20682
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, clocks_m_hz_ptr)
 */
  /* Raw pointer into the view.array's buffer, passed to the driver below. */
  __pyx_v_clocks_m_hz_ptr = ((unsigned int *)__pyx_v_clocks_m_hz->data);

  /* "cuda/bindings/_nvml.pyx":20683
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20684
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, clocks_m_hz_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clocks_m_hz
 */
        /* Phase 2: same call with the allocated buffer; the driver fills it. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedGraphicsClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_memory_clock_m_hz, ((unsigned int *)__pyx_v_count), __pyx_v_clocks_m_hz_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20684, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":20683
 *     cdef view.array clocks_m_hz = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *clocks_m_hz_ptr = <unsigned int *>(clocks_m_hz.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20685
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clocks_m_hz
 * 
 */
  /* Strict status check this time: any NVML error raises. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 20685, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20686
 *         __status__ = nvmlDeviceGetSupportedGraphicsClocks(<Device>device, memory_clock_m_hz, <unsigned int*>count, clocks_m_hz_ptr)
 *     check_status(__status__)
 *     return clocks_m_hz             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_clocks_m_hz);
  __pyx_r = ((PyObject *)__pyx_v_clocks_m_hz);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20666
 * 
 * 
 * cpdef object device_get_supported_graphics_clocks(intptr_t device, unsigned int memory_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Retrieves the list of possible graphics clocks that can be used as an argument for ``nvmlDeviceSetGpuLockedClocks``.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_graphics_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_clocks_m_hz);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_133device_get_supported_graphics_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_132device_get_supported_graphics_clocks, "device_get_supported_graphics_clocks(intptr_t device, unsigned int memory_clock_m_hz)\n\nRetrieves the list of possible graphics clocks that can be used as an argument for ``nvmlDeviceSetGpuLockedClocks``.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    memory_clock_m_hz (unsigned int): Memory clock for which to return possible graphics clocks.\n\n.. seealso:: `nvmlDeviceGetSupportedGraphicsClocks`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_133device_get_supported_graphics_clocks = {"device_get_supported_graphics_clocks", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_133device_get_supported_graphics_clocks, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_132device_get_supported_graphics_clocks};
/* Python-callable wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * `device_get_supported_graphics_clocks(intptr_t device, unsigned int memory_clock_m_hz)`.
 * Unpacks positional/keyword arguments into `values[]`, converts them to C
 * scalars, and forwards to the implementation shim.  Returns a new reference,
 * or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_133device_get_supported_graphics_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_memory_clock_m_hz;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_supported_graphics_clocks (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_memory_clock_m_hz,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  `unlikely(x)` expands
     * to `__builtin_expect(!!(x), 0)`, whose value is always 0 or 1, so the
     * `< 0` comparison could never be true and a failing keyword count
     * (-1 with an exception set, e.g. under the Limited API) was silently
     * treated as a positive count.  The comparison belongs inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20666, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: pick up whatever positional values exist,
       * then let __Pyx_ParseKeywords fill the rest. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20666, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20666, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_supported_graphics_clocks", 0) < (0)) __PYX_ERR(0, 20666, __pyx_L3_error)
      /* Both arguments are required; report any slot still empty. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_supported_graphics_clocks", 1, 2, 2, i); __PYX_ERR(0, 20666, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20666, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20666, __pyx_L3_error)
    }
    /* Convert to C scalars (intptr_t is converted via Py_ssize_t here;
     * generated code assumes the two have the same width on this platform). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20666, __pyx_L3_error)
    __pyx_v_memory_clock_m_hz = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_memory_clock_m_hz == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20666, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_supported_graphics_clocks", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20666, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop the borrowed-turned-owned argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_graphics_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_132device_get_supported_graphics_clocks(__pyx_self, __pyx_v_device, __pyx_v_memory_clock_m_hz);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim between the Python wrapper and the cpdef C fast-path
 * for `device_get_supported_graphics_clocks`.  Calls the C function with
 * __pyx_skip_dispatch=1 (bypass any Python-level override lookup) and
 * returns its result as a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_132device_get_supported_graphics_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_memory_clock_m_hz) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_graphics_clocks", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Final argument 1 == skip_dispatch: go straight to the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_graphics_clocks(__pyx_v_device, __pyx_v_memory_clock_m_hz, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_graphics_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20689
 * 
 * 
 * cpdef tuple device_get_auto_boosted_clocks_enabled(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the current state of Auto Boosted clocks on a device and store it in ``isEnabled``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_135device_get_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef `device_get_auto_boosted_clocks_enabled(intptr_t device)`.
 * Releases the GIL, calls nvmlDeviceGetAutoBoostedClocksEnabled(), re-takes
 * the GIL, raises via check_status() on a bad status, and returns the tuple
 * (is_enabled, default_is_enabled) as Python ints.  Returns NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_auto_boosted_clocks_enabled(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_is_enabled;
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_default_is_enabled;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_auto_boosted_clocks_enabled", 0);

  /* "cuda/bindings/_nvml.pyx":20705
 *     cdef _EnableState is_enabled
 *     cdef _EnableState default_is_enabled
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAutoBoostedClocksEnabled(<Device>device, &is_enabled, &default_is_enabled)
 *     check_status(__status__)
*/
  /* `with nogil:` — drop the GIL around the blocking NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20706
 *     cdef _EnableState default_is_enabled
 *     with nogil:
 *         __status__ = nvmlDeviceGetAutoBoostedClocksEnabled(<Device>device, &is_enabled, &default_is_enabled)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (<int>is_enabled, <int>default_is_enabled)
*/
        /* A sentinel status of _NVMLRETURN_T_INTERNAL_LOADING_ERROR combined
         * with a pending Python error (checked while briefly re-holding the
         * GIL) indicates symbol loading failed — jump to the nogil cleanup. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAutoBoostedClocksEnabled(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_is_enabled), (&__pyx_v_default_is_enabled)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20706, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20705
 *     cdef _EnableState is_enabled
 *     cdef _EnableState default_is_enabled
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAutoBoostedClocksEnabled(<Device>device, &is_enabled, &default_is_enabled)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also re-acquires the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20707
 *     with nogil:
 *         __status__ = nvmlDeviceGetAutoBoostedClocksEnabled(<Device>device, &is_enabled, &default_is_enabled)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (<int>is_enabled, <int>default_is_enabled)
 * 
*/
  /* check_status() raises the corresponding Python exception on failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20707, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20708
 *         __status__ = nvmlDeviceGetAutoBoostedClocksEnabled(<Device>device, &is_enabled, &default_is_enabled)
 *     check_status(__status__)
 *     return (<int>is_enabled, <int>default_is_enabled)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Build the 2-tuple result; _EnableState values are returned as ints. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(((int)__pyx_v_is_enabled)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(((int)__pyx_v_default_is_enabled)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 20708, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 20708, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20689
 * 
 * 
 * cpdef tuple device_get_auto_boosted_clocks_enabled(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the current state of Auto Boosted clocks on a device and store it in ``isEnabled``.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_135device_get_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_134device_get_auto_boosted_clocks_enabled, "device_get_auto_boosted_clocks_enabled(intptr_t device) -> tuple\n\nRetrieve the current state of Auto Boosted clocks on a device and store it in ``isEnabled``.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: Where to store the current state of Auto Boosted clocks of the target device.\n    - int: Where to store the default Auto Boosted clocks behavior of the target device that the device will revert to when no applications are using the GPU.\n\n.. seealso:: `nvmlDeviceGetAutoBoostedClocksEnabled`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_135device_get_auto_boosted_clocks_enabled = {"device_get_auto_boosted_clocks_enabled", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_135device_get_auto_boosted_clocks_enabled, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_134device_get_auto_boosted_clocks_enabled};
/* Python-callable wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * `device_get_auto_boosted_clocks_enabled(intptr_t device)`.  Unpacks the
 * single required argument and forwards to the implementation shim.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_135device_get_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_auto_boosted_clocks_enabled (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  `unlikely(x)` yields
     * 0 or 1, so the comparison was dead and a negative (failed) keyword
     * count was never caught.  The comparison belongs inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20689, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20689, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_auto_boosted_clocks_enabled", 0) < (0)) __PYX_ERR(0, 20689, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_auto_boosted_clocks_enabled", 1, 1, 1, i); __PYX_ERR(0, 20689, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20689, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20689, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_auto_boosted_clocks_enabled", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20689, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_134device_get_auto_boosted_clocks_enabled(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim between the Python wrapper and the cpdef C fast-path
 * for `device_get_auto_boosted_clocks_enabled`.  Calls the C function with
 * __pyx_skip_dispatch=1 (bypass any Python-level override lookup) and
 * returns its result as a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_134device_get_auto_boosted_clocks_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_auto_boosted_clocks_enabled", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Final argument 1 == skip_dispatch: go straight to the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_auto_boosted_clocks_enabled(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20711
 * 
 * 
 * cpdef unsigned int device_get_fan_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed of the device's fan.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_137device_get_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef `device_get_fan_speed(intptr_t device) except? 0`.
 * Releases the GIL, calls nvmlDeviceGetFanSpeed(), re-takes the GIL, raises
 * via check_status() on a bad status, and returns the fan speed percentage.
 * On error returns 0 with a Python exception set (the `except? 0` sentinel —
 * callers must also check PyErr_Occurred() when the result is 0). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_speed;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20723
 *     """
 *     cdef unsigned int speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanSpeed(<Device>device, &speed)
 *     check_status(__status__)
*/
  /* `with nogil:` — drop the GIL around the blocking NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20724
 *     cdef unsigned int speed
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeed(<Device>device, &speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return speed
*/
        /* Sentinel status + pending Python error (checked while briefly
         * re-holding the GIL) indicates NVML symbol loading failed. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_speed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20724, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20723
 *     """
 *     cdef unsigned int speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanSpeed(<Device>device, &speed)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also re-acquires the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20725
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeed(<Device>device, &speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return speed
 * 
*/
  /* check_status() raises the corresponding Python exception on failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20725, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20726
 *         __status__ = nvmlDeviceGetFanSpeed(<Device>device, &speed)
 *     check_status(__status__)
 *     return speed             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_speed;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20711
 * 
 * 
 * cpdef unsigned int device_get_fan_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed of the device's fan.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_137device_get_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_136device_get_fan_speed, "device_get_fan_speed(intptr_t device) -> unsigned int\n\nRetrieves the intended operating speed of the device's fan.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the fan speed percentage.\n\n.. seealso:: `nvmlDeviceGetFanSpeed`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_137device_get_fan_speed = {"device_get_fan_speed", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_137device_get_fan_speed, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_136device_get_fan_speed};
/* Python-callable wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * `device_get_fan_speed(intptr_t device)`.  Unpacks the single required
 * argument and forwards to the implementation shim.  Returns a new
 * reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_137device_get_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_fan_speed (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  `unlikely(x)` yields
     * 0 or 1, so the comparison was dead and a negative (failed) keyword
     * count was never caught.  The comparison belongs inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20711, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20711, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_fan_speed", 0) < (0)) __PYX_ERR(0, 20711, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_fan_speed", 1, 1, 1, i); __PYX_ERR(0, 20711, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20711, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20711, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_fan_speed", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20711, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_136device_get_fan_speed(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim between the Python wrapper and the cpdef C fast-path
 * for `device_get_fan_speed`.  Calls the C function with __pyx_skip_dispatch=1,
 * detects the `except? 0` error sentinel (0 plus a pending exception), and
 * boxes the unsigned int result as a Python int.  Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_136device_get_fan_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fan_speed", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0`: a 0 return is an error only if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20711, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20729
 * 
 * 
 * cpdef unsigned int device_get_fan_speed_v2(intptr_t device, unsigned int fan) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed of the device's specified fan.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_139device_get_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef `device_get_fan_speed_v2(intptr_t device, unsigned int fan) except? 0`.
 * Releases the GIL, calls nvmlDeviceGetFanSpeed_v2() for the given fan index,
 * re-takes the GIL, raises via check_status() on a bad status, and returns
 * the fan speed percentage.  On error returns 0 with a Python exception set
 * (the `except? 0` sentinel — callers must also check PyErr_Occurred()). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_v2(intptr_t __pyx_v_device, unsigned int __pyx_v_fan, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_speed;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20742
 *     """
 *     cdef unsigned int speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanSpeed_v2(<Device>device, fan, &speed)
 *     check_status(__status__)
*/
  /* `with nogil:` — drop the GIL around the blocking NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20743
 *     cdef unsigned int speed
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeed_v2(<Device>device, fan, &speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return speed
*/
        /* Sentinel status + pending Python error (checked while briefly
         * re-holding the GIL) indicates NVML symbol loading failed. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan, (&__pyx_v_speed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20743, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20742
 *     """
 *     cdef unsigned int speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanSpeed_v2(<Device>device, fan, &speed)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also re-acquires the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20744
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeed_v2(<Device>device, fan, &speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return speed
 * 
*/
  /* check_status() raises the corresponding Python exception on failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20744, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20745
 *         __status__ = nvmlDeviceGetFanSpeed_v2(<Device>device, fan, &speed)
 *     check_status(__status__)
 *     return speed             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_speed;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20729
 * 
 * 
 * cpdef unsigned int device_get_fan_speed_v2(intptr_t device, unsigned int fan) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed of the device's specified fan.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_139device_get_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_138device_get_fan_speed_v2, "device_get_fan_speed_v2(intptr_t device, unsigned int fan) -> unsigned int\n\nRetrieves the intended operating speed of the device's specified fan.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    fan (unsigned int): The index of the target fan, zero indexed.\n\nReturns:\n    unsigned int: Reference in which to return the fan speed percentage.\n\n.. seealso:: `nvmlDeviceGetFanSpeed_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_139device_get_fan_speed_v2 = {"device_get_fan_speed_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_139device_get_fan_speed_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_138device_get_fan_speed_v2};
/* Python wrapper for cuda.bindings._nvml.device_get_fan_speed_v2.
 * Unpacks (device, fan) from positional/keyword arguments, converts them to
 * intptr_t / unsigned int, and dispatches to the cpdef implementation.
 *
 * Fix: the negative-length check on __pyx_kwds_len was written as
 * `unlikely(__pyx_kwds_len) < 0`.  When unlikely() expands to
 * __builtin_expect(!!(x), 0) the `!!` normalizes the value to 0/1, so the
 * `< 0` comparison can never be true and an error return from
 * __Pyx_NumKwargs_FASTCALL() would go undetected.  The comparison now sits
 * inside unlikely(), matching every other error check in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_139device_get_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_fan;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_fan_speed_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_fan,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was `unlikely(__pyx_kwds_len) < 0` — dead check; see header comment */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20729, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20729, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20729, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_fan_speed_v2", 0) < (0)) __PYX_ERR(0, 20729, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_fan_speed_v2", 1, 2, 2, i); __PYX_ERR(0, 20729, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20729, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20729, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20729, __pyx_L3_error)
    __pyx_v_fan = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_fan == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20729, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_fan_speed_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20729, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_138device_get_fan_speed_v2(__pyx_self, __pyx_v_device, __pyx_v_fan);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef Python-entry shim for device_get_fan_speed_v2: calls the C fast
 * path with __pyx_skip_dispatch=1 and boxes the unsigned int result into a
 * Python int.  The cpdef is declared `except? 0`, so a return value of 0 is
 * ambiguous and PyErr_Occurred() disambiguates a real zero speed from an
 * error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_138device_get_fan_speed_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fan_speed_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_v2(__pyx_v_device, __pyx_v_fan, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20729, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20729, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20748
 * 
 * 
 * cpdef object device_get_fan_speed_rpm(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_141device_get_fan_speed_rpm(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_fan_speed_rpm (cpdef fast path).
 * Allocates a FanSpeedInfo_v1 Python wrapper object, obtains the raw
 * nvmlFanSpeedInfo_t pointer it owns via _get_ptr(), stamps the struct's
 * version field as sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24), invokes
 * nvmlDeviceGetFanSpeedRPM with the GIL released, raises via check_status()
 * on a non-success NVML status, and otherwise returns the wrapper object
 * (caller reads the fan index / RPM fields out of it). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_rpm(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *__pyx_v_fan_speed_py = 0;
  nvmlFanSpeedInfo_t *__pyx_v_fan_speed;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fan_speed_rpm", 0);

  /* "cuda/bindings/_nvml.pyx":20759
 *     .. seealso:: `nvmlDeviceGetFanSpeedRPM`
 *     """
 *     cdef FanSpeedInfo_v1 fan_speed_py = FanSpeedInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlFanSpeedInfo_t *fan_speed = <nvmlFanSpeedInfo_t *><intptr_t>(fan_speed_py._get_ptr())
 *     fan_speed.version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24)
*/
  /* Construct the FanSpeedInfo_v1 wrapper via a no-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20759, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_fan_speed_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20760
 *     """
 *     cdef FanSpeedInfo_v1 fan_speed_py = FanSpeedInfo_v1()
 *     cdef nvmlFanSpeedInfo_t *fan_speed = <nvmlFanSpeedInfo_t *><intptr_t>(fan_speed_py._get_ptr())             # <<<<<<<<<<<<<<
 *     fan_speed.version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* The wrapper owns the underlying C struct; _get_ptr() hands back its
   * address as an intptr_t. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)__pyx_v_fan_speed_py->__pyx_vtab)->_get_ptr(__pyx_v_fan_speed_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20760, __pyx_L1_error)
  __pyx_v_fan_speed = ((nvmlFanSpeedInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20761
 *     cdef FanSpeedInfo_v1 fan_speed_py = FanSpeedInfo_v1()
 *     cdef nvmlFanSpeedInfo_t *fan_speed = <nvmlFanSpeedInfo_t *><intptr_t>(fan_speed_py._get_ptr())
 *     fan_speed.version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeedRPM(<Device>device, fan_speed)
*/
  /* Versioned-struct stamp: low bits carry the struct size, 0x1000000 is
   * (1 << 24), i.e. version 1 in the high byte used by this API. */
  __pyx_v_fan_speed->version = ((sizeof(nvmlFanSpeedInfo_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":20762
 *     cdef nvmlFanSpeedInfo_t *fan_speed = <nvmlFanSpeedInfo_t *><intptr_t>(fan_speed_py._get_ptr())
 *     fan_speed.version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanSpeedRPM(<Device>device, fan_speed)
 *     check_status(__status__)
*/
  /* GIL released around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20763
 *     fan_speed.version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeedRPM(<Device>device, fan_speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fan_speed_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeedRPM(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan_speed); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20763, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20762
 *     cdef nvmlFanSpeedInfo_t *fan_speed = <nvmlFanSpeedInfo_t *><intptr_t>(fan_speed_py._get_ptr())
 *     fan_speed.version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanSpeedRPM(<Device>device, fan_speed)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20764
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanSpeedRPM(<Device>device, fan_speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fan_speed_py
 * 
*/
  /* check_status raises the bound Python exception for non-success codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20764, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20765
 *         __status__ = nvmlDeviceGetFanSpeedRPM(<Device>device, fan_speed)
 *     check_status(__status__)
 *     return fan_speed_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_fan_speed_py);
  __pyx_r = ((PyObject *)__pyx_v_fan_speed_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20748
 * 
 * 
 * cpdef object device_get_fan_speed_rpm(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed_rpm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_fan_speed_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_141device_get_fan_speed_rpm(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_140device_get_fan_speed_rpm, "device_get_fan_speed_rpm(intptr_t device)\n\nRetrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlFanSpeedInfo_v1_t: Structure specifying the index of the target fan (input) and retrieved fan speed value (output).\n\n.. seealso:: `nvmlDeviceGetFanSpeedRPM`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_141device_get_fan_speed_rpm = {"device_get_fan_speed_rpm", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_141device_get_fan_speed_rpm, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_140device_get_fan_speed_rpm};
/* Python wrapper for cuda.bindings._nvml.device_get_fan_speed_rpm.
 * Unpacks the single `device` argument (positional or keyword), converts it
 * to intptr_t, and dispatches to the cpdef implementation.
 *
 * Fix: the negative-length check on __pyx_kwds_len was written as
 * `unlikely(__pyx_kwds_len) < 0`.  When unlikely() expands to
 * __builtin_expect(!!(x), 0) the `!!` normalizes the value to 0/1, so the
 * `< 0` comparison can never be true and an error return from
 * __Pyx_NumKwargs_FASTCALL() would go undetected.  The comparison now sits
 * inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_141device_get_fan_speed_rpm(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_fan_speed_rpm (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was `unlikely(__pyx_kwds_len) < 0` — dead check; see header comment */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20748, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20748, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_fan_speed_rpm", 0) < (0)) __PYX_ERR(0, 20748, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_fan_speed_rpm", 1, 1, 1, i); __PYX_ERR(0, 20748, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20748, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20748, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_fan_speed_rpm", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20748, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed_rpm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_140device_get_fan_speed_rpm(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef Python-entry shim for device_get_fan_speed_rpm: calls the C fast
 * path with __pyx_skip_dispatch=1 and returns the resulting Python object
 * (a FanSpeedInfo_v1 wrapper) directly; NULL signals a raised exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_140device_get_fan_speed_rpm(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fan_speed_rpm", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_rpm(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_speed_rpm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20768
 * 
 * 
 * cpdef unsigned int device_get_target_fan_speed(intptr_t device, unsigned int fan) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended target speed of the device's specified fan.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_143device_get_target_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_target_fan_speed (cpdef fast path).
 * Calls nvmlDeviceGetTargetFanSpeed(device, fan, &target_speed) with the
 * GIL released, raises via check_status() on a non-success status, and
 * returns the retrieved speed.  Error return convention is `except? 0`:
 * 0 plus a pending exception signals failure to the caller. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_target_fan_speed(intptr_t __pyx_v_device, unsigned int __pyx_v_fan, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_target_speed;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20781
 *     """
 *     cdef unsigned int target_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTargetFanSpeed(<Device>device, fan, &target_speed)
 *     check_status(__status__)
*/
  /* GIL released around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20782
 *     cdef unsigned int target_speed
 *     with nogil:
 *         __status__ = nvmlDeviceGetTargetFanSpeed(<Device>device, fan, &target_speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return target_speed
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTargetFanSpeed(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan, (&__pyx_v_target_speed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20782, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20781
 *     """
 *     cdef unsigned int target_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTargetFanSpeed(<Device>device, fan, &target_speed)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20783
 *     with nogil:
 *         __status__ = nvmlDeviceGetTargetFanSpeed(<Device>device, fan, &target_speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return target_speed
 * 
*/
  /* check_status raises the bound Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20783, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20784
 *         __status__ = nvmlDeviceGetTargetFanSpeed(<Device>device, fan, &target_speed)
 *     check_status(__status__)
 *     return target_speed             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_target_speed;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20768
 * 
 * 
 * cpdef unsigned int device_get_target_fan_speed(intptr_t device, unsigned int fan) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended target speed of the device's specified fan.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_target_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_143device_get_target_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_142device_get_target_fan_speed, "device_get_target_fan_speed(intptr_t device, unsigned int fan) -> unsigned int\n\nRetrieves the intended target speed of the device's specified fan.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    fan (unsigned int): The index of the target fan, zero indexed.\n\nReturns:\n    unsigned int: Reference in which to return the fan speed percentage.\n\n.. seealso:: `nvmlDeviceGetTargetFanSpeed`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_143device_get_target_fan_speed = {"device_get_target_fan_speed", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_143device_get_target_fan_speed, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_142device_get_target_fan_speed};
/* Python wrapper for cuda.bindings._nvml.device_get_target_fan_speed.
 * Unpacks (device, fan) from positional/keyword arguments, converts them to
 * intptr_t / unsigned int, and dispatches to the cpdef implementation.
 *
 * Fix: the negative-length check on __pyx_kwds_len was written as
 * `unlikely(__pyx_kwds_len) < 0`.  When unlikely() expands to
 * __builtin_expect(!!(x), 0) the `!!` normalizes the value to 0/1, so the
 * `< 0` comparison can never be true and an error return from
 * __Pyx_NumKwargs_FASTCALL() would go undetected.  The comparison now sits
 * inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_143device_get_target_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_fan;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_target_fan_speed (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_fan,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was `unlikely(__pyx_kwds_len) < 0` — dead check; see header comment */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20768, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_target_fan_speed", 0) < (0)) __PYX_ERR(0, 20768, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_target_fan_speed", 1, 2, 2, i); __PYX_ERR(0, 20768, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20768, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20768, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20768, __pyx_L3_error)
    __pyx_v_fan = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_fan == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20768, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_target_fan_speed", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20768, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_target_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_142device_get_target_fan_speed(__pyx_self, __pyx_v_device, __pyx_v_fan);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef Python-entry shim for device_get_target_fan_speed: calls the C fast
 * path with __pyx_skip_dispatch=1 and boxes the unsigned int result into a
 * Python int.  The cpdef is declared `except? 0`, so a return value of 0 is
 * ambiguous and PyErr_Occurred() disambiguates a real zero speed from an
 * error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_142device_get_target_fan_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_target_fan_speed", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_target_fan_speed(__pyx_v_device, __pyx_v_fan, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20768, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_target_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20787
 * 
 * 
 * cpdef tuple device_get_min_max_fan_speed(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the min and max fan speed that user can set for the GPU fan.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_145device_get_min_max_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_min_max_fan_speed (cpdef fast path).
 * Calls nvmlDeviceGetMinMaxFanSpeed(device, &min_speed, &max_speed) with
 * the GIL released, raises via check_status() on a non-success status, and
 * returns a 2-tuple (min_speed, max_speed) of Python ints; NULL (0) return
 * signals a raised exception. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_fan_speed(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_min_speed;
  unsigned int __pyx_v_max_speed;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_min_max_fan_speed", 0);

  /* "cuda/bindings/_nvml.pyx":20803
 *     cdef unsigned int min_speed
 *     cdef unsigned int max_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMinMaxFanSpeed(<Device>device, &min_speed, &max_speed)
 *     check_status(__status__)
*/
  /* GIL released around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20804
 *     cdef unsigned int max_speed
 *     with nogil:
 *         __status__ = nvmlDeviceGetMinMaxFanSpeed(<Device>device, &min_speed, &max_speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (min_speed, max_speed)
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxFanSpeed(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_min_speed), (&__pyx_v_max_speed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20804, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20803
 *     cdef unsigned int min_speed
 *     cdef unsigned int max_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMinMaxFanSpeed(<Device>device, &min_speed, &max_speed)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20805
 *     with nogil:
 *         __status__ = nvmlDeviceGetMinMaxFanSpeed(<Device>device, &min_speed, &max_speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (min_speed, max_speed)
 * 
*/
  /* check_status raises the bound Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20805, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20806
 *         __status__ = nvmlDeviceGetMinMaxFanSpeed(<Device>device, &min_speed, &max_speed)
 *     check_status(__status__)
 *     return (min_speed, max_speed)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Box both out-params and pack them into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_min_speed); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_max_speed); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20806, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 20806, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 20806, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20787
 * 
 * 
 * cpdef tuple device_get_min_max_fan_speed(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the min and max fan speed that user can set for the GPU fan.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_min_max_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef table entry for the
 * Python-visible `device_get_min_max_fan_speed` (METH_FASTCALL|METH_KEYWORDS).
 * Machine-generated by Cython (see file header); do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_145device_get_min_max_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_144device_get_min_max_fan_speed, "device_get_min_max_fan_speed(intptr_t device) -> tuple\n\nRetrieves the min and max fan speed that user can set for the GPU fan.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: The minimum speed allowed to set.\n    - unsigned int: The maximum speed allowed to set.\n\n.. seealso:: `nvmlDeviceGetMinMaxFanSpeed`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_145device_get_min_max_fan_speed = {"device_get_min_max_fan_speed", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_145device_get_min_max_fan_speed, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_144device_get_min_max_fan_speed};
/* Python wrapper for cpdef `device_get_min_max_fan_speed` (Cython-generated).
 * Unpacks exactly one argument, `device`, given positionally or by keyword,
 * converts it to intptr_t, and delegates to the impl function
 * __pyx_pf_..._144device_get_min_max_fan_speed.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_145device_get_min_max_fan_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_min_max_fan_speed (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` expands to
     * `__builtin_expect(!!(len), 0) < 0`, which is never true (the !! yields
     * 0 or 1), so a negative error result from __Pyx_NumKwargs_FASTCALL was
     * silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20787, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_min_max_fan_speed", 0) < (0)) __PYX_ERR(0, 20787, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_min_max_fan_speed", 1, 1, 1, i); __PYX_ERR(0, 20787, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20787, __pyx_L3_error)
    }
    /* intptr_t conversion; -1 doubles as the error sentinel, disambiguated
     * via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20787, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_min_max_fan_speed", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20787, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_min_max_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_144device_get_min_max_fan_speed(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl shim for the Python-level call: forwards to the cpdef C function
 * (__pyx_f_..., skip_dispatch=1) and passes its tuple result through
 * unchanged.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_144device_get_min_max_fan_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_min_max_fan_speed", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_fan_speed(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_min_max_fan_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20809
 * 
 * 
 * cpdef unsigned int device_get_fan_control_policy_v2(intptr_t device, unsigned int fan) except *:             # <<<<<<<<<<<<<<
 *     """Gets current fan control policy.
 * 
*/

/* Forward declaration of the Python wrapper for `device_get_fan_control_policy_v2`
 * (signature varies with the METH_FASTCALL calling convention). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_147device_get_fan_control_policy_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_fan_control_policy_v2`.
 * Releases the GIL, calls the cy_nvml shim for nvmlDeviceGetFanControlPolicy_v2,
 * re-acquires the GIL, raises via check_status() on failure, and returns the
 * fetched policy cast to unsigned int.  Declared `except *` in the .pyx, so
 * -1 is only a best-effort sentinel; callers check PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_control_policy_v2(intptr_t __pyx_v_device, unsigned int __pyx_v_fan, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlFanControlPolicy_t __pyx_v_policy;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20822
 *     """
 *     cdef nvmlFanControlPolicy_t policy
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanControlPolicy_v2(<Device>device, fan, &policy)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20823
 *     cdef nvmlFanControlPolicy_t policy
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanControlPolicy_v2(<Device>device, fan, &policy)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <unsigned int>policy
*/
        /* The sentinel status signals the shim failed to load the symbol;
         * __Pyx_ErrOccurredWithGIL confirms a Python exception is pending. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanControlPolicy_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan, (&__pyx_v_policy)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20823, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20822
 *     """
 *     cdef nvmlFanControlPolicy_t policy
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFanControlPolicy_v2(<Device>device, fan, &policy)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20824
 *     with nogil:
 *         __status__ = nvmlDeviceGetFanControlPolicy_v2(<Device>device, fan, &policy)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <unsigned int>policy
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20824, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20825
 *         __status__ = nvmlDeviceGetFanControlPolicy_v2(<Device>device, fan, &policy)
 *     check_status(__status__)
 *     return <unsigned int>policy             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((unsigned int)__pyx_v_policy);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20809
 * 
 * 
 * cpdef unsigned int device_get_fan_control_policy_v2(intptr_t device, unsigned int fan) except *:             # <<<<<<<<<<<<<<
 *     """Gets current fan control policy.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_control_policy_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  /* -1 wraps to UINT_MAX here; with `except *` the exception state, not the
   * return value, is authoritative. */
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef table entry for the
 * Python-visible `device_get_fan_control_policy_v2` (METH_FASTCALL|METH_KEYWORDS).
 * Machine-generated by Cython (see file header); do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_147device_get_fan_control_policy_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_146device_get_fan_control_policy_v2, "device_get_fan_control_policy_v2(intptr_t device, unsigned int fan) -> unsigned int\n\nGets current fan control policy.\n\nArgs:\n    device (intptr_t): The identifier of the target ``device``.\n    fan (unsigned int): The index of the target fan, zero indexed.\n\nReturns:\n    unsigned int: Reference in which to return the fan control ``policy``.\n\n.. seealso:: `nvmlDeviceGetFanControlPolicy_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_147device_get_fan_control_policy_v2 = {"device_get_fan_control_policy_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_147device_get_fan_control_policy_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_146device_get_fan_control_policy_v2};
/* Python wrapper for cpdef `device_get_fan_control_policy_v2` (Cython-generated).
 * Unpacks exactly two arguments, `device` and `fan`, given positionally or by
 * keyword, converts them to intptr_t / unsigned int, and delegates to
 * __pyx_pf_..._146device_get_fan_control_policy_v2.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_147device_get_fan_control_policy_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_fan;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_fan_control_policy_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_fan,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` expands to
     * `__builtin_expect(!!(len), 0) < 0`, which is never true (the !! yields
     * 0 or 1), so a negative error result from __Pyx_NumKwargs_FASTCALL was
     * silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20809, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20809, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20809, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_fan_control_policy_v2", 0) < (0)) __PYX_ERR(0, 20809, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_fan_control_policy_v2", 1, 2, 2, i); __PYX_ERR(0, 20809, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20809, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20809, __pyx_L3_error)
    }
    /* -1 doubles as the conversion error sentinel, disambiguated via
     * PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20809, __pyx_L3_error)
    __pyx_v_fan = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_fan == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20809, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_fan_control_policy_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20809, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_control_policy_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_146device_get_fan_control_policy_v2(__pyx_self, __pyx_v_device, __pyx_v_fan);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl shim: calls the cpdef C function (skip_dispatch=1) and boxes the
 * resulting unsigned int into a Python int.  Because the cpdef is `except *`,
 * the -1 check is paired with PyErr_Occurred() to detect errors. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_146device_get_fan_control_policy_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fan_control_policy_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_fan_control_policy_v2(__pyx_v_device, __pyx_v_fan, 1); if (unlikely(__pyx_t_1 == ((unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20809, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20809, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fan_control_policy_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20828
 * 
 * 
 * cpdef unsigned int device_get_num_fans(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of fans on the device.
 * 
*/

/* Forward declaration of the Python wrapper for `device_get_num_fans`
 * (signature varies with the METH_FASTCALL calling convention). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_149device_get_num_fans(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_num_fans`.
 * Releases the GIL, calls the cy_nvml shim for nvmlDeviceGetNumFans,
 * re-acquires the GIL, raises via check_status() on failure, and returns the
 * fan count.  Declared `except? 0` in the .pyx: 0 is the error sentinel,
 * disambiguated by the caller with PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_num_fans(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_num_fans;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20840
 *     """
 *     cdef unsigned int num_fans
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNumFans(<Device>device, &num_fans)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20841
 *     cdef unsigned int num_fans
 *     with nogil:
 *         __status__ = nvmlDeviceGetNumFans(<Device>device, &num_fans)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return num_fans
*/
        /* The sentinel status signals the shim failed to load the symbol;
         * __Pyx_ErrOccurredWithGIL confirms a Python exception is pending. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumFans(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_num_fans)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20841, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20840
 *     """
 *     cdef unsigned int num_fans
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNumFans(<Device>device, &num_fans)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20842
 *     with nogil:
 *         __status__ = nvmlDeviceGetNumFans(<Device>device, &num_fans)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return num_fans
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20842, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20843
 *         __status__ = nvmlDeviceGetNumFans(<Device>device, &num_fans)
 *     check_status(__status__)
 *     return num_fans             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_num_fans;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20828
 * 
 * 
 * cpdef unsigned int device_get_num_fans(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of fans on the device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_num_fans", __pyx_clineno, __pyx_lineno, __pyx_filename);
  /* 0 is the `except? 0` error sentinel. */
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef table entry for the
 * Python-visible `device_get_num_fans` (METH_FASTCALL|METH_KEYWORDS).
 * Machine-generated by Cython (see file header); do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_149device_get_num_fans(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_148device_get_num_fans, "device_get_num_fans(intptr_t device) -> unsigned int\n\nRetrieves the number of fans on the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The number of fans.\n\n.. seealso:: `nvmlDeviceGetNumFans`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_149device_get_num_fans = {"device_get_num_fans", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_149device_get_num_fans, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_148device_get_num_fans};
/* Python wrapper for cpdef `device_get_num_fans` (Cython-generated).
 * Unpacks exactly one argument, `device`, given positionally or by keyword,
 * converts it to intptr_t, and delegates to
 * __pyx_pf_..._148device_get_num_fans.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_149device_get_num_fans(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_num_fans (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` expands to
     * `__builtin_expect(!!(len), 0) < 0`, which is never true (the !! yields
     * 0 or 1), so a negative error result from __Pyx_NumKwargs_FASTCALL was
     * silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20828, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20828, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_num_fans", 0) < (0)) __PYX_ERR(0, 20828, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_num_fans", 1, 1, 1, i); __PYX_ERR(0, 20828, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20828, __pyx_L3_error)
    }
    /* intptr_t conversion; -1 doubles as the error sentinel, disambiguated
     * via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20828, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_num_fans", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20828, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_num_fans", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_148device_get_num_fans(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl shim: calls the cpdef C function (skip_dispatch=1) and boxes the
 * resulting unsigned int into a Python int.  Because the cpdef is
 * `except? 0`, a 0 return is only an error when PyErr_Occurred(). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_148device_get_num_fans(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_num_fans", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_num_fans(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20828, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20828, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_num_fans", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20846
 * 
 * 
 * cpdef object device_get_cooler_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the cooler's information. Returns a cooler's control signal characteristics. The possible types are restricted, Variable and Toggle. See ``nvmlCoolerControl_t`` for details on available signal types. Returns objects that cooler cools. Targets may be GPU, Memory, Power Supply or All of these. See ``nvmlCoolerTarget_t`` for details on available targets.
 * 
*/

/* Forward declaration of the Python wrapper for `device_get_cooler_info`
 * (signature varies with the METH_FASTCALL calling convention). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_151device_get_cooler_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_cooler_info`.
 * Allocates a Python-side CoolerInfo_v1 wrapper, obtains the address of its
 * embedded nvmlCoolerInfo_t via _get_ptr(), stamps the versioned-struct
 * header (sizeof | (1 << 24), i.e. struct version 1), calls the cy_nvml shim
 * for nvmlDeviceGetCoolerInfo with the GIL released, checks the status, and
 * returns the populated wrapper object (new reference; NULL on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cooler_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *__pyx_v_cooler_info_py = 0;
  nvmlCoolerInfo_t *__pyx_v_cooler_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cooler_info", 0);

  /* "cuda/bindings/_nvml.pyx":20857
 *     .. seealso:: `nvmlDeviceGetCoolerInfo`
 *     """
 *     cdef CoolerInfo_v1 cooler_info_py = CoolerInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlCoolerInfo_t *cooler_info = <nvmlCoolerInfo_t *><intptr_t>(cooler_info_py._get_ptr())
 *     cooler_info.version = sizeof(nvmlCoolerInfo_v1_t) | (1 << 24)
*/
  /* Vectorcall CoolerInfo_v1() with zero arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20857, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_cooler_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20858
 *     """
 *     cdef CoolerInfo_v1 cooler_info_py = CoolerInfo_v1()
 *     cdef nvmlCoolerInfo_t *cooler_info = <nvmlCoolerInfo_t *><intptr_t>(cooler_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     cooler_info.version = sizeof(nvmlCoolerInfo_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* _get_ptr() returns the struct address as intptr_t via the vtable. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)__pyx_v_cooler_info_py->__pyx_vtab)->_get_ptr(__pyx_v_cooler_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20858, __pyx_L1_error)
  __pyx_v_cooler_info = ((nvmlCoolerInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20859
 *     cdef CoolerInfo_v1 cooler_info_py = CoolerInfo_v1()
 *     cdef nvmlCoolerInfo_t *cooler_info = <nvmlCoolerInfo_t *><intptr_t>(cooler_info_py._get_ptr())
 *     cooler_info.version = sizeof(nvmlCoolerInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCoolerInfo(<Device>device, cooler_info)
*/
  __pyx_v_cooler_info->version = ((sizeof(nvmlCoolerInfo_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":20860
 *     cdef nvmlCoolerInfo_t *cooler_info = <nvmlCoolerInfo_t *><intptr_t>(cooler_info_py._get_ptr())
 *     cooler_info.version = sizeof(nvmlCoolerInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCoolerInfo(<Device>device, cooler_info)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20861
 *     cooler_info.version = sizeof(nvmlCoolerInfo_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetCoolerInfo(<Device>device, cooler_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cooler_info_py
*/
        /* The sentinel status signals the shim failed to load the symbol;
         * __Pyx_ErrOccurredWithGIL confirms a Python exception is pending. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCoolerInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_cooler_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20861, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20860
 *     cdef nvmlCoolerInfo_t *cooler_info = <nvmlCoolerInfo_t *><intptr_t>(cooler_info_py._get_ptr())
 *     cooler_info.version = sizeof(nvmlCoolerInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCoolerInfo(<Device>device, cooler_info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20862
 *     with nogil:
 *         __status__ = nvmlDeviceGetCoolerInfo(<Device>device, cooler_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cooler_info_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20862, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20863
 *         __status__ = nvmlDeviceGetCoolerInfo(<Device>device, cooler_info)
 *     check_status(__status__)
 *     return cooler_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_cooler_info_py);
  __pyx_r = ((PyObject *)__pyx_v_cooler_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20846
 * 
 * 
 * cpdef object device_get_cooler_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the cooler's information. Returns a cooler's control signal characteristics. The possible types are restricted, Variable and Toggle. See ``nvmlCoolerControl_t`` for details on available signal types. Returns objects that cooler cools. Targets may be GPU, Memory, Power Supply or All of these. See ``nvmlCoolerTarget_t`` for details on available targets.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cooler_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_cooler_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef table entry for the
 * Python-visible `device_get_cooler_info` (METH_FASTCALL|METH_KEYWORDS).
 * Machine-generated by Cython (see file header); do not hand-edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_151device_get_cooler_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_150device_get_cooler_info, "device_get_cooler_info(intptr_t device)\n\nRetrieves the cooler's information. Returns a cooler's control signal characteristics. The possible types are restricted, Variable and Toggle. See ``nvmlCoolerControl_t`` for details on available signal types. Returns objects that cooler cools. Targets may be GPU, Memory, Power Supply or All of these. See ``nvmlCoolerTarget_t`` for details on available targets.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlCoolerInfo_v1_t: Structure specifying the cooler's control signal characteristics (out) and the target that cooler cools (out).\n\n.. seealso:: `nvmlDeviceGetCoolerInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_151device_get_cooler_info = {"device_get_cooler_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_151device_get_cooler_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_150device_get_cooler_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_151device_get_cooler_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_cooler_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20846, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20846, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_cooler_info", 0) < (0)) __PYX_ERR(0, 20846, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_cooler_info", 1, 1, 1, i); __PYX_ERR(0, 20846, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20846, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20846, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_cooler_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20846, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cooler_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_150device_get_cooler_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-object body for device_get_cooler_info: thin bridge that calls the
 * C-level cpdef implementation with __pyx_skip_dispatch=1 and forwards its
 * result.  Returns a new reference, or NULL with a Python exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_150device_get_cooler_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cooler_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_cooler_info(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cooler_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20866
 * 
 * 
 * cpdef unsigned int device_get_temperature_threshold(intptr_t device, int threshold_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_153device_get_temperature_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function
 * device_get_temperature_threshold (declared 'except? 0' in the .pyx):
 * releases the GIL, calls nvmlDeviceGetTemperatureThreshold, re-acquires
 * the GIL, and raises via check_status() if the returned status is an
 * error (check_status returning 1 signals a pending Python exception).
 * On error it returns 0 with a Python exception set; 0 is also a legal
 * success value, hence the PyErr_Occurred() disambiguation at call sites.
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_threshold(intptr_t __pyx_v_device, int __pyx_v_threshold_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_temp;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20879
 *     """
 *     cdef unsigned int temp
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, &temp)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20880
 *     cdef unsigned int temp
 *     with nogil:
 *         __status__ = nvmlDeviceGetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, &temp)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return temp
*/
        /* The sentinel status marks an internal library-loading failure; only then is an exception checked. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureThreshold(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__TemperatureThresholds)__pyx_v_threshold_type), (&__pyx_v_temp)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20880, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20879
 *     """
 *     cdef unsigned int temp
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, &temp)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20881
 *     with nogil:
 *         __status__ = nvmlDeviceGetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, &temp)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return temp
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20881, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20882
 *         __status__ = nvmlDeviceGetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, &temp)
 *     check_status(__status__)
 *     return temp             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_temp;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20866
 * 
 * 
 * cpdef unsigned int device_get_temperature_threshold(intptr_t device, int threshold_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_temperature_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython entry point for device_get_temperature_threshold(device, threshold_type).
 *
 * Parses two positional-or-keyword arguments, converts them to intptr_t and
 * int respectively, and delegates to
 * __pyx_pf_4cuda_8bindings_5_nvml_152device_get_temperature_threshold.
 * Returns a new reference, or NULL with a Python exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_153device_get_temperature_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_152device_get_temperature_threshold, "device_get_temperature_threshold(intptr_t device, int threshold_type) -> unsigned int\n\nRetrieves the temperature threshold for the GPU with the specified threshold type in degrees C.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    threshold_type (TemperatureThresholds): The type of threshold value queried.\n\nReturns:\n    unsigned int: Reference in which to return the temperature reading.\n\n.. seealso:: `nvmlDeviceGetTemperatureThreshold`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_153device_get_temperature_threshold = {"device_get_temperature_threshold", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_153device_get_temperature_threshold, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_152device_get_temperature_threshold};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_153device_get_temperature_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_threshold_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_temperature_threshold (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_threshold_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); unlikely(x) normalizes x with
     * !!(x) to 0/1, so 'unlikely(len) < 0' could never be true and the
     * negative-length error path was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20866, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20866, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_temperature_threshold", 0) < (0)) __PYX_ERR(0, 20866, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_temperature_threshold", 1, 2, 2, i); __PYX_ERR(0, 20866, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20866, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20866, __pyx_L3_error)
    }
    /* -1 with a pending exception marks a failed conversion for both arguments. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20866, __pyx_L3_error)
    __pyx_v_threshold_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_threshold_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20866, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_temperature_threshold", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20866, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_temperature_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_152device_get_temperature_threshold(__pyx_self, __pyx_v_device, __pyx_v_threshold_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-object body for device_get_temperature_threshold: calls the C-level
 * cpdef implementation (__pyx_skip_dispatch=1), then boxes the unsigned int
 * result into a Python int.  Because 0 is both a legal result and the error
 * sentinel ('except? 0'), PyErr_Occurred() disambiguates the two cases.
 * Returns NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_152device_get_temperature_threshold(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_threshold_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_temperature_threshold", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_threshold(__pyx_v_device, __pyx_v_threshold_type, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 20866, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20866, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_temperature_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20885
 * 
 * 
 * cpdef object device_get_margin_temperature(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the thermal margin temperature (distance to nearest slowdown threshold).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_155device_get_margin_temperature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef device_get_margin_temperature:
 * 1. constructs a MarginTemperature_v1 Python wrapper object,
 * 2. obtains the address of its embedded nvmlMarginTemperature_t via
 *    _get_ptr(),
 * 3. stamps the NVML versioned-struct header (struct size OR'd with
 *    1 << 24, i.e. version 1 in the top byte),
 * 4. calls nvmlDeviceGetMarginTemperature with the GIL released,
 * 5. raises via check_status() on an error status, and
 * 6. returns the wrapper object (new reference; NULL on error).
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_margin_temperature(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *__pyx_v_margin_temp_info_py = 0;
  nvmlMarginTemperature_t *__pyx_v_margin_temp_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_margin_temperature", 0);

  /* "cuda/bindings/_nvml.pyx":20896
 *     .. seealso:: `nvmlDeviceGetMarginTemperature`
 *     """
 *     cdef MarginTemperature_v1 margin_temp_info_py = MarginTemperature_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlMarginTemperature_t *margin_temp_info = <nvmlMarginTemperature_t *><intptr_t>(margin_temp_info_py._get_ptr())
 *     margin_temp_info.version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24)
*/
  /* Zero-argument vectorcall of the MarginTemperature_v1 type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20896, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_margin_temp_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20897
 *     """
 *     cdef MarginTemperature_v1 margin_temp_info_py = MarginTemperature_v1()
 *     cdef nvmlMarginTemperature_t *margin_temp_info = <nvmlMarginTemperature_t *><intptr_t>(margin_temp_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     margin_temp_info.version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24)
 *     with nogil:
*/
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)__pyx_v_margin_temp_info_py->__pyx_vtab)->_get_ptr(__pyx_v_margin_temp_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20897, __pyx_L1_error)
  __pyx_v_margin_temp_info = ((nvmlMarginTemperature_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20898
 *     cdef MarginTemperature_v1 margin_temp_info_py = MarginTemperature_v1()
 *     cdef nvmlMarginTemperature_t *margin_temp_info = <nvmlMarginTemperature_t *><intptr_t>(margin_temp_info_py._get_ptr())
 *     margin_temp_info.version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetMarginTemperature(<Device>device, margin_temp_info)
*/
  /* 0x1000000 == (1 << 24): version 1 encoded in the top byte of the size field. */
  __pyx_v_margin_temp_info->version = ((sizeof(nvmlMarginTemperature_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":20899
 *     cdef nvmlMarginTemperature_t *margin_temp_info = <nvmlMarginTemperature_t *><intptr_t>(margin_temp_info_py._get_ptr())
 *     margin_temp_info.version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMarginTemperature(<Device>device, margin_temp_info)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20900
 *     margin_temp_info.version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetMarginTemperature(<Device>device, margin_temp_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return margin_temp_info_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMarginTemperature(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_margin_temp_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20900, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20899
 *     cdef nvmlMarginTemperature_t *margin_temp_info = <nvmlMarginTemperature_t *><intptr_t>(margin_temp_info_py._get_ptr())
 *     margin_temp_info.version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMarginTemperature(<Device>device, margin_temp_info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20901
 *     with nogil:
 *         __status__ = nvmlDeviceGetMarginTemperature(<Device>device, margin_temp_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return margin_temp_info_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20901, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20902
 *         __status__ = nvmlDeviceGetMarginTemperature(<Device>device, margin_temp_info)
 *     check_status(__status__)
 *     return margin_temp_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_margin_temp_info_py);
  __pyx_r = ((PyObject *)__pyx_v_margin_temp_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20885
 * 
 * 
 * cpdef object device_get_margin_temperature(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the thermal margin temperature (distance to nearest slowdown threshold).
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_margin_temperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_margin_temp_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython entry point for device_get_margin_temperature(device).
 *
 * Parses one positional-or-keyword argument ("device"), converts it to
 * intptr_t, and delegates to
 * __pyx_pf_4cuda_8bindings_5_nvml_154device_get_margin_temperature.
 * Returns a new reference, or NULL with a Python exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_155device_get_margin_temperature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_154device_get_margin_temperature, "device_get_margin_temperature(intptr_t device)\n\nRetrieves the thermal margin temperature (distance to nearest slowdown threshold).\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlMarginTemperature_v1_t: Versioned structure in which to return the temperature reading.\n\n.. seealso:: `nvmlDeviceGetMarginTemperature`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_155device_get_margin_temperature = {"device_get_margin_temperature", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_155device_get_margin_temperature, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_154device_get_margin_temperature};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_155device_get_margin_temperature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_margin_temperature (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); unlikely(x) normalizes x with
     * !!(x) to 0/1, so 'unlikely(len) < 0' could never be true and the
     * negative-length error path was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20885, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20885, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_margin_temperature", 0) < (0)) __PYX_ERR(0, 20885, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_margin_temperature", 1, 1, 1, i); __PYX_ERR(0, 20885, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20885, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with an exception pending flags failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20885, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_margin_temperature", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20885, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_margin_temperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_154device_get_margin_temperature(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-object body for device_get_margin_temperature: thin bridge that
 * calls the C-level cpdef implementation with __pyx_skip_dispatch=1 and
 * forwards its result.  Returns a new reference, or NULL with a Python
 * exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_154device_get_margin_temperature(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_margin_temperature", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_margin_temperature(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_margin_temperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20905
 * 
 * 
 * cpdef object device_get_thermal_settings(intptr_t device, unsigned int sensor_ind_ex):             # <<<<<<<<<<<<<<
 *     """Used to execute a list of thermal system instructions.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_157device_get_thermal_settings(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef device_get_thermal_settings: constructs a
 * GpuThermalSettings Python wrapper object, obtains the address of its
 * embedded nvmlGpuThermalSettings_t via _get_ptr(), calls
 * nvmlDeviceGetThermalSettings(device, sensor_ind_ex, ...) with the GIL
 * released, raises via check_status() on an error status, and returns the
 * wrapper object (new reference; NULL on error).  Unlike the margin-
 * temperature struct, no versioned-struct header is stamped here.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_thermal_settings(intptr_t __pyx_v_device, unsigned int __pyx_v_sensor_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *__pyx_v_p_thermal_settings_py = 0;
  nvmlGpuThermalSettings_t *__pyx_v_p_thermal_settings;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_thermal_settings", 0);

  /* "cuda/bindings/_nvml.pyx":20917
 *     .. seealso:: `nvmlDeviceGetThermalSettings`
 *     """
 *     cdef GpuThermalSettings p_thermal_settings_py = GpuThermalSettings()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuThermalSettings_t *p_thermal_settings = <nvmlGpuThermalSettings_t *><intptr_t>(p_thermal_settings_py._get_ptr())
 *     with nogil:
*/
  /* Zero-argument vectorcall of the GpuThermalSettings type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20917, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_thermal_settings_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":20918
 *     """
 *     cdef GpuThermalSettings p_thermal_settings_py = GpuThermalSettings()
 *     cdef nvmlGpuThermalSettings_t *p_thermal_settings = <nvmlGpuThermalSettings_t *><intptr_t>(p_thermal_settings_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetThermalSettings(<Device>device, sensor_ind_ex, p_thermal_settings)
*/
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuThermalSettings *)__pyx_v_p_thermal_settings_py->__pyx_vtab)->_get_ptr(__pyx_v_p_thermal_settings_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 20918, __pyx_L1_error)
  __pyx_v_p_thermal_settings = ((nvmlGpuThermalSettings_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":20919
 *     cdef GpuThermalSettings p_thermal_settings_py = GpuThermalSettings()
 *     cdef nvmlGpuThermalSettings_t *p_thermal_settings = <nvmlGpuThermalSettings_t *><intptr_t>(p_thermal_settings_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetThermalSettings(<Device>device, sensor_ind_ex, p_thermal_settings)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20920
 *     cdef nvmlGpuThermalSettings_t *p_thermal_settings = <nvmlGpuThermalSettings_t *><intptr_t>(p_thermal_settings_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetThermalSettings(<Device>device, sensor_ind_ex, p_thermal_settings)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_thermal_settings_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetThermalSettings(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_sensor_ind_ex, __pyx_v_p_thermal_settings); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20920, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":20919
 *     cdef GpuThermalSettings p_thermal_settings_py = GpuThermalSettings()
 *     cdef nvmlGpuThermalSettings_t *p_thermal_settings = <nvmlGpuThermalSettings_t *><intptr_t>(p_thermal_settings_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetThermalSettings(<Device>device, sensor_ind_ex, p_thermal_settings)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20921
 *     with nogil:
 *         __status__ = nvmlDeviceGetThermalSettings(<Device>device, sensor_ind_ex, p_thermal_settings)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_thermal_settings_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 20921, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20922
 *         __status__ = nvmlDeviceGetThermalSettings(<Device>device, sensor_ind_ex, p_thermal_settings)
 *     check_status(__status__)
 *     return p_thermal_settings_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_thermal_settings_py);
  __pyx_r = ((PyObject *)__pyx_v_p_thermal_settings_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20905
 * 
 * 
 * cpdef object device_get_thermal_settings(intptr_t device, unsigned int sensor_ind_ex):             # <<<<<<<<<<<<<<
 *     """Used to execute a list of thermal system instructions.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_thermal_settings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_thermal_settings_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_157device_get_thermal_settings(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_156device_get_thermal_settings, "device_get_thermal_settings(intptr_t device, unsigned int sensor_ind_ex)\n\nUsed to execute a list of thermal system instructions.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    sensor_ind_ex (unsigned int): The index of the thermal sensor.\n\nReturns:\n    nvmlGpuThermalSettings_t: Reference in which to return the thermal sensor information.\n\n.. seealso:: `nvmlDeviceGetThermalSettings`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_157device_get_thermal_settings = {"device_get_thermal_settings", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_157device_get_thermal_settings, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_156device_get_thermal_settings};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_157device_get_thermal_settings(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_sensor_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_thermal_settings (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_sensor_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20905, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20905, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_thermal_settings", 0) < (0)) __PYX_ERR(0, 20905, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_thermal_settings", 1, 2, 2, i); __PYX_ERR(0, 20905, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20905, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20905, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20905, __pyx_L3_error)
    __pyx_v_sensor_ind_ex = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_sensor_ind_ex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20905, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_thermal_settings", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20905, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_thermal_settings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_156device_get_thermal_settings(__pyx_self, __pyx_v_device, __pyx_v_sensor_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for the cpdef: forwards the already-converted C arguments to
 * the cdef implementation (__pyx_skip_dispatch=1 skips the Python-level
 * override lookup) and returns its object result, or NULL on exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_156device_get_thermal_settings(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_sensor_ind_ex) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_thermal_settings", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL from the cdef function signals a propagated Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_thermal_settings(__pyx_v_device, __pyx_v_sensor_ind_ex, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_thermal_settings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20925
 * 
 * 
 * cpdef int device_get_performance_state(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the current performance state for the device.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_159device_get_performance_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C body of `cpdef int device_get_performance_state(intptr_t device) except? -1`:
 * calls nvmlDeviceGetPerformanceState with the GIL released, raises via
 * check_status() on a bad NVML status, and returns the pstate cast to int.
 * -1 is the ambiguous error sentinel (callers also check PyErr_Occurred). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_performance_state(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__Pstates __pyx_v_p_state;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20937
 *     """
 *     cdef _Pstates p_state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPerformanceState(<Device>device, &p_state)
 *     check_status(__status__)
 */
  {
      /* `with nogil`: release the GIL for the duration of the NVML call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20938
 *     cdef _Pstates p_state
 *     with nogil:
 *         __status__ = nvmlDeviceGetPerformanceState(<Device>device, &p_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>p_state
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the "exception may be
         * set" sentinel; __Pyx_ErrOccurredWithGIL reacquires the GIL to check. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_p_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20938, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20937
 *     """
 *     cdef _Pstates p_state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPerformanceState(<Device>device, &p_state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits reacquire the GIL before leaving the nogil section. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20939
 *     with nogil:
 *         __status__ = nvmlDeviceGetPerformanceState(<Device>device, &p_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>p_state
 * 
 */
  /* check_status returns 1 with a Python exception set on a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20939, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20940
 *         __status__ = nvmlDeviceGetPerformanceState(<Device>device, &p_state)
 *     check_status(__status__)
 *     return <int>p_state             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_p_state);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20925
 * 
 * 
 * cpdef int device_get_performance_state(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the current performance state for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_performance_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python-callable wrapper for `device_get_performance_state(device)`:
 * parses the single argument (device: intptr_t) from positional and/or
 * keyword form and dispatches to __pyx_pf_..._158device_get_performance_state. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_159device_get_performance_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_158device_get_performance_state, "device_get_performance_state(intptr_t device) -> int\n\nRetrieves the current performance state for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the performance state reading.\n\n.. seealso:: `nvmlDeviceGetPerformanceState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_159device_get_performance_state = {"device_get_performance_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_159device_get_performance_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_158device_get_performance_state};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_159device_get_performance_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0]=device; owned refs released on exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_performance_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive a tuple; recover the positional count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only the value, not the comparison;
     * the `< 0` test itself is still evaluated correctly. Generated code. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20925, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: slot positionals, then keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20925, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_performance_state", 0) < (0)) __PYX_ERR(0, 20925, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_performance_state", 1, 1, 1, i); __PYX_ERR(0, 20925, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20925, __pyx_L3_error)
    }
    /* NOTE(review): conversion via PyLong_AsSsize_t assumes Py_ssize_t and
     * intptr_t have compatible width. Generated code. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20925, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_performance_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20925, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any refs collected so far and raise. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_performance_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_158device_get_performance_state(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for the cpdef: calls the cdef implementation
 * (__pyx_skip_dispatch=1), then boxes the C int result into a Python int.
 * -1 is the ambiguous error sentinel, so PyErr_Occurred() disambiguates. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_158device_get_performance_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_performance_state", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_performance_state(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20925, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20925, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_performance_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20943
 * 
 * 
 * cpdef unsigned long long device_get_current_clocks_event_reasons(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves current clocks event reasons.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_161device_get_current_clocks_event_reasons(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C body of `cpdef unsigned long long device_get_current_clocks_event_reasons(intptr_t device) except? 0`:
 * calls nvmlDeviceGetCurrentClocksEventReasons with the GIL released, raises
 * via check_status() on a bad NVML status, and returns the reasons bitmask.
 * 0 is the ambiguous error sentinel (callers also check PyErr_Occurred). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_current_clocks_event_reasons(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_clocks_event_reasons;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20955
 *     """
 *     cdef unsigned long long clocks_event_reasons
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrentClocksEventReasons(<Device>device, &clocks_event_reasons)
 *     check_status(__status__)
 */
  {
      /* `with nogil`: release the GIL for the duration of the NVML call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20956
 *     cdef unsigned long long clocks_event_reasons
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrentClocksEventReasons(<Device>device, &clocks_event_reasons)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return clocks_event_reasons
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the "exception may be
         * set" sentinel; __Pyx_ErrOccurredWithGIL reacquires the GIL to check. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClocksEventReasons(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_clocks_event_reasons)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20956, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20955
 *     """
 *     cdef unsigned long long clocks_event_reasons
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrentClocksEventReasons(<Device>device, &clocks_event_reasons)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits reacquire the GIL before leaving the nogil section. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20957
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrentClocksEventReasons(<Device>device, &clocks_event_reasons)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return clocks_event_reasons
 * 
 */
  /* check_status returns 1 with a Python exception set on a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20957, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20958
 *         __status__ = nvmlDeviceGetCurrentClocksEventReasons(<Device>device, &clocks_event_reasons)
 *     check_status(__status__)
 *     return clocks_event_reasons             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_clocks_event_reasons;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20943
 * 
 * 
 * cpdef unsigned long long device_get_current_clocks_event_reasons(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves current clocks event reasons.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_current_clocks_event_reasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python-callable wrapper for `device_get_current_clocks_event_reasons(device)`:
 * parses the single argument (device: intptr_t) from positional and/or
 * keyword form and dispatches to the __pyx_pf_..._160 implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_161device_get_current_clocks_event_reasons(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_160device_get_current_clocks_event_reasons, "device_get_current_clocks_event_reasons(intptr_t device) -> unsigned long long\n\nRetrieves current clocks event reasons.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned long long: Reference in which to return bitmask of active clocks event reasons.\n\n.. seealso:: `nvmlDeviceGetCurrentClocksEventReasons`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_161device_get_current_clocks_event_reasons = {"device_get_current_clocks_event_reasons", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_161device_get_current_clocks_event_reasons, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_160device_get_current_clocks_event_reasons};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_161device_get_current_clocks_event_reasons(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0]=device; owned refs released on exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_current_clocks_event_reasons (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive a tuple; recover the positional count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only the value, not the comparison;
     * the `< 0` test itself is still evaluated correctly. Generated code. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20943, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: slot positionals, then keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20943, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_current_clocks_event_reasons", 0) < (0)) __PYX_ERR(0, 20943, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_current_clocks_event_reasons", 1, 1, 1, i); __PYX_ERR(0, 20943, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20943, __pyx_L3_error)
    }
    /* NOTE(review): conversion via PyLong_AsSsize_t assumes Py_ssize_t and
     * intptr_t have compatible width. Generated code. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20943, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_current_clocks_event_reasons", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20943, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any refs collected so far and raise. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_current_clocks_event_reasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_160device_get_current_clocks_event_reasons(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for the cpdef: calls the cdef implementation
 * (__pyx_skip_dispatch=1), then boxes the unsigned long long bitmask into a
 * Python int. 0 is the ambiguous error sentinel, so PyErr_Occurred() decides. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_160device_get_current_clocks_event_reasons(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_current_clocks_event_reasons", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_current_clocks_event_reasons(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 20943, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_current_clocks_event_reasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20961
 * 
 * 
 * cpdef unsigned long long device_get_supported_clocks_event_reasons(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves bitmask of supported clocks event reasons that can be returned by ``nvmlDeviceGetCurrentClocksEventReasons``.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_163device_get_supported_clocks_event_reasons(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C body of `cpdef unsigned long long device_get_supported_clocks_event_reasons(intptr_t device) except? 0`:
 * calls nvmlDeviceGetSupportedClocksEventReasons with the GIL released, raises
 * via check_status() on a bad NVML status, and returns the supported bitmask.
 * 0 is the ambiguous error sentinel (callers also check PyErr_Occurred). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_clocks_event_reasons(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_supported_clocks_event_reasons;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20973
 *     """
 *     cdef unsigned long long supported_clocks_event_reasons
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedClocksEventReasons(<Device>device, &supported_clocks_event_reasons)
 *     check_status(__status__)
 */
  {
      /* `with nogil`: release the GIL for the duration of the NVML call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20974
 *     cdef unsigned long long supported_clocks_event_reasons
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedClocksEventReasons(<Device>device, &supported_clocks_event_reasons)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return supported_clocks_event_reasons
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the "exception may be
         * set" sentinel; __Pyx_ErrOccurredWithGIL reacquires the GIL to check. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedClocksEventReasons(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_supported_clocks_event_reasons)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20974, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20973
 *     """
 *     cdef unsigned long long supported_clocks_event_reasons
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedClocksEventReasons(<Device>device, &supported_clocks_event_reasons)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits reacquire the GIL before leaving the nogil section. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20975
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedClocksEventReasons(<Device>device, &supported_clocks_event_reasons)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return supported_clocks_event_reasons
 * 
 */
  /* check_status returns 1 with a Python exception set on a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20975, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20976
 *         __status__ = nvmlDeviceGetSupportedClocksEventReasons(<Device>device, &supported_clocks_event_reasons)
 *     check_status(__status__)
 *     return supported_clocks_event_reasons             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_supported_clocks_event_reasons;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20961
 * 
 * 
 * cpdef unsigned long long device_get_supported_clocks_event_reasons(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves bitmask of supported clocks event reasons that can be returned by ``nvmlDeviceGetCurrentClocksEventReasons``.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_clocks_event_reasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python-callable wrapper for `device_get_supported_clocks_event_reasons(device)`:
 * parses the single argument (device: intptr_t) from positional and/or
 * keyword form and dispatches to the __pyx_pf_..._162 implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_163device_get_supported_clocks_event_reasons(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_162device_get_supported_clocks_event_reasons, "device_get_supported_clocks_event_reasons(intptr_t device) -> unsigned long long\n\nRetrieves bitmask of supported clocks event reasons that can be returned by ``nvmlDeviceGetCurrentClocksEventReasons``.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned long long: Reference in which to return bitmask of supported clocks event reasons.\n\n.. seealso:: `nvmlDeviceGetSupportedClocksEventReasons`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_163device_get_supported_clocks_event_reasons = {"device_get_supported_clocks_event_reasons", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_163device_get_supported_clocks_event_reasons, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_162device_get_supported_clocks_event_reasons};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_163device_get_supported_clocks_event_reasons(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[0]=device; owned refs released on exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_supported_clocks_event_reasons (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-fastcall builds receive a tuple; recover the positional count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps only the value, not the comparison;
     * the `< 0` test itself is still evaluated correctly. Generated code. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20961, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: slot positionals, then keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20961, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_supported_clocks_event_reasons", 0) < (0)) __PYX_ERR(0, 20961, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_supported_clocks_event_reasons", 1, 1, 1, i); __PYX_ERR(0, 20961, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20961, __pyx_L3_error)
    }
    /* NOTE(review): conversion via PyLong_AsSsize_t assumes Py_ssize_t and
     * intptr_t have compatible width. Generated code. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20961, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_supported_clocks_event_reasons", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20961, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any refs collected so far and raise. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_clocks_event_reasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_162device_get_supported_clocks_event_reasons(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for `device_get_supported_clocks_event_reasons`:
 * calls the cdef implementation directly (skip_dispatch=1) and boxes the
 * resulting `unsigned PY_LONG_LONG` bitmask into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_162device_get_supported_clocks_event_reasons(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_clocks_event_reasons", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is the implementation's error sentinel; PyErr_Occurred() distinguishes
   * a genuine 0 result from a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_clocks_event_reasons(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 20961, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20961, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_clocks_event_reasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20979
 * 
 * 
 * cpdef int device_get_power_state(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Deprecated: Use ``nvmlDeviceGetPerformanceState``. This function exposes an incorrect generalization.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_165device_get_power_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cdef implementation of `device_get_power_state` (cpdef `except? -1`):
 * releases the GIL, calls nvmlDeviceGetPowerState into a local _Pstates,
 * re-acquires the GIL, raises via check_status() on a non-success NVML
 * return code, and returns the P-state as an int.  -1 is the error sentinel
 * (ambiguous; callers must also check PyErr_Occurred()). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_state(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__Pstates __pyx_v_p_state;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":20991
 *     """
 *     cdef _Pstates p_state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerState(<Device>device, &p_state)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":20992
 *     cdef _Pstates p_state
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerState(<Device>device, &p_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>p_state
 */
        /* The internal-loading-error sentinel signals the shim failed to load
         * the NVML symbol; the error check briefly re-takes the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_p_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20992, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":20991
 *     """
 *     cdef _Pstates p_state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerState(<Device>device, &p_state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":20993
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerState(<Device>device, &p_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>p_state
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 20993, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":20994
 *         __status__ = nvmlDeviceGetPowerState(<Device>device, &p_state)
 *     check_status(__status__)
 *     return <int>p_state             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_p_state);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20979
 * 
 * 
 * cpdef int device_get_power_state(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Deprecated: Use ``nvmlDeviceGetPerformanceState``. This function exposes an incorrect generalization.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper: forward prototype, docstring, and PyMethodDef table entry
 * for `device_get_power_state`; the wrapper definition follows below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_165device_get_power_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_164device_get_power_state, "device_get_power_state(intptr_t device) -> int\n\nDeprecated: Use ``nvmlDeviceGetPerformanceState``. This function exposes an incorrect generalization.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the performance state reading.\n\n.. seealso:: `nvmlDeviceGetPowerState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_165device_get_power_state = {"device_get_power_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_165device_get_power_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_164device_get_power_state};
/* CPython entry point for `device_get_power_state`.
 * Unpacks the single `device` positional/keyword argument and forwards it to
 * the dispatch stub.
 * Fix: the keyword-count check read `unlikely(__pyx_kwds_len) < 0`, which
 * compares the 0/1 result of __builtin_expect against 0 and can never be
 * true, so a negative count (error) from __Pyx_NumKwargs_FASTCALL was
 * silently ignored; the comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_165device_get_power_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — always false; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20979, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20979, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_state", 0) < (0)) __PYX_ERR(0, 20979, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_state", 1, 1, 1, i); __PYX_ERR(0, 20979, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20979, __pyx_L3_error)
    }
    /* Convert the Python int handle to intptr_t; -1 plus a pending error
     * signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20979, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20979, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_164device_get_power_state(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for `device_get_power_state`: calls the cdef implementation
 * directly (skip_dispatch=1) and boxes the int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_164device_get_power_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is the implementation's `except? -1` sentinel; PyErr_Occurred()
   * distinguishes a genuine -1 from an error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_state(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20979, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":20997
 * 
 * 
 * cpdef object device_get_dynamic_pstates_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve performance monitor samples from the associated subdevice.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_167device_get_dynamic_pstates_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cdef implementation of `device_get_dynamic_pstates_info`:
 * constructs a GpuDynamicPstatesInfo Python wrapper, obtains its internal
 * nvmlGpuDynamicPstatesInfo_t pointer via _get_ptr(), lets NVML fill that
 * struct with the GIL released, raises via check_status() on failure, and
 * returns the wrapper object (owned reference, NULL on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_dynamic_pstates_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *__pyx_v_p_dynamic_pstates_info_py = 0;
  nvmlGpuDynamicPstatesInfo_t *__pyx_v_p_dynamic_pstates_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_dynamic_pstates_info", 0);

  /* "cuda/bindings/_nvml.pyx":21008
 *     .. seealso:: `nvmlDeviceGetDynamicPstatesInfo`
 *     """
 *     cdef GpuDynamicPstatesInfo p_dynamic_pstates_info_py = GpuDynamicPstatesInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuDynamicPstatesInfo_t *p_dynamic_pstates_info = <nvmlGpuDynamicPstatesInfo_t *><intptr_t>(p_dynamic_pstates_info_py._get_ptr())
 *     with nogil:
 */
  /* Vectorcall of the GpuDynamicPstatesInfo type with no arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21008, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_dynamic_pstates_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21009
 *     """
 *     cdef GpuDynamicPstatesInfo p_dynamic_pstates_info_py = GpuDynamicPstatesInfo()
 *     cdef nvmlGpuDynamicPstatesInfo_t *p_dynamic_pstates_info = <nvmlGpuDynamicPstatesInfo_t *><intptr_t>(p_dynamic_pstates_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetDynamicPstatesInfo(<Device>device, p_dynamic_pstates_info)
 */
  /* _get_ptr() returns the wrapper's internal struct address as intptr_t;
   * the pointer stays valid while the wrapper object is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)__pyx_v_p_dynamic_pstates_info_py->__pyx_vtab)->_get_ptr(__pyx_v_p_dynamic_pstates_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21009, __pyx_L1_error)
  __pyx_v_p_dynamic_pstates_info = ((nvmlGpuDynamicPstatesInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21010
 *     cdef GpuDynamicPstatesInfo p_dynamic_pstates_info_py = GpuDynamicPstatesInfo()
 *     cdef nvmlGpuDynamicPstatesInfo_t *p_dynamic_pstates_info = <nvmlGpuDynamicPstatesInfo_t *><intptr_t>(p_dynamic_pstates_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDynamicPstatesInfo(<Device>device, p_dynamic_pstates_info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21011
 *     cdef nvmlGpuDynamicPstatesInfo_t *p_dynamic_pstates_info = <nvmlGpuDynamicPstatesInfo_t *><intptr_t>(p_dynamic_pstates_info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetDynamicPstatesInfo(<Device>device, p_dynamic_pstates_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_dynamic_pstates_info_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDynamicPstatesInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_p_dynamic_pstates_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21011, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21010
 *     cdef GpuDynamicPstatesInfo p_dynamic_pstates_info_py = GpuDynamicPstatesInfo()
 *     cdef nvmlGpuDynamicPstatesInfo_t *p_dynamic_pstates_info = <nvmlGpuDynamicPstatesInfo_t *><intptr_t>(p_dynamic_pstates_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDynamicPstatesInfo(<Device>device, p_dynamic_pstates_info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21012
 *     with nogil:
 *         __status__ = nvmlDeviceGetDynamicPstatesInfo(<Device>device, p_dynamic_pstates_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_dynamic_pstates_info_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21012, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21013
 *         __status__ = nvmlDeviceGetDynamicPstatesInfo(<Device>device, p_dynamic_pstates_info)
 *     check_status(__status__)
 *     return p_dynamic_pstates_info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_dynamic_pstates_info_py);
  __pyx_r = ((PyObject *)__pyx_v_p_dynamic_pstates_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":20997
 * 
 * 
 * cpdef object device_get_dynamic_pstates_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve performance monitor samples from the associated subdevice.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_dynamic_pstates_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_dynamic_pstates_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: forward prototype, docstring, and PyMethodDef table entry
 * for `device_get_dynamic_pstates_info`; the wrapper definition follows below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_167device_get_dynamic_pstates_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_166device_get_dynamic_pstates_info, "device_get_dynamic_pstates_info(intptr_t device)\n\nRetrieve performance monitor samples from the associated subdevice.\n\nArgs:\n    device (intptr_t): .\n\nReturns:\n    nvmlGpuDynamicPstatesInfo_t: .\n\n.. seealso:: `nvmlDeviceGetDynamicPstatesInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_167device_get_dynamic_pstates_info = {"device_get_dynamic_pstates_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_167device_get_dynamic_pstates_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_166device_get_dynamic_pstates_info};
/* CPython entry point for `device_get_dynamic_pstates_info`.
 * Unpacks the single `device` positional/keyword argument and forwards it to
 * the dispatch stub.
 * Fix: the keyword-count check read `unlikely(__pyx_kwds_len) < 0`, which
 * compares the 0/1 result of __builtin_expect against 0 and can never be
 * true, so a negative count (error) from __Pyx_NumKwargs_FASTCALL was
 * silently ignored; the comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_167device_get_dynamic_pstates_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_dynamic_pstates_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — always false; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20997, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20997, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_dynamic_pstates_info", 0) < (0)) __PYX_ERR(0, 20997, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_dynamic_pstates_info", 1, 1, 1, i); __PYX_ERR(0, 20997, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20997, __pyx_L3_error)
    }
    /* Convert the Python int handle to intptr_t; -1 plus a pending error
     * signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 20997, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_dynamic_pstates_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20997, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_dynamic_pstates_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_166device_get_dynamic_pstates_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for `device_get_dynamic_pstates_info`: calls the cdef
 * implementation directly (skip_dispatch=1) and returns its PyObject*
 * result (NULL signals an error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_166device_get_dynamic_pstates_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_dynamic_pstates_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_dynamic_pstates_info(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20997, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_dynamic_pstates_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21016
 * 
 * 
 * cpdef int device_get_mem_clk_vf_offset(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the MemClk (Memory Clock) VF offset value.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_169device_get_mem_clk_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cdef implementation of `device_get_mem_clk_vf_offset` (cpdef `except? 0`):
 * releases the GIL, calls nvmlDeviceGetMemClkVfOffset into a local int,
 * re-acquires the GIL, raises via check_status() on a non-success NVML
 * return code, and returns the offset.  0 is the error sentinel (ambiguous;
 * callers must also check PyErr_Occurred()). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_vf_offset(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_offset;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21028
 *     """
 *     cdef int offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21029
 *     cdef int offset
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemClkVfOffset(<Device>device, &offset)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return offset
 */
        /* The internal-loading-error sentinel signals the shim failed to load
         * the NVML symbol; the error check briefly re-takes the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkVfOffset(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_offset)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21029, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21028
 *     """
 *     cdef int offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21030
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return offset
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21030, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21031
 *         __status__ = nvmlDeviceGetMemClkVfOffset(<Device>device, &offset)
 *     check_status(__status__)
 *     return offset             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_offset;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21016
 * 
 * 
 * cpdef int device_get_mem_clk_vf_offset(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the MemClk (Memory Clock) VF offset value.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mem_clk_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper: forward prototype, docstring, and PyMethodDef table entry
 * for `device_get_mem_clk_vf_offset`; the wrapper definition follows below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_169device_get_mem_clk_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_168device_get_mem_clk_vf_offset, "device_get_mem_clk_vf_offset(intptr_t device) -> int\n\nRetrieve the MemClk (Memory Clock) VF offset value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: The retrieved MemClk VF offset value.\n\n.. seealso:: `nvmlDeviceGetMemClkVfOffset`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_169device_get_mem_clk_vf_offset = {"device_get_mem_clk_vf_offset", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_169device_get_mem_clk_vf_offset, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_168device_get_mem_clk_vf_offset};
/* CPython entry point for `device_get_mem_clk_vf_offset`.
 * Unpacks the single `device` positional/keyword argument and forwards it to
 * the dispatch stub.
 * Fix: the keyword-count check read `unlikely(__pyx_kwds_len) < 0`, which
 * compares the 0/1 result of __builtin_expect against 0 and can never be
 * true, so a negative count (error) from __Pyx_NumKwargs_FASTCALL was
 * silently ignored; the comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_169device_get_mem_clk_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_mem_clk_vf_offset (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — always false; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21016, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21016, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_mem_clk_vf_offset", 0) < (0)) __PYX_ERR(0, 21016, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_mem_clk_vf_offset", 1, 1, 1, i); __PYX_ERR(0, 21016, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21016, __pyx_L3_error)
    }
    /* Convert the Python int handle to intptr_t; -1 plus a pending error
     * signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21016, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_mem_clk_vf_offset", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21016, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mem_clk_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_168device_get_mem_clk_vf_offset(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for `device_get_mem_clk_vf_offset`: calls the cdef
 * implementation directly (skip_dispatch=1) and boxes the int result into a
 * Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_168device_get_mem_clk_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mem_clk_vf_offset", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is the implementation's `except? 0` sentinel; PyErr_Occurred()
   * distinguishes a genuine 0 offset from an error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_vf_offset(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)0) && PyErr_Occurred())) __PYX_ERR(0, 21016, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mem_clk_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21034
 * 
 * 
 * cpdef tuple device_get_min_max_clock_of_p_state(intptr_t device, int type, int pstate):             # <<<<<<<<<<<<<<
 *     """Retrieve min and max clocks of some clock domain for a given PState.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_171device_get_min_max_clock_of_p_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_get_min_max_clock_of_p_state.
 * Releases the GIL, calls nvmlDeviceGetMinMaxClockOfPState through the
 * cy_nvml shim, raises via check_status() on a bad status, and returns
 * (min_clock_m_hz, max_clock_m_hz) as a 2-tuple.
 * Returns a new reference, or 0 with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_clock_of_p_state(intptr_t __pyx_v_device, int __pyx_v_type, int __pyx_v_pstate, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_min_clock_m_hz;  /* out-param filled by the NVML call */
  unsigned int __pyx_v_max_clock_m_hz;  /* out-param filled by the NVML call */
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_min_max_clock_of_p_state", 0);

  /* "cuda/bindings/_nvml.pyx":21052
 *     cdef unsigned int min_clock_m_hz
 *     cdef unsigned int max_clock_m_hz
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMinMaxClockOfPState(<Device>device, <_ClockType>type, <_Pstates>pstate, &min_clock_m_hz, &max_clock_m_hz)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();  /* release the GIL around the NVML call */
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21053
 *     cdef unsigned int max_clock_m_hz
 *     with nogil:
 *         __status__ = nvmlDeviceGetMinMaxClockOfPState(<Device>device, <_ClockType>type, <_Pstates>pstate, &min_clock_m_hz, &max_clock_m_hz)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (min_clock_m_hz, max_clock_m_hz)
 */
        /* The sentinel status signals that the NVML symbol failed to load;
         * the GIL is briefly reacquired just to probe the exception state. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxClockOfPState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__ClockType)__pyx_v_type), ((__pyx_t_4cuda_8bindings_5_nvml__Pstates)__pyx_v_pstate), (&__pyx_v_min_clock_m_hz), (&__pyx_v_max_clock_m_hz)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21053, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21052
 *     cdef unsigned int min_clock_m_hz
 *     cdef unsigned int max_clock_m_hz
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMinMaxClockOfPState(<Device>device, <_ClockType>type, <_Pstates>pstate, &min_clock_m_hz, &max_clock_m_hz)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* reacquire the GIL on both paths */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21054
 *     with nogil:
 *         __status__ = nvmlDeviceGetMinMaxClockOfPState(<Device>device, <_ClockType>type, <_Pstates>pstate, &min_clock_m_hz, &max_clock_m_hz)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (min_clock_m_hz, max_clock_m_hz)
 * 
 */
  /* check_status raises a Python exception (returning 1) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21054, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21055
 *         __status__ = nvmlDeviceGetMinMaxClockOfPState(<Device>device, <_ClockType>type, <_Pstates>pstate, &min_clock_m_hz, &max_clock_m_hz)
 *     check_status(__status__)
 *     return (min_clock_m_hz, max_clock_m_hz)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both out-params and steal the references into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_min_clock_m_hz); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_max_clock_m_hz); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21055, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21055, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21034
 * 
 * 
 * cpdef tuple device_get_min_max_clock_of_p_state(intptr_t device, int type, int pstate):             # <<<<<<<<<<<<<<
 *     """Retrieve min and max clocks of some clock domain for a given PState.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_min_max_clock_of_p_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_171device_get_min_max_clock_of_p_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-level docstring (embedded-signature first line, Cython convention). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_170device_get_min_max_clock_of_p_state, "device_get_min_max_clock_of_p_state(intptr_t device, int type, int pstate) -> tuple\n\nRetrieve min and max clocks of some clock domain for a given PState.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    type (ClockType): Clock domain.\n    pstate (Pstates): PState to query.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Reference in which to return min clock frequency.\n    - unsigned int: Reference in which to return max clock frequency.\n\n.. seealso:: `nvmlDeviceGetMinMaxClockOfPState`");
/* Method-table entry: FASTCALL wrapper taking positional + keyword args. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_171device_get_min_max_clock_of_p_state = {"device_get_min_max_clock_of_p_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_171device_get_min_max_clock_of_p_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_170device_get_min_max_clock_of_p_state};
/* Python wrapper for device_get_min_max_clock_of_p_state.
 * Unpacks (device, type, pstate) from positional/keyword arguments,
 * converts them to C values, and delegates to the __pyx_pf_ body.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_171device_get_min_max_clock_of_p_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_type;
  int __pyx_v_pstate;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_min_max_clock_of_p_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_type,&__pyx_mstate_global->__pyx_n_u_pstate,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields only 0 or 1, so `< 0` could
     * never be true and a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL went undetected. The comparison belongs
     * inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21034, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21034, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21034, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21034, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_min_max_clock_of_p_state", 0) < (0)) __PYX_ERR(0, 21034, __pyx_L3_error)
      /* All three parameters are required: any slot still empty is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_min_max_clock_of_p_state", 1, 3, 3, i); __PYX_ERR(0, 21034, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21034, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21034, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21034, __pyx_L3_error)
    }
    /* Convert to C values; -1 results are disambiguated via PyErr_Occurred. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21034, __pyx_L3_error)
    __pyx_v_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21034, __pyx_L3_error)
    __pyx_v_pstate = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_pstate == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21034, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_min_max_clock_of_p_state", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 21034, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_min_max_clock_of_p_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_170device_get_min_max_clock_of_p_state(__pyx_self, __pyx_v_device, __pyx_v_type, __pyx_v_pstate);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Body of the Python-visible def for device_get_min_max_clock_of_p_state:
 * forwards to the cpdef C implementation (skip_dispatch=1) and propagates
 * its tuple result. Returns a new reference, or NULL on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_170device_get_min_max_clock_of_p_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type, int __pyx_v_pstate) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_min_max_clock_of_p_state", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_clock_of_p_state(__pyx_v_device, __pyx_v_type, __pyx_v_pstate, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21034, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_min_max_clock_of_p_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21058
 * 
 * 
 * cpdef tuple device_get_gpc_clk_min_max_vf_offset(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the GPCCLK min max VF offset value.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_173device_get_gpc_clk_min_max_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_get_gpc_clk_min_max_vf_offset.
 * Releases the GIL, calls nvmlDeviceGetGpcClkMinMaxVfOffset through the
 * cy_nvml shim, raises via check_status() on a bad status, and returns
 * (min_offset, max_offset) as a 2-tuple.
 * Returns a new reference, or 0 with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_min_max_vf_offset(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_min_offset;  /* out-param filled by the NVML call */
  int __pyx_v_max_offset;  /* out-param filled by the NVML call */
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpc_clk_min_max_vf_offset", 0);

  /* "cuda/bindings/_nvml.pyx":21074
 *     cdef int min_offset
 *     cdef int max_offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpcClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();  /* release the GIL around the NVML call */
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21075
 *     cdef int max_offset
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpcClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (min_offset, max_offset)
 */
        /* The sentinel status signals that the NVML symbol failed to load;
         * the GIL is briefly reacquired just to probe the exception state. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkMinMaxVfOffset(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_min_offset), (&__pyx_v_max_offset)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21075, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21074
 *     cdef int min_offset
 *     cdef int max_offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpcClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* reacquire the GIL on both paths */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21076
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpcClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (min_offset, max_offset)
 * 
 */
  /* check_status raises a Python exception (returning 1) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21076, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21077
 *         __status__ = nvmlDeviceGetGpcClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)
 *     return (min_offset, max_offset)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both out-params and steal the references into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(__pyx_v_min_offset); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(__pyx_v_max_offset); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21077, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21077, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21077, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21058
 * 
 * 
 * cpdef tuple device_get_gpc_clk_min_max_vf_offset(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the GPCCLK min max VF offset value.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpc_clk_min_max_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_173device_get_gpc_clk_min_max_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-level docstring (embedded-signature first line, Cython convention). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_172device_get_gpc_clk_min_max_vf_offset, "device_get_gpc_clk_min_max_vf_offset(intptr_t device) -> tuple\n\nRetrieve the GPCCLK min max VF offset value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: The retrieved GPCCLK VF min offset value.\n    - int: The retrieved GPCCLK VF max offset value.\n\n.. seealso:: `nvmlDeviceGetGpcClkMinMaxVfOffset`");
/* Method-table entry: FASTCALL wrapper taking positional + keyword args. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_173device_get_gpc_clk_min_max_vf_offset = {"device_get_gpc_clk_min_max_vf_offset", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_173device_get_gpc_clk_min_max_vf_offset, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_172device_get_gpc_clk_min_max_vf_offset};
/* Python wrapper for device_get_gpc_clk_min_max_vf_offset.
 * Unpacks the single `device` argument (positional or keyword), converts
 * it to intptr_t, and delegates to the __pyx_pf_ body.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_173device_get_gpc_clk_min_max_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpc_clk_min_max_vf_offset (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields only 0 or 1, so `< 0` could
     * never be true and a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL went undetected. The comparison belongs
     * inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21058, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21058, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpc_clk_min_max_vf_offset", 0) < (0)) __PYX_ERR(0, 21058, __pyx_L3_error)
      /* `device` is required: an empty slot is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpc_clk_min_max_vf_offset", 1, 1, 1, i); __PYX_ERR(0, 21058, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21058, __pyx_L3_error)
    }
    /* Convert to C; -1 is disambiguated via PyErr_Occurred. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21058, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpc_clk_min_max_vf_offset", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21058, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpc_clk_min_max_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_172device_get_gpc_clk_min_max_vf_offset(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Body of the Python-visible def for device_get_gpc_clk_min_max_vf_offset:
 * forwards to the cpdef C implementation (skip_dispatch=1) and propagates
 * its tuple result. Returns a new reference, or NULL on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_172device_get_gpc_clk_min_max_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpc_clk_min_max_vf_offset", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_min_max_vf_offset(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpc_clk_min_max_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21080
 * 
 * 
 * cpdef tuple device_get_mem_clk_min_max_vf_offset(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the MemClk (Memory Clock) min max VF offset value.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_175device_get_mem_clk_min_max_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_get_mem_clk_min_max_vf_offset.
 * Releases the GIL, calls nvmlDeviceGetMemClkMinMaxVfOffset through the
 * cy_nvml shim, raises via check_status() on a bad status, and returns
 * (min_offset, max_offset) as a 2-tuple.
 * Returns a new reference, or 0 with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_min_max_vf_offset(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_min_offset;  /* out-param filled by the NVML call */
  int __pyx_v_max_offset;  /* out-param filled by the NVML call */
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mem_clk_min_max_vf_offset", 0);

  /* "cuda/bindings/_nvml.pyx":21096
 *     cdef int min_offset
 *     cdef int max_offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();  /* release the GIL around the NVML call */
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21097
 *     cdef int max_offset
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (min_offset, max_offset)
 */
        /* The sentinel status signals that the NVML symbol failed to load;
         * the GIL is briefly reacquired just to probe the exception state. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkMinMaxVfOffset(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_min_offset), (&__pyx_v_max_offset)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21097, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21096
 *     cdef int min_offset
 *     cdef int max_offset
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* reacquire the GIL on both paths */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21098
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (min_offset, max_offset)
 * 
 */
  /* check_status raises a Python exception (returning 1) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21098, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21099
 *         __status__ = nvmlDeviceGetMemClkMinMaxVfOffset(<Device>device, &min_offset, &max_offset)
 *     check_status(__status__)
 *     return (min_offset, max_offset)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both out-params and steal the references into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(__pyx_v_min_offset); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21099, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(__pyx_v_max_offset); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21099, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21099, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21099, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21099, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21080
 * 
 * 
 * cpdef tuple device_get_mem_clk_min_max_vf_offset(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the MemClk (Memory Clock) min max VF offset value.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mem_clk_min_max_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_175device_get_mem_clk_min_max_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-level docstring (embedded-signature first line, Cython convention). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_174device_get_mem_clk_min_max_vf_offset, "device_get_mem_clk_min_max_vf_offset(intptr_t device) -> tuple\n\nRetrieve the MemClk (Memory Clock) min max VF offset value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: The retrieved MemClk VF min offset value.\n    - int: The retrieved MemClk VF max offset value.\n\n.. seealso:: `nvmlDeviceGetMemClkMinMaxVfOffset`");
/* Method-table entry: FASTCALL wrapper taking positional + keyword args. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_175device_get_mem_clk_min_max_vf_offset = {"device_get_mem_clk_min_max_vf_offset", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_175device_get_mem_clk_min_max_vf_offset, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_174device_get_mem_clk_min_max_vf_offset};
/* Python wrapper for device_get_mem_clk_min_max_vf_offset.
 * Unpacks the single `device` argument (positional or keyword), converts
 * it to intptr_t, and delegates to the __pyx_pf_ body.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_175device_get_mem_clk_min_max_vf_offset(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_mem_clk_min_max_vf_offset (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`. unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields only 0 or 1, so `< 0` could
     * never be true and a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL went undetected. The comparison belongs
     * inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21080, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21080, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_mem_clk_min_max_vf_offset", 0) < (0)) __PYX_ERR(0, 21080, __pyx_L3_error)
      /* `device` is required: an empty slot is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_mem_clk_min_max_vf_offset", 1, 1, 1, i); __PYX_ERR(0, 21080, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21080, __pyx_L3_error)
    }
    /* Convert to C; -1 is disambiguated via PyErr_Occurred. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21080, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_mem_clk_min_max_vf_offset", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21080, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mem_clk_min_max_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_174device_get_mem_clk_min_max_vf_offset(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch shim for the cpdef function
 * `device_get_mem_clk_min_max_vf_offset` (Cython-generated; the real source
 * is cuda/bindings/_nvml.pyx:21080 — regenerate rather than hand-edit).
 * Receives the already-converted `device` handle and forwards it to the
 * C-level implementation with __pyx_skip_dispatch=1, meaning no Python
 * override lookup is performed.  Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_174device_get_mem_clk_min_max_vf_offset(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mem_clk_min_max_vf_offset", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C entry point; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_min_max_vf_offset(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21080, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  /* Append this frame to the in-flight exception's traceback. */
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mem_clk_min_max_vf_offset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21102
 * 
 * 
 * cpdef object device_get_clock_offsets(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve min, max and current clock offset of some clock domain for a given PState.
 * 
 */

/* Cython-generated code — regenerate from cuda/bindings/_nvml.pyx instead of
 * editing by hand.  Forward declaration of the Python wrapper; the fastcall
 * vs. tuple/dict calling convention is chosen at preprocessing time. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_177device_get_clock_offsets(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_get_clock_offsets: allocates a ClockOffset_v1
 * Python wrapper object, obtains the address of its embedded
 * nvmlClockOffset_t, stamps the NVML versioned-struct header
 * (sizeof | (1 << 24), i.e. API version 1), calls
 * nvmlDeviceGetClockOffsets with the GIL released, raises via check_status
 * on failure, and returns the populated wrapper.  Returns NULL with an
 * exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_clock_offsets(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *__pyx_v_info_py = 0;
  nvmlClockOffset_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_clock_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":21113
 *     .. seealso:: `nvmlDeviceGetClockOffsets`
 *     """
 *     cdef ClockOffset_v1 info_py = ClockOffset_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlClockOffset_t *info = <nvmlClockOffset_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24)
 */
  /* Instantiate ClockOffset_v1() via the vectorcall protocol (zero args). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21113, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21114
 *     """
 *     cdef ClockOffset_v1 info_py = ClockOffset_v1()
 *     cdef nvmlClockOffset_t *info = <nvmlClockOffset_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() returns the struct's address as an integer; cast back to a
   * typed pointer.  The wrapper object owns the storage, so `info` is valid
   * for as long as `info_py` is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClockOffset_v1 *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21114, __pyx_L1_error)
  __pyx_v_info = ((nvmlClockOffset_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21115
 *     cdef ClockOffset_v1 info_py = ClockOffset_v1()
 *     cdef nvmlClockOffset_t *info = <nvmlClockOffset_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetClockOffsets(<Device>device, info)
 */
  /* NVML versioned-API header: low bits = struct size, bits 24+ = version. */
  __pyx_v_info->version = ((sizeof(nvmlClockOffset_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":21116
 *     cdef nvmlClockOffset_t *info = <nvmlClockOffset_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClockOffsets(<Device>device, info)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21117
 *     info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetClockOffsets(<Device>device, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR indicates the
         * NVML symbol could not be loaded; in that case a Python exception
         * was set and we must bail through the GIL-reacquiring error path. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockOffsets(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21117, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21116
 *     cdef nvmlClockOffset_t *info = <nvmlClockOffset_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClockOffsets(<Device>device, info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21118
 *     with nogil:
 *         __status__ = nvmlDeviceGetClockOffsets(<Device>device, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
 */
  /* check_status raises the mapped Python exception (returns 1) on any
   * non-success NVML status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21118, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21119
 *         __status__ = nvmlDeviceGetClockOffsets(<Device>device, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21102
 * 
 * 
 * cpdef object device_get_clock_offsets(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve min, max and current clock offset of some clock domain for a given PState.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the argument-parsing wrapper (definition follows). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_177device_get_clock_offsets(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* __doc__ string exposed on the Python-level device_get_clock_offsets. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_176device_get_clock_offsets, "device_get_clock_offsets(intptr_t device)\n\nRetrieve min, max and current clock offset of some clock domain for a given PState.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlClockOffset_v1_t: Structure specifying the clock type (input) and the pstate (input) retrieved clock offset value (output), min clock offset (output) and max clock offset (output).\n\n.. seealso:: `nvmlDeviceGetClockOffsets`");
/* Method-table entry registering the wrapper under the fastcall+keywords
 * calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_177device_get_clock_offsets = {"device_get_clock_offsets", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_177device_get_clock_offsets, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_176device_get_clock_offsets};
/* Argument-parsing Python wrapper for device_get_clock_offsets (Cython-
 * generated; the durable fix belongs in the code generator / .pyx).
 * Accepts exactly one argument, `device`, positionally or by keyword,
 * converts it to intptr_t, and dispatches to the pf implementation.
 * Returns a new reference or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check below was generated as
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With the GCC/Clang definition
 * `unlikely(x) == __builtin_expect(!!(x), 0)` the macro's value is 0 or 1,
 * so the `< 0` comparison was always false and a negative (error) return
 * from __Pyx_NumKwargs_FASTCALL was silently ignored.  Corrected to
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_177device_get_clock_offsets(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_clock_offsets (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): moved `< 0` inside unlikely() so the check can fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21102, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keywords present — collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21102, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_clock_offsets", 0) < (0)) __PYX_ERR(0, 21102, __pyx_L3_error)
      /* Verify every required argument was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_clock_offsets", 1, 1, 1, i); __PYX_ERR(0, 21102, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21102, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21102, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_clock_offsets", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21102, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_176device_get_clock_offsets(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch shim for cpdef `device_get_clock_offsets`
 * (Cython-generated from cuda/bindings/_nvml.pyx:21102).  Forwards the
 * converted `device` handle to the C-level implementation with
 * __pyx_skip_dispatch=1 (no Python override lookup).  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_176device_get_clock_offsets(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_clock_offsets", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C entry point; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_clock_offsets(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clock_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21122
 * 
 * 
 * cpdef device_set_clock_offsets(intptr_t device, intptr_t info):             # <<<<<<<<<<<<<<
 *     """Control current clock offset of some clock domain for a given PState.
 * 
 */

/* Cython-generated code — regenerate from cuda/bindings/_nvml.pyx instead of
 * editing by hand.  Forward declaration of the Python wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_179device_set_clock_offsets(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_clock_offsets: treats `info` as the
 * address of a caller-prepared nvmlClockOffset_t (the caller is responsible
 * for its validity and version field), calls nvmlDeviceSetClockOffsets with
 * the GIL released, and raises via check_status on failure.  Returns None
 * on success, NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_clock_offsets(intptr_t __pyx_v_device, intptr_t __pyx_v_info, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_clock_offsets", 0);

  /* "cuda/bindings/_nvml.pyx":21131
 *     .. seealso:: `nvmlDeviceSetClockOffsets`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetClockOffsets(<Device>device, <nvmlClockOffset_t*>info)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21132
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetClockOffsets(<Device>device, <nvmlClockOffset_t*>info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR indicates the
         * NVML symbol could not be loaded; the error path reacquires the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetClockOffsets(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlClockOffset_t *)__pyx_v_info)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21132, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21131
 *     .. seealso:: `nvmlDeviceSetClockOffsets`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetClockOffsets(<Device>device, <nvmlClockOffset_t*>info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21133
 *     with nogil:
 *         __status__ = nvmlDeviceSetClockOffsets(<Device>device, <nvmlClockOffset_t*>info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status raises the mapped Python exception (returns 1) on any
   * non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21133, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21122
 * 
 * 
 * cpdef device_set_clock_offsets(intptr_t device, intptr_t info):             # <<<<<<<<<<<<<<
 *     """Control current clock offset of some clock domain for a given PState.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_clock_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the argument-parsing wrapper (definition follows). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_179device_set_clock_offsets(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* __doc__ string exposed on the Python-level device_set_clock_offsets. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_178device_set_clock_offsets, "device_set_clock_offsets(intptr_t device, intptr_t info)\n\nControl current clock offset of some clock domain for a given PState.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    info (intptr_t): Structure specifying the clock type (input), the pstate (input) and clock offset value (input).\n\n.. seealso:: `nvmlDeviceSetClockOffsets`");
/* Method-table entry registering the wrapper under the fastcall+keywords
 * calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_179device_set_clock_offsets = {"device_set_clock_offsets", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_179device_set_clock_offsets, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_178device_set_clock_offsets};
/* Argument-parsing Python wrapper for device_set_clock_offsets (Cython-
 * generated; the durable fix belongs in the code generator / .pyx).
 * Accepts exactly two arguments, `device` and `info`, positionally or by
 * keyword, converts both to intptr_t, and dispatches to the pf
 * implementation.  Returns a new reference or NULL with an exception set.
 *
 * FIX(review): the keyword-count error check below was generated as
 * `if (unlikely(__pyx_kwds_len) < 0)`.  With the GCC/Clang definition
 * `unlikely(x) == __builtin_expect(!!(x), 0)` the macro's value is 0 or 1,
 * so the `< 0` comparison was always false and a negative (error) return
 * from __Pyx_NumKwargs_FASTCALL was silently ignored.  Corrected to
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_179device_set_clock_offsets(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_info;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_clock_offsets (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_info,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): moved `< 0` inside unlikely() so the check can fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21122, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keywords present — collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21122, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_clock_offsets", 0) < (0)) __PYX_ERR(0, 21122, __pyx_L3_error)
      /* Verify every required argument was supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_clock_offsets", 1, 2, 2, i); __PYX_ERR(0, 21122, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21122, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21122, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21122, __pyx_L3_error)
    __pyx_v_info = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21122, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_clock_offsets", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 21122, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_clock_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_178device_set_clock_offsets(__pyx_self, __pyx_v_device, __pyx_v_info);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch shim for cpdef `device_set_clock_offsets`
 * (Cython-generated from cuda/bindings/_nvml.pyx:21122).  Forwards the
 * converted `device` and `info` values to the C-level implementation with
 * __pyx_skip_dispatch=1 (no Python override lookup).  Returns a new
 * reference (None on success), or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_178device_set_clock_offsets(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_info) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_clock_offsets", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C entry point; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_clock_offsets(__pyx_v_device, __pyx_v_info, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_clock_offsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21136
 * 
 * 
 * cpdef object device_get_performance_modes(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves a performance mode string with all the performance modes defined for this device along with their associated GPU Clock and Memory Clock values. Not all tokens will be reported on all GPUs, and additional tokens may be added in the future. For backwards compatibility we still provide nvclock and memclock; those are the same as nvclockmin and memclockmin.
 * 
 */

/* Cython-generated code — regenerate from cuda/bindings/_nvml.pyx instead of
 * editing by hand.  Forward declaration of the Python wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_181device_get_performance_modes(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_get_performance_modes: allocates a
 * DevicePerfModes_v1 Python wrapper, stamps the NVML versioned-struct
 * header (sizeof | (1 << 24), i.e. API version 1), calls
 * nvmlDeviceGetPerformanceModes with the GIL released, raises via
 * check_status on failure, and returns the populated wrapper.  Returns
 * NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_performance_modes(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *__pyx_v_perf_modes_py = 0;
  nvmlDevicePerfModes_t *__pyx_v_perf_modes;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_performance_modes", 0);

  /* "cuda/bindings/_nvml.pyx":21147
 *     .. seealso:: `nvmlDeviceGetPerformanceModes`
 *     """
 *     cdef DevicePerfModes_v1 perf_modes_py = DevicePerfModes_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlDevicePerfModes_t *perf_modes = <nvmlDevicePerfModes_t *><intptr_t>(perf_modes_py._get_ptr())
 *     perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24)
 */
  /* Instantiate DevicePerfModes_v1() via the vectorcall protocol (zero args). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21147, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_perf_modes_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21148
 *     """
 *     cdef DevicePerfModes_v1 perf_modes_py = DevicePerfModes_v1()
 *     cdef nvmlDevicePerfModes_t *perf_modes = <nvmlDevicePerfModes_t *><intptr_t>(perf_modes_py._get_ptr())             # <<<<<<<<<<<<<<
 *     perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() returns the struct's address as an integer; cast back to a
   * typed pointer.  The wrapper object owns the storage, so `perf_modes`
   * is valid for as long as `perf_modes_py` is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)__pyx_v_perf_modes_py->__pyx_vtab)->_get_ptr(__pyx_v_perf_modes_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21148, __pyx_L1_error)
  __pyx_v_perf_modes = ((nvmlDevicePerfModes_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21149
 *     cdef DevicePerfModes_v1 perf_modes_py = DevicePerfModes_v1()
 *     cdef nvmlDevicePerfModes_t *perf_modes = <nvmlDevicePerfModes_t *><intptr_t>(perf_modes_py._get_ptr())
 *     perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPerformanceModes(<Device>device, perf_modes)
 */
  /* NVML versioned-API header: low bits = struct size, bits 24+ = version. */
  __pyx_v_perf_modes->version = ((sizeof(nvmlDevicePerfModes_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":21150
 *     cdef nvmlDevicePerfModes_t *perf_modes = <nvmlDevicePerfModes_t *><intptr_t>(perf_modes_py._get_ptr())
 *     perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPerformanceModes(<Device>device, perf_modes)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21151
 *     perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetPerformanceModes(<Device>device, perf_modes)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return perf_modes_py
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR indicates the
         * NVML symbol could not be loaded; the error path reacquires the GIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceModes(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_perf_modes); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21151, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21150
 *     cdef nvmlDevicePerfModes_t *perf_modes = <nvmlDevicePerfModes_t *><intptr_t>(perf_modes_py._get_ptr())
 *     perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPerformanceModes(<Device>device, perf_modes)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21152
 *     with nogil:
 *         __status__ = nvmlDeviceGetPerformanceModes(<Device>device, perf_modes)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return perf_modes_py
 * 
 */
  /* check_status raises the mapped Python exception (returns 1) on any
   * non-success NVML status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21152, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21153
 *         __status__ = nvmlDeviceGetPerformanceModes(<Device>device, perf_modes)
 *     check_status(__status__)
 *     return perf_modes_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_perf_modes_py);
  __pyx_r = ((PyObject *)__pyx_v_perf_modes_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21136
 * 
 * 
 * cpdef object device_get_performance_modes(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves a performance mode string with all the performance modes defined for this device along with their associated GPU Clock and Memory Clock values. Not all tokens will be reported on all GPUs, and additional tokens may be added in the future. For backwards compatibility we still provide nvclock and memclock; those are the same as nvclockmin and memclockmin.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_performance_modes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_perf_modes_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the argument-parsing wrapper (definition follows). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_181device_get_performance_modes(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* __doc__ string exposed on the Python-level device_get_performance_modes. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_180device_get_performance_modes, "device_get_performance_modes(intptr_t device)\n\nRetrieves a performance mode string with all the performance modes defined for this device along with their associated GPU Clock and Memory Clock values. Not all tokens will be reported on all GPUs, and additional tokens may be added in the future. For backwards compatibility we still provide nvclock and memclock; those are the same as nvclockmin and memclockmin.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlDevicePerfModes_v1_t: Reference in which to return the performance level string.\n\n.. seealso:: `nvmlDeviceGetPerformanceModes`");
/* Method-table entry registering the wrapper under the fastcall+keywords
 * calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_181device_get_performance_modes = {"device_get_performance_modes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_181device_get_performance_modes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_180device_get_performance_modes};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_181device_get_performance_modes(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_performance_modes (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21136, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21136, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_performance_modes", 0) < (0)) __PYX_ERR(0, 21136, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_performance_modes", 1, 1, 1, i); __PYX_ERR(0, 21136, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21136, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21136, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_performance_modes", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21136, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_performance_modes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_180device_get_performance_modes(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatch thunk for `device_get_performance_modes`.
 * Bridges the Python wrapper to the C-level implementation and boxes the
 * result. Returns a new reference or NULL on error.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_180device_get_performance_modes(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_performance_modes", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Second argument 1 = skip-dispatch: call the C implementation directly
     without re-entering the Python-level dispatch for this cpdef. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_performance_modes(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_performance_modes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21156
 * 
 * 
 * cpdef object device_get_current_clock_freqs(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves a string with the associated current GPU Clock and Memory Clock values.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_183device_get_current_clock_freqs(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef `device_get_current_clock_freqs`.
 * Allocates a DeviceCurrentClockFreqs_v1 Python wrapper object, fills the
 * embedded nvmlDeviceCurrentClockFreqs_t via nvmlDeviceGetCurrentClockFreqs
 * (with the GIL released), and returns the wrapper as a new reference.
 * Returns NULL with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_current_clock_freqs(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *__pyx_v_current_clock_freqs_py = 0;
  nvmlDeviceCurrentClockFreqs_t *__pyx_v_current_clock_freqs;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_current_clock_freqs", 0);

  /* "cuda/bindings/_nvml.pyx":21167
 *     .. seealso:: `nvmlDeviceGetCurrentClockFreqs`
 *     """
 *     cdef DeviceCurrentClockFreqs_v1 current_clock_freqs_py = DeviceCurrentClockFreqs_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceCurrentClockFreqs_t *current_clock_freqs = <nvmlDeviceCurrentClockFreqs_t *><intptr_t>(current_clock_freqs_py._get_ptr())
 *     current_clock_freqs.version = sizeof(nvmlDeviceCurrentClockFreqs_v1_t) | (1 << 24)
 */
  /* Instantiate the result wrapper: DeviceCurrentClockFreqs_v1() with no
     arguments, via the vectorcall fast path. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21167, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_current_clock_freqs_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21168
 *     """
 *     cdef DeviceCurrentClockFreqs_v1 current_clock_freqs_py = DeviceCurrentClockFreqs_v1()
 *     cdef nvmlDeviceCurrentClockFreqs_t *current_clock_freqs = <nvmlDeviceCurrentClockFreqs_t *><intptr_t>(current_clock_freqs_py._get_ptr())             # <<<<<<<<<<<<<<
 *     current_clock_freqs.version = sizeof(nvmlDeviceCurrentClockFreqs_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() yields the address of the C struct owned by the wrapper;
     the pointer therefore stays valid as long as current_clock_freqs_py
     is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)__pyx_v_current_clock_freqs_py->__pyx_vtab)->_get_ptr(__pyx_v_current_clock_freqs_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21168, __pyx_L1_error)
  __pyx_v_current_clock_freqs = ((nvmlDeviceCurrentClockFreqs_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21169
 *     cdef DeviceCurrentClockFreqs_v1 current_clock_freqs_py = DeviceCurrentClockFreqs_v1()
 *     cdef nvmlDeviceCurrentClockFreqs_t *current_clock_freqs = <nvmlDeviceCurrentClockFreqs_t *><intptr_t>(current_clock_freqs_py._get_ptr())
 *     current_clock_freqs.version = sizeof(nvmlDeviceCurrentClockFreqs_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrentClockFreqs(<Device>device, current_clock_freqs)
 */
  /* NVML versioned-struct convention: low 24 bits = struct size,
     high byte = API version (1). 0x1000000 == 1 << 24. */
  __pyx_v_current_clock_freqs->version = ((sizeof(nvmlDeviceCurrentClockFreqs_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":21170
 *     cdef nvmlDeviceCurrentClockFreqs_t *current_clock_freqs = <nvmlDeviceCurrentClockFreqs_t *><intptr_t>(current_clock_freqs_py._get_ptr())
 *     current_clock_freqs.version = sizeof(nvmlDeviceCurrentClockFreqs_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrentClockFreqs(<Device>device, current_clock_freqs)
 *     check_status(__status__)
 */
  /* Release the GIL for the duration of the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21171
 *     current_clock_freqs.version = sizeof(nvmlDeviceCurrentClockFreqs_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrentClockFreqs(<Device>device, current_clock_freqs)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return current_clock_freqs_py
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals a
           library-loading failure raised as a Python exception. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClockFreqs(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_current_clock_freqs); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21171, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21170
 *     cdef nvmlDeviceCurrentClockFreqs_t *current_clock_freqs = <nvmlDeviceCurrentClockFreqs_t *><intptr_t>(current_clock_freqs_py._get_ptr())
 *     current_clock_freqs.version = sizeof(nvmlDeviceCurrentClockFreqs_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCurrentClockFreqs(<Device>device, current_clock_freqs)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21172
 *     with nogil:
 *         __status__ = nvmlDeviceGetCurrentClockFreqs(<Device>device, current_clock_freqs)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return current_clock_freqs_py
 * 
 */
  /* check_status raises a Python exception (returns 1) for non-success
     NVML status codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21172, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21173
 *         __status__ = nvmlDeviceGetCurrentClockFreqs(<Device>device, current_clock_freqs)
 *     check_status(__status__)
 *     return current_clock_freqs_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_current_clock_freqs_py);
  __pyx_r = ((PyObject *)__pyx_v_current_clock_freqs_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21156
 * 
 * 
 * cpdef object device_get_current_clock_freqs(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves a string with the associated current GPU Clock and Memory Clock values.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_current_clock_freqs", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_current_clock_freqs_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython-visible wrapper for `device_get_current_clock_freqs(intptr_t device)`.
 * Parses exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t and forwards to the cpdef thunk
 * __pyx_pf_4cuda_8bindings_5_nvml_182device_get_current_clock_freqs.
 * Returns a new reference, or NULL with a Python exception set.
 *
 * NOTE(review): this file is Cython-generated; the only functional change in
 * this block is the parenthesization fix on the __pyx_kwds_len error check
 * (marked BUGFIX below).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_183device_get_current_clock_freqs(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_182device_get_current_clock_freqs, "device_get_current_clock_freqs(intptr_t device)\n\nRetrieves a string with the associated current GPU Clock and Memory Clock values.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlDeviceCurrentClockFreqs_v1_t: Reference in which to return the performance level string.\n\n.. seealso:: `nvmlDeviceGetCurrentClockFreqs`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_183device_get_current_clock_freqs = {"device_get_current_clock_freqs", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_183device_get_current_clock_freqs, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_182device_get_current_clock_freqs};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_183device_get_current_clock_freqs(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_current_clock_freqs (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` belongs inside unlikely(). On GCC/Clang unlikely(x)
       expands to __builtin_expect(!!(x), 0), so the previous
       `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value against 0 and the
       error branch was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21156, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect any positional value,
         then let __Pyx_ParseKeywords fill the rest. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21156, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_current_clock_freqs", 0) < (0)) __PYX_ERR(0, 21156, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_current_clock_freqs", 1, 1, 1, i); __PYX_ERR(0, 21156, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21156, __pyx_L3_error)
    }
    /* NOTE(review): conversion goes through PyLong_AsSsize_t; this assumes
       Py_ssize_t and intptr_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21156, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_current_clock_freqs", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21156, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected references and re-raise. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_current_clock_freqs", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_182device_get_current_clock_freqs(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatch thunk for `device_get_current_clock_freqs`.
 * Bridges the Python wrapper to the C-level implementation and boxes the
 * result. Returns a new reference or NULL on error.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_182device_get_current_clock_freqs(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_current_clock_freqs", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Second argument 1 = skip-dispatch: call the C implementation directly
     without re-entering the Python-level dispatch for this cpdef. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_current_clock_freqs(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_current_clock_freqs", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21176
 * 
 * 
 * cpdef unsigned int device_get_power_management_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the power management limit associated with this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_185device_get_power_management_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef
 * `device_get_power_management_limit(intptr_t device) except? 0`.
 * Queries nvmlDeviceGetPowerManagementLimit with the GIL released and
 * returns the limit in milliwatts. On error it returns 0 with a Python
 * exception set; because 0 can also be a legitimate value (`except? 0`),
 * callers must additionally check PyErr_Occurred().
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_limit;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21188
 *     """
 *     cdef unsigned int limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerManagementLimit(<Device>device, &limit)
 *     check_status(__status__)
 */
  /* Release the GIL for the duration of the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21189
 *     cdef unsigned int limit
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerManagementLimit(<Device>device, &limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return limit
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals a
           library-loading failure raised as a Python exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimit(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_limit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21189, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21188
 *     """
 *     cdef unsigned int limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerManagementLimit(<Device>device, &limit)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21190
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerManagementLimit(<Device>device, &limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return limit
 * 
 */
  /* check_status raises a Python exception (returns 1) for non-success
     NVML status codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21190, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21191
 *         __status__ = nvmlDeviceGetPowerManagementLimit(<Device>device, &limit)
 *     check_status(__status__)
 *     return limit             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_limit;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21176
 * 
 * 
 * cpdef unsigned int device_get_power_management_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the power management limit associated with this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython-visible wrapper for
 * `device_get_power_management_limit(intptr_t device)`.
 * Parses exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t and forwards to the cpdef thunk
 * __pyx_pf_4cuda_8bindings_5_nvml_184device_get_power_management_limit.
 * Returns a new reference, or NULL with a Python exception set.
 *
 * NOTE(review): this file is Cython-generated; the only functional change in
 * this block is the parenthesization fix on the __pyx_kwds_len error check
 * (marked BUGFIX below).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_185device_get_power_management_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_184device_get_power_management_limit, "device_get_power_management_limit(intptr_t device) -> unsigned int\n\nRetrieves the power management limit associated with this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the power management limit in milliwatts.\n\n.. seealso:: `nvmlDeviceGetPowerManagementLimit`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_185device_get_power_management_limit = {"device_get_power_management_limit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_185device_get_power_management_limit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_184device_get_power_management_limit};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_185device_get_power_management_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_management_limit (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` belongs inside unlikely(). On GCC/Clang unlikely(x)
       expands to __builtin_expect(!!(x), 0), so the previous
       `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value against 0 and the
       error branch was unreachable. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21176, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — collect any positional value,
         then let __Pyx_ParseKeywords fill the rest. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21176, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_management_limit", 0) < (0)) __PYX_ERR(0, 21176, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_management_limit", 1, 1, 1, i); __PYX_ERR(0, 21176, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21176, __pyx_L3_error)
    }
    /* NOTE(review): conversion goes through PyLong_AsSsize_t; this assumes
       Py_ssize_t and intptr_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21176, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_management_limit", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21176, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any collected references and re-raise. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_184device_get_power_management_limit(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatch thunk for `device_get_power_management_limit`.
 * Calls the C implementation and boxes the unsigned int result as a Python
 * int. Returns a new reference or NULL on error.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_184device_get_power_management_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_management_limit", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` semantics: a return of 0 is ambiguous, so an error is only
     signalled when 0 is returned AND an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21176, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21194
 * 
 * 
 * cpdef tuple device_get_power_management_limit_constraints(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about possible values of power management limits on this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_187device_get_power_management_limit_constraints(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef
 * `device_get_power_management_limit_constraints(intptr_t device)`.
 * Queries nvmlDeviceGetPowerManagementLimitConstraints with the GIL
 * released and returns a 2-tuple (min_limit, max_limit), both in
 * milliwatts, as a new reference. Returns NULL with an exception set on
 * failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit_constraints(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_min_limit;
  unsigned int __pyx_v_max_limit;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_management_limit_constraints", 0);

  /* "cuda/bindings/_nvml.pyx":21210
 *     cdef unsigned int min_limit
 *     cdef unsigned int max_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerManagementLimitConstraints(<Device>device, &min_limit, &max_limit)
 *     check_status(__status__)
 */
  /* Release the GIL for the duration of the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21211
 *     cdef unsigned int max_limit
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerManagementLimitConstraints(<Device>device, &min_limit, &max_limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (min_limit, max_limit)
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals a
           library-loading failure raised as a Python exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimitConstraints(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_min_limit), (&__pyx_v_max_limit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21211, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21210
 *     cdef unsigned int min_limit
 *     cdef unsigned int max_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerManagementLimitConstraints(<Device>device, &min_limit, &max_limit)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21212
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerManagementLimitConstraints(<Device>device, &min_limit, &max_limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (min_limit, max_limit)
 * 
 */
  /* check_status raises a Python exception (returns 1) for non-success
     NVML status codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21212, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21213
 *         __status__ = nvmlDeviceGetPowerManagementLimitConstraints(<Device>device, &min_limit, &max_limit)
 *     check_status(__status__)
 *     return (min_limit, max_limit)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both out-parameters and build the (min_limit, max_limit) tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_min_limit); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_max_limit); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21213, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21213, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21194
 * 
 * 
 * cpdef tuple device_get_power_management_limit_constraints(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about possible values of power management limits on this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_limit_constraints", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_187device_get_power_management_limit_constraints(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_186device_get_power_management_limit_constraints, "device_get_power_management_limit_constraints(intptr_t device) -> tuple\n\nRetrieves information about possible values of power management limits on this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Reference in which to return the minimum power management limit in milliwatts.\n    - unsigned int: Reference in which to return the maximum power management limit in milliwatts.\n\n.. seealso:: `nvmlDeviceGetPowerManagementLimitConstraints`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_187device_get_power_management_limit_constraints = {"device_get_power_management_limit_constraints", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_187device_get_power_management_limit_constraints, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_186device_get_power_management_limit_constraints};
/*
 * Python-visible wrapper for device_get_power_management_limit_constraints():
 * unpacks the single 'device' argument (positional or keyword), converts it
 * to intptr_t, and delegates to the implementation function
 * __pyx_pf_..._186device_get_power_management_limit_constraints.
 * Returns NULL with an exception set on argument errors.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_187device_get_power_management_limit_constraints(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_management_limit_constraints (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): comparison must be inside unlikely(); the previous form
       `unlikely(__pyx_kwds_len) < 0` evaluates to (0 or 1) < 0 and can
       never fire, silently ignoring a failed keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21194, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21194, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_management_limit_constraints", 0) < (0)) __PYX_ERR(0, 21194, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_management_limit_constraints", 1, 1, 1, i); __PYX_ERR(0, 21194, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21194, __pyx_L3_error)
    }
    /* Device handles cross the Python boundary as plain integers. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21194, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_management_limit_constraints", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21194, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_limit_constraints", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_186device_get_power_management_limit_constraints(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation entry for device_get_power_management_limit_constraints:
   delegates to the C-level cpdef body and hands back the resulting tuple. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_186device_get_power_management_limit_constraints(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_callres = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_management_limit_constraints", 0);
  /* Return slot is still NULL here; the XDECREF is a harmless formality. */
  __Pyx_XDECREF(__pyx_r);
  /* Invoke the C body directly (second arg = 1 skips Python dispatch). */
  __pyx_callres = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit_constraints(__pyx_v_device, 1);
  if (unlikely(!__pyx_callres)) __PYX_ERR(0, 21194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_callres);
  /* Transfer ownership of the tuple into the return slot. */
  __pyx_r = __pyx_callres;
  __pyx_callres = 0;
  goto __pyx_L0;

  /* error path: drop any partial result and record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_callres);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_limit_constraints", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21216
 * 
 * 
 * cpdef unsigned int device_get_power_management_default_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves default power management limit on this device, in milliwatts. Default power management limit is a power management limit that the device boots with.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_189device_get_power_management_default_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C body of cpdef device_get_power_management_default_limit(): releases the
 * GIL, calls nvmlDeviceGetPowerManagementDefaultLimit for `device`, re-takes
 * the GIL, raises via check_status() on a non-success status, and otherwise
 * returns the default limit in milliwatts.  Declared `except? 0` in the .pyx:
 * a return of 0 is ambiguous, so C callers must also check PyErr_Occurred().
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_default_limit(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_default_limit;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21228
 *     """
 *     cdef unsigned int default_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerManagementDefaultLimit(<Device>device, &default_limit)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; the goto-based "finally" below
     guarantees PyEval_RestoreThread runs on both the normal and error paths. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21229
 *     cdef unsigned int default_limit
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerManagementDefaultLimit(<Device>device, &default_limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return default_limit
 */
        /* The internal-loading-error sentinel signals that the NVML symbol
           could not be loaded; an exception was already set in that case. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementDefaultLimit(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_default_limit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21229, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21228
 *     """
 *     cdef unsigned int default_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerManagementDefaultLimit(<Device>device, &default_limit)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21230
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerManagementDefaultLimit(<Device>device, &default_limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return default_limit
 * 
 */
  /* check_status raises a Python exception for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21230, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21231
 *         __status__ = nvmlDeviceGetPowerManagementDefaultLimit(<Device>device, &default_limit)
 *     check_status(__status__)
 *     return default_limit             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_default_limit;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21216
 * 
 * 
 * cpdef unsigned int device_get_power_management_default_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves default power management limit on this device, in milliwatts. Default power management limit is a power management limit that the device boots with.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_default_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_189device_get_power_management_default_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_188device_get_power_management_default_limit, "device_get_power_management_default_limit(intptr_t device) -> unsigned int\n\nRetrieves default power management limit on this device, in milliwatts. Default power management limit is a power management limit that the device boots with.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the default power management limit in milliwatts.\n\n.. seealso:: `nvmlDeviceGetPowerManagementDefaultLimit`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_189device_get_power_management_default_limit = {"device_get_power_management_default_limit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_189device_get_power_management_default_limit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_188device_get_power_management_default_limit};
/*
 * Python-visible wrapper for device_get_power_management_default_limit():
 * unpacks the single 'device' argument (positional or keyword), converts it
 * to intptr_t, and delegates to the implementation function
 * __pyx_pf_..._188device_get_power_management_default_limit.
 * Returns NULL with an exception set on argument errors.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_189device_get_power_management_default_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_management_default_limit (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): comparison must be inside unlikely(); the previous form
       `unlikely(__pyx_kwds_len) < 0` evaluates to (0 or 1) < 0 and can
       never fire, silently ignoring a failed keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21216, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21216, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_management_default_limit", 0) < (0)) __PYX_ERR(0, 21216, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_management_default_limit", 1, 1, 1, i); __PYX_ERR(0, 21216, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21216, __pyx_L3_error)
    }
    /* Device handles cross the Python boundary as plain integers. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21216, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_management_default_limit", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21216, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_default_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_188device_get_power_management_default_limit(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation entry for device_get_power_management_default_limit:
   runs the C-level cpdef body and boxes the unsigned int as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_188device_get_power_management_default_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_cval;
  PyObject *__pyx_obj = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_management_default_limit", 0);
  /* Return slot is still NULL here; the XDECREF is a harmless formality. */
  __Pyx_XDECREF(__pyx_r);
  /* Invoke the C body directly (second arg = 1 skips Python dispatch). */
  __pyx_cval = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_default_limit(__pyx_v_device, 1);
  /* `except? 0` convention: zero only signals an error when an exception is pending. */
  if (unlikely(__pyx_cval == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21216, __pyx_L1_error)
  __pyx_obj = __Pyx_PyLong_From_unsigned_int(__pyx_cval);
  if (unlikely(!__pyx_obj)) __PYX_ERR(0, 21216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_obj);
  __pyx_r = __pyx_obj;
  __pyx_obj = 0;
  goto __pyx_L0;

  /* error path: drop any partial result and record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_management_default_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21234
 * 
 * 
 * cpdef unsigned int device_get_power_usage(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory).
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_191device_get_power_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C body of cpdef device_get_power_usage(): releases the GIL, calls
 * nvmlDeviceGetPowerUsage for `device`, re-takes the GIL, raises via
 * check_status() on a non-success status, and otherwise returns the current
 * power draw in milliwatts.  Declared `except? 0` in the .pyx: a return of 0
 * is ambiguous, so C callers must also check PyErr_Occurred().
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_usage(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_power;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21246
 *     """
 *     cdef unsigned int power
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerUsage(<Device>device, &power)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; the goto-based "finally" below
     guarantees PyEval_RestoreThread runs on both the normal and error paths. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21247
 *     cdef unsigned int power
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerUsage(<Device>device, &power)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return power
 */
        /* The internal-loading-error sentinel signals that the NVML symbol
           could not be loaded; an exception was already set in that case. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerUsage(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_power)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21247, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21246
 *     """
 *     cdef unsigned int power
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerUsage(<Device>device, &power)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21248
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerUsage(<Device>device, &power)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return power
 * 
 */
  /* check_status raises a Python exception for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21248, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21249
 *         __status__ = nvmlDeviceGetPowerUsage(<Device>device, &power)
 *     check_status(__status__)
 *     return power             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_power;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21234
 * 
 * 
 * cpdef unsigned int device_get_power_usage(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory).
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_191device_get_power_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_190device_get_power_usage, "device_get_power_usage(intptr_t device) -> unsigned int\n\nRetrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory).\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the power usage information.\n\n.. seealso:: `nvmlDeviceGetPowerUsage`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_191device_get_power_usage = {"device_get_power_usage", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_191device_get_power_usage, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_190device_get_power_usage};
/*
 * Python-visible wrapper for device_get_power_usage(): unpacks the single
 * 'device' argument (positional or keyword), converts it to intptr_t, and
 * delegates to the implementation function
 * __pyx_pf_..._190device_get_power_usage.
 * Returns NULL with an exception set on argument errors.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_191device_get_power_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_usage (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): comparison must be inside unlikely(); the previous form
       `unlikely(__pyx_kwds_len) < 0` evaluates to (0 or 1) < 0 and can
       never fire, silently ignoring a failed keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21234, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21234, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_usage", 0) < (0)) __PYX_ERR(0, 21234, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_usage", 1, 1, 1, i); __PYX_ERR(0, 21234, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21234, __pyx_L3_error)
    }
    /* Device handles cross the Python boundary as plain integers. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21234, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_usage", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21234, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_190device_get_power_usage(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation entry for device_get_power_usage: runs the C-level cpdef
   body and boxes the unsigned int as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_190device_get_power_usage(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_cval;
  PyObject *__pyx_obj = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_usage", 0);
  /* Return slot is still NULL here; the XDECREF is a harmless formality. */
  __Pyx_XDECREF(__pyx_r);
  /* Invoke the C body directly (second arg = 1 skips Python dispatch). */
  __pyx_cval = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_usage(__pyx_v_device, 1);
  /* `except? 0` convention: zero only signals an error when an exception is pending. */
  if (unlikely(__pyx_cval == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21234, __pyx_L1_error)
  __pyx_obj = __Pyx_PyLong_From_unsigned_int(__pyx_cval);
  if (unlikely(!__pyx_obj)) __PYX_ERR(0, 21234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_obj);
  __pyx_r = __pyx_obj;
  __pyx_obj = 0;
  goto __pyx_L0;

  /* error path: drop any partial result and record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21252
 * 
 * 
 * cpdef unsigned long long device_get_total_energy_consumption(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_193device_get_total_energy_consumption(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C body of cpdef device_get_total_energy_consumption(): releases the GIL,
 * calls nvmlDeviceGetTotalEnergyConsumption for `device`, re-takes the GIL,
 * raises via check_status() on a non-success status, and otherwise returns
 * the energy in millijoules since the driver was last reloaded.  Declared
 * `except? 0` in the .pyx: a return of 0 is ambiguous, so C callers must
 * also check PyErr_Occurred().
 */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_total_energy_consumption(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_energy;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21264
 *     """
 *     cdef unsigned long long energy
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTotalEnergyConsumption(<Device>device, &energy)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; the goto-based "finally" below
     guarantees PyEval_RestoreThread runs on both the normal and error paths. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21265
 *     cdef unsigned long long energy
 *     with nogil:
 *         __status__ = nvmlDeviceGetTotalEnergyConsumption(<Device>device, &energy)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return energy
 */
        /* The internal-loading-error sentinel signals that the NVML symbol
           could not be loaded; an exception was already set in that case. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEnergyConsumption(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_energy)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21265, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21264
 *     """
 *     cdef unsigned long long energy
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTotalEnergyConsumption(<Device>device, &energy)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21266
 *     with nogil:
 *         __status__ = nvmlDeviceGetTotalEnergyConsumption(<Device>device, &energy)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return energy
 * 
 */
  /* check_status raises a Python exception for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21266, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21267
 *         __status__ = nvmlDeviceGetTotalEnergyConsumption(<Device>device, &energy)
 *     check_status(__status__)
 *     return energy             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_energy;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21252
 * 
 * 
 * cpdef unsigned long long device_get_total_energy_consumption(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_total_energy_consumption", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_193device_get_total_energy_consumption(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_192device_get_total_energy_consumption, "device_get_total_energy_consumption(intptr_t device) -> unsigned long long\n\nRetrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned long long: Reference in which to return the energy consumption information.\n\n.. seealso:: `nvmlDeviceGetTotalEnergyConsumption`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_193device_get_total_energy_consumption = {"device_get_total_energy_consumption", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_193device_get_total_energy_consumption, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_192device_get_total_energy_consumption};
/*
 * Python-visible wrapper for device_get_total_energy_consumption(): unpacks
 * the single 'device' argument (positional or keyword), converts it to
 * intptr_t, and delegates to the implementation function
 * __pyx_pf_..._192device_get_total_energy_consumption.
 * Returns NULL with an exception set on argument errors.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_193device_get_total_energy_consumption(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_total_energy_consumption (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): comparison must be inside unlikely(); the previous form
       `unlikely(__pyx_kwds_len) < 0` evaluates to (0 or 1) < 0 and can
       never fire, silently ignoring a failed keyword count. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21252, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21252, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_total_energy_consumption", 0) < (0)) __PYX_ERR(0, 21252, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_total_energy_consumption", 1, 1, 1, i); __PYX_ERR(0, 21252, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21252, __pyx_L3_error)
    }
    /* Device handles cross the Python boundary as plain integers. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21252, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_total_energy_consumption", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21252, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_total_energy_consumption", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_192device_get_total_energy_consumption(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-facing implementation shim for `device_get_total_energy_consumption`:
 * forwards to the C-level cpdef body (dispatch skipped) and boxes the
 * unsigned long long result as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_192device_get_total_energy_consumption(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG c_result;
  PyObject *boxed = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_total_energy_consumption", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C body; a 0 return with a pending exception marks failure. */
  c_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_total_energy_consumption(__pyx_v_device, 1);
  if (unlikely(c_result == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 21252, __pyx_L1_error)
  /* Box the C integer into a Python object for the caller. */
  boxed = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(c_result);
  if (unlikely(!boxed)) __PYX_ERR(0, 21252, __pyx_L1_error)
  __Pyx_GOTREF(boxed);
  __pyx_r = boxed;
  boxed = NULL;
  goto __pyx_L0;

  /* error exit: record traceback, drop any temporary, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_total_energy_consumption", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21270
 * 
 * 
 * cpdef unsigned int device_get_enforced_power_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the effective power limit that the driver enforces after taking into account all limiters.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_195device_get_enforced_power_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_enforced_power_limit`.
 * Releases the GIL around the NVML driver call (Cython `with nogil`), then
 * re-acquires it to run check_status().  Per the `except? 0` convention
 * (see the source banner above), returns 0 with a live Python exception
 * on failure; callers must check PyErr_Occurred() when 0 is returned. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_enforced_power_limit(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_limit;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21282
 *     """
 *     cdef unsigned int limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEnforcedPowerLimit(<Device>device, &limit)
 *     check_status(__status__)
*/
  /* Drop the GIL for the (potentially blocking) driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21283
 *     cdef unsigned int limit
 *     with nogil:
 *         __status__ = nvmlDeviceGetEnforcedPowerLimit(<Device>device, &limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return limit
*/
        /* The internal-loading-error sentinel means the NVML symbol could not
         * be loaded; in that case an exception may already be pending. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEnforcedPowerLimit(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_limit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21283, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21282
 *     """
 *     cdef unsigned int limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEnforcedPowerLimit(<Device>device, &limit)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21284
 *     with nogil:
 *         __status__ = nvmlDeviceGetEnforcedPowerLimit(<Device>device, &limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return limit
 * 
*/
  /* Raise a Python exception if the NVML status is not success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21284, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21285
 *         __status__ = nvmlDeviceGetEnforcedPowerLimit(<Device>device, &limit)
 *     check_status(__status__)
 *     return limit             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_limit;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21270
 * 
 * 
 * cpdef unsigned int device_get_enforced_power_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the effective power limit that the driver enforces after taking into account all limiters.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_enforced_power_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_195device_get_enforced_power_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_194device_get_enforced_power_limit, "device_get_enforced_power_limit(intptr_t device) -> unsigned int\n\nGet the effective power limit that the driver enforces after taking into account all limiters.\n\nArgs:\n    device (intptr_t): The device to communicate with.\n\nReturns:\n    unsigned int: Reference in which to return the power management limit in milliwatts.\n\n.. seealso:: `nvmlDeviceGetEnforcedPowerLimit`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_195device_get_enforced_power_limit = {"device_get_enforced_power_limit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_195device_get_enforced_power_limit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_194device_get_enforced_power_limit};
/* Argument-unpacking wrapper: parses the single positional-or-keyword
 * `device` argument into an intptr_t and forwards to the _pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_195device_get_enforced_power_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_enforced_power_limit (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: generated code read `if (unlikely(__pyx_kwds_len) < 0)`.  Because
     * unlikely(x) normalizes its operand to 0/1 via `!!`, that comparison was
     * always false, silently ignoring a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL.  Parenthesization corrected below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21270, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21270, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_enforced_power_limit", 0) < (0)) __PYX_ERR(0, 21270, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_enforced_power_limit", 1, 1, 1, i); __PYX_ERR(0, 21270, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21270, __pyx_L3_error)
    }
    /* Convert the Python handle to intptr_t (-1 with pending error = failure). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21270, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_enforced_power_limit", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21270, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_enforced_power_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_194device_get_enforced_power_limit(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-facing implementation shim for `device_get_enforced_power_limit`:
 * forwards to the C-level cpdef body (dispatch skipped) and boxes the
 * unsigned int result as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_194device_get_enforced_power_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int c_limit;
  PyObject *boxed = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_enforced_power_limit", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C body; 0 plus a pending exception marks failure (`except? 0`). */
  c_limit = __pyx_f_4cuda_8bindings_5_nvml_device_get_enforced_power_limit(__pyx_v_device, 1);
  if (unlikely(c_limit == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21270, __pyx_L1_error)
  boxed = __Pyx_PyLong_From_unsigned_int(c_limit);
  if (unlikely(!boxed)) __PYX_ERR(0, 21270, __pyx_L1_error)
  __Pyx_GOTREF(boxed);
  __pyx_r = boxed;
  boxed = NULL;
  goto __pyx_L0;

  /* error exit: record traceback, drop any temporary, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_enforced_power_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21288
 * 
 * 
 * cpdef tuple device_get_gpu_operation_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_197device_get_gpu_operation_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_gpu_operation_mode`.
 * Releases the GIL for the NVML call, validates the status via
 * check_status(), then builds a (current, pending) 2-tuple of ints.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_operation_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__GpuOperationMode __pyx_v_current;
  __pyx_t_4cuda_8bindings_5_nvml__GpuOperationMode __pyx_v_pending;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_operation_mode", 0);

  /* "cuda/bindings/_nvml.pyx":21304
 *     cdef _GpuOperationMode current
 *     cdef _GpuOperationMode pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuOperationMode(<Device>device, &current, &pending)
 *     check_status(__status__)
*/
  /* Drop the GIL for the (potentially blocking) driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21305
 *     cdef _GpuOperationMode pending
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuOperationMode(<Device>device, &current, &pending)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (<int>current, <int>pending)
*/
        /* The internal-loading-error sentinel means the NVML symbol could not
         * be loaded; in that case an exception may already be pending. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuOperationMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_current), (&__pyx_v_pending)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21305, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21304
 *     cdef _GpuOperationMode current
 *     cdef _GpuOperationMode pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuOperationMode(<Device>device, &current, &pending)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21306
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuOperationMode(<Device>device, &current, &pending)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (<int>current, <int>pending)
 * 
*/
  /* Raise a Python exception if the NVML status is not success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21306, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21307
 *         __status__ = nvmlDeviceGetGpuOperationMode(<Device>device, &current, &pending)
 *     check_status(__status__)
 *     return (<int>current, <int>pending)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Box both enum values as Python ints and pack them into a new 2-tuple;
   * PyTuple_SET_ITEM steals the references given away above. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(((int)__pyx_v_current)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(((int)__pyx_v_pending)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21307, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21307, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21288
 * 
 * 
 * cpdef tuple device_get_gpu_operation_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot).
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_operation_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_197device_get_gpu_operation_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_196device_get_gpu_operation_mode, "device_get_gpu_operation_mode(intptr_t device) -> tuple\n\nRetrieves the current GOM and pending GOM (the one that GPU will switch to after reboot).\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: Reference in which to return the current GOM.\n    - int: Reference in which to return the pending GOM.\n\n.. seealso:: `nvmlDeviceGetGpuOperationMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_197device_get_gpu_operation_mode = {"device_get_gpu_operation_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_197device_get_gpu_operation_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_196device_get_gpu_operation_mode};
/* Argument-unpacking wrapper: parses the single positional-or-keyword
 * `device` argument into an intptr_t and forwards to the _pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_197device_get_gpu_operation_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_operation_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: generated code read `if (unlikely(__pyx_kwds_len) < 0)`.  Because
     * unlikely(x) normalizes its operand to 0/1 via `!!`, that comparison was
     * always false, silently ignoring a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL.  Parenthesization corrected below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21288, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21288, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_operation_mode", 0) < (0)) __PYX_ERR(0, 21288, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_operation_mode", 1, 1, 1, i); __PYX_ERR(0, 21288, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21288, __pyx_L3_error)
    }
    /* Convert the Python handle to intptr_t (-1 with pending error = failure). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21288, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_operation_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21288, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_operation_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_196device_get_gpu_operation_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-facing implementation shim for `device_get_gpu_operation_mode`:
 * forwards to the C-level cpdef body (dispatch skipped), which returns a
 * new reference to a tuple, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_196device_get_gpu_operation_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *result_tuple = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_operation_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  result_tuple = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_operation_mode(__pyx_v_device, 1);
  if (unlikely(!result_tuple)) __PYX_ERR(0, 21288, __pyx_L1_error)
  __Pyx_GOTREF(result_tuple);
  __pyx_r = result_tuple;
  result_tuple = NULL;
  goto __pyx_L0;

  /* error exit: record traceback, drop any temporary, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(result_tuple);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_operation_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21310
 * 
 * 
 * cpdef object device_get_memory_info_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the amount of used, free, reserved and total memory available on the device, in bytes.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_199device_get_memory_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_memory_info_v2`.
 * Allocates a Python Memory_v2 wrapper object, obtains the raw pointer to
 * its embedded nvmlMemory_v2_t struct via _get_ptr(), stamps the v2
 * version field, fills the struct through the NVML call (GIL released),
 * and returns the wrapper.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_info_v2(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *__pyx_v_memory_py = 0;
  nvmlMemory_v2_t *__pyx_v_memory;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_memory_info_v2", 0);

  /* "cuda/bindings/_nvml.pyx":21321
 *     .. seealso:: `nvmlDeviceGetMemoryInfo_v2`
 *     """
 *     cdef Memory_v2 memory_py = Memory_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlMemory_v2_t *memory = <nvmlMemory_v2_t *><intptr_t>(memory_py._get_ptr())
 *     memory.version = sizeof(nvmlMemory_v2_t) | (2 << 24)
*/
  /* Instantiate Memory_v2() via the vectorcall protocol (no arguments). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21321, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_memory_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21322
 *     """
 *     cdef Memory_v2 memory_py = Memory_v2()
 *     cdef nvmlMemory_v2_t *memory = <nvmlMemory_v2_t *><intptr_t>(memory_py._get_ptr())             # <<<<<<<<<<<<<<
 *     memory.version = sizeof(nvmlMemory_v2_t) | (2 << 24)
 *     with nogil:
*/
  /* `memory` aliases storage owned by memory_py; it must not outlive it. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory_v2 *)__pyx_v_memory_py->__pyx_vtab)->_get_ptr(__pyx_v_memory_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21322, __pyx_L1_error)
  __pyx_v_memory = ((nvmlMemory_v2_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21323
 *     cdef Memory_v2 memory_py = Memory_v2()
 *     cdef nvmlMemory_v2_t *memory = <nvmlMemory_v2_t *><intptr_t>(memory_py._get_ptr())
 *     memory.version = sizeof(nvmlMemory_v2_t) | (2 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryInfo_v2(<Device>device, memory)
*/
  /* NVML versioned-struct convention: size in the low bits, API version (2)
   * in the top byte (0x2000000 == 2 << 24). */
  __pyx_v_memory->version = ((sizeof(nvmlMemory_v2_t)) | 0x2000000);

  /* "cuda/bindings/_nvml.pyx":21324
 *     cdef nvmlMemory_v2_t *memory = <nvmlMemory_v2_t *><intptr_t>(memory_py._get_ptr())
 *     memory.version = sizeof(nvmlMemory_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryInfo_v2(<Device>device, memory)
 *     check_status(__status__)
*/
  /* Drop the GIL for the (potentially blocking) driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21325
 *     memory.version = sizeof(nvmlMemory_v2_t) | (2 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryInfo_v2(<Device>device, memory)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return memory_py
*/
        /* The internal-loading-error sentinel means the NVML symbol could not
         * be loaded; in that case an exception may already be pending. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryInfo_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_memory); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21325, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21324
 *     cdef nvmlMemory_v2_t *memory = <nvmlMemory_v2_t *><intptr_t>(memory_py._get_ptr())
 *     memory.version = sizeof(nvmlMemory_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryInfo_v2(<Device>device, memory)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21326
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryInfo_v2(<Device>device, memory)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return memory_py
 * 
*/
  /* Raise a Python exception if the NVML status is not success. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21326, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21327
 *         __status__ = nvmlDeviceGetMemoryInfo_v2(<Device>device, memory)
 *     check_status(__status__)
 *     return memory_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return a new reference to the (now populated) wrapper object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_memory_py);
  __pyx_r = ((PyObject *)__pyx_v_memory_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21310
 * 
 * 
 * cpdef object device_get_memory_info_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the amount of used, free, reserved and total memory available on the device, in bytes.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_memory_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_199device_get_memory_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_198device_get_memory_info_v2, "device_get_memory_info_v2(intptr_t device)\n\nRetrieves the amount of used, free, reserved and total memory available on the device, in bytes.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlMemory_v2_t: Reference in which to return the memory information.\n\n.. seealso:: `nvmlDeviceGetMemoryInfo_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_199device_get_memory_info_v2 = {"device_get_memory_info_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_199device_get_memory_info_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_198device_get_memory_info_v2};
/* Argument-unpacking wrapper: parses the single positional-or-keyword
 * `device` argument into an intptr_t and forwards to the _pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_199device_get_memory_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_memory_info_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: generated code read `if (unlikely(__pyx_kwds_len) < 0)`.  Because
     * unlikely(x) normalizes its operand to 0/1 via `!!`, that comparison was
     * always false, silently ignoring a negative (error) count from
     * __Pyx_NumKwargs_FASTCALL.  Parenthesization corrected below. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21310, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21310, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_memory_info_v2", 0) < (0)) __PYX_ERR(0, 21310, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_memory_info_v2", 1, 1, 1, i); __PYX_ERR(0, 21310, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21310, __pyx_L3_error)
    }
    /* Convert the Python handle to intptr_t (-1 with pending error = failure). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21310, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_memory_info_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21310, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_198device_get_memory_info_v2(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of device_get_memory_info_v2: delegates to the C-level
 * cpdef implementation with __pyx_skip_dispatch=1 (skip Python re-dispatch)
 * and returns its PyObject* result, or NULL with an exception set.
 * NOTE(review): Cython 3.2.2 generated code — edit cuda/bindings/_nvml.pyx
 * and regenerate rather than changing this file by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_198device_get_memory_info_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_memory_info_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level function; a NULL result means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_info_v2(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21310, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21330
 * 
 * 
 * cpdef int device_get_compute_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the current compute mode for the device.
 * 
*/

/* Forward declaration of the Python-callable wrapper; the signature differs
 * depending on whether the METH_FASTCALL vectorcall convention is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_201device_get_compute_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_compute_mode` (declared
 * `except? -1` in the .pyx): releases the GIL, calls the lazily-loaded NVML
 * entry point nvmlDeviceGetComputeMode, re-acquires the GIL, raises via
 * check_status() on a non-success status, and returns the mode as int.
 * On error returns -1 with a Python exception set (-1 is also a possible
 * valid value, hence the PyErr_Occurred() probe at call sites).
 * NOTE(review): generated code — change cuda/bindings/_nvml.pyx instead. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__ComputeMode __pyx_v_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21342
 *     """
 *     cdef _ComputeMode mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeMode(<Device>device, &mode)
 *     check_status(__status__)
 */
  {
      /* `with nogil:` — drop the GIL for the duration of the NVML call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21343
 *     cdef _ComputeMode mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeMode(<Device>device, &mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>mode
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals that the
         * dynamic loading shim failed; only then is a Python error checked
         * (briefly re-taking the GIL via __Pyx_ErrOccurredWithGIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21343, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21342
 *     """
 *     cdef _ComputeMode mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeMode(<Device>device, &mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil block: re-acquire the GIL before raising. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21344
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeMode(<Device>device, &mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>mode
 * 
 */
  /* check_status raises the mapped NVML exception when status != success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21344, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21345
 *         __status__ = nvmlDeviceGetComputeMode(<Device>device, &mode)
 *     check_status(__status__)
 *     return <int>mode             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21330
 * 
 * 
 * cpdef int device_get_compute_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the current compute mode for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering
 * device_get_compute_mode in the module's method table (FASTCALL|KEYWORDS). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_201device_get_compute_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_200device_get_compute_mode, "device_get_compute_mode(intptr_t device) -> int\n\nRetrieves the current compute mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the current compute mode.\n\n.. seealso:: `nvmlDeviceGetComputeMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_201device_get_compute_mode = {"device_get_compute_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_201device_get_compute_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_200device_get_compute_mode};
/* Python-callable wrapper for device_get_compute_mode: unpacks exactly one
 * positional-or-keyword argument `device`, converts it to intptr_t, and
 * forwards to the impl function. Returns NULL with an exception on bad
 * arguments or conversion failure.
 * NOTE(review): generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_201device_get_compute_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_compute_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely()` wraps only the operand, not the comparison;
     * appears harmless since __builtin_expect returns its argument — a
     * code-generator quirk, not something to patch in generated output. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21330, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21330, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_compute_mode", 0) < (0)) __PYX_ERR(0, 21330, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_compute_mode", 1, 1, 1, i); __PYX_ERR(0, 21330, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21330, __pyx_L3_error)
    }
    /* Convert `device` to a C integer (intptr_t via Py_ssize_t). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21330, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_compute_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21330, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_200device_get_compute_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of device_get_compute_mode: calls the C-level impl
 * (skip_dispatch=1) and boxes the int result as a Python int. The -1 return
 * is ambiguous (`except? -1`), so PyErr_Occurred() disambiguates error
 * from a legitimate -1 value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_200device_get_compute_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_compute_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21330, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21348
 * 
 * 
 * cpdef tuple device_get_cuda_compute_capability(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the CUDA compute capability of the device.
 * 
*/

/* Forward declaration of the Python-callable wrapper (FASTCALL-dependent
 * signature) for device_get_cuda_compute_capability. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_203device_get_cuda_compute_capability(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_cuda_compute_capability`:
 * releases the GIL, calls nvmlDeviceGetCudaComputeCapability to fill
 * (major, minor), raises via check_status() on failure, and returns the pair
 * as a new Python 2-tuple of ints. Returns NULL with an exception on error.
 * NOTE(review): generated code — edit cuda/bindings/_nvml.pyx instead. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_cuda_compute_capability(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_major;
  int __pyx_v_minor;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cuda_compute_capability", 0);

  /* "cuda/bindings/_nvml.pyx":21364
 *     cdef int major
 *     cdef int minor
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCudaComputeCapability(<Device>device, &major, &minor)
 *     check_status(__status__)
 */
  {
      /* `with nogil:` — drop the GIL around the NVML call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21365
 *     cdef int minor
 *     with nogil:
 *         __status__ = nvmlDeviceGetCudaComputeCapability(<Device>device, &major, &minor)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (major, minor)
 */
        /* Sentinel status means the dynamic-loading shim failed; only then is
         * a pending Python error checked (re-taking the GIL briefly). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCudaComputeCapability(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_major), (&__pyx_v_minor)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21365, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21364
 *     cdef int major
 *     cdef int minor
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCudaComputeCapability(<Device>device, &major, &minor)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Re-acquire the GIL before propagating the error. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21366
 *     with nogil:
 *         __status__ = nvmlDeviceGetCudaComputeCapability(<Device>device, &major, &minor)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (major, minor)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21366, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21367
 *         __status__ = nvmlDeviceGetCudaComputeCapability(<Device>device, &major, &minor)
 *     check_status(__status__)
 *     return (major, minor)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box major/minor and build the (major, minor) tuple; SET_ITEM steals the
   * references handed over by GIVEREF. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(__pyx_v_major); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(__pyx_v_minor); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21367, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21367, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21348
 * 
 * 
 * cpdef tuple device_get_cuda_compute_capability(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the CUDA compute capability of the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cuda_compute_capability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering
 * device_get_cuda_compute_capability (FASTCALL|KEYWORDS). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_203device_get_cuda_compute_capability(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_202device_get_cuda_compute_capability, "device_get_cuda_compute_capability(intptr_t device) -> tuple\n\nRetrieves the CUDA compute capability of the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: Reference in which to return the major CUDA compute capability.\n    - int: Reference in which to return the minor CUDA compute capability.\n\n.. seealso:: `nvmlDeviceGetCudaComputeCapability`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_203device_get_cuda_compute_capability = {"device_get_cuda_compute_capability", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_203device_get_cuda_compute_capability, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_202device_get_cuda_compute_capability};
/* Python-callable wrapper for device_get_cuda_compute_capability: unpacks
 * the single `device` argument (positional or keyword), converts it to
 * intptr_t, and forwards to the impl function. Returns NULL with an
 * exception on argument errors. NOTE(review): generated code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_203device_get_cuda_compute_capability(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_cuda_compute_capability (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21348, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21348, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_cuda_compute_capability", 0) < (0)) __PYX_ERR(0, 21348, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_cuda_compute_capability", 1, 1, 1, i); __PYX_ERR(0, 21348, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21348, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21348, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_cuda_compute_capability", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21348, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cuda_compute_capability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_202device_get_cuda_compute_capability(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of device_get_cuda_compute_capability: delegates to the
 * C-level impl (skip_dispatch=1) and returns the resulting tuple, or NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_202device_get_cuda_compute_capability(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_cuda_compute_capability", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_cuda_compute_capability(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_cuda_compute_capability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21370
 * 
 * 
 * cpdef tuple device_get_dram_encryption_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending DRAM Encryption modes for the device.
 * 
*/

/* Forward declaration of the Python-callable wrapper (FASTCALL-dependent
 * signature) for device_get_dram_encryption_mode. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_205device_get_dram_encryption_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_dram_encryption_mode`:
 * releases the GIL, calls nvmlDeviceGetDramEncryptionMode to fill `current`
 * and `pending`, raises via check_status() on failure, and returns a 2-tuple
 * of the two structs converted to Python objects (via the generated
 * nvmlDramEncryptionInfo_v1_t converter). Returns NULL with an exception on
 * error. NOTE(review): generated code — edit cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_dram_encryption_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlDramEncryptionInfo_t __pyx_v_current;
  nvmlDramEncryptionInfo_t __pyx_v_pending;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_dram_encryption_mode", 0);

  /* "cuda/bindings/_nvml.pyx":21386
 *     cdef nvmlDramEncryptionInfo_t current
 *     cdef nvmlDramEncryptionInfo_t pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDramEncryptionMode(<Device>device, &current, &pending)
 *     check_status(__status__)
 */
  {
      /* `with nogil:` — drop the GIL around the NVML call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21387
 *     cdef nvmlDramEncryptionInfo_t pending
 *     with nogil:
 *         __status__ = nvmlDeviceGetDramEncryptionMode(<Device>device, &current, &pending)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (<nvmlDramEncryptionInfo_v1_t>current, <nvmlDramEncryptionInfo_v1_t>pending)
 */
        /* Sentinel status means the loading shim failed; only then probe for
         * a pending Python error (briefly re-taking the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDramEncryptionMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_current), (&__pyx_v_pending)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21387, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21386
 *     cdef nvmlDramEncryptionInfo_t current
 *     cdef nvmlDramEncryptionInfo_t pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDramEncryptionMode(<Device>device, &current, &pending)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Re-acquire the GIL before propagating the error. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21388
 *     with nogil:
 *         __status__ = nvmlDeviceGetDramEncryptionMode(<Device>device, &current, &pending)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (<nvmlDramEncryptionInfo_v1_t>current, <nvmlDramEncryptionInfo_v1_t>pending)
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21388, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21389
 *         __status__ = nvmlDeviceGetDramEncryptionMode(<Device>device, &current, &pending)
 *     check_status(__status__)
 *     return (<nvmlDramEncryptionInfo_v1_t>current, <nvmlDramEncryptionInfo_v1_t>pending)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Convert both structs to Python objects and pack into a 2-tuple;
   * SET_ITEM steals the references handed over by GIVEREF. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __pyx_convert__to_py_nvmlDramEncryptionInfo_v1_t(__pyx_v_current); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21389, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __pyx_convert__to_py_nvmlDramEncryptionInfo_v1_t(__pyx_v_pending); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21389, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21389, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21389, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21389, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21370
 * 
 * 
 * cpdef tuple device_get_dram_encryption_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending DRAM Encryption modes for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_dram_encryption_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering
 * device_get_dram_encryption_mode (FASTCALL|KEYWORDS). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_205device_get_dram_encryption_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_204device_get_dram_encryption_mode, "device_get_dram_encryption_mode(intptr_t device) -> tuple\n\nRetrieves the current and pending DRAM Encryption modes for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - nvmlDramEncryptionInfo_v1_t: Reference in which to return the current DRAM Encryption mode.\n    - nvmlDramEncryptionInfo_v1_t: Reference in which to return the pending DRAM Encryption mode.\n\n.. seealso:: `nvmlDeviceGetDramEncryptionMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_205device_get_dram_encryption_mode = {"device_get_dram_encryption_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_205device_get_dram_encryption_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_204device_get_dram_encryption_mode};
/* Python-callable wrapper for device_get_dram_encryption_mode: unpacks the
 * single `device` argument (positional or keyword), converts it to intptr_t,
 * and forwards to the impl function. Returns NULL with an exception on
 * argument errors. NOTE(review): generated code. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_205device_get_dram_encryption_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_dram_encryption_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21370, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21370, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_dram_encryption_mode", 0) < (0)) __PYX_ERR(0, 21370, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_dram_encryption_mode", 1, 1, 1, i); __PYX_ERR(0, 21370, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21370, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21370, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_dram_encryption_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21370, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_dram_encryption_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_204device_get_dram_encryption_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of device_get_dram_encryption_mode: delegates to the
 * C-level impl (skip_dispatch=1) and returns the resulting tuple, or NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_204device_get_dram_encryption_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_dram_encryption_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_dram_encryption_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_dram_encryption_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21392
 * 
 * 
 * cpdef device_set_dram_encryption_mode(intptr_t device, intptr_t dram_encryption):             # <<<<<<<<<<<<<<
 *     """Set the DRAM Encryption mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_207device_set_dram_encryption_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef implementation of `device_set_dram_encryption_mode`: releases the GIL,
 * calls nvmlDeviceSetDramEncryptionMode, re-acquires the GIL, raises on a
 * non-success status via check_status, and returns None.
 * `dram_encryption` is reinterpreted as `const nvmlDramEncryptionInfo_t *` —
 * the caller is responsible for passing a valid struct address. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_dram_encryption_mode(intptr_t __pyx_v_device, intptr_t __pyx_v_dram_encryption, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_dram_encryption_mode", 0);

  /* "cuda/bindings/_nvml.pyx":21401
 *     .. seealso:: `nvmlDeviceSetDramEncryptionMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDramEncryptionMode(<Device>device, <const nvmlDramEncryptionInfo_t*>dram_encryption)
 *     check_status(__status__)
*/
  /* Drop the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21402
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetDramEncryptionMode(<Device>device, <const nvmlDramEncryptionInfo_t*>dram_encryption)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel status flags a failure to load the NVML symbol; in that
         * case an exception was already set (checked with the GIL held). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDramEncryptionMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlDramEncryptionInfo_t const *)__pyx_v_dram_encryption)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21402, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21401
 *     .. seealso:: `nvmlDeviceSetDramEncryptionMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDramEncryptionMode(<Device>device, <const nvmlDramEncryptionInfo_t*>dram_encryption)
 *     check_status(__status__)
*/
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21403
 *     with nogil:
 *         __status__ = nvmlDeviceSetDramEncryptionMode(<Device>device, <const nvmlDramEncryptionInfo_t*>dram_encryption)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 after setting a Python exception for error codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21403, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21392
 * 
 * 
 * cpdef device_set_dram_encryption_mode(intptr_t device, intptr_t dram_encryption):             # <<<<<<<<<<<<<<
 *     """Set the DRAM Encryption mode for the device.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_dram_encryption_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_207device_set_dram_encryption_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_206device_set_dram_encryption_mode, "device_set_dram_encryption_mode(intptr_t device, intptr_t dram_encryption)\n\nSet the DRAM Encryption mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    dram_encryption (intptr_t): The target DRAM Encryption mode.\n\n.. seealso:: `nvmlDeviceSetDramEncryptionMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_207device_set_dram_encryption_mode = {"device_set_dram_encryption_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_207device_set_dram_encryption_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_206device_set_dram_encryption_mode};
/* Python-callable wrapper for `device_set_dram_encryption_mode`: unpacks two
 * positional/keyword arguments (`device`, `dram_encryption`) into intptr_t
 * values and forwards to the def-level body.
 * NOTE(review): this file is generated by Cython 3.2.2; the fix below should
 * also be applied upstream so regeneration does not undo it. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_207device_set_dram_encryption_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_dram_encryption;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_dram_encryption_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_dram_encryption,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which yields 0 or 1, so `< 0` could never be
     * true and the negative-length error check was dead code.  The comparison
     * belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21392, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21392, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21392, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_dram_encryption_mode", 0) < (0)) __PYX_ERR(0, 21392, __pyx_L3_error)
      /* Both arguments are required; report any slot still unfilled. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_dram_encryption_mode", 1, 2, 2, i); __PYX_ERR(0, 21392, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21392, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21392, __pyx_L3_error)
    }
    /* NOTE(review): conversion via PyLong_AsSsize_t assumes Py_ssize_t and
     * intptr_t have the same width (true on mainstream platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21392, __pyx_L3_error)
    __pyx_v_dram_encryption = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_dram_encryption == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21392, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_dram_encryption_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 21392, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_dram_encryption_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_206device_set_dram_encryption_mode(__pyx_self, __pyx_v_device, __pyx_v_dram_encryption);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level body for `device_set_dram_encryption_mode`: forwards both
 * intptr_t arguments to the shared cpdef C implementation and returns its
 * result (None on success, NULL with an exception set on failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_206device_set_dram_encryption_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_dram_encryption) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_dram_encryption_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* skip_dispatch=1: call the C implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_dram_encryption_mode(__pyx_v_device, __pyx_v_dram_encryption, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21392, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_dram_encryption_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21406
 * 
 * 
 * cpdef tuple device_get_ecc_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending ECC modes for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_209device_get_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef implementation of `device_get_ecc_mode`: releases the GIL, calls
 * nvmlDeviceGetEccMode to fill `current` and `pending`, raises via
 * check_status on error, and returns a 2-tuple (int(current), int(pending)). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_ecc_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_current;
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_pending;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_ecc_mode", 0);

  /* "cuda/bindings/_nvml.pyx":21422
 *     cdef _EnableState current
 *     cdef _EnableState pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEccMode(<Device>device, &current, &pending)
 *     check_status(__status__)
*/
  /* Drop the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21423
 *     cdef _EnableState pending
 *     with nogil:
 *         __status__ = nvmlDeviceGetEccMode(<Device>device, &current, &pending)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (<int>current, <int>pending)
*/
        /* The sentinel status marks a symbol-loading failure; the exception
         * (if any) is checked with the GIL re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEccMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_current), (&__pyx_v_pending)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21423, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21422
 *     cdef _EnableState current
 *     cdef _EnableState pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEccMode(<Device>device, &current, &pending)
 *     check_status(__status__)
*/
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21424
 *     with nogil:
 *         __status__ = nvmlDeviceGetEccMode(<Device>device, &current, &pending)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (<int>current, <int>pending)
 * 
*/
  /* check_status returns 1 after setting an exception for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21424, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21425
 *         __status__ = nvmlDeviceGetEccMode(<Device>device, &current, &pending)
 *     check_status(__status__)
 *     return (<int>current, <int>pending)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Box both enum values as Python ints and pack them into a new tuple;
   * GIVEREF/SET_ITEM transfer ownership of each int into the tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(((int)__pyx_v_current)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(((int)__pyx_v_pending)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21425, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21425, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21406
 * 
 * 
 * cpdef tuple device_get_ecc_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending ECC modes for the device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_209device_get_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_208device_get_ecc_mode, "device_get_ecc_mode(intptr_t device) -> tuple\n\nRetrieves the current and pending ECC modes for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: Reference in which to return the current ECC mode.\n    - int: Reference in which to return the pending ECC mode.\n\n.. seealso:: `nvmlDeviceGetEccMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_209device_get_ecc_mode = {"device_get_ecc_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_209device_get_ecc_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_208device_get_ecc_mode};
/* Python-callable wrapper for `device_get_ecc_mode`: unpacks the single
 * `device` argument (positional or keyword) into an intptr_t and forwards to
 * the def-level body.
 * NOTE(review): generated by Cython 3.2.2; apply the fix below upstream too. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_209device_get_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_ecc_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) yields
     * !!(x) (0 or 1), so the `< 0` test was always false and the error check
     * dead.  The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21406, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21406, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_ecc_mode", 0) < (0)) __PYX_ERR(0, 21406, __pyx_L3_error)
      /* `device` is required; report it if still unfilled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_ecc_mode", 1, 1, 1, i); __PYX_ERR(0, 21406, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21406, __pyx_L3_error)
    }
    /* NOTE(review): conversion assumes Py_ssize_t and intptr_t share a width. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21406, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_ecc_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21406, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_208device_get_ecc_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level body for `device_get_ecc_mode`: forwards to the shared cpdef C
 * implementation and returns the resulting (current, pending) tuple, or NULL
 * with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_208device_get_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_ecc_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* skip_dispatch=1: call the C implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_ecc_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21428
 * 
 * 
 * cpdef int device_get_default_ecc_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the default ECC modes for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_211device_get_default_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef implementation of `device_get_default_ecc_mode`: releases the GIL,
 * calls nvmlDeviceGetDefaultEccMode, raises via check_status on error, and
 * returns the default ECC mode as an int.  Error sentinel is -1 (with a
 * Python exception set), matching the `except? -1` declaration. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_default_ecc_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_default_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21440
 *     """
 *     cdef _EnableState default_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDefaultEccMode(<Device>device, &default_mode)
 *     check_status(__status__)
*/
  /* Drop the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21441
 *     cdef _EnableState default_mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetDefaultEccMode(<Device>device, &default_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>default_mode
*/
        /* Sentinel status flags a symbol-loading failure; exception (if any)
         * is checked with the GIL re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDefaultEccMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_default_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21441, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21440
 *     """
 *     cdef _EnableState default_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDefaultEccMode(<Device>device, &default_mode)
 *     check_status(__status__)
*/
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21442
 *     with nogil:
 *         __status__ = nvmlDeviceGetDefaultEccMode(<Device>device, &default_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>default_mode
 * 
*/
  /* check_status returns 1 after setting an exception for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21442, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21443
 *         __status__ = nvmlDeviceGetDefaultEccMode(<Device>device, &default_mode)
 *     check_status(__status__)
 *     return <int>default_mode             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_default_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21428
 * 
 * 
 * cpdef int device_get_default_ecc_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the default ECC modes for the device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_default_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_211device_get_default_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_210device_get_default_ecc_mode, "device_get_default_ecc_mode(intptr_t device) -> int\n\nRetrieves the default ECC modes for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the default ECC mode.\n\n.. seealso:: `nvmlDeviceGetDefaultEccMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_211device_get_default_ecc_mode = {"device_get_default_ecc_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_211device_get_default_ecc_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_210device_get_default_ecc_mode};
/* Python-callable wrapper for `device_get_default_ecc_mode`: unpacks the
 * single `device` argument into an intptr_t and forwards to the def-level
 * body.  NOTE(review): generated by Cython 3.2.2; apply the fix below
 * upstream too. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_211device_get_default_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_default_ecc_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) yields
     * !!(x) (0 or 1), so the `< 0` test was always false and the error check
     * dead.  The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21428, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21428, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_default_ecc_mode", 0) < (0)) __PYX_ERR(0, 21428, __pyx_L3_error)
      /* `device` is required; report it if still unfilled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_default_ecc_mode", 1, 1, 1, i); __PYX_ERR(0, 21428, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21428, __pyx_L3_error)
    }
    /* NOTE(review): conversion assumes Py_ssize_t and intptr_t share a width. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21428, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_default_ecc_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21428, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_default_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_210device_get_default_ecc_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level body for `device_get_default_ecc_mode`: calls the cpdef C
 * implementation (which returns an int, -1 + exception on error) and boxes
 * the result as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_210device_get_default_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_default_ecc_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is an ambiguous sentinel (`except? -1`), so PyErr_Occurred()
   * disambiguates a real error from a legitimate -1 return. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_default_ecc_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21428, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_default_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21446
 * 
 * 
 * cpdef unsigned int device_get_board_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the device boardId from 0-N. Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with :func:`device_get_multi_gpu_board` to decide if they are on the same board as well. The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will always return those values but they will always be different from each other).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_213device_get_board_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef implementation of `device_get_board_id`: releases the GIL, calls
 * nvmlDeviceGetBoardId, raises via check_status on error, and returns the
 * board id.  Error sentinel is 0 (with a Python exception set), matching the
 * `except? 0` declaration — 0 can also be a legitimate board id, hence the
 * "maybe" sentinel. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_board_id(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_board_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21458
 *     """
 *     cdef unsigned int board_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBoardId(<Device>device, &board_id)
 *     check_status(__status__)
*/
  /* Drop the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21459
 *     cdef unsigned int board_id
 *     with nogil:
 *         __status__ = nvmlDeviceGetBoardId(<Device>device, &board_id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return board_id
*/
        /* Sentinel status flags a symbol-loading failure; exception (if any)
         * is checked with the GIL re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardId(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_board_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21459, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21458
 *     """
 *     cdef unsigned int board_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBoardId(<Device>device, &board_id)
 *     check_status(__status__)
*/
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21460
 *     with nogil:
 *         __status__ = nvmlDeviceGetBoardId(<Device>device, &board_id)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return board_id
 * 
*/
  /* check_status returns 1 after setting an exception for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21460, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21461
 *         __status__ = nvmlDeviceGetBoardId(<Device>device, &board_id)
 *     check_status(__status__)
 *     return board_id             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_board_id;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21446
 * 
 * 
 * cpdef unsigned int device_get_board_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the device boardId from 0-N. Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with :func:`device_get_multi_gpu_board` to decide if they are on the same board as well. The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will always return those values but they will always be different from each other).
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_board_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python entry point for device_get_board_id.
 * Cython emits the signature twice: the METH_FASTCALL vectorcall convention
 * (args array + count + kwnames) and the classic tuple/dict convention,
 * selected at compile time by CYTHON_METH_FASTCALL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_213device_get_board_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_212device_get_board_id, "device_get_board_id(intptr_t device) -> unsigned int\n\nRetrieves the device boardId from 0-N. Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with :func:`device_get_multi_gpu_board` to decide if they are on the same board as well. The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will always return those values but they will always be different from each other).\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return the device's board ID.\n\n.. seealso:: `nvmlDeviceGetBoardId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_213device_get_board_id = {"device_get_board_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_213device_get_board_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_212device_get_board_id};
/* Argument-parsing wrapper: unpacks the single `device` argument (positional
 * or keyword), converts it to intptr_t, and delegates to the
 * __pyx_pf_..._212 impl function. Returns NULL with an exception set on any
 * parsing/conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_213device_get_board_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Borrowed-or-owned slots for each declared parameter; XDECREF'd on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_board_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely()` normalizes its operand to 0/1, so this `< 0`
     * comparison appears to be always false; generated by Cython, left as-is. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21446, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then let
       * __Pyx_ParseKeywords fill in the remaining slots by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21446, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_board_id", 0) < (0)) __PYX_ERR(0, 21446, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_board_id", 1, 1, 1, i); __PYX_ERR(0, 21446, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21446, __pyx_L3_error)
    }
    /* NOTE(review): converts via PyLong_AsSsize_t although the target is
     * intptr_t; the two types match in width on the supported platforms —
     * confirm if porting to an exotic ABI. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21446, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_board_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21446, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references captured above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_board_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_212device_get_board_id(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Boxing impl for device_get_board_id: calls the C-level cpdef helper with
 * skip_dispatch=1 and converts the unsigned int result to a Python int.
 * The helper is declared `except? 0`, so a 0 return is ambiguous and
 * PyErr_Occurred() is consulted to distinguish error from a real 0. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_212device_get_board_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_board_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 plus a pending exception signals failure. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_board_id(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21446, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_board_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21464
 * 
 * 
 * cpdef unsigned int device_get_multi_gpu_board(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves whether the device is on a Multi-GPU Board Devices that are on multi-GPU boards will set ``multiGpuBool`` to a non-zero value.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_215device_get_multi_gpu_board(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level impl of the cpdef device_get_multi_gpu_board: releases the GIL,
 * calls nvmlDeviceGetMultiGpuBoard, reacquires the GIL, then raises via
 * check_status() if the NVML status is not success. Declared `except? 0`,
 * so callers must pair a 0 return with a PyErr_Occurred() check. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_multi_gpu_board(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_multi_gpu_bool;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21476
 *     """
 *     cdef unsigned int multi_gpu_bool
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMultiGpuBoard(<Device>device, &multi_gpu_bool)
 *     check_status(__status__)
 */
  {
      /* `with nogil:` — drop the GIL around the blocking NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21477
 *     cdef unsigned int multi_gpu_bool
 *     with nogil:
 *         __status__ = nvmlDeviceGetMultiGpuBoard(<Device>device, &multi_gpu_bool)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return multi_gpu_bool
 */
        /* The sentinel status _NVMLRETURN_T_INTERNAL_LOADING_ERROR means the
         * lazy-loading shim failed to resolve the symbol; that path raises. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMultiGpuBoard(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_multi_gpu_bool)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21477, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21476
 *     """
 *     cdef unsigned int multi_gpu_bool
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMultiGpuBoard(<Device>device, &multi_gpu_bool)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits of the nogil block reacquire the GIL before continuing. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21478
 *     with nogil:
 *         __status__ = nvmlDeviceGetMultiGpuBoard(<Device>device, &multi_gpu_bool)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return multi_gpu_bool
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21478, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21479
 *         __status__ = nvmlDeviceGetMultiGpuBoard(<Device>device, &multi_gpu_bool)
 *     check_status(__status__)
 *     return multi_gpu_bool             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_multi_gpu_bool;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21464
 * 
 * 
 * cpdef unsigned int device_get_multi_gpu_board(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves whether the device is on a Multi-GPU Board Devices that are on multi-GPU boards will set ``multiGpuBool`` to a non-zero value.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_multi_gpu_board", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python entry point for device_get_multi_gpu_board
 * (dual METH_FASTCALL / tuple-dict signature, selected at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_215device_get_multi_gpu_board(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_214device_get_multi_gpu_board, "device_get_multi_gpu_board(intptr_t device) -> unsigned int\n\nRetrieves whether the device is on a Multi-GPU Board Devices that are on multi-GPU boards will set ``multiGpuBool`` to a non-zero value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to return a zero or non-zero value to indicate whether the device is on a multi GPU board.\n\n.. seealso:: `nvmlDeviceGetMultiGpuBoard`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_215device_get_multi_gpu_board = {"device_get_multi_gpu_board", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_215device_get_multi_gpu_board, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_214device_get_multi_gpu_board};
/* Argument-parsing wrapper: unpacks the single `device` argument (positional
 * or keyword), converts it to intptr_t, and delegates to the
 * __pyx_pf_..._214 impl. Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_215device_get_multi_gpu_board(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* One slot per declared parameter; XDECREF'd on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_multi_gpu_board (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely()` yields 0/1, so this `< 0` test looks
     * always-false; generated by Cython, left as-is. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21464, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21464, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_multi_gpu_board", 0) < (0)) __PYX_ERR(0, 21464, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_multi_gpu_board", 1, 1, 1, i); __PYX_ERR(0, 21464, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21464, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t converted via PyLong_AsSsize_t — same width on
     * supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21464, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_multi_gpu_board", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21464, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_multi_gpu_board", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_214device_get_multi_gpu_board(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Boxing impl for device_get_multi_gpu_board: calls the C-level cpdef helper
 * with skip_dispatch=1 and boxes the unsigned int result as a Python int.
 * `except? 0` convention: a 0 return plus PyErr_Occurred() signals failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_214device_get_multi_gpu_board(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_multi_gpu_board", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_multi_gpu_board(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21464, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_multi_gpu_board", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21482
 * 
 * 
 * cpdef unsigned long long device_get_total_ecc_errors(intptr_t device, int error_type, int counter_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the total ECC error counts for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_217device_get_total_ecc_errors(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level impl of the cpdef device_get_total_ecc_errors: releases the GIL,
 * calls nvmlDeviceGetTotalEccErrors with the given error/counter type enums,
 * reacquires the GIL, then raises via check_status() for non-success.
 * `except? 0`: callers must pair a 0 return with a PyErr_Occurred() check. */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_total_ecc_errors(intptr_t __pyx_v_device, int __pyx_v_error_type, int __pyx_v_counter_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_ecc_counts;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21496
 *     """
 *     cdef unsigned long long ecc_counts
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTotalEccErrors(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, &ecc_counts)
 *     check_status(__status__)
 */
  {
      /* `with nogil:` — drop the GIL around the blocking NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21497
 *     cdef unsigned long long ecc_counts
 *     with nogil:
 *         __status__ = nvmlDeviceGetTotalEccErrors(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, &ecc_counts)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return ecc_counts
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR flags a failed lazy symbol load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEccErrors(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__MemoryErrorType)__pyx_v_error_type), ((__pyx_t_4cuda_8bindings_5_nvml__EccCounterType)__pyx_v_counter_type), (&__pyx_v_ecc_counts)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21497, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21496
 *     """
 *     cdef unsigned long long ecc_counts
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTotalEccErrors(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, &ecc_counts)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits of the nogil block reacquire the GIL before continuing. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21498
 *     with nogil:
 *         __status__ = nvmlDeviceGetTotalEccErrors(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, &ecc_counts)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return ecc_counts
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21498, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21499
 *         __status__ = nvmlDeviceGetTotalEccErrors(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, &ecc_counts)
 *     check_status(__status__)
 *     return ecc_counts             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_ecc_counts;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21482
 * 
 * 
 * cpdef unsigned long long device_get_total_ecc_errors(intptr_t device, int error_type, int counter_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the total ECC error counts for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_total_ecc_errors", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python entry point for device_get_total_ecc_errors
 * (dual METH_FASTCALL / tuple-dict signature, selected at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_217device_get_total_ecc_errors(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_216device_get_total_ecc_errors, "device_get_total_ecc_errors(intptr_t device, int error_type, int counter_type) -> unsigned long long\n\nRetrieves the total ECC error counts for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    error_type (MemoryErrorType): Flag that specifies the type of the errors.\n    counter_type (EccCounterType): Flag that specifies the counter-type of the errors.\n\nReturns:\n    unsigned long long: Reference in which to return the specified ECC errors.\n\n.. seealso:: `nvmlDeviceGetTotalEccErrors`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_217device_get_total_ecc_errors = {"device_get_total_ecc_errors", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_217device_get_total_ecc_errors, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_216device_get_total_ecc_errors};
/* Argument-parsing wrapper: unpacks (device, error_type, counter_type) from
 * positional/keyword arguments, converts them to C types, and delegates to
 * the __pyx_pf_..._216 impl. Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_217device_get_total_ecc_errors(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_error_type;
  int __pyx_v_counter_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* One slot per declared parameter; XDECREF'd on every exit path. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_total_ecc_errors (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_error_type,&__pyx_mstate_global->__pyx_n_u_counter_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely()` yields 0/1, so this `< 0` test looks
     * always-false; generated by Cython, left as-is. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21482, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Fall-through switch captures however many positionals were given. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21482, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21482, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21482, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_total_ecc_errors", 0) < (0)) __PYX_ERR(0, 21482, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_total_ecc_errors", 1, 3, 3, i); __PYX_ERR(0, 21482, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21482, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21482, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21482, __pyx_L3_error)
    }
    /* Convert each argument; (-1 && PyErr_Occurred()) distinguishes error from a real -1. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21482, __pyx_L3_error)
    __pyx_v_error_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_error_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21482, __pyx_L3_error)
    __pyx_v_counter_type = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_counter_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21482, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_total_ecc_errors", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 21482, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_total_ecc_errors", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_216device_get_total_ecc_errors(__pyx_self, __pyx_v_device, __pyx_v_error_type, __pyx_v_counter_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Boxing impl for device_get_total_ecc_errors: calls the C-level cpdef helper
 * with skip_dispatch=1 and boxes the unsigned long long result as a Python
 * int. `except? 0` convention: 0 plus PyErr_Occurred() signals failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_216device_get_total_ecc_errors(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_error_type, int __pyx_v_counter_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_total_ecc_errors", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_total_ecc_errors(__pyx_v_device, __pyx_v_error_type, __pyx_v_counter_type, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 21482, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_total_ecc_errors", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21502
 * 
 * 
 * cpdef unsigned long long device_get_memory_error_counter(intptr_t device, int error_type, int counter_type, int location_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the requested memory error counter for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_219device_get_memory_error_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level impl of the cpdef device_get_memory_error_counter: releases the
 * GIL, calls nvmlDeviceGetMemoryErrorCounter with the given error/counter/
 * location enums, reacquires the GIL, then raises via check_status() for
 * non-success. `except? 0`: callers must pair a 0 return with PyErr_Occurred(). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_error_counter(intptr_t __pyx_v_device, int __pyx_v_error_type, int __pyx_v_counter_type, int __pyx_v_location_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21517
 *     """
 *     cdef unsigned long long count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryErrorCounter(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, <_MemoryLocation>location_type, &count)
 *     check_status(__status__)
 */
  {
      /* `with nogil:` — drop the GIL around the blocking NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21518
 *     cdef unsigned long long count
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryErrorCounter(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, <_MemoryLocation>location_type, &count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return count
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR flags a failed lazy symbol load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryErrorCounter(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__MemoryErrorType)__pyx_v_error_type), ((__pyx_t_4cuda_8bindings_5_nvml__EccCounterType)__pyx_v_counter_type), ((__pyx_t_4cuda_8bindings_5_nvml__MemoryLocation)__pyx_v_location_type), (&__pyx_v_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21518, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21517
 *     """
 *     cdef unsigned long long count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryErrorCounter(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, <_MemoryLocation>location_type, &count)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits of the nogil block reacquire the GIL before continuing. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21519
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryErrorCounter(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, <_MemoryLocation>location_type, &count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return count
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21519, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21520
 *         __status__ = nvmlDeviceGetMemoryErrorCounter(<Device>device, <_MemoryErrorType>error_type, <_EccCounterType>counter_type, <_MemoryLocation>location_type, &count)
 *     check_status(__status__)
 *     return count             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21502
 * 
 * 
 * cpdef unsigned long long device_get_memory_error_counter(intptr_t device, int error_type, int counter_type, int location_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the requested memory error counter for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_error_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry for
 * device_get_memory_error_counter; the wrapper definition follows below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_219device_get_memory_error_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_218device_get_memory_error_counter, "device_get_memory_error_counter(intptr_t device, int error_type, int counter_type, int location_type) -> unsigned long long\n\nRetrieves the requested memory error counter for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    error_type (MemoryErrorType): Flag that specifies the type of error.\n    counter_type (EccCounterType): Flag that specifies the counter-type of the errors.\n    location_type (MemoryLocation): Specifies the location of the counter.\n\nReturns:\n    unsigned long long: Reference in which to return the ECC counter.\n\n.. seealso:: `nvmlDeviceGetMemoryErrorCounter`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_219device_get_memory_error_counter = {"device_get_memory_error_counter", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_219device_get_memory_error_counter, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_218device_get_memory_error_counter};
/* Python wrapper for device_get_memory_error_counter: unpacks the four
 * positional/keyword arguments (device, error_type, counter_type,
 * location_type), converts them to C scalars, and delegates to the cpdef
 * dispatcher __pyx_pf_..._218. Returns a new reference, or NULL with an
 * exception set on argument-parsing or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_219device_get_memory_error_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_error_type;
  int __pyx_v_counter_type;
  int __pyx_v_location_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[4] = {0,0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_memory_error_counter (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_error_type,&__pyx_mstate_global->__pyx_n_u_counter_type,&__pyx_mstate_global->__pyx_n_u_location_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` tests `!!(len) < 0` (always false),
     * silently ignoring a negative error return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21502, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positional args, then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  4:
        values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21502, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21502, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21502, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21502, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_memory_error_counter", 0) < (0)) __PYX_ERR(0, 21502, __pyx_L3_error)
      /* All four parameters are required; report the first missing one. */
      for (Py_ssize_t i = __pyx_nargs; i < 4; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_memory_error_counter", 1, 4, 4, i); __PYX_ERR(0, 21502, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 4)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly four positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21502, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21502, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21502, __pyx_L3_error)
      values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21502, __pyx_L3_error)
    }
    /* Convert the collected Python objects to C values; -1 doubles as an
     * error sentinel, so PyErr_Occurred() disambiguates. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21502, __pyx_L3_error)
    __pyx_v_error_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_error_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21502, __pyx_L3_error)
    __pyx_v_counter_type = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_counter_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21502, __pyx_L3_error)
    __pyx_v_location_type = __Pyx_PyLong_As_int(values[3]); if (unlikely((__pyx_v_location_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21502, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_memory_error_counter", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 21502, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release the argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_error_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_218device_get_memory_error_counter(__pyx_self, __pyx_v_device, __pyx_v_error_type, __pyx_v_counter_type, __pyx_v_location_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatcher: calls the C-level implementation (skip_dispatch=1, since
 * we arrived via the Python wrapper) and boxes the unsigned long long result
 * into a Python int. The impl signals error with return value 0 plus a set
 * exception (except? 0 semantics). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_218device_get_memory_error_counter(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_error_type, int __pyx_v_counter_type, int __pyx_v_location_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_memory_error_counter", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous error sentinel here, so PyErr_Occurred() disambiguates. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_error_counter(__pyx_v_device, __pyx_v_error_type, __pyx_v_counter_type, __pyx_v_location_type, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 21502, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21502, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_error_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21523
 * 
 * 
 * cpdef object device_get_utilization_rates(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization rates for the device's major subsystems.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_utilization_rates
 * (fastcall or classic calling convention, chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_221device_get_utilization_rates(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef device_get_utilization_rates: constructs a
 * Python-owned Utilization wrapper object, obtains its underlying
 * nvmlUtilization_t buffer via _get_ptr(), calls
 * nvmlDeviceGetUtilizationRates with the GIL released, raises through
 * check_status on a bad NVML status, and returns the wrapper (new reference;
 * NULL on error with an exception set). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_utilization_rates(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *__pyx_v_utilization_py = 0;
  nvmlUtilization_t *__pyx_v_utilization;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_utilization_rates", 0);

  /* "cuda/bindings/_nvml.pyx":21534
 *     .. seealso:: `nvmlDeviceGetUtilizationRates`
 *     """
 *     cdef Utilization utilization_py = Utilization()             # <<<<<<<<<<<<<<
 *     cdef nvmlUtilization_t *utilization = <nvmlUtilization_t *><intptr_t>(utilization_py._get_ptr())
 *     with nogil:
 */
  /* Vectorcall of the Utilization type with zero arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21534, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_utilization_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21535
 *     """
 *     cdef Utilization utilization_py = Utilization()
 *     cdef nvmlUtilization_t *utilization = <nvmlUtilization_t *><intptr_t>(utilization_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetUtilizationRates(<Device>device, utilization)
 */
  /* Raw pointer into the wrapper's storage; remains valid while
   * utilization_py is alive (a reference is held until return). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Utilization *)__pyx_v_utilization_py->__pyx_vtab)->_get_ptr(__pyx_v_utilization_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21535, __pyx_L1_error)
  __pyx_v_utilization = ((nvmlUtilization_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21536
 *     cdef Utilization utilization_py = Utilization()
 *     cdef nvmlUtilization_t *utilization = <nvmlUtilization_t *><intptr_t>(utilization_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetUtilizationRates(<Device>device, utilization)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21537
 *     cdef nvmlUtilization_t *utilization = <nvmlUtilization_t *><intptr_t>(utilization_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetUtilizationRates(<Device>device, utilization)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return utilization_py
 */
        /* Sentinel status signals a library-loading failure raised with the
         * GIL temporarily re-acquired inside __Pyx_ErrOccurredWithGIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUtilizationRates(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_utilization); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21537, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21536
 *     cdef Utilization utilization_py = Utilization()
 *     cdef nvmlUtilization_t *utilization = <nvmlUtilization_t *><intptr_t>(utilization_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetUtilizationRates(<Device>device, utilization)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* The GIL is re-acquired on both exit paths of the nogil block. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21538
 *     with nogil:
 *         __status__ = nvmlDeviceGetUtilizationRates(<Device>device, utilization)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return utilization_py
 * 
 */
  /* check_status returns 1 (with exception set) on a non-success NVML code. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21538, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21539
 *         __status__ = nvmlDeviceGetUtilizationRates(<Device>device, utilization)
 *     check_status(__status__)
 *     return utilization_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_utilization_py);
  __pyx_r = ((PyObject *)__pyx_v_utilization_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21523
 * 
 * 
 * cpdef object device_get_utilization_rates(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization rates for the device's major subsystems.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_utilization_rates", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_utilization_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, __doc__ string, and method-table entry for the
 * Python-visible device_get_utilization_rates function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_221device_get_utilization_rates(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_220device_get_utilization_rates, "device_get_utilization_rates(intptr_t device)\n\nRetrieves the current utilization rates for the device's major subsystems.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlUtilization_t: Reference in which to return the utilization information.\n\n.. seealso:: `nvmlDeviceGetUtilizationRates`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_221device_get_utilization_rates = {"device_get_utilization_rates", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_221device_get_utilization_rates, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_220device_get_utilization_rates};
/* Python wrapper for device_get_utilization_rates: unpacks the single
 * `device` argument (positional or keyword), converts it to intptr_t, and
 * delegates to the cpdef dispatcher __pyx_pf_..._220. Returns a new
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_221device_get_utilization_rates(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_utilization_rates (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` tests `!!(len) < 0` (always false),
     * silently ignoring a negative error return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21523, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21523, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_utilization_rates", 0) < (0)) __PYX_ERR(0, 21523, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_utilization_rates", 1, 1, 1, i); __PYX_ERR(0, 21523, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21523, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21523, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_utilization_rates", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21523, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release the argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_utilization_rates", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_220device_get_utilization_rates(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatcher: forwards to the C-level implementation (skip_dispatch=1)
 * and passes its object result through unchanged (NULL means an exception
 * is already set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_220device_get_utilization_rates(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_utilization_rates", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_utilization_rates(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_utilization_rates", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21542
 * 
 * 
 * cpdef tuple device_get_encoder_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the Encoder.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_encoder_utilization
 * (fastcall or classic calling convention, chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_223device_get_encoder_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef device_get_encoder_utilization: calls
 * nvmlDeviceGetEncoderUtilization with the GIL released to fill two unsigned
 * ints (utilization %, sampling period in us), raises through check_status on
 * a bad NVML status, and returns them as a 2-tuple of Python ints (new
 * reference; NULL on error with an exception set). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_utilization(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_utilization;
  unsigned int __pyx_v_sampling_period_us;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":21558
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21559
 *     cdef unsigned int sampling_period_us
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderUtilization(<Device>device, &utilization, &sampling_period_us)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)
 */
        /* Sentinel status signals a library-loading failure raised with the
         * GIL temporarily re-acquired inside __Pyx_ErrOccurredWithGIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_utilization), (&__pyx_v_sampling_period_us)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21559, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21558
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* The GIL is re-acquired on both exit paths of the nogil block. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21560
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (utilization, sampling_period_us)
 * 
 */
  /* check_status returns 1 (with exception set) on a non-success NVML code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21560, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21561
 *         __status__ = nvmlDeviceGetEncoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both out-parameters and pack them into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_utilization); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_sampling_period_us); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21561, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21561, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21542
 * 
 * 
 * cpdef tuple device_get_encoder_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the Encoder.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, __doc__ string, and method-table entry for the
 * Python-visible device_get_encoder_utilization function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_223device_get_encoder_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_222device_get_encoder_utilization, "device_get_encoder_utilization(intptr_t device) -> tuple\n\nRetrieves the current utilization and sampling size in microseconds for the Encoder.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Reference to an unsigned int for encoder utilization info.\n    - unsigned int: Reference to an unsigned int for the sampling period in US.\n\n.. seealso:: `nvmlDeviceGetEncoderUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_223device_get_encoder_utilization = {"device_get_encoder_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_223device_get_encoder_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_222device_get_encoder_utilization};
/* Python wrapper for device_get_encoder_utilization: unpacks the single
 * `device` argument (positional or keyword), converts it to intptr_t, and
 * delegates to the cpdef dispatcher __pyx_pf_..._222. Returns a new
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_223device_get_encoder_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_encoder_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` tests `!!(len) < 0` (always false),
     * silently ignoring a negative error return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21542, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21542, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_encoder_utilization", 0) < (0)) __PYX_ERR(0, 21542, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_encoder_utilization", 1, 1, 1, i); __PYX_ERR(0, 21542, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21542, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21542, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_encoder_utilization", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21542, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release the argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_222device_get_encoder_utilization(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatcher: forwards to the C-level implementation (skip_dispatch=1)
 * and passes its tuple result through unchanged (NULL means an exception is
 * already set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_222device_get_encoder_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_utilization(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21542, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21564
 * 
 * 
 * cpdef unsigned int device_get_encoder_capacity(intptr_t device, int encoder_query_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_encoder_capacity
 * (fastcall or classic calling convention, chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_225device_get_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef device_get_encoder_capacity: calls
 * nvmlDeviceGetEncoderCapacity with the GIL released, raises through
 * check_status on a bad NVML status, and returns the capacity as a plain
 * unsigned int. Error return is 0 with an exception set (cpdef declared
 * `except? 0`, so 0 is ambiguous and callers must check PyErr_Occurred). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_capacity(intptr_t __pyx_v_device, int __pyx_v_encoder_query_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_encoder_capacity;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21577
 *     """
 *     cdef unsigned int encoder_capacity
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderCapacity(<Device>device, <_EncoderType>encoder_query_type, &encoder_capacity)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21578
 *     cdef unsigned int encoder_capacity
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderCapacity(<Device>device, <_EncoderType>encoder_query_type, &encoder_capacity)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return encoder_capacity
 */
        /* Sentinel status signals a library-loading failure raised with the
         * GIL temporarily re-acquired inside __Pyx_ErrOccurredWithGIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderCapacity(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EncoderType)__pyx_v_encoder_query_type), (&__pyx_v_encoder_capacity)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21578, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21577
 *     """
 *     cdef unsigned int encoder_capacity
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderCapacity(<Device>device, <_EncoderType>encoder_query_type, &encoder_capacity)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* The GIL is re-acquired on both exit paths of the nogil block. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21579
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderCapacity(<Device>device, <_EncoderType>encoder_query_type, &encoder_capacity)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return encoder_capacity
 * 
 */
  /* check_status returns 1 (with exception set) on a non-success NVML code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21579, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21580
 *         __status__ = nvmlDeviceGetEncoderCapacity(<Device>device, <_EncoderType>encoder_query_type, &encoder_capacity)
 *     check_status(__status__)
 *     return encoder_capacity             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_encoder_capacity;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21564
 * 
 * 
 * cpdef unsigned int device_get_encoder_capacity(intptr_t device, int encoder_query_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, __doc__ string, and method-table entry for the
 * Python-visible device_get_encoder_capacity function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_225device_get_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_224device_get_encoder_capacity, "device_get_encoder_capacity(intptr_t device, int encoder_query_type) -> unsigned int\n\nRetrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    encoder_query_type (EncoderType): Type of encoder to query.\n\nReturns:\n    unsigned int: Reference to an unsigned int for the encoder capacity.\n\n.. seealso:: `nvmlDeviceGetEncoderCapacity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_225device_get_encoder_capacity = {"device_get_encoder_capacity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_225device_get_encoder_capacity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_224device_get_encoder_capacity};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_225device_get_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_encoder_query_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_encoder_capacity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_encoder_query_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21564, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21564, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21564, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_encoder_capacity", 0) < (0)) __PYX_ERR(0, 21564, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_encoder_capacity", 1, 2, 2, i); __PYX_ERR(0, 21564, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21564, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21564, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21564, __pyx_L3_error)
    __pyx_v_encoder_query_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_encoder_query_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21564, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_encoder_capacity", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 21564, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_224device_get_encoder_capacity(__pyx_self, __pyx_v_device, __pyx_v_encoder_query_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation body behind the Python wrapper for
 * device_get_encoder_capacity: forwards the already-converted C arguments
 * to the cpdef C fast path and boxes the unsigned int result as a Python int.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_224device_get_encoder_capacity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_encoder_query_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_capacity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C fast path directly (skip_dispatch=1).  The cpdef is declared
   * "except? 0", so 0 is only an error when an exception is also pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_capacity(__pyx_v_device, __pyx_v_encoder_query_type, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21564, __pyx_L1_error)
  /* Box the C unsigned int as a Python integer. */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21583
 * 
 * 
 * cpdef tuple device_get_encoder_stats(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current encoder statistics for a given device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_227device_get_encoder_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C fast path for the cpdef device_get_encoder_stats: releases the GIL,
 * calls nvmlDeviceGetEncoderStats to fill three unsigned ints, checks the
 * NVML status, and returns a new reference to the 3-tuple
 * (session_count, average_fps, average_latency), or NULL with an exception
 * set on error.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_stats(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_session_count;
  unsigned int __pyx_v_average_fps;
  unsigned int __pyx_v_average_latency;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_stats", 0);

  /* "cuda/bindings/_nvml.pyx":21601
 *     cdef unsigned int average_fps
 *     cdef unsigned int average_latency
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderStats(<Device>device, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)
*/
  /* "with nogil": drop the GIL around the (potentially slow) NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21602
 *     cdef unsigned int average_latency
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderStats(<Device>device, &session_count, &average_fps, &average_latency)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (session_count, average_fps, average_latency)
*/
        /* The special INTERNAL_LOADING_ERROR status signals a failure while
         * loading the NVML symbol; in that case an exception may already be
         * set (checked with the GIL temporarily reacquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderStats(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_session_count), (&__pyx_v_average_fps), (&__pyx_v_average_latency)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21602, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21601
 *     cdef unsigned int average_fps
 *     cdef unsigned int average_latency
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderStats(<Device>device, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21603
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderStats(<Device>device, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (session_count, average_fps, average_latency)
 * 
*/
  /* Raise a Python exception for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21603, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21604
 *         __status__ = nvmlDeviceGetEncoderStats(<Device>device, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)
 *     return (session_count, average_fps, average_latency)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Box the three out-parameters and pack them into a tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_session_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_average_fps); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyLong_From_unsigned_int(__pyx_v_average_latency); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 21604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21604, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21604, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5) != (0)) __PYX_ERR(0, 21604, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_r = ((PyObject*)__pyx_t_6);
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21583
 * 
 * 
 * cpdef tuple device_get_encoder_stats(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current encoder statistics for a given device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython entry point for cuda.bindings._nvml.device_get_encoder_stats.
 * Unpacks the single `device` argument (positional or keyword), converts it
 * to intptr_t, and forwards to __pyx_pf_..._226device_get_encoder_stats.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_227device_get_encoder_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_226device_get_encoder_stats, "device_get_encoder_stats(intptr_t device) -> tuple\n\nRetrieves the current encoder statistics for a given device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 3-tuple containing:\n\n    - unsigned int: Reference to an unsigned int for count of active encoder sessions.\n    - unsigned int: Reference to an unsigned int for trailing average FPS of all active sessions.\n    - unsigned int: Reference to an unsigned int for encode latency in microseconds.\n\n.. seealso:: `nvmlDeviceGetEncoderStats`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_227device_get_encoder_stats = {"device_get_encoder_stats", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_227device_get_encoder_stats, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_226device_get_encoder_stats};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_227device_get_encoder_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_encoder_stats (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-FASTCALL builds receive args as a tuple; recover the count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose 0/1 result is never < 0, so the
     * original `unlikely(__pyx_kwds_len) < 0` could never detect a negative
     * error return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21583, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stash positionals, then let
       * __Pyx_ParseKeywords fill the remaining slot. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21583, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_encoder_stats", 0) < (0)) __PYX_ERR(0, 21583, __pyx_L3_error)
      /* Any slot still empty means the required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_encoder_stats", 1, 1, 1, i); __PYX_ERR(0, 21583, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21583, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 is only an error when an
     * exception is also pending. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21583, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_encoder_stats", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21583, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references we took. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_226device_get_encoder_stats(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation body behind the Python wrapper for
 * device_get_encoder_stats: forwards to the cpdef C fast path, which returns
 * a new reference to the 3-tuple (session_count, average_fps,
 * average_latency), or NULL with an exception set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_226device_get_encoder_stats(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_stats", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C fast path directly (skip_dispatch=1). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_stats(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21607
 * 
 * 
 * cpdef object device_get_encoder_sessions(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active encoder sessions on a target device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_229device_get_encoder_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C fast path for the cpdef device_get_encoder_sessions.  Two-phase NVML
 * query: first call nvmlDeviceGetEncoderSessions with a NULL buffer to learn
 * the active-session count, then allocate an EncoderSessionInfo wrapper of
 * that size and call again to fill it.  Returns a new reference to the
 * EncoderSessionInfo object (possibly empty), or NULL with an exception set.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_sessions(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_session_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_session_infos = 0;
  nvmlEncoderSessionInfo_t *__pyx_v_session_infos_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_sessions", 0);

  /* "cuda/bindings/_nvml.pyx":21615
 *     .. seealso:: `nvmlDeviceGetEncoderSessions`
 *     """
 *     cdef unsigned int[1] session_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, NULL)
*/
  /* session_count is a 1-element array initialised to [0]. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_session_count[0]), __pyx_t_1, sizeof(__pyx_v_session_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":21616
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
*/
  /* Phase 1 (nogil): size query with a NULL info buffer. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21617
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef EncoderSessionInfo session_infos = EncoderSessionInfo(session_count[0])
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderSessions(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_session_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21617, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21616
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21618
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef EncoderSessionInfo session_infos = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_infos_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_infos._get_ptr())
*/
  /* check_status_size (defined elsewhere) validates the phase-1 status;
   * presumably it tolerates the size-query outcome — confirm against its
   * definition. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21618, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21619
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 *     cdef EncoderSessionInfo session_infos = EncoderSessionInfo(session_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlEncoderSessionInfo_t *session_infos_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_infos._get_ptr())
 *     if session_count[0] == 0:
*/
  /* Allocate the Python-level EncoderSessionInfo(session_count[0]) buffer. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_session_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 21619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21619, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_session_infos = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":21620
 *     check_status_size(__status__)
 *     cdef EncoderSessionInfo session_infos = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_infos_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_infos._get_ptr())             # <<<<<<<<<<<<<<
 *     if session_count[0] == 0:
 *         return session_infos
*/
  /* Fetch the wrapper's raw buffer address via its vtable (_get_ptr). */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_session_infos->__pyx_vtab)->_get_ptr(__pyx_v_session_infos); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21620, __pyx_L1_error)
  __pyx_v_session_infos_ptr = ((nvmlEncoderSessionInfo_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":21621
 *     cdef EncoderSessionInfo session_infos = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_infos_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_infos._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_infos
 *     with nogil:
*/
  /* No active sessions: skip phase 2 and return the empty wrapper. */
  __pyx_t_9 = ((__pyx_v_session_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":21622
 *     cdef nvmlEncoderSessionInfo_t *session_infos_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_infos._get_ptr())
 *     if session_count[0] == 0:
 *         return session_infos             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, session_infos_ptr)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_session_infos);
    __pyx_r = ((PyObject *)__pyx_v_session_infos);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":21621
 *     cdef EncoderSessionInfo session_infos = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_infos_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_infos._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_infos
 *     with nogil:
*/
  }

  /* "cuda/bindings/_nvml.pyx":21623
 *     if session_count[0] == 0:
 *         return session_infos
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, session_infos_ptr)
 *     check_status(__status__)
*/
  /* Phase 2 (nogil): fill the allocated buffer with session infos. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21624
 *         return session_infos
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, session_infos_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return session_infos
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderSessions(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_session_count), __pyx_v_session_infos_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21624, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21623
 *     if session_count[0] == 0:
 *         return session_infos
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, session_infos_ptr)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21625
 *     with nogil:
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, session_infos_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return session_infos
 * 
*/
  /* Raise a Python exception for any non-success NVML status. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21625, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21626
 *         __status__ = nvmlDeviceGetEncoderSessions(<Device>device, <unsigned int*>session_count, session_infos_ptr)
 *     check_status(__status__)
 *     return session_infos             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_session_infos);
  __pyx_r = ((PyObject *)__pyx_v_session_infos);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21607
 * 
 * 
 * cpdef object device_get_encoder_sessions(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active encoder sessions on a target device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local reference to session_infos (the return value, if any,
   * holds its own reference taken above). */
  __Pyx_XDECREF((PyObject *)__pyx_v_session_infos);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython entry point for cuda.bindings._nvml.device_get_encoder_sessions.
 * Unpacks the single `device` argument (positional or keyword), converts it
 * to intptr_t, and forwards to __pyx_pf_..._228device_get_encoder_sessions.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_229device_get_encoder_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_228device_get_encoder_sessions, "device_get_encoder_sessions(intptr_t device)\n\nRetrieves information about active encoder sessions on a target device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetEncoderSessions`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_229device_get_encoder_sessions = {"device_get_encoder_sessions", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_229device_get_encoder_sessions, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_228device_get_encoder_sessions};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_229device_get_encoder_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_encoder_sessions (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Non-FASTCALL builds receive args as a tuple; recover the count here. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), whose 0/1 result is never < 0, so the
     * original `unlikely(__pyx_kwds_len) < 0` could never detect a negative
     * error return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21607, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stash positionals, then let
       * __Pyx_ParseKeywords fill the remaining slot. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21607, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_encoder_sessions", 0) < (0)) __PYX_ERR(0, 21607, __pyx_L3_error)
      /* Any slot still empty means the required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_encoder_sessions", 1, 1, 1, i); __PYX_ERR(0, 21607, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21607, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 is only an error when an
     * exception is also pending. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21607, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_encoder_sessions", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21607, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references we took. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_228device_get_encoder_sessions(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for the cpdef function `device_get_encoder_sessions`:
 * simply forwards to the C-level implementation with __pyx_skip_dispatch=1
 * so the C function does not re-dispatch back through Python.
 * Returns a new reference on success, NULL (with a Python exception set)
 * on failure.  (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_228device_get_encoder_sessions(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_encoder_sessions", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Direct call into the cpdef C entry point (skip_dispatch=1). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_sessions(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_encoder_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21629
 * 
 * 
 * cpdef tuple device_get_decoder_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the Decoder.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_231device_get_decoder_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * `device_get_decoder_utilization(intptr_t device)`.
 * Releases the GIL, calls nvmlDeviceGetDecoderUtilization, raises via
 * check_status() on a non-success NVML status, and returns a new 2-tuple
 * (utilization, sampling_period_us).  Returns NULL with a Python
 * exception set on failure.  (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_decoder_utilization(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_utilization;
  unsigned int __pyx_v_sampling_period_us;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_decoder_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":21645
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDecoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the (potentially slow) NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21646
 *     cdef unsigned int sampling_period_us
 *     with nogil:
 *         __status__ = nvmlDeviceGetDecoderUtilization(<Device>device, &utilization, &sampling_period_us)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)
 */
        /* The sentinel status presumably marks a failed lazy-load of the NVML
         * symbol; in that case a Python error is already set (checked with the
         * GIL temporarily re-acquired) — NOTE(review): confirm against the
         * cy_nvml loader. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDecoderUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_utilization), (&__pyx_v_sampling_period_us)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21646, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21645
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDecoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          /* Re-acquire the GIL on both the success and the error path. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21647
 *     with nogil:
 *         __status__ = nvmlDeviceGetDecoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (utilization, sampling_period_us)
 * 
 */
  /* check_status() raises a Python exception (returns 1) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21647, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21648
 *         __status__ = nvmlDeviceGetDecoderUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box the two out-parameters and pack them into the result tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_utilization); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21648, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_sampling_period_us); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21648, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21648, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21648, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21648, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21629
 * 
 * 
 * cpdef tuple device_get_decoder_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the Decoder.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_decoder_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_231device_get_decoder_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_230device_get_decoder_utilization, "device_get_decoder_utilization(intptr_t device) -> tuple\n\nRetrieves the current utilization and sampling size in microseconds for the Decoder.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Reference to an unsigned int for decoder utilization info.\n    - unsigned int: Reference to an unsigned int for the sampling period in US.\n\n.. seealso:: `nvmlDeviceGetDecoderUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_231device_get_decoder_utilization = {"device_get_decoder_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_231device_get_decoder_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_230device_get_decoder_utilization};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point) for
 * `device_get_decoder_utilization(intptr_t device)`: unpacks the single
 * positional/keyword argument `device`, converts it to intptr_t, and
 * delegates to the pf_ body.  Returns NULL with an exception set on
 * bad arguments or conversion failure.
 *
 * FIX(review): the keyword-count error check below was written as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely(x) expands to
 * __builtin_expect(!!(x), 0), that compares a 0/1 value with 0 and is
 * always false, silently disabling the check.  Reparenthesized to
 * `unlikely(__pyx_kwds_len < 0)` so a failed kwargs-size query is
 * actually reported. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_231device_get_decoder_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_decoder_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `unlikely(__pyx_kwds_len) < 0` — always false (see header note) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21629, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_decoder_utilization", 0) < (0)) __PYX_ERR(0, 21629, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_decoder_utilization", 1, 1, 1, i); __PYX_ERR(0, 21629, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21629, __pyx_L3_error)
    }
    /* NOTE(review): conversion assumes Py_ssize_t and intptr_t have the same
     * width/representation (true on mainstream platforms) — confirm if
     * porting to an exotic target. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21629, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_decoder_utilization", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21629, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_decoder_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_230device_get_decoder_utilization(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for the cpdef function `device_get_decoder_utilization`:
 * forwards to the C-level implementation with __pyx_skip_dispatch=1 so the
 * C function does not re-dispatch back through Python.  Returns a new
 * reference on success, NULL (with an exception set) on failure.
 * (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_230device_get_decoder_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_decoder_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Direct call into the cpdef C entry point (skip_dispatch=1). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_decoder_utilization(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_decoder_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21651
 * 
 * 
 * cpdef tuple device_get_jpg_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the JPG.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_233device_get_jpg_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * `device_get_jpg_utilization(intptr_t device)`.
 * Releases the GIL, calls nvmlDeviceGetJpgUtilization, raises via
 * check_status() on a non-success NVML status, and returns a new 2-tuple
 * (utilization, sampling_period_us).  Returns NULL with a Python
 * exception set on failure.  (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_jpg_utilization(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_utilization;
  unsigned int __pyx_v_sampling_period_us;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_jpg_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":21667
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetJpgUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21668
 *     cdef unsigned int sampling_period_us
 *     with nogil:
 *         __status__ = nvmlDeviceGetJpgUtilization(<Device>device, &utilization, &sampling_period_us)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)
 */
        /* Sentinel status presumably marks a failed lazy-load of the NVML
         * symbol (Python error already set) — NOTE(review): confirm against
         * the cy_nvml loader. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetJpgUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_utilization), (&__pyx_v_sampling_period_us)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21668, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21667
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetJpgUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          /* Re-acquire the GIL on both the success and the error path. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21669
 *     with nogil:
 *         __status__ = nvmlDeviceGetJpgUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (utilization, sampling_period_us)
 * 
 */
  /* check_status() raises a Python exception (returns 1) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21669, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21670
 *         __status__ = nvmlDeviceGetJpgUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box the two out-parameters and pack them into the result tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_utilization); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_sampling_period_us); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21670, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21670, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21651
 * 
 * 
 * cpdef tuple device_get_jpg_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the JPG.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_jpg_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_233device_get_jpg_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_232device_get_jpg_utilization, "device_get_jpg_utilization(intptr_t device) -> tuple\n\nRetrieves the current utilization and sampling size in microseconds for the JPG.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Reference to an unsigned int for jpg utilization info.\n    - unsigned int: Reference to an unsigned int for the sampling period in US.\n\n.. seealso:: `nvmlDeviceGetJpgUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_233device_get_jpg_utilization = {"device_get_jpg_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_233device_get_jpg_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_232device_get_jpg_utilization};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point) for
 * `device_get_jpg_utilization(intptr_t device)`: unpacks the single
 * positional/keyword argument `device`, converts it to intptr_t, and
 * delegates to the pf_ body.  Returns NULL with an exception set on
 * bad arguments or conversion failure.
 *
 * FIX(review): the keyword-count error check below was written as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely(x) expands to
 * __builtin_expect(!!(x), 0), that compares a 0/1 value with 0 and is
 * always false, silently disabling the check.  Reparenthesized to
 * `unlikely(__pyx_kwds_len < 0)` so a failed kwargs-size query is
 * actually reported. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_233device_get_jpg_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_jpg_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `unlikely(__pyx_kwds_len) < 0` — always false (see header note) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21651, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21651, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_jpg_utilization", 0) < (0)) __PYX_ERR(0, 21651, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_jpg_utilization", 1, 1, 1, i); __PYX_ERR(0, 21651, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21651, __pyx_L3_error)
    }
    /* NOTE(review): conversion assumes Py_ssize_t and intptr_t have the same
     * width/representation (true on mainstream platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21651, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_jpg_utilization", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21651, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_jpg_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_232device_get_jpg_utilization(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for the cpdef function `device_get_jpg_utilization`:
 * forwards to the C-level implementation with __pyx_skip_dispatch=1 so the
 * C function does not re-dispatch back through Python.  Returns a new
 * reference on success, NULL (with an exception set) on failure.
 * (Cython-generated; do not edit by hand.) */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_232device_get_jpg_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_jpg_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Direct call into the cpdef C entry point (skip_dispatch=1). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_jpg_utilization(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_jpg_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21673
 * 
 * 
 * cpdef tuple device_get_ofa_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_235device_get_ofa_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * `device_get_ofa_utilization(intptr_t device)` (OFA = Optical Flow
 * Accelerator).  Releases the GIL, calls nvmlDeviceGetOfaUtilization,
 * raises via check_status() on a non-success NVML status, and returns a
 * new 2-tuple (utilization, sampling_period_us).  Returns NULL with a
 * Python exception set on failure.  (Cython-generated; do not edit by
 * hand.) */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_ofa_utilization(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_utilization;
  unsigned int __pyx_v_sampling_period_us;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_ofa_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":21689
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetOfaUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21690
 *     cdef unsigned int sampling_period_us
 *     with nogil:
 *         __status__ = nvmlDeviceGetOfaUtilization(<Device>device, &utilization, &sampling_period_us)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)
 */
        /* Sentinel status presumably marks a failed lazy-load of the NVML
         * symbol (Python error already set) — NOTE(review): confirm against
         * the cy_nvml loader. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetOfaUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_utilization), (&__pyx_v_sampling_period_us)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21690, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21689
 *     cdef unsigned int utilization
 *     cdef unsigned int sampling_period_us
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetOfaUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          /* Re-acquire the GIL on both the success and the error path. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21691
 *     with nogil:
 *         __status__ = nvmlDeviceGetOfaUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (utilization, sampling_period_us)
 * 
 */
  /* check_status() raises a Python exception (returns 1) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21691, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21692
 *         __status__ = nvmlDeviceGetOfaUtilization(<Device>device, &utilization, &sampling_period_us)
 *     check_status(__status__)
 *     return (utilization, sampling_period_us)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box the two out-parameters and pack them into the result tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_utilization); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_sampling_period_us); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21692, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21692, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21673
 * 
 * 
 * cpdef tuple device_get_ofa_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator).
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_ofa_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_235device_get_ofa_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_234device_get_ofa_utilization, "device_get_ofa_utilization(intptr_t device) -> tuple\n\nRetrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator).\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Reference to an unsigned int for ofa utilization info.\n    - unsigned int: Reference to an unsigned int for the sampling period in US.\n\n.. seealso:: `nvmlDeviceGetOfaUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_235device_get_ofa_utilization = {"device_get_ofa_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_235device_get_ofa_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_234device_get_ofa_utilization};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point) for
 * `device_get_ofa_utilization(intptr_t device)`: unpacks the single
 * positional/keyword argument `device`, converts it to intptr_t, and
 * delegates to the pf_ body.  Returns NULL with an exception set on
 * bad arguments or conversion failure.
 *
 * FIX(review): the keyword-count error check below was written as
 * `unlikely(__pyx_kwds_len) < 0`; since unlikely(x) expands to
 * __builtin_expect(!!(x), 0), that compares a 0/1 value with 0 and is
 * always false, silently disabling the check.  Reparenthesized to
 * `unlikely(__pyx_kwds_len < 0)` so a failed kwargs-size query is
 * actually reported. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_235device_get_ofa_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_ofa_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `unlikely(__pyx_kwds_len) < 0` — always false (see header note) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21673, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21673, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_ofa_utilization", 0) < (0)) __PYX_ERR(0, 21673, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_ofa_utilization", 1, 1, 1, i); __PYX_ERR(0, 21673, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21673, __pyx_L3_error)
    }
    /* NOTE(review): conversion assumes Py_ssize_t and intptr_t have the same
     * width/representation (true on mainstream platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21673, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_ofa_utilization", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21673, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_ofa_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_234device_get_ofa_utilization(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for `device_get_ofa_utilization`:
 * forwards to the C-level cpdef body with __pyx_skip_dispatch=1 so the
 * call does not re-dispatch through the Python wrapper.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_234device_get_ofa_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_ofa_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C fast path; NULL signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_ofa_utilization(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21673, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_ofa_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21695
 * 
 * 
 * cpdef object device_get_fbc_stats(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the active frame buffer capture sessions statistics for a given device.
 * 
*/

/* C-level body of the cpdef `device_get_fbc_stats(intptr_t device)`:
 * allocates a Python FBCStats wrapper object, passes its underlying
 * nvmlFBCStats_t buffer to nvmlDeviceGetFBCStats (GIL released around
 * the driver call), checks the NVML status, and returns the wrapper. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_237device_get_fbc_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_stats(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_fbc_stats_py = 0;
  nvmlFBCStats_t *__pyx_v_fbc_stats;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fbc_stats", 0);

  /* "cuda/bindings/_nvml.pyx":21706
 *     .. seealso:: `nvmlDeviceGetFBCStats`
 *     """
 *     cdef FBCStats fbc_stats_py = FBCStats()             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:
 */
  /* Vectorcall-style instantiation of the FBCStats extension type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21706, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_fbc_stats_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21707
 *     """
 *     cdef FBCStats fbc_stats_py = FBCStats()
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCStats(<Device>device, fbc_stats)
 */
  /* _get_ptr() yields the address of the wrapper's internal struct;
   * it stays valid while fbc_stats_py is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_fbc_stats_py->__pyx_vtab)->_get_ptr(__pyx_v_fbc_stats_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21707, __pyx_L1_error)
  __pyx_v_fbc_stats = ((nvmlFBCStats_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21708
 *     cdef FBCStats fbc_stats_py = FBCStats()
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFBCStats(<Device>device, fbc_stats)
 *     check_status(__status__)
 */
  /* GIL is released for the blocking NVML driver call and restored on
   * both the normal and error exits of this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21709
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCStats(<Device>device, fbc_stats)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fbc_stats_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCStats(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fbc_stats); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21709, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21708
 *     cdef FBCStats fbc_stats_py = FBCStats()
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFBCStats(<Device>device, fbc_stats)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21710
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCStats(<Device>device, fbc_stats)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fbc_stats_py
 * 
 */
  /* check_status raises a Python exception (returns 1) on NVML failure. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21710, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21711
 *         __status__ = nvmlDeviceGetFBCStats(<Device>device, fbc_stats)
 *     check_status(__status__)
 *     return fbc_stats_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_fbc_stats_py);
  __pyx_r = ((PyObject *)__pyx_v_fbc_stats_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21695
 * 
 * 
 * cpdef object device_get_fbc_stats(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the active frame buffer capture sessions statistics for a given device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fbc_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_fbc_stats_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: forward declaration, docstring, and method-table entry
 * for `device_get_fbc_stats` (registered with METH_FASTCALL|METH_KEYWORDS). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_237device_get_fbc_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_236device_get_fbc_stats, "device_get_fbc_stats(intptr_t device)\n\nRetrieves the active frame buffer capture sessions statistics for a given device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlFBCStats_t: Reference to nvmlFBCStats_t structure containing NvFBC stats.\n\n.. seealso:: `nvmlDeviceGetFBCStats`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_237device_get_fbc_stats = {"device_get_fbc_stats", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_237device_get_fbc_stats, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_236device_get_fbc_stats};
/* Python wrapper for `device_get_fbc_stats`: unpacks the single `device`
 * argument (given positionally or by keyword), converts it to intptr_t,
 * and forwards to the implementation shim.  Returns a new reference, or
 * NULL with an exception set.  Interface is unchanged; only the dead
 * keyword-length error check below is corrected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_237device_get_fbc_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_fbc_stats (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must sit inside unlikely().  With GCC's
     * `#define unlikely(x) __builtin_expect(!!(x), 0)` the original
     * `unlikely(__pyx_kwds_len) < 0` normalized the value to 0/1, so a
     * negative (error) result from __Pyx_NumKwargs_FASTCALL was never
     * detected and the pending exception could be silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21695, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge
       * keyword arguments, then verify every slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21695, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_fbc_stats", 0) < (0)) __PYX_ERR(0, 21695, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_fbc_stats", 1, 1, 1, i); __PYX_ERR(0, 21695, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21695, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21695, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_fbc_stats", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21695, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fbc_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_236device_get_fbc_stats(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for `device_get_fbc_stats`: forwards
 * to the C-level cpdef body with __pyx_skip_dispatch=1 (no Python
 * re-dispatch).  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_236device_get_fbc_stats(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fbc_stats", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C fast path; NULL signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_stats(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fbc_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21714
 * 
 * 
 * cpdef object device_get_fbc_sessions(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active frame buffer capture sessions on a target device.
 * 
*/

/* C-level body of the cpdef `device_get_fbc_sessions(intptr_t device)`.
 * Two-phase NVML query: first call nvmlDeviceGetFBCSessions with a NULL
 * buffer to learn the session count, then allocate an FBCSessionInfo
 * wrapper of that size and call again to fill it.  Both driver calls run
 * with the GIL released.  Returns the wrapper object (possibly empty),
 * or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_239device_get_fbc_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_sessions(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_session_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_session_info = 0;
  nvmlFBCSessionInfo_t *__pyx_v_session_info_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fbc_sessions", 0);

  /* "cuda/bindings/_nvml.pyx":21722
 *     .. seealso:: `nvmlDeviceGetFBCSessions`
 *     """
 *     cdef unsigned int[1] session_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, NULL)
 */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_session_count[0]), __pyx_t_1, sizeof(__pyx_v_session_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":21723
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 */
  /* Phase 1 (size query): NULL info buffer, GIL released around the
   * driver call and restored on both normal and error exits. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21724
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCSessions(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_session_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21724, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21723
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21725
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 */
  /* check_status_size tolerates the size-query status convention;
   * returns 1 (with a Python exception set) on a real NVML failure. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21725, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21726
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:
 */
  /* Construct FBCSessionInfo(session_count[0]) via vectorcall. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_session_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 21726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21726, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_session_info = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":21727
 *     check_status_size(__status__)
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())             # <<<<<<<<<<<<<<
 *     if session_count[0] == 0:
 *         return session_info
 */
  /* _get_ptr() yields the wrapper's internal buffer; valid while
   * session_info is alive. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_session_info->__pyx_vtab)->_get_ptr(__pyx_v_session_info); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21727, __pyx_L1_error)
  __pyx_v_session_info_ptr = ((nvmlFBCSessionInfo_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":21728
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_info
 *     with nogil:
 */
  /* No active sessions: skip the second driver call entirely. */
  __pyx_t_9 = ((__pyx_v_session_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":21729
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:
 *         return session_info             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, session_info_ptr)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_session_info);
    __pyx_r = ((PyObject *)__pyx_v_session_info);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":21728
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_info
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":21730
 *     if session_count[0] == 0:
 *         return session_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 */
  /* Phase 2 (fill): same call with the allocated buffer, GIL released. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21731
 *         return session_info
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, session_info_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return session_info
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCSessions(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_session_count), __pyx_v_session_info_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21731, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21730
 *     if session_count[0] == 0:
 *         return session_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21732
 *     with nogil:
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return session_info
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21732, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21733
 *         __status__ = nvmlDeviceGetFBCSessions(<Device>device, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 *     return session_info             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_session_info);
  __pyx_r = ((PyObject *)__pyx_v_session_info);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21714
 * 
 * 
 * cpdef object device_get_fbc_sessions(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active frame buffer capture sessions on a target device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fbc_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_session_info);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: forward declaration, docstring, and method-table entry
 * for `device_get_fbc_sessions` (registered with METH_FASTCALL|METH_KEYWORDS). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_239device_get_fbc_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_238device_get_fbc_sessions, "device_get_fbc_sessions(intptr_t device)\n\nRetrieves information about active frame buffer capture sessions on a target device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetFBCSessions`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_239device_get_fbc_sessions = {"device_get_fbc_sessions", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_239device_get_fbc_sessions, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_238device_get_fbc_sessions};
/* Python wrapper for `device_get_fbc_sessions`: unpacks the single
 * `device` argument (given positionally or by keyword), converts it to
 * intptr_t, and forwards to the implementation shim.  Returns a new
 * reference, or NULL with an exception set.  Interface is unchanged;
 * only the dead keyword-length error check below is corrected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_239device_get_fbc_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_fbc_sessions (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must sit inside unlikely().  With GCC's
     * `#define unlikely(x) __builtin_expect(!!(x), 0)` the original
     * `unlikely(__pyx_kwds_len) < 0` normalized the value to 0/1, so a
     * negative (error) result from __Pyx_NumKwargs_FASTCALL was never
     * detected and the pending exception could be silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21714, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge
       * keyword arguments, then verify every slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21714, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_fbc_sessions", 0) < (0)) __PYX_ERR(0, 21714, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_fbc_sessions", 1, 1, 1, i); __PYX_ERR(0, 21714, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21714, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21714, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_fbc_sessions", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21714, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fbc_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_238device_get_fbc_sessions(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for `device_get_fbc_sessions`:
 * forwards to the C-level cpdef body with __pyx_skip_dispatch=1 (no
 * Python re-dispatch).  Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_238device_get_fbc_sessions(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_fbc_sessions", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C fast path; NULL signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_sessions(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_fbc_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21736
 * 
 * 
 * cpdef tuple device_get_driver_model_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending driver model for the device.
 * 
*/

/* C-level body of the cpdef `device_get_driver_model_v2(intptr_t device)`:
 * queries the current and pending driver model via
 * nvmlDeviceGetDriverModel_v2 (GIL released around the driver call),
 * checks the NVML status, and returns a 2-tuple of Python ints
 * (current, pending).  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_241device_get_driver_model_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_driver_model_v2(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__DriverModel __pyx_v_current;
  __pyx_t_4cuda_8bindings_5_nvml__DriverModel __pyx_v_pending;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_driver_model_v2", 0);

  /* "cuda/bindings/_nvml.pyx":21752
 *     cdef _DriverModel current
 *     cdef _DriverModel pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDriverModel_v2(<Device>device, &current, &pending)
 *     check_status(__status__)
 */
  /* GIL is released for the blocking NVML driver call and restored on
   * both the normal and error exits of this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21753
 *     cdef _DriverModel pending
 *     with nogil:
 *         __status__ = nvmlDeviceGetDriverModel_v2(<Device>device, &current, &pending)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (<int>current, <int>pending)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDriverModel_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_current), (&__pyx_v_pending)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21753, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21752
 *     cdef _DriverModel current
 *     cdef _DriverModel pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDriverModel_v2(<Device>device, &current, &pending)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21754
 *     with nogil:
 *         __status__ = nvmlDeviceGetDriverModel_v2(<Device>device, &current, &pending)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (<int>current, <int>pending)
 * 
 */
  /* check_status raises a Python exception (returns 1) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21754, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21755
 *         __status__ = nvmlDeviceGetDriverModel_v2(<Device>device, &current, &pending)
 *     check_status(__status__)
 *     return (<int>current, <int>pending)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Build the (current, pending) result tuple; SET_ITEM steals the
   * references handed over via GIVEREF. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(((int)__pyx_v_current)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_int(((int)__pyx_v_pending)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21755, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21755, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21736
 * 
 * 
 * cpdef tuple device_get_driver_model_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending driver model for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_driver_model_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: forward declaration, docstring, and method-table entry
 * for `device_get_driver_model_v2` (registered with METH_FASTCALL|METH_KEYWORDS). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_241device_get_driver_model_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_240device_get_driver_model_v2, "device_get_driver_model_v2(intptr_t device) -> tuple\n\nRetrieves the current and pending driver model for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - int: Reference in which to return the current driver model.\n    - int: Reference in which to return the pending driver model.\n\n.. seealso:: `nvmlDeviceGetDriverModel_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_241device_get_driver_model_v2 = {"device_get_driver_model_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_241device_get_driver_model_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_240device_get_driver_model_v2};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_241device_get_driver_model_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* CPython entry point for device_get_driver_model_v2(device).
   * Parses exactly one argument, "device" (positional or keyword), converts it
   * to intptr_t, and delegates to the __pyx_pf_..._240 implementation.
   * Returns NULL with an exception set if argument parsing fails. */
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_driver_model_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the generated code read `if (unlikely(__pyx_kwds_len) < 0)`.  Since
     * unlikely(x) expands to __builtin_expect(!!(x), 0), that booleanizes the
     * count before comparing, so the `< 0` error branch could never be taken.
     * Compare the raw value inside unlikely() instead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21736, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21736, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_driver_model_v2", 0) < (0)) __PYX_ERR(0, 21736, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_driver_model_v2", 1, 1, 1, i); __PYX_ERR(0, 21736, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21736, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t conversion). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21736, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_driver_model_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21736, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_driver_model_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_240device_get_driver_model_v2(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_240device_get_driver_model_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Python-visible body of the cpdef function: forwards to the shared C
   * implementation (__pyx_f_...) with __pyx_skip_dispatch=1 and returns the
   * resulting tuple, adding a traceback frame if the call raised. */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_driver_model_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level implementation; NULL signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_driver_model_v2(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_driver_model_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21758
 * 
 * 
 * cpdef str device_get_vbios_version(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get VBIOS version of the device.
 * 
*/

/* Forward declaration of the Python entry point for device_get_vbios_version;
 * the signature depends on whether METH_FASTCALL is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_243device_get_vbios_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vbios_version(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  /* C implementation of cpdef device_get_vbios_version(intptr_t device) -> str.
   * Calls nvmlDeviceGetVbiosVersion into a fixed 32-byte buffer with the GIL
   * released, raises via check_status() on a non-success status, and returns
   * the NUL-terminated buffer contents as a Python str. */
  unsigned int __pyx_v_length;
  char __pyx_v_version[32];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vbios_version", 0);

  /* "cuda/bindings/_nvml.pyx":21766
 *     .. seealso:: `nvmlDeviceGetVbiosVersion`
 *     """
 *     cdef unsigned int length = 32             # <<<<<<<<<<<<<<
 *     cdef char[32] version
 *     with nogil:
 */
  /* Buffer capacity passed to NVML; matches the char[32] buffer above. */
  __pyx_v_length = 32;

  /* "cuda/bindings/_nvml.pyx":21768
 *     cdef unsigned int length = 32
 *     cdef char[32] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVbiosVersion(<Device>device, version, length)
 *     check_status(__status__)
 */
  {
      /* `with nogil` block: release the GIL around the NVML call and restore
       * it on both the normal and the error path. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21769
 *     cdef char[32] version
 *     with nogil:
 *         __status__ = nvmlDeviceGetVbiosVersion(<Device>device, version, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)
 */
        /* The sentinel status signals an internal library-loading failure; in
         * that case an exception may already be set (checked with the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVbiosVersion(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21769, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21768
 *     cdef unsigned int length = 32
 *     cdef char[32] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVbiosVersion(<Device>device, version, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21770
 *     with nogil:
 *         __status__ = nvmlDeviceGetVbiosVersion(<Device>device, version, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(version)
 * 
 */
  /* check_status() raises a Python exception (returns 1) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21770, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21771
 *         __status__ = nvmlDeviceGetVbiosVersion(<Device>device, version, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* Decode the NUL-terminated C string into a new Python str. */
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_version); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21758
 * 
 * 
 * cpdef str device_get_vbios_version(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get VBIOS version of the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vbios_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-level docstring, and PyMethodDef table entry for
 * device_get_vbios_version (FASTCALL or classic signature, chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_243device_get_vbios_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_242device_get_vbios_version, "device_get_vbios_version(intptr_t device) -> str\n\nGet VBIOS version of the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetVbiosVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_243device_get_vbios_version = {"device_get_vbios_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_243device_get_vbios_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_242device_get_vbios_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_243device_get_vbios_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* CPython entry point for device_get_vbios_version(device).
   * Parses exactly one argument, "device" (positional or keyword), converts it
   * to intptr_t, and delegates to the __pyx_pf_..._242 implementation.
   * Returns NULL with an exception set if argument parsing fails. */
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vbios_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`; unlikely() booleanizes its
     * argument, making the `< 0` comparison always false, so a negative
     * (error) kwargs count was silently ignored.  Compare inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21758, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21758, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vbios_version", 0) < (0)) __PYX_ERR(0, 21758, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vbios_version", 1, 1, 1, i); __PYX_ERR(0, 21758, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21758, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t conversion). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21758, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vbios_version", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21758, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vbios_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_242device_get_vbios_version(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_242device_get_vbios_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Python-visible body of the cpdef function: forwards to the shared C
   * implementation (__pyx_f_...) with __pyx_skip_dispatch=1 and returns the
   * resulting str, adding a traceback frame if the call raised. */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vbios_version", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level implementation; NULL signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vbios_version(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vbios_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21774
 * 
 * 
 * cpdef object device_get_bridge_chip_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Bridge Chip Information for all the bridge chips on the board.
 * 
*/

/* Forward declaration of the Python entry point for device_get_bridge_chip_info;
 * the signature depends on whether METH_FASTCALL is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_245device_get_bridge_chip_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_bridge_chip_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  /* C implementation of cpdef device_get_bridge_chip_info(intptr_t device).
   * Allocates a Python-level BridgeChipHierarchy wrapper, obtains the raw
   * nvmlBridgeChipHierarchy_t pointer it owns via _get_ptr(), fills it with
   * nvmlDeviceGetBridgeChipInfo while the GIL is released, raises via
   * check_status() on error, and returns the populated wrapper object. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *__pyx_v_bridge_hierarchy_py = 0;
  nvmlBridgeChipHierarchy_t *__pyx_v_bridge_hierarchy;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_bridge_chip_info", 0);

  /* "cuda/bindings/_nvml.pyx":21785
 *     .. seealso:: `nvmlDeviceGetBridgeChipInfo`
 *     """
 *     cdef BridgeChipHierarchy bridge_hierarchy_py = BridgeChipHierarchy()             # <<<<<<<<<<<<<<
 *     cdef nvmlBridgeChipHierarchy_t *bridge_hierarchy = <nvmlBridgeChipHierarchy_t *><intptr_t>(bridge_hierarchy_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate BridgeChipHierarchy() via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21785, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_bridge_hierarchy_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21786
 *     """
 *     cdef BridgeChipHierarchy bridge_hierarchy_py = BridgeChipHierarchy()
 *     cdef nvmlBridgeChipHierarchy_t *bridge_hierarchy = <nvmlBridgeChipHierarchy_t *><intptr_t>(bridge_hierarchy_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetBridgeChipInfo(<Device>device, bridge_hierarchy)
 */
  /* _get_ptr() returns the wrapper's backing storage address as intptr_t;
   * the struct stays alive as long as bridge_hierarchy_py does. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)__pyx_v_bridge_hierarchy_py->__pyx_vtab)->_get_ptr(__pyx_v_bridge_hierarchy_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21786, __pyx_L1_error)
  __pyx_v_bridge_hierarchy = ((nvmlBridgeChipHierarchy_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21787
 *     cdef BridgeChipHierarchy bridge_hierarchy_py = BridgeChipHierarchy()
 *     cdef nvmlBridgeChipHierarchy_t *bridge_hierarchy = <nvmlBridgeChipHierarchy_t *><intptr_t>(bridge_hierarchy_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBridgeChipInfo(<Device>device, bridge_hierarchy)
 *     check_status(__status__)
 */
  {
      /* `with nogil` block: release the GIL around the NVML call and restore
       * it on both the normal and the error path. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21788
 *     cdef nvmlBridgeChipHierarchy_t *bridge_hierarchy = <nvmlBridgeChipHierarchy_t *><intptr_t>(bridge_hierarchy_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetBridgeChipInfo(<Device>device, bridge_hierarchy)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return bridge_hierarchy_py
 */
        /* The sentinel status signals an internal library-loading failure; in
         * that case an exception may already be set (checked with the GIL). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBridgeChipInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_bridge_hierarchy); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21788, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21787
 *     cdef BridgeChipHierarchy bridge_hierarchy_py = BridgeChipHierarchy()
 *     cdef nvmlBridgeChipHierarchy_t *bridge_hierarchy = <nvmlBridgeChipHierarchy_t *><intptr_t>(bridge_hierarchy_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBridgeChipInfo(<Device>device, bridge_hierarchy)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21789
 *     with nogil:
 *         __status__ = nvmlDeviceGetBridgeChipInfo(<Device>device, bridge_hierarchy)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return bridge_hierarchy_py
 * 
 */
  /* check_status() raises a Python exception (returns 1) for error statuses. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21789, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21790
 *         __status__ = nvmlDeviceGetBridgeChipInfo(<Device>device, bridge_hierarchy)
 *     check_status(__status__)
 *     return bridge_hierarchy_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_bridge_hierarchy_py);
  __pyx_r = ((PyObject *)__pyx_v_bridge_hierarchy_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21774
 * 
 * 
 * cpdef object device_get_bridge_chip_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Bridge Chip Information for all the bridge chips on the board.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bridge_chip_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_bridge_hierarchy_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-level docstring, and PyMethodDef table entry for
 * device_get_bridge_chip_info (FASTCALL or classic signature, chosen at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_245device_get_bridge_chip_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_244device_get_bridge_chip_info, "device_get_bridge_chip_info(intptr_t device)\n\nGet Bridge Chip Information for all the bridge chips on the board.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlBridgeChipHierarchy_t: Reference to the returned bridge chip Hierarchy.\n\n.. seealso:: `nvmlDeviceGetBridgeChipInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_245device_get_bridge_chip_info = {"device_get_bridge_chip_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_245device_get_bridge_chip_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_244device_get_bridge_chip_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_245device_get_bridge_chip_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* CPython entry point for device_get_bridge_chip_info(device).
   * Parses exactly one argument, "device" (positional or keyword), converts it
   * to intptr_t, and delegates to the __pyx_pf_..._244 implementation.
   * Returns NULL with an exception set if argument parsing fails. */
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_bridge_chip_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `if (unlikely(__pyx_kwds_len) < 0)`; unlikely() booleanizes its
     * argument, making the `< 0` comparison always false, so a negative
     * (error) kwargs count was silently ignored.  Compare inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21774, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21774, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_bridge_chip_info", 0) < (0)) __PYX_ERR(0, 21774, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_bridge_chip_info", 1, 1, 1, i); __PYX_ERR(0, 21774, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21774, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t conversion). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21774, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_bridge_chip_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21774, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: release any argument references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bridge_chip_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_244device_get_bridge_chip_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_244device_get_bridge_chip_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Python-visible body of the cpdef function: forwards to the shared C
   * implementation (__pyx_f_...) with __pyx_skip_dispatch=1 and returns the
   * resulting object, adding a traceback frame if the call raised. */
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_bridge_chip_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level implementation; NULL signals a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_bridge_chip_info(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bridge_chip_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21793
 * 
 * 
 * cpdef object device_get_compute_running_processes_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get information about processes with a compute context on a device.
 * 
*/

/* Forward declaration of the Python entry point for
 * device_get_compute_running_processes_v3; the signature depends on whether
 * METH_FASTCALL is available. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_247device_get_compute_running_processes_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef device_get_compute_running_processes_v3.
 * Two-pass NVML query: first call nvmlDeviceGetComputeRunningProcesses_v3 with
 * a NULL buffer (GIL released) to learn the required entry count, construct a
 * ProcessInfo holder of that size, then call again to fill its buffer.
 * Returns a new reference to the ProcessInfo object, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_compute_running_processes_v3(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_info_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_infos = 0;
  nvmlProcessInfo_t *__pyx_v_infos_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_compute_running_processes_v3", 0);

  /* "cuda/bindings/_nvml.pyx":21801
 *     .. seealso:: `nvmlDeviceGetComputeRunningProcesses_v3`
 *     """
 *     cdef unsigned int[1] info_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 */
  /* info_count[0] = 0: NVML treats a zero count as a pure size query. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_info_count[0]), __pyx_t_1, sizeof(__pyx_v_info_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":21802
 *     """
 *     cdef unsigned int[1] info_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)
 */
  /* First pass (GIL released): probe for the required entry count. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21803
 *     cdef unsigned int[1] info_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeRunningProcesses_v3(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_info_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21803, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21802
 *     """
 *     cdef unsigned int[1] info_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21804
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 */
  /* check_status_size tolerates the INSUFFICIENT_SIZE status of a size probe. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21804, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21805
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:
 */
  /* Allocate the result holder: ProcessInfo(info_count[0]). */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_info_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 21805, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21805, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_infos = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":21806
 *     check_status_size(__status__)
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())             # <<<<<<<<<<<<<<
 *     if info_count[0] == 0:
 *         return infos
 */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_infos->__pyx_vtab)->_get_ptr(__pyx_v_infos); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21806, __pyx_L1_error)
  __pyx_v_infos_ptr = ((nvmlProcessInfo_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":21807
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return infos
 *     with nogil:
 */
  /* No running processes: return the empty ProcessInfo without a second call. */
  __pyx_t_9 = ((__pyx_v_info_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":21808
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:
 *         return infos             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_infos);
    __pyx_r = ((PyObject *)__pyx_v_infos);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":21807
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return infos
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":21809
 *     if info_count[0] == 0:
 *         return infos
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)
 */
  /* Second pass (GIL released): fill the allocated buffer. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21810
 *         return infos
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return infos
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeRunningProcesses_v3(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_info_count), __pyx_v_infos_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21810, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21809
 *     if info_count[0] == 0:
 *         return infos
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21811
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return infos
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21811, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21812
 *         __status__ = nvmlDeviceGetComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)
 *     return infos             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_infos);
  __pyx_r = ((PyObject *)__pyx_v_infos);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21793
 * 
 * 
 * cpdef object device_get_compute_running_processes_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get information about processes with a compute context on a device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_running_processes_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_infos);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_247device_get_compute_running_processes_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry exposing the
 * device_get_compute_running_processes_v3 wrapper to Python
 * (FASTCALL + keywords calling convention). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_246device_get_compute_running_processes_v3, "device_get_compute_running_processes_v3(intptr_t device)\n\nGet information about processes with a compute context on a device.\n\nArgs:\n    device (intptr_t): The device handle or MIG device handle.\n\n.. seealso:: `nvmlDeviceGetComputeRunningProcesses_v3`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_247device_get_compute_running_processes_v3 = {"device_get_compute_running_processes_v3", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_247device_get_compute_running_processes_v3, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_246device_get_compute_running_processes_v3};
/* Python wrapper for device_get_compute_running_processes_v3(device).
 * Parses exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and dispatches to the def-level implementation.  Returns a
 * new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_247device_get_compute_running_processes_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_compute_running_processes_v3 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` evaluates
     * `__builtin_expect(!!(len), 0) < 0`, which is always false, so a
     * negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21793, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21793, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_compute_running_processes_v3", 0) < (0)) __PYX_ERR(0, 21793, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_compute_running_processes_v3", 1, 1, 1, i); __PYX_ERR(0, 21793, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21793, __pyx_L3_error)
    }
    /* NOTE(review): conversion uses PyLong_AsSsize_t for an intptr_t target —
     * assumes Py_ssize_t and intptr_t share a representation (true on
     * mainstream flat-address-space platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21793, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_compute_running_processes_v3", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21793, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_running_processes_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_246device_get_compute_running_processes_v3(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level (def) entry point: forwards to the cpdef C implementation
 * with __pyx_skip_dispatch=1 and returns the resulting new reference, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_246device_get_compute_running_processes_v3(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_compute_running_processes_v3", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Dispatch straight to the C implementation (skip_dispatch=1). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_running_processes_v3(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21793, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_running_processes_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21815
 * 
 * 
 * cpdef object device_get_mps_compute_running_processes_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get information about processes with a Multi-Process Service (MPS) compute context on a device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_249device_get_mps_compute_running_processes_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef device_get_mps_compute_running_processes_v3.
 * Two-pass NVML query: first call nvmlDeviceGetMPSComputeRunningProcesses_v3
 * with a NULL buffer (GIL released) to learn the required entry count,
 * construct a ProcessInfo holder of that size, then call again to fill its
 * buffer.  Returns a new reference to the ProcessInfo object, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_mps_compute_running_processes_v3(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_info_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v_infos = 0;
  nvmlProcessInfo_t *__pyx_v_infos_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mps_compute_running_processes_v3", 0);

  /* "cuda/bindings/_nvml.pyx":21823
 *     .. seealso:: `nvmlDeviceGetMPSComputeRunningProcesses_v3`
 *     """
 *     cdef unsigned int[1] info_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 */
  /* info_count[0] = 0: NVML treats a zero count as a pure size query. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_info_count[0]), __pyx_t_1, sizeof(__pyx_v_info_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":21824
 *     """
 *     cdef unsigned int[1] info_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)
 */
  /* First pass (GIL released): probe for the required entry count. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21825
 *     cdef unsigned int[1] info_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMPSComputeRunningProcesses_v3(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_info_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21825, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21824
 *     """
 *     cdef unsigned int[1] info_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21826
 *     with nogil:
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 */
  /* check_status_size tolerates the INSUFFICIENT_SIZE status of a size probe. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21826, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21827
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, NULL)
 *     check_status_size(__status__)
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:
 */
  /* Allocate the result holder: ProcessInfo(info_count[0]). */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_info_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 21827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21827, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_infos = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":21828
 *     check_status_size(__status__)
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())             # <<<<<<<<<<<<<<
 *     if info_count[0] == 0:
 *         return infos
 */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v_infos->__pyx_vtab)->_get_ptr(__pyx_v_infos); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21828, __pyx_L1_error)
  __pyx_v_infos_ptr = ((nvmlProcessInfo_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":21829
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return infos
 *     with nogil:
 */
  /* No running processes: return the empty ProcessInfo without a second call. */
  __pyx_t_9 = ((__pyx_v_info_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":21830
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:
 *         return infos             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_infos);
    __pyx_r = ((PyObject *)__pyx_v_infos);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":21829
 *     cdef ProcessInfo infos = ProcessInfo(info_count[0])
 *     cdef nvmlProcessInfo_t *infos_ptr = <nvmlProcessInfo_t *><intptr_t>(infos._get_ptr())
 *     if info_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return infos
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":21831
 *     if info_count[0] == 0:
 *         return infos
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)
 */
  /* Second pass (GIL released): fill the allocated buffer. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21832
 *         return infos
 *     with nogil:
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return infos
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMPSComputeRunningProcesses_v3(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_info_count), __pyx_v_infos_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21832, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":21831
 *     if info_count[0] == 0:
 *         return infos
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21833
 *     with nogil:
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return infos
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 21833, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21834
 *         __status__ = nvmlDeviceGetMPSComputeRunningProcesses_v3(<Device>device, <unsigned int*>info_count, infos_ptr)
 *     check_status(__status__)
 *     return infos             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_infos);
  __pyx_r = ((PyObject *)__pyx_v_infos);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21815
 * 
 * 
 * cpdef object device_get_mps_compute_running_processes_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get information about processes with a Multi-Process Service (MPS) compute context on a device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mps_compute_running_processes_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_infos);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_249device_get_mps_compute_running_processes_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry exposing the
 * device_get_mps_compute_running_processes_v3 wrapper to Python
 * (FASTCALL + keywords calling convention). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_248device_get_mps_compute_running_processes_v3, "device_get_mps_compute_running_processes_v3(intptr_t device)\n\nGet information about processes with a Multi-Process Service (MPS) compute context on a device.\n\nArgs:\n    device (intptr_t): The device handle or MIG device handle.\n\n.. seealso:: `nvmlDeviceGetMPSComputeRunningProcesses_v3`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_249device_get_mps_compute_running_processes_v3 = {"device_get_mps_compute_running_processes_v3", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_249device_get_mps_compute_running_processes_v3, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_248device_get_mps_compute_running_processes_v3};
/* Python wrapper for device_get_mps_compute_running_processes_v3(device).
 * Parses exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and dispatches to the def-level implementation.  Returns a
 * new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_249device_get_mps_compute_running_processes_v3(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_mps_compute_running_processes_v3 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The original
     * `unlikely(__pyx_kwds_len) < 0` evaluates
     * `__builtin_expect(!!(len), 0) < 0`, which is always false, so a
     * negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21815, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21815, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_mps_compute_running_processes_v3", 0) < (0)) __PYX_ERR(0, 21815, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_mps_compute_running_processes_v3", 1, 1, 1, i); __PYX_ERR(0, 21815, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21815, __pyx_L3_error)
    }
    /* NOTE(review): conversion uses PyLong_AsSsize_t for an intptr_t target —
     * assumes Py_ssize_t and intptr_t share a representation (true on
     * mainstream flat-address-space platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21815, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_mps_compute_running_processes_v3", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21815, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mps_compute_running_processes_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_248device_get_mps_compute_running_processes_v3(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level (def) entry point: forwards to the cpdef C implementation
 * with __pyx_skip_dispatch=1 and returns the resulting new reference, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_248device_get_mps_compute_running_processes_v3(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mps_compute_running_processes_v3", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Dispatch straight to the C implementation (skip_dispatch=1). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_mps_compute_running_processes_v3(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21815, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mps_compute_running_processes_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21837
 * 
 * 
 * cpdef int device_on_same_board(intptr_t device1, intptr_t device2) except? 0:             # <<<<<<<<<<<<<<
 *     """Check if the GPU devices are on the same physical board.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_251device_on_same_board(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast path for cpdef device_on_same_board (declared "except? 0"):
 * calls nvmlDeviceOnSameBoard with the GIL released, validates the NVML
 * status via check_status(), and returns the non-zero/zero "same board"
 * flag. On error it returns 0 with a Python exception set; since 0 is
 * also a valid success value, callers must additionally consult
 * PyErr_Occurred() (as the generated pf wrapper below does). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_on_same_board(intptr_t __pyx_v_device1, intptr_t __pyx_v_device2, CYTHON_UNUSED int __pyx_skip_dispatch) {
  int __pyx_v_on_same_board;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21850
 *     """
 *     cdef int on_same_board
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceOnSameBoard(<Device>device1, <Device>device2, &on_same_board)
 *     check_status(__status__)
 */
  /* GIL is released for the blocking driver call and restored on both the
   * normal and error exits of this try/finally-shaped section. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21851
 *     cdef int on_same_board
 *     with nogil:
 *         __status__ = nvmlDeviceOnSameBoard(<Device>device1, <Device>device2, &on_same_board)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return on_same_board
 */
        /* The sentinel status signals a library-loading failure raised with
         * the GIL temporarily re-acquired inside __Pyx_ErrOccurredWithGIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceOnSameBoard(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device1), ((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device2), (&__pyx_v_on_same_board)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21851, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21850
 *     """
 *     cdef int on_same_board
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceOnSameBoard(<Device>device1, <Device>device2, &on_same_board)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21852
 *     with nogil:
 *         __status__ = nvmlDeviceOnSameBoard(<Device>device1, <Device>device2, &on_same_board)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return on_same_board
 * 
 */
  /* check_status raises on non-success and reports errors by returning 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21852, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21853
 *         __status__ = nvmlDeviceOnSameBoard(<Device>device1, <Device>device2, &on_same_board)
 *     check_status(__status__)
 *     return on_same_board             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_on_same_board;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21837
 * 
 * 
 * cpdef int device_on_same_board(intptr_t device1, intptr_t device2) except? 0:             # <<<<<<<<<<<<<<
 *     """Check if the GPU devices are on the same physical board.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_on_same_board", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-visible wrapper for
 * device_on_same_board, plus its docstring and the PyMethodDef entry
 * (FASTCALL with keyword support) used to register the function on the
 * module at init time. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_251device_on_same_board(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_250device_on_same_board, "device_on_same_board(intptr_t device1, intptr_t device2) -> int\n\nCheck if the GPU devices are on the same physical board.\n\nArgs:\n    device1 (intptr_t): The first GPU device.\n    device2 (intptr_t): The second GPU device.\n\nReturns:\n    int: Reference in which to return the status. Non-zero indicates that the GPUs are on the same board.\n\n.. seealso:: `nvmlDeviceOnSameBoard`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_251device_on_same_board = {"device_on_same_board", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_251device_on_same_board, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_250device_on_same_board};
/* Python wrapper for device_on_same_board: parses the two positional/
 * keyword arguments (device1, device2), converts them to intptr_t and
 * dispatches to the pf body. Returns NULL with an exception set on error.
 *
 * FIX: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the conventional unlikely(x) == __builtin_expect(!!(x), 0), the
 * !! collapses the operand to 0/1 before the "< 0" comparison, so the
 * check could never fire even when __Pyx_NumKwargs_FASTCALL reported an
 * error by returning a negative count. The comparison now sits inside
 * the unlikely() as intended. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_251device_on_same_board(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device1;
  intptr_t __pyx_v_device2;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_on_same_board (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device1,&__pyx_mstate_global->__pyx_n_u_device2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative length signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21837, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21837, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21837, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_on_same_board", 0) < (0)) __PYX_ERR(0, 21837, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_on_same_board", 1, 2, 2, i); __PYX_ERR(0, 21837, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21837, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21837, __pyx_L3_error)
    }
    /* NOTE(review): conversion uses PyLong_AsSsize_t for intptr_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device1 = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device1 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21837, __pyx_L3_error)
    __pyx_v_device2 = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_device2 == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21837, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_on_same_board", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 21837, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_on_same_board", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_250device_on_same_board(__pyx_self, __pyx_v_device1, __pyx_v_device2);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_on_same_board: calls the C fast path with
 * __pyx_skip_dispatch=1 and boxes the int result into a Python int.
 * Because the cfunc is "except? 0", a 0 return is only an error if a
 * Python exception is also pending. Returns NULL on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_250device_on_same_board(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device1, intptr_t __pyx_v_device2) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_on_same_board", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_on_same_board(__pyx_v_device1, __pyx_v_device2, 1); if (unlikely(__pyx_t_1 == ((int)0) && PyErr_Occurred())) __PYX_ERR(0, 21837, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_on_same_board", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21856
 * 
 * 
 * cpdef int device_get_api_restriction(intptr_t device, int api_type) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the root/admin permissions on the target API. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. If an API is restricted only root users can call that API. See ``nvmlDeviceSetAPIRestriction`` to change current permissions.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_253device_get_api_restriction(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast path for cpdef device_get_api_restriction (declared "except? -1"):
 * calls nvmlDeviceGetAPIRestriction with the GIL released, validates the
 * NVML status via check_status(), and returns the _EnableState result cast
 * to int. On error it returns -1 with a Python exception set; -1 is not a
 * documented enum value but callers still confirm via PyErr_Occurred(). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_api_restriction(intptr_t __pyx_v_device, int __pyx_v_api_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_is_restricted;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21869
 *     """
 *     cdef _EnableState is_restricted
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, &is_restricted)
 *     check_status(__status__)
 */
  /* GIL released around the blocking driver call; restored on both exits. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21870
 *     cdef _EnableState is_restricted
 *     with nogil:
 *         __status__ = nvmlDeviceGetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, &is_restricted)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>is_restricted
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAPIRestriction(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__RestrictedAPI)__pyx_v_api_type), (&__pyx_v_is_restricted)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21870, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21869
 *     """
 *     cdef _EnableState is_restricted
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, &is_restricted)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21871
 *     with nogil:
 *         __status__ = nvmlDeviceGetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, &is_restricted)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>is_restricted
 * 
 */
  /* check_status raises on non-success and reports errors by returning 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21871, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21872
 *         __status__ = nvmlDeviceGetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, &is_restricted)
 *     check_status(__status__)
 *     return <int>is_restricted             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_is_restricted);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21856
 * 
 * 
 * cpdef int device_get_api_restriction(intptr_t device, int api_type) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the root/admin permissions on the target API. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. If an API is restricted only root users can call that API. See ``nvmlDeviceSetAPIRestriction`` to change current permissions.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_api_restriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_253device_get_api_restriction(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_252device_get_api_restriction, "device_get_api_restriction(intptr_t device, int api_type) -> int\n\nRetrieves the root/admin permissions on the target API. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. If an API is restricted only root users can call that API. See ``nvmlDeviceSetAPIRestriction`` to change current permissions.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    api_type (RestrictedAPI): Target API type for this operation.\n\nReturns:\n    int: Reference in which to return the current restriction NVML_FEATURE_ENABLED indicates that the API is root-only NVML_FEATURE_DISABLED indicates that the API is accessible to all users.\n\n.. seealso:: `nvmlDeviceGetAPIRestriction`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_253device_get_api_restriction = {"device_get_api_restriction", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_253device_get_api_restriction, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_252device_get_api_restriction};
/* Python wrapper for device_get_api_restriction: parses (device, api_type)
 * from positional/keyword arguments, converts them, and dispatches to the
 * pf body. Returns NULL with an exception set on error.
 *
 * FIX: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the conventional unlikely(x) == __builtin_expect(!!(x), 0), the
 * !! collapses the operand to 0/1 before the "< 0" comparison, so the
 * check could never fire even when __Pyx_NumKwargs_FASTCALL reported an
 * error by returning a negative count. The comparison now sits inside
 * the unlikely() as intended. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_253device_get_api_restriction(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_api_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_api_restriction (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_api_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative length signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21856, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21856, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21856, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_api_restriction", 0) < (0)) __PYX_ERR(0, 21856, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_api_restriction", 1, 2, 2, i); __PYX_ERR(0, 21856, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21856, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21856, __pyx_L3_error)
    }
    /* NOTE(review): conversion uses PyLong_AsSsize_t for intptr_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21856, __pyx_L3_error)
    __pyx_v_api_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_api_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21856, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_api_restriction", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 21856, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_api_restriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_252device_get_api_restriction(__pyx_self, __pyx_v_device, __pyx_v_api_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_get_api_restriction: calls the C fast path
 * with __pyx_skip_dispatch=1 and boxes the int result into a Python int.
 * Because the cfunc is "except? -1", a -1 return is only an error if a
 * Python exception is also pending. Returns NULL on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_252device_get_api_restriction(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_api_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_api_restriction", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_api_restriction(__pyx_v_device, __pyx_v_api_type, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21856, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_api_restriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21875
 * 
 * 
 * cpdef object device_get_bar1_memory_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Gets Total, Available and Used size of BAR1 memory.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_255device_get_bar1_memory_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast path for cpdef device_get_bar1_memory_info:
 * allocates a Python-level BAR1Memory wrapper object, obtains the raw
 * nvmlBAR1Memory_t pointer backing it via _get_ptr(), fills it in by
 * calling nvmlDeviceGetBAR1MemoryInfo with the GIL released, checks the
 * status, and returns the wrapper (new reference). Returns NULL with an
 * exception set on failure; the partially-built wrapper is released on
 * the error path. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_bar1_memory_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *__pyx_v_bar1memory_py = 0;
  nvmlBAR1Memory_t *__pyx_v_bar1memory;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_bar1_memory_info", 0);

  /* "cuda/bindings/_nvml.pyx":21886
 *     .. seealso:: `nvmlDeviceGetBAR1MemoryInfo`
 *     """
 *     cdef BAR1Memory bar1memory_py = BAR1Memory()             # <<<<<<<<<<<<<<
 *     cdef nvmlBAR1Memory_t *bar1memory = <nvmlBAR1Memory_t *><intptr_t>(bar1memory_py._get_ptr())
 *     with nogil:
 */
  /* Construct the BAR1Memory wrapper via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21886, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_bar1memory_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":21887
 *     """
 *     cdef BAR1Memory bar1memory_py = BAR1Memory()
 *     cdef nvmlBAR1Memory_t *bar1memory = <nvmlBAR1Memory_t *><intptr_t>(bar1memory_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetBAR1MemoryInfo(<Device>device, bar1memory)
 */
  /* The wrapper owns the struct storage; we borrow its pointer here, so the
   * wrapper must stay alive until after the NVML call below completes. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BAR1Memory *)__pyx_v_bar1memory_py->__pyx_vtab)->_get_ptr(__pyx_v_bar1memory_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21887, __pyx_L1_error)
  __pyx_v_bar1memory = ((nvmlBAR1Memory_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":21888
 *     cdef BAR1Memory bar1memory_py = BAR1Memory()
 *     cdef nvmlBAR1Memory_t *bar1memory = <nvmlBAR1Memory_t *><intptr_t>(bar1memory_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBAR1MemoryInfo(<Device>device, bar1memory)
 *     check_status(__status__)
 */
  /* GIL released around the blocking driver call; restored on both exits. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21889
 *     cdef nvmlBAR1Memory_t *bar1memory = <nvmlBAR1Memory_t *><intptr_t>(bar1memory_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetBAR1MemoryInfo(<Device>device, bar1memory)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return bar1memory_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBAR1MemoryInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_bar1memory); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21889, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":21888
 *     cdef BAR1Memory bar1memory_py = BAR1Memory()
 *     cdef nvmlBAR1Memory_t *bar1memory = <nvmlBAR1Memory_t *><intptr_t>(bar1memory_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBAR1MemoryInfo(<Device>device, bar1memory)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21890
 *     with nogil:
 *         __status__ = nvmlDeviceGetBAR1MemoryInfo(<Device>device, bar1memory)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return bar1memory_py
 * 
 */
  /* check_status raises on non-success and reports errors by returning 1. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 21890, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21891
 *         __status__ = nvmlDeviceGetBAR1MemoryInfo(<Device>device, bar1memory)
 *     check_status(__status__)
 *     return bar1memory_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_bar1memory_py);
  __pyx_r = ((PyObject *)__pyx_v_bar1memory_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21875
 * 
 * 
 * cpdef object device_get_bar1_memory_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Gets Total, Available and Used size of BAR1 memory.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bar1_memory_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_bar1memory_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-visible wrapper for
 * device_get_bar1_memory_info, plus its docstring and the PyMethodDef
 * entry (FASTCALL with keyword support) used to register the function on
 * the module at init time. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_255device_get_bar1_memory_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_254device_get_bar1_memory_info, "device_get_bar1_memory_info(intptr_t device)\n\nGets Total, Available and Used size of BAR1 memory.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlBAR1Memory_t: Reference in which BAR1 memory information is returned.\n\n.. seealso:: `nvmlDeviceGetBAR1MemoryInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_255device_get_bar1_memory_info = {"device_get_bar1_memory_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_255device_get_bar1_memory_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_254device_get_bar1_memory_info};
/* Python wrapper for device_get_bar1_memory_info: parses the single
 * (device) argument from positional/keyword form, converts it, and
 * dispatches to the pf body. Returns NULL with an exception set on error.
 *
 * FIX: the keyword-count error check previously read
 *     if (unlikely(__pyx_kwds_len) < 0) ...
 * With the conventional unlikely(x) == __builtin_expect(!!(x), 0), the
 * !! collapses the operand to 0/1 before the "< 0" comparison, so the
 * check could never fire even when __Pyx_NumKwargs_FASTCALL reported an
 * error by returning a negative count. The comparison now sits inside
 * the unlikely() as intended. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_255device_get_bar1_memory_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_bar1_memory_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative length signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21875, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21875, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_bar1_memory_info", 0) < (0)) __PYX_ERR(0, 21875, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_bar1_memory_info", 1, 1, 1, i); __PYX_ERR(0, 21875, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21875, __pyx_L3_error)
    }
    /* NOTE(review): conversion uses PyLong_AsSsize_t for intptr_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21875, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_bar1_memory_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21875, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bar1_memory_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_254device_get_bar1_memory_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_get_bar1_memory_info: forwards to the C
 * fast path with __pyx_skip_dispatch=1 and returns the BAR1Memory wrapper
 * object it produces. Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_254device_get_bar1_memory_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_bar1_memory_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return from the cfunc signals an already-set Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_bar1_memory_info(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21875, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bar1_memory_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21894
 * 
 * 
 * cpdef unsigned int device_get_irq_num(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's interrupt number.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_257device_get_irq_num(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `cpdef unsigned int device_get_irq_num(...) except? 0`.
 * Releases the GIL, calls nvmlDeviceGetIrqNum, re-acquires the GIL, raises via
 * check_status() if the status is not success, and returns the IRQ number.
 * On error the `except? 0` sentinel (0) is returned with an exception set;
 * callers must confirm errors with PyErr_Occurred() since 0 is also a valid value. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_irq_num(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_irq_num;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21906
 *     """
 *     cdef unsigned int irq_num
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetIrqNum(<Device>device, &irq_num)
 *     check_status(__status__)
 */
  {
      /* GIL released around the NVML call; restored on both exit paths below. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21907
 *     cdef unsigned int irq_num
 *     with nogil:
 *         __status__ = nvmlDeviceGetIrqNum(<Device>device, &irq_num)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return irq_num
 */
        /* The special loading-error status is only an error if an exception was
         * actually set (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIrqNum(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_irq_num)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21907, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21906
 *     """
 *     cdef unsigned int irq_num
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetIrqNum(<Device>device, &irq_num)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21908
 *     with nogil:
 *         __status__ = nvmlDeviceGetIrqNum(<Device>device, &irq_num)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return irq_num
 * 
 */
  /* check_status raises the appropriate Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21908, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21909
 *         __status__ = nvmlDeviceGetIrqNum(<Device>device, &irq_num)
 *     check_status(__status__)
 *     return irq_num             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_irq_num;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21894
 * 
 * 
 * cpdef unsigned int device_get_irq_num(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's interrupt number.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_irq_num", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0; /* `except? 0` error sentinel */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_257device_get_irq_num(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_256device_get_irq_num, "device_get_irq_num(intptr_t device) -> unsigned int\n\nGets the device's interrupt number.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The interrupt number associated with the specified device.\n\n.. seealso:: `nvmlDeviceGetIrqNum`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_257device_get_irq_num = {"device_get_irq_num", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_257device_get_irq_num, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_256device_get_irq_num};
/* Python fastcall wrapper for device_get_irq_num: unpacks the single
 * "device" positional/keyword argument, converts it to intptr_t, and
 * dispatches to __pyx_pf_..._256device_get_irq_num.  Returns NULL with an
 * exception set on argument-parsing or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_257device_get_irq_num(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_irq_num (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison belongs inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which collapses a negative length to 1, so
     * the original `unlikely(__pyx_kwds_len) < 0` was always false under
     * GCC/Clang and a failing __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21894, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21894, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_irq_num", 0) < (0)) __PYX_ERR(0, 21894, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_irq_num", 1, 1, 1, i); __PYX_ERR(0, 21894, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21894, __pyx_L3_error)
    }
    /* -1 is both a valid result and the error flag; disambiguate via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21894, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_irq_num", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21894, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_irq_num", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_256device_get_irq_num(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatcher for device_get_irq_num: calls the C-level
 * implementation and boxes the unsigned int result into a Python int.
 * Because the C function is `except? 0`, a return of 0 is only an error
 * when PyErr_Occurred() confirms an exception is set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_256device_get_irq_num(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_irq_num", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_irq_num(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21894, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_irq_num", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21912
 * 
 * 
 * cpdef unsigned int device_get_num_gpu_cores(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's core count.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_259device_get_num_gpu_cores(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `cpdef unsigned int device_get_num_gpu_cores(...) except? 0`.
 * Releases the GIL, calls nvmlDeviceGetNumGpuCores, re-acquires the GIL,
 * raises via check_status() on a non-success status, and returns the core
 * count.  On error the `except? 0` sentinel (0) is returned with an
 * exception set. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_num_gpu_cores(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_num_cores;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21924
 *     """
 *     cdef unsigned int num_cores
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNumGpuCores(<Device>device, &num_cores)
 *     check_status(__status__)
 */
  {
      /* GIL released around the NVML call; restored on both exit paths below. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21925
 *     cdef unsigned int num_cores
 *     with nogil:
 *         __status__ = nvmlDeviceGetNumGpuCores(<Device>device, &num_cores)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return num_cores
 */
        /* The special loading-error status is only an error if an exception was
         * actually set (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumGpuCores(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_num_cores)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21925, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21924
 *     """
 *     cdef unsigned int num_cores
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNumGpuCores(<Device>device, &num_cores)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21926
 *     with nogil:
 *         __status__ = nvmlDeviceGetNumGpuCores(<Device>device, &num_cores)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return num_cores
 * 
 */
  /* check_status raises the appropriate Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21926, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21927
 *         __status__ = nvmlDeviceGetNumGpuCores(<Device>device, &num_cores)
 *     check_status(__status__)
 *     return num_cores             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_num_cores;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21912
 * 
 * 
 * cpdef unsigned int device_get_num_gpu_cores(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's core count.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_num_gpu_cores", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0; /* `except? 0` error sentinel */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_259device_get_num_gpu_cores(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_258device_get_num_gpu_cores, "device_get_num_gpu_cores(intptr_t device) -> unsigned int\n\nGets the device's core count.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The number of cores for the specified device.\n\n.. seealso:: `nvmlDeviceGetNumGpuCores`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_259device_get_num_gpu_cores = {"device_get_num_gpu_cores", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_259device_get_num_gpu_cores, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_258device_get_num_gpu_cores};
/* Python fastcall wrapper for device_get_num_gpu_cores: unpacks the single
 * "device" positional/keyword argument, converts it to intptr_t, and
 * dispatches to __pyx_pf_..._258device_get_num_gpu_cores.  Returns NULL with
 * an exception set on argument-parsing or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_259device_get_num_gpu_cores(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_num_gpu_cores (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison belongs inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which collapses a negative length to 1, so
     * the original `unlikely(__pyx_kwds_len) < 0` was always false under
     * GCC/Clang and a failing __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21912, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21912, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_num_gpu_cores", 0) < (0)) __PYX_ERR(0, 21912, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_num_gpu_cores", 1, 1, 1, i); __PYX_ERR(0, 21912, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21912, __pyx_L3_error)
    }
    /* -1 is both a valid result and the error flag; disambiguate via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21912, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_num_gpu_cores", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21912, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_num_gpu_cores", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_258device_get_num_gpu_cores(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatcher for device_get_num_gpu_cores: calls the C-level
 * implementation and boxes the unsigned int result into a Python int.
 * Because the C function is `except? 0`, a return of 0 is only an error
 * when PyErr_Occurred() confirms an exception is set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_258device_get_num_gpu_cores(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_num_gpu_cores", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_num_gpu_cores(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21912, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21912, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_num_gpu_cores", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21930
 * 
 * 
 * cpdef unsigned int device_get_power_source(intptr_t device) except *:             # <<<<<<<<<<<<<<
 *     """Gets the devices power source.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_261device_get_power_source(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `cpdef unsigned int device_get_power_source(...) except *`.
 * Releases the GIL, calls nvmlDeviceGetPowerSource, re-acquires the GIL,
 * raises via check_status() on a non-success status, and returns the power
 * source enum value cast to unsigned int.  With `except *`, errors are
 * signalled by the Python exception state, not by the return value; the -1
 * assigned below merely wraps to UINT_MAX as a conventional sentinel. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_power_source(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlPowerSource_t __pyx_v_power_source;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21942
 *     """
 *     cdef nvmlPowerSource_t power_source
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerSource(<Device>device, &power_source)
 *     check_status(__status__)
 */
  {
      /* GIL released around the NVML call; restored on both exit paths below. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21943
 *     cdef nvmlPowerSource_t power_source
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerSource(<Device>device, &power_source)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <unsigned int>power_source
 */
        /* The special loading-error status is only an error if an exception was
         * actually set (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerSource(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_power_source)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21943, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21942
 *     """
 *     cdef nvmlPowerSource_t power_source
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerSource(<Device>device, &power_source)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21944
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerSource(<Device>device, &power_source)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <unsigned int>power_source
 * 
 */
  /* check_status raises the appropriate Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21944, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21945
 *         __status__ = nvmlDeviceGetPowerSource(<Device>device, &power_source)
 *     check_status(__status__)
 *     return <unsigned int>power_source             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((unsigned int)__pyx_v_power_source);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21930
 * 
 * 
 * cpdef unsigned int device_get_power_source(intptr_t device) except *:             # <<<<<<<<<<<<<<
 *     """Gets the devices power source.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_source", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1; /* wraps to UINT_MAX; the set exception is the real error signal */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_261device_get_power_source(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_260device_get_power_source, "device_get_power_source(intptr_t device) -> unsigned int\n\nGets the devices power source.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The power source of the device.\n\n.. seealso:: `nvmlDeviceGetPowerSource`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_261device_get_power_source = {"device_get_power_source", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_261device_get_power_source, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_260device_get_power_source};
/* Python fastcall wrapper for device_get_power_source: unpacks the single
 * "device" positional/keyword argument, converts it to intptr_t, and
 * dispatches to __pyx_pf_..._260device_get_power_source.  Returns NULL with
 * an exception set on argument-parsing or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_261device_get_power_source(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_source (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison belongs inside unlikely().  unlikely(x) expands to
     * __builtin_expect(!!(x), 0), which collapses a negative length to 1, so
     * the original `unlikely(__pyx_kwds_len) < 0` was always false under
     * GCC/Clang and a failing __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21930, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21930, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_source", 0) < (0)) __PYX_ERR(0, 21930, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_source", 1, 1, 1, i); __PYX_ERR(0, 21930, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21930, __pyx_L3_error)
    }
    /* -1 is both a valid result and the error flag; disambiguate via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21930, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_source", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21930, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_source", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_260device_get_power_source(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatcher for device_get_power_source: calls the C-level
 * implementation and boxes the unsigned int result into a Python int.
 * The C function's error sentinel is (unsigned int)-1 (i.e. UINT_MAX);
 * PyErr_Occurred() disambiguates it from a genuine return value. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_260device_get_power_source(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_source", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_source(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21930, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21930, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_source", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21948
 * 
 * 
 * cpdef unsigned int device_get_memory_bus_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's memory bus width.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_263device_get_memory_bus_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `cpdef unsigned int device_get_memory_bus_width(...) except? 0`.
 * Releases the GIL, calls nvmlDeviceGetMemoryBusWidth, re-acquires the GIL,
 * raises via check_status() on a non-success status, and returns the bus
 * width.  On error the `except? 0` sentinel (0) is returned with an
 * exception set. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_bus_width(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_bus_width;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21960
 *     """
 *     cdef unsigned int bus_width
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryBusWidth(<Device>device, &bus_width)
 *     check_status(__status__)
 */
  {
      /* GIL released around the NVML call; restored on both exit paths below. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21961
 *     cdef unsigned int bus_width
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryBusWidth(<Device>device, &bus_width)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return bus_width
 */
        /* The special loading-error status is only an error if an exception was
         * actually set (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryBusWidth(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_bus_width)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21961, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21960
 *     """
 *     cdef unsigned int bus_width
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMemoryBusWidth(<Device>device, &bus_width)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21962
 *     with nogil:
 *         __status__ = nvmlDeviceGetMemoryBusWidth(<Device>device, &bus_width)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return bus_width
 * 
 */
  /* check_status raises the appropriate Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21962, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21963
 *         __status__ = nvmlDeviceGetMemoryBusWidth(<Device>device, &bus_width)
 *     check_status(__status__)
 *     return bus_width             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_bus_width;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21948
 * 
 * 
 * cpdef unsigned int device_get_memory_bus_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's memory bus width.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_bus_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0; /* `except? 0` error sentinel */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython-visible entry point for cuda.bindings._nvml.device_get_memory_bus_width.
 * Cython-generated glue: parses exactly one argument ("device", converted to
 * intptr_t) from either the fastcall vector or a classic args tuple, then
 * dispatches to the __pyx_pf_ implementation.  Fix applied: the negative
 * keyword-count check previously read `unlikely(__pyx_kwds_len) < 0`, which is
 * always false when unlikely() expands to __builtin_expect(!!(x), 0) (the
 * macro's value is 0/1); the comparison now sits inside unlikely() so the
 * error path can actually fire. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_263device_get_memory_bus_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_262device_get_memory_bus_width, "device_get_memory_bus_width(intptr_t device) -> unsigned int\n\nGets the device's memory bus width.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The devices's memory bus width.\n\n.. seealso:: `nvmlDeviceGetMemoryBusWidth`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_263device_get_memory_bus_width = {"device_get_memory_bus_width", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_263device_get_memory_bus_width, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_262device_get_memory_bus_width};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_263device_get_memory_bus_width(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* one slot per declared parameter */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_memory_bus_width (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: positional count comes from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely(); the old form compared the
     * 0/1 result of __builtin_expect(!!(x), 0) against 0 and never fired. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21948, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21948, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_memory_bus_width", 0) < (0)) __PYX_ERR(0, 21948, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_memory_bus_width", 1, 1, 1, i); __PYX_ERR(0, 21948, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21948, __pyx_L3_error)
    }
    /* Convert to intptr_t via PyLong_AsSsize_t; assumes Py_ssize_t and
     * intptr_t have the same width on supported platforms — TODO confirm. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21948, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_memory_bus_width", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21948, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Parsing failed: release any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_bus_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_262device_get_memory_bus_width(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Shared implementation behind the Python wrapper: invokes the C-level cpdef
 * body (passing __pyx_skip_dispatch=1) and boxes the unsigned int result as a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_262device_get_memory_bus_width(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_memory_bus_width", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only when an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_memory_bus_width(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21948, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_memory_bus_width", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21966
 * 
 * 
 * cpdef unsigned int device_get_pcie_link_max_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's PCIE Max Link speed in MBPS.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_265device_get_pcie_link_max_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef function device_get_pcie_link_max_speed
 * (declared `except? 0` in the .pyx): releases the GIL, calls the cy_nvml
 * dispatch stub for nvmlDeviceGetPcieLinkMaxSpeed, re-acquires the GIL, and
 * raises through check_status() on a non-success NVML status.  On error it
 * returns 0 with a Python exception set; since 0 may also be a legitimate
 * result, callers must additionally consult PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_link_max_speed(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_max_speed;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21978
 *     """
 *     cdef unsigned int max_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieLinkMaxSpeed(<Device>device, &max_speed)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* `with nogil:` — drop the GIL around the (potentially blocking) NVML call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21979
 *     cdef unsigned int max_speed
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieLinkMaxSpeed(<Device>device, &max_speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return max_speed
*/
        /* The sentinel status flags a failure inside the cy_nvml loader stub
         * (presumably a Python error raised while resolving the symbol);
         * __Pyx_ErrOccurredWithGIL() re-takes the GIL to check — NOTE(review). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieLinkMaxSpeed(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_max_speed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21979, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21978
 *     """
 *     cdef unsigned int max_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieLinkMaxSpeed(<Device>device, &max_speed)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error exit path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21980
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieLinkMaxSpeed(<Device>device, &max_speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return max_speed
 * 
*/
  /* A return of 1 from check_status() signals an (already-set) Python error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21980, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21981
 *         __status__ = nvmlDeviceGetPcieLinkMaxSpeed(<Device>device, &max_speed)
 *     check_status(__status__)
 *     return max_speed             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_max_speed;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21966
 * 
 * 
 * cpdef unsigned int device_get_pcie_link_max_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's PCIE Max Link speed in MBPS.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_link_max_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;  /* `except? 0` error sentinel (exception is set) */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython-visible entry point for cuda.bindings._nvml.device_get_pcie_link_max_speed.
 * Cython-generated glue: parses exactly one argument ("device", converted to
 * intptr_t) from either the fastcall vector or a classic args tuple, then
 * dispatches to the __pyx_pf_ implementation.  Fix applied: the negative
 * keyword-count check previously read `unlikely(__pyx_kwds_len) < 0`, which is
 * always false when unlikely() expands to __builtin_expect(!!(x), 0) (the
 * macro's value is 0/1); the comparison now sits inside unlikely() so the
 * error path can actually fire. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_265device_get_pcie_link_max_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_264device_get_pcie_link_max_speed, "device_get_pcie_link_max_speed(intptr_t device) -> unsigned int\n\nGets the device's PCIE Max Link speed in MBPS.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The devices's PCIE Max Link speed in MBPS.\n\n.. seealso:: `nvmlDeviceGetPcieLinkMaxSpeed`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_265device_get_pcie_link_max_speed = {"device_get_pcie_link_max_speed", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_265device_get_pcie_link_max_speed, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_264device_get_pcie_link_max_speed};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_265device_get_pcie_link_max_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* one slot per declared parameter */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pcie_link_max_speed (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: positional count comes from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely(); the old form compared the
     * 0/1 result of __builtin_expect(!!(x), 0) against 0 and never fired. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21966, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21966, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pcie_link_max_speed", 0) < (0)) __PYX_ERR(0, 21966, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pcie_link_max_speed", 1, 1, 1, i); __PYX_ERR(0, 21966, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21966, __pyx_L3_error)
    }
    /* Convert to intptr_t via PyLong_AsSsize_t; assumes Py_ssize_t and
     * intptr_t have the same width on supported platforms — TODO confirm. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21966, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pcie_link_max_speed", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21966, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Parsing failed: release any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_link_max_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_264device_get_pcie_link_max_speed(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Shared implementation behind the Python wrapper: invokes the C-level cpdef
 * body (passing __pyx_skip_dispatch=1) and boxes the unsigned int result as a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_264device_get_pcie_link_max_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pcie_link_max_speed", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only when an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_link_max_speed(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21966, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_link_max_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":21984
 * 
 * 
 * cpdef unsigned int device_get_pcie_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's PCIe Link speed in Mbps.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_267device_get_pcie_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef function device_get_pcie_speed (declared
 * `except? 0` in the .pyx): releases the GIL, calls the cy_nvml dispatch stub
 * for nvmlDeviceGetPcieSpeed, re-acquires the GIL, and raises through
 * check_status() on a non-success NVML status.  On error it returns 0 with a
 * Python exception set; since 0 may also be a legitimate result, callers must
 * additionally consult PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_speed(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_pcie_speed;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":21996
 *     """
 *     cdef unsigned int pcie_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieSpeed(<Device>device, &pcie_speed)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* `with nogil:` — drop the GIL around the (potentially blocking) NVML call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":21997
 *     cdef unsigned int pcie_speed
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieSpeed(<Device>device, &pcie_speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pcie_speed
*/
        /* The sentinel status flags a failure inside the cy_nvml loader stub
         * (presumably a Python error raised while resolving the symbol);
         * __Pyx_ErrOccurredWithGIL() re-takes the GIL to check — NOTE(review). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieSpeed(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_pcie_speed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21997, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":21996
 *     """
 *     cdef unsigned int pcie_speed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPcieSpeed(<Device>device, &pcie_speed)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error exit path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":21998
 *     with nogil:
 *         __status__ = nvmlDeviceGetPcieSpeed(<Device>device, &pcie_speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pcie_speed
 * 
*/
  /* A return of 1 from check_status() signals an (already-set) Python error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 21998, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":21999
 *         __status__ = nvmlDeviceGetPcieSpeed(<Device>device, &pcie_speed)
 *     check_status(__status__)
 *     return pcie_speed             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_pcie_speed;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":21984
 * 
 * 
 * cpdef unsigned int device_get_pcie_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's PCIe Link speed in Mbps.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;  /* `except? 0` error sentinel (exception is set) */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython-visible entry point for cuda.bindings._nvml.device_get_pcie_speed.
 * Cython-generated glue: parses exactly one argument ("device", converted to
 * intptr_t) from either the fastcall vector or a classic args tuple, then
 * dispatches to the __pyx_pf_ implementation.  Fix applied: the negative
 * keyword-count check previously read `unlikely(__pyx_kwds_len) < 0`, which is
 * always false when unlikely() expands to __builtin_expect(!!(x), 0) (the
 * macro's value is 0/1); the comparison now sits inside unlikely() so the
 * error path can actually fire. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_267device_get_pcie_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_266device_get_pcie_speed, "device_get_pcie_speed(intptr_t device) -> unsigned int\n\nGets the device's PCIe Link speed in Mbps.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The devices's PCIe Max Link speed in Mbps.\n\n.. seealso:: `nvmlDeviceGetPcieSpeed`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_267device_get_pcie_speed = {"device_get_pcie_speed", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_267device_get_pcie_speed, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_266device_get_pcie_speed};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_267device_get_pcie_speed(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* one slot per declared parameter */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pcie_speed (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: positional count comes from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely(); the old form compared the
     * 0/1 result of __builtin_expect(!!(x), 0) against 0 and never fired. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 21984, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pcie_speed", 0) < (0)) __PYX_ERR(0, 21984, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pcie_speed", 1, 1, 1, i); __PYX_ERR(0, 21984, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21984, __pyx_L3_error)
    }
    /* Convert to intptr_t via PyLong_AsSsize_t; assumes Py_ssize_t and
     * intptr_t have the same width on supported platforms — TODO confirm. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 21984, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pcie_speed", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21984, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Parsing failed: release any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_266device_get_pcie_speed(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Shared implementation behind the Python wrapper: invokes the C-level cpdef
 * body (passing __pyx_skip_dispatch=1) and boxes the unsigned int result as a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_266device_get_pcie_speed(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pcie_speed", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only when an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_speed(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 21984, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pcie_speed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22002
 * 
 * 
 * cpdef unsigned int device_get_adaptive_clock_info_status(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's Adaptive Clock status.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_269device_get_adaptive_clock_info_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef function device_get_adaptive_clock_info_status
 * (declared `except? 0` in the .pyx): releases the GIL, calls the cy_nvml
 * dispatch stub for nvmlDeviceGetAdaptiveClockInfoStatus, re-acquires the GIL,
 * and raises through check_status() on a non-success NVML status.  On error it
 * returns 0 with a Python exception set; since 0 may also be a legitimate
 * result, callers must additionally consult PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_adaptive_clock_info_status(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_adaptive_clock_status;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22014
 *     """
 *     cdef unsigned int adaptive_clock_status
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAdaptiveClockInfoStatus(<Device>device, &adaptive_clock_status)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* `with nogil:` — drop the GIL around the (potentially blocking) NVML call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22015
 *     cdef unsigned int adaptive_clock_status
 *     with nogil:
 *         __status__ = nvmlDeviceGetAdaptiveClockInfoStatus(<Device>device, &adaptive_clock_status)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return adaptive_clock_status
*/
        /* The sentinel status flags a failure inside the cy_nvml loader stub
         * (presumably a Python error raised while resolving the symbol);
         * __Pyx_ErrOccurredWithGIL() re-takes the GIL to check — NOTE(review). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAdaptiveClockInfoStatus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_adaptive_clock_status)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22015, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22014
 *     """
 *     cdef unsigned int adaptive_clock_status
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAdaptiveClockInfoStatus(<Device>device, &adaptive_clock_status)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error exit path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22016
 *     with nogil:
 *         __status__ = nvmlDeviceGetAdaptiveClockInfoStatus(<Device>device, &adaptive_clock_status)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return adaptive_clock_status
 * 
*/
  /* A return of 1 from check_status() signals an (already-set) Python error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22016, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22017
 *         __status__ = nvmlDeviceGetAdaptiveClockInfoStatus(<Device>device, &adaptive_clock_status)
 *     check_status(__status__)
 *     return adaptive_clock_status             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_adaptive_clock_status;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22002
 * 
 * 
 * cpdef unsigned int device_get_adaptive_clock_info_status(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's Adaptive Clock status.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_adaptive_clock_info_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;  /* `except? 0` error sentinel (exception is set) */
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython-visible entry point for cuda.bindings._nvml.device_get_adaptive_clock_info_status.
 * Cython-generated glue: parses exactly one argument ("device", converted to
 * intptr_t) from either the fastcall vector or a classic args tuple, then
 * dispatches to the __pyx_pf_ implementation.  Fix applied: the negative
 * keyword-count check previously read `unlikely(__pyx_kwds_len) < 0`, which is
 * always false when unlikely() expands to __builtin_expect(!!(x), 0) (the
 * macro's value is 0/1); the comparison now sits inside unlikely() so the
 * error path can actually fire. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_269device_get_adaptive_clock_info_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_268device_get_adaptive_clock_info_status, "device_get_adaptive_clock_info_status(intptr_t device) -> unsigned int\n\nGets the device's Adaptive Clock status.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The current adaptive clocking status, either NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED or NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED.\n\n.. seealso:: `nvmlDeviceGetAdaptiveClockInfoStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_269device_get_adaptive_clock_info_status = {"device_get_adaptive_clock_info_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_269device_get_adaptive_clock_info_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_268device_get_adaptive_clock_info_status};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_269device_get_adaptive_clock_info_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* one slot per declared parameter */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_adaptive_clock_info_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Classic calling convention: positional count comes from the args tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: comparison moved inside unlikely(); the old form compared the
     * 0/1 result of __builtin_expect(!!(x), 0) against 0 and never fired. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22002, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22002, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_adaptive_clock_info_status", 0) < (0)) __PYX_ERR(0, 22002, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_adaptive_clock_info_status", 1, 1, 1, i); __PYX_ERR(0, 22002, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22002, __pyx_L3_error)
    }
    /* Convert to intptr_t via PyLong_AsSsize_t; assumes Py_ssize_t and
     * intptr_t have the same width on supported platforms — TODO confirm. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22002, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_adaptive_clock_info_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22002, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Parsing failed: release any argument references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_adaptive_clock_info_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_268device_get_adaptive_clock_info_status(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Shared implementation behind the Python wrapper: invokes the C-level cpdef
 * body (passing __pyx_skip_dispatch=1) and boxes the unsigned int result as a
 * Python int.  Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_268device_get_adaptive_clock_info_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_adaptive_clock_info_status", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only when an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_adaptive_clock_info_status(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22002, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_adaptive_clock_info_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22020
 * 
 * 
 * cpdef unsigned int device_get_bus_type(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the type of the GPU Bus (PCIe, PCI, ...).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_271device_get_bus_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_bus_type`.  Releases the
 * GIL, calls nvmlDeviceGetBusType for `device`, re-acquires the GIL, raises
 * on a non-success NVML status via check_status, and returns the bus type.
 * "except? 0" semantics: returns 0 with a Python exception set on error.
 * Cython-generated; control flow relies on the goto cleanup labels below. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_bus_type(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlBusType_t __pyx_v_type;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22032
 *     """
 *     cdef nvmlBusType_t type
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBusType(<Device>device, &type)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22033
 *     cdef nvmlBusType_t type
 *     with nogil:
 *         __status__ = nvmlDeviceGetBusType(<Device>device, &type)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <unsigned int>type
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the binding's sentinel for a
         * failed symbol load; __Pyx_ErrOccurredWithGIL re-acquires the GIL to
         * inspect the exception state from this nogil region. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBusType(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_type)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22033, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22032
 *     """
 *     cdef nvmlBusType_t type
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetBusType(<Device>device, &type)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error exit also restores the thread state before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22034
 *     with nogil:
 *         __status__ = nvmlDeviceGetBusType(<Device>device, &type)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <unsigned int>type
 * 
 */
  /* check_status raises the mapped Python exception and returns 1 on error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22034, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22035
 *         __status__ = nvmlDeviceGetBusType(<Device>device, &type)
 *     check_status(__status__)
 *     return <unsigned int>type             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((unsigned int)__pyx_v_type);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22020
 * 
 * 
 * cpdef unsigned int device_get_bus_type(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the type of the GPU Bus (PCIe, PCI, ...).
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bus_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_271device_get_bus_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_270device_get_bus_type, "device_get_bus_type(intptr_t device) -> unsigned int\n\nGet the type of the GPU Bus (PCIe, PCI, ...).\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: The PCI Bus type.\n\n.. seealso:: `nvmlDeviceGetBusType`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_271device_get_bus_type = {"device_get_bus_type", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_271device_get_bus_type, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_270device_get_bus_type};
/* Python wrapper ("pw") for device_get_bus_type: unpacks the single
 * positional/keyword argument `device` into an intptr_t and forwards to the
 * "pf" implementation.  Returns a new reference or NULL with an exception set.
 *
 * Fix: the original guard was `if (unlikely(__pyx_kwds_len) < 0)`.  Since
 * unlikely(x) expands to __builtin_expect(!!(x), 0), its value is always 0 or
 * 1 and can never be < 0, making the error check on __Pyx_NumKwargs_FASTCALL
 * dead: a -1 failure (exception already set) fell through and was misread as
 * "no keywords".  The parenthesization is corrected to
 * `unlikely(__pyx_kwds_len < 0)` so the failure propagates immediately. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_271device_get_bus_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_bus_type (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative length signals failure from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22020, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22020, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_bus_type", 0) < (0)) __PYX_ERR(0, 22020, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_bus_type", 1, 1, 1, i); __PYX_ERR(0, 22020, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22020, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t; -1 is ambiguous and needs the
     * PyErr_Occurred() disambiguation. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22020, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_bus_type", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22020, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bus_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_270device_get_bus_type(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation ("pf") for device_get_bus_type: calls the
 * C-level cpdef entry point (skip_dispatch=1) and boxes the unsigned int bus
 * type into a Python int.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_270device_get_bus_type(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_bus_type", 0);
  __Pyx_XDECREF(__pyx_r);
  /* "except? 0" contract: a 0 result is only an error if an exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_bus_type(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22020, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_bus_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22038
 * 
 * 
 * cpdef object device_get_gpu_fabric_info_v(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around nvmlDeviceGetGpuFabricInfo that accepts a versioned ``nvmlGpuFabricInfo_v2_t`` or later output structure.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_273device_get_gpu_fabric_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_gpu_fabric_info_v`.
 * Allocates a GpuFabricInfo_v3 Python wrapper object, aims an
 * nvmlGpuFabricInfoV_t pointer at its internal buffer, stamps the versioned
 * struct header, then calls nvmlDeviceGetGpuFabricInfoV without the GIL and
 * raises on a non-success status.  Returns a new reference to the filled
 * wrapper object, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_fabric_info_v(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *__pyx_v_gpu_fabric_info_py = 0;
  nvmlGpuFabricInfoV_t *__pyx_v_gpu_fabric_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_fabric_info_v", 0);

  /* "cuda/bindings/_nvml.pyx":22049
 *     .. seealso:: `nvmlDeviceGetGpuFabricInfoV`
 *     """
 *     cdef GpuFabricInfo_v3 gpu_fabric_info_py = GpuFabricInfo_v3()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuFabricInfoV_t *gpu_fabric_info = <nvmlGpuFabricInfoV_t *><intptr_t>(gpu_fabric_info_py._get_ptr())
 *     gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)
 */
  /* Zero-argument vectorcall of the GpuFabricInfo_v3 extension type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22049, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_gpu_fabric_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22050
 *     """
 *     cdef GpuFabricInfo_v3 gpu_fabric_info_py = GpuFabricInfo_v3()
 *     cdef nvmlGpuFabricInfoV_t *gpu_fabric_info = <nvmlGpuFabricInfoV_t *><intptr_t>(gpu_fabric_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)
 *     with nogil:
 */
  /* _get_ptr returns the address of the wrapper's owned C struct; the pointer
   * stays valid as long as gpu_fabric_info_py is alive (held until exit). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)__pyx_v_gpu_fabric_info_py->__pyx_vtab)->_get_ptr(__pyx_v_gpu_fabric_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22050, __pyx_L1_error)
  __pyx_v_gpu_fabric_info = ((nvmlGpuFabricInfoV_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22051
 *     cdef GpuFabricInfo_v3 gpu_fabric_info_py = GpuFabricInfo_v3()
 *     cdef nvmlGpuFabricInfoV_t *gpu_fabric_info = <nvmlGpuFabricInfoV_t *><intptr_t>(gpu_fabric_info_py._get_ptr())
 *     gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuFabricInfoV(<Device>device, gpu_fabric_info)
 */
  /* NVML versioned-struct header: low 24 bits = struct size, high byte = 3. */
  __pyx_v_gpu_fabric_info->version = ((sizeof(nvmlGpuFabricInfo_v3_t)) | 0x3000000);

  /* "cuda/bindings/_nvml.pyx":22052
 *     cdef nvmlGpuFabricInfoV_t *gpu_fabric_info = <nvmlGpuFabricInfoV_t *><intptr_t>(gpu_fabric_info_py._get_ptr())
 *     gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuFabricInfoV(<Device>device, gpu_fabric_info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22053
 *     gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuFabricInfoV(<Device>device, gpu_fabric_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gpu_fabric_info_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuFabricInfoV(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_gpu_fabric_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22053, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22052
 *     cdef nvmlGpuFabricInfoV_t *gpu_fabric_info = <nvmlGpuFabricInfoV_t *><intptr_t>(gpu_fabric_info_py._get_ptr())
 *     gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuFabricInfoV(<Device>device, gpu_fabric_info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22054
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuFabricInfoV(<Device>device, gpu_fabric_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return gpu_fabric_info_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22054, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22055
 *         __status__ = nvmlDeviceGetGpuFabricInfoV(<Device>device, gpu_fabric_info)
 *     check_status(__status__)
 *     return gpu_fabric_info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_gpu_fabric_info_py);
  __pyx_r = ((PyObject *)__pyx_v_gpu_fabric_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22038
 * 
 * 
 * cpdef object device_get_gpu_fabric_info_v(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around nvmlDeviceGetGpuFabricInfo that accepts a versioned ``nvmlGpuFabricInfo_v2_t`` or later output structure.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_fabric_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owning reference; __pyx_r keeps its own on success. */
  __Pyx_XDECREF((PyObject *)__pyx_v_gpu_fabric_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_273device_get_gpu_fabric_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_272device_get_gpu_fabric_info_v, "device_get_gpu_fabric_info_v(intptr_t device)\n\nVersioned wrapper around nvmlDeviceGetGpuFabricInfo that accepts a versioned ``nvmlGpuFabricInfo_v2_t`` or later output structure.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlGpuFabricInfo_v3_t: Information about GPU fabric state.\n\n.. seealso:: `nvmlDeviceGetGpuFabricInfoV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_273device_get_gpu_fabric_info_v = {"device_get_gpu_fabric_info_v", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_273device_get_gpu_fabric_info_v, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_272device_get_gpu_fabric_info_v};
/* Python wrapper ("pw") for device_get_gpu_fabric_info_v: unpacks the single
 * positional/keyword argument `device` into an intptr_t and forwards to the
 * "pf" implementation.  Returns a new reference or NULL with an exception set.
 *
 * Fix: the original guard was `if (unlikely(__pyx_kwds_len) < 0)`.  Since
 * unlikely(x) expands to __builtin_expect(!!(x), 0), its value is always 0 or
 * 1 and can never be < 0, making the error check on __Pyx_NumKwargs_FASTCALL
 * dead: a -1 failure (exception already set) fell through and was misread as
 * "no keywords".  The parenthesization is corrected to
 * `unlikely(__pyx_kwds_len < 0)` so the failure propagates immediately. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_273device_get_gpu_fabric_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_fabric_info_v (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative length signals failure from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22038, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22038, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_fabric_info_v", 0) < (0)) __PYX_ERR(0, 22038, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_fabric_info_v", 1, 1, 1, i); __PYX_ERR(0, 22038, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22038, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t; -1 is ambiguous and needs the
     * PyErr_Occurred() disambiguation. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22038, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_fabric_info_v", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22038, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_fabric_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_272device_get_gpu_fabric_info_v(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation ("pf") for device_get_gpu_fabric_info_v:
 * forwards to the C-level cpdef entry point (skip_dispatch=1), which already
 * returns a new PyObject reference (a GpuFabricInfo_v3 instance) or NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_272device_get_gpu_fabric_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_fabric_info_v", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_fabric_info_v(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_fabric_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22058
 * 
 * 
 * cpdef object system_get_conf_compute_capabilities():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System capabilities.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_275system_get_conf_compute_capabilities(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level implementation of the cpdef `system_get_conf_compute_capabilities`.
 * Allocates a ConfComputeSystemCaps wrapper object, points an
 * nvmlConfComputeSystemCaps_t pointer at its internal buffer, calls
 * nvmlSystemGetConfComputeCapabilities without the GIL, and raises on a
 * non-success status.  Returns a new reference to the filled wrapper, or
 * NULL with an exception set.  Cython-generated. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_capabilities(CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *__pyx_v_capabilities_py = 0;
  nvmlConfComputeSystemCaps_t *__pyx_v_capabilities;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_capabilities", 0);

  /* "cuda/bindings/_nvml.pyx":22066
 *     .. seealso:: `nvmlSystemGetConfComputeCapabilities`
 *     """
 *     cdef ConfComputeSystemCaps capabilities_py = ConfComputeSystemCaps()             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeSystemCaps_t *capabilities = <nvmlConfComputeSystemCaps_t *><intptr_t>(capabilities_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the ConfComputeSystemCaps extension type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22066, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_capabilities_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22067
 *     """
 *     cdef ConfComputeSystemCaps capabilities_py = ConfComputeSystemCaps()
 *     cdef nvmlConfComputeSystemCaps_t *capabilities = <nvmlConfComputeSystemCaps_t *><intptr_t>(capabilities_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeCapabilities(capabilities)
 */
  /* _get_ptr returns the address of the wrapper's owned C struct; valid while
   * capabilities_py is alive (the local reference is held until exit). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)__pyx_v_capabilities_py->__pyx_vtab)->_get_ptr(__pyx_v_capabilities_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22067, __pyx_L1_error)
  __pyx_v_capabilities = ((nvmlConfComputeSystemCaps_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22068
 *     cdef ConfComputeSystemCaps capabilities_py = ConfComputeSystemCaps()
 *     cdef nvmlConfComputeSystemCaps_t *capabilities = <nvmlConfComputeSystemCaps_t *><intptr_t>(capabilities_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeCapabilities(capabilities)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22069
 *     cdef nvmlConfComputeSystemCaps_t *capabilities = <nvmlConfComputeSystemCaps_t *><intptr_t>(capabilities_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeCapabilities(capabilities)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return capabilities_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeCapabilities(__pyx_v_capabilities); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22069, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22068
 *     cdef ConfComputeSystemCaps capabilities_py = ConfComputeSystemCaps()
 *     cdef nvmlConfComputeSystemCaps_t *capabilities = <nvmlConfComputeSystemCaps_t *><intptr_t>(capabilities_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeCapabilities(capabilities)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22070
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeCapabilities(capabilities)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return capabilities_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22070, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22071
 *         __status__ = nvmlSystemGetConfComputeCapabilities(capabilities)
 *     check_status(__status__)
 *     return capabilities_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_capabilities_py);
  __pyx_r = ((PyObject *)__pyx_v_capabilities_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22058
 * 
 * 
 * cpdef object system_get_conf_compute_capabilities():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System capabilities.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owning reference; __pyx_r keeps its own on success. */
  __Pyx_XDECREF((PyObject *)__pyx_v_capabilities_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_275system_get_conf_compute_capabilities(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_274system_get_conf_compute_capabilities, "system_get_conf_compute_capabilities()\n\nGet Conf Computing System capabilities.\n\nReturns:\n    nvmlConfComputeSystemCaps_t: System CC capabilities.\n\n.. seealso:: `nvmlSystemGetConfComputeCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_275system_get_conf_compute_capabilities = {"system_get_conf_compute_capabilities", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_275system_get_conf_compute_capabilities, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_274system_get_conf_compute_capabilities};
/* METH_NOARGS Python wrapper for system_get_conf_compute_capabilities:
 * simply forwards to the "pf" implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_275system_get_conf_compute_capabilities(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_conf_compute_capabilities (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this
   * METH_NOARGS signature; this compiles only because the
   * __Pyx_KwValues_VARARGS macro presumably ignores its arguments —
   * confirm against the Cython utility-code definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_274system_get_conf_compute_capabilities(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation ("pf") for system_get_conf_compute_capabilities:
 * forwards to the C-level cpdef entry point (skip_dispatch=1), which returns
 * a new reference to a ConfComputeSystemCaps instance, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_274system_get_conf_compute_capabilities(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_capabilities", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_capabilities(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22074
 * 
 * 
 * cpdef object system_get_conf_compute_state():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System State.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_277system_get_conf_compute_state(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level body of the cpdef `system_get_conf_compute_state`:
 *   1. construct a ConfComputeSystemState Python wrapper object,
 *   2. obtain the address of its embedded nvmlConfComputeSystemState_t via
 *      the vtable `_get_ptr` (returned as an intptr_t),
 *   3. call nvmlSystemGetConfComputeState with the GIL released,
 *   4. raise through check_status() on a non-success NVML return code,
 *   5. return the populated wrapper (new reference), or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_state(CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *__pyx_v_state_py = 0;
  nvmlConfComputeSystemState_t *__pyx_v_state;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_state", 0);

  /* "cuda/bindings/_nvml.pyx":22082
 *     .. seealso:: `nvmlSystemGetConfComputeState`
 *     """
 *     cdef ConfComputeSystemState state_py = ConfComputeSystemState()             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeSystemState_t *state = <nvmlConfComputeSystemState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the ConfComputeSystemState type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22082, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_state_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22083
 *     """
 *     cdef ConfComputeSystemState state_py = ConfComputeSystemState()
 *     cdef nvmlConfComputeSystemState_t *state = <nvmlConfComputeSystemState_t *><intptr_t>(state_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeState(state)
 */
  /* The raw struct pointer stays valid while state_py is alive (held until
   * function exit), so passing it into the nogil section below is safe. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemState *)__pyx_v_state_py->__pyx_vtab)->_get_ptr(__pyx_v_state_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22083, __pyx_L1_error)
  __pyx_v_state = ((nvmlConfComputeSystemState_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22084
 *     cdef ConfComputeSystemState state_py = ConfComputeSystemState()
 *     cdef nvmlConfComputeSystemState_t *state = <nvmlConfComputeSystemState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeState(state)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22085
 *     cdef nvmlConfComputeSystemState_t *state = <nvmlConfComputeSystemState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeState(state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return state_py
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR presumably is a
         * sentinel meaning the NVML symbol failed to lazy-load; the GIL is
         * re-acquired only to check for a pending exception — confirm against
         * cuda.bindings.cy_nvml. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeState(__pyx_v_state); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22085, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22084
 *     cdef ConfComputeSystemState state_py = ConfComputeSystemState()
 *     cdef nvmlConfComputeSystemState_t *state = <nvmlConfComputeSystemState_t *><intptr_t>(state_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeState(state)
 *     check_status(__status__)
 */
      /* Both exit paths restore the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22086
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeState(state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return state_py
 * 
 */
  /* check_status returns 1 when it raised (status != success). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22086, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22087
 *         __status__ = nvmlSystemGetConfComputeState(state)
 *     check_status(__status__)
 *     return state_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_state_py);
  __pyx_r = ((PyObject *)__pyx_v_state_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22074
 * 
 * 
 * cpdef object system_get_conf_compute_state():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System State.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_state_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_277system_get_conf_compute_state(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_276system_get_conf_compute_state, "system_get_conf_compute_state()\n\nGet Conf Computing System State.\n\nReturns:\n    nvmlConfComputeSystemState_t: System CC State.\n\n.. seealso:: `nvmlSystemGetConfComputeState`");
/* Method-table entry; METH_NOARGS matches the zero-argument cpdef signature. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_277system_get_conf_compute_state = {"system_get_conf_compute_state", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_277system_get_conf_compute_state, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_276system_get_conf_compute_state};
/* Thin wrapper: delegates to the impl function; returns new ref or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_277system_get_conf_compute_state(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_conf_compute_state (wrapper)", 0);
  /* NOTE(review): macro does not evaluate its arguments under METH_NOARGS —
   * Cython codegen artifact, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_276system_get_conf_compute_state(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl half: invokes the C-level cpdef body with skip_dispatch=1 and returns
 * its result, annotating the traceback on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_276system_get_conf_compute_state(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_state", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_state(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22090
 * 
 * 
 * cpdef object device_get_conf_compute_mem_size_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing Protected and Unprotected Memory Sizes.
 * 
*/

/* Forward declaration of the Python wrapper (fastcall or tuple+dict ABI,
 * selected at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_279device_get_conf_compute_mem_size_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `device_get_conf_compute_mem_size_info(device)`:
 * builds a ConfComputeMemSizeInfo wrapper, fills its embedded
 * nvmlConfComputeMemSizeInfo_t via nvmlDeviceGetConfComputeMemSizeInfo
 * (GIL released; `device` is an intptr_t reinterpreted as an NVML device
 * handle), raises through check_status() on failure, and returns the wrapper. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_mem_size_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *__pyx_v_mem_info_py = 0;
  nvmlConfComputeMemSizeInfo_t *__pyx_v_mem_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_mem_size_info", 0);

  /* "cuda/bindings/_nvml.pyx":22101
 *     .. seealso:: `nvmlDeviceGetConfComputeMemSizeInfo`
 *     """
 *     cdef ConfComputeMemSizeInfo mem_info_py = ConfComputeMemSizeInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeMemSizeInfo_t *mem_info = <nvmlConfComputeMemSizeInfo_t *><intptr_t>(mem_info_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the ConfComputeMemSizeInfo type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22101, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_mem_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22102
 *     """
 *     cdef ConfComputeMemSizeInfo mem_info_py = ConfComputeMemSizeInfo()
 *     cdef nvmlConfComputeMemSizeInfo_t *mem_info = <nvmlConfComputeMemSizeInfo_t *><intptr_t>(mem_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeMemSizeInfo(<Device>device, mem_info)
 */
  /* Struct pointer remains valid while mem_info_py is held below. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)__pyx_v_mem_info_py->__pyx_vtab)->_get_ptr(__pyx_v_mem_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22102, __pyx_L1_error)
  __pyx_v_mem_info = ((nvmlConfComputeMemSizeInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22103
 *     cdef ConfComputeMemSizeInfo mem_info_py = ConfComputeMemSizeInfo()
 *     cdef nvmlConfComputeMemSizeInfo_t *mem_info = <nvmlConfComputeMemSizeInfo_t *><intptr_t>(mem_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeMemSizeInfo(<Device>device, mem_info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22104
 *     cdef nvmlConfComputeMemSizeInfo_t *mem_info = <nvmlConfComputeMemSizeInfo_t *><intptr_t>(mem_info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeMemSizeInfo(<Device>device, mem_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return mem_info_py
 */
        /* NOTE(review): sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol; exception check re-acquires the GIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeMemSizeInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_mem_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22104, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22103
 *     cdef ConfComputeMemSizeInfo mem_info_py = ConfComputeMemSizeInfo()
 *     cdef nvmlConfComputeMemSizeInfo_t *mem_info = <nvmlConfComputeMemSizeInfo_t *><intptr_t>(mem_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeMemSizeInfo(<Device>device, mem_info)
 *     check_status(__status__)
 */
      /* Both exit paths restore the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22105
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeMemSizeInfo(<Device>device, mem_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return mem_info_py
 * 
 */
  /* check_status returns 1 when it raised (status != success). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22105, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22106
 *         __status__ = nvmlDeviceGetConfComputeMemSizeInfo(<Device>device, mem_info)
 *     check_status(__status__)
 *     return mem_info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_mem_info_py);
  __pyx_r = ((PyObject *)__pyx_v_mem_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22090
 * 
 * 
 * cpdef object device_get_conf_compute_mem_size_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing Protected and Unprotected Memory Sizes.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_mem_size_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_mem_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_279device_get_conf_compute_mem_size_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_278device_get_conf_compute_mem_size_info, "device_get_conf_compute_mem_size_info(intptr_t device)\n\nGet Conf Computing Protected and Unprotected Memory Sizes.\n\nArgs:\n    device (intptr_t): Device handle.\n\nReturns:\n    nvmlConfComputeMemSizeInfo_t: Protected/Unprotected Memory sizes.\n\n.. seealso:: `nvmlDeviceGetConfComputeMemSizeInfo`");
/* Method-table entry using the fastcall + keywords calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_279device_get_conf_compute_mem_size_info = {"device_get_conf_compute_mem_size_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_279device_get_conf_compute_mem_size_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_278device_get_conf_compute_mem_size_info};
/* Wrapper: unpacks exactly one positional-or-keyword argument `device`,
 * converts it to intptr_t, and forwards to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_279device_get_conf_compute_mem_size_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_conf_compute_mem_size_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `< 0` sits outside unlikely(); where unlikely() expands to
     * __builtin_expect(!!(x),0) this condition cannot fire — Cython codegen
     * quirk, intentionally left untouched. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22090, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals, then merge keyword arguments. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22090, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_conf_compute_mem_size_info", 0) < (0)) __PYX_ERR(0, 22090, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_mem_size_info", 1, 1, 1, i); __PYX_ERR(0, 22090, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22090, __pyx_L3_error)
    }
    /* NOTE(review): PyLong_AsSsize_t is used for an intptr_t target — assumes
     * Py_ssize_t and intptr_t are layout-compatible on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22090, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_mem_size_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22090, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_mem_size_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_278device_get_conf_compute_mem_size_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl half: forwards the converted intptr_t device handle to the C-level
 * cpdef body (skip_dispatch=1); returns new ref or NULL with traceback. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_278device_get_conf_compute_mem_size_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_mem_size_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_mem_size_info(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_mem_size_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22109
 * 
 * 
 * cpdef unsigned int system_get_conf_compute_gpus_ready_state() except? 0:             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPUs ready state.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_281system_get_conf_compute_gpus_ready_state(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level body of the cpdef `system_get_conf_compute_gpus_ready_state()
 * except? 0`: calls nvmlSystemGetConfComputeGpusReadyState with the GIL
 * released and returns the ready-state flag. Declared `except? 0`, so a
 * return of 0 is ambiguous and callers must additionally check
 * PyErr_Occurred(). No Python objects are created here, hence no RefNanny. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_gpus_ready_state(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_is_accepting_work;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22118
 *     """
 *     cdef unsigned int is_accepting_work
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeGpusReadyState(&is_accepting_work)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22119
 *     cdef unsigned int is_accepting_work
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeGpusReadyState(&is_accepting_work)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return is_accepting_work
 */
        /* NOTE(review): sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol; exception check re-acquires the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeGpusReadyState((&__pyx_v_is_accepting_work)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22119, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22118
 *     """
 *     cdef unsigned int is_accepting_work
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeGpusReadyState(&is_accepting_work)
 *     check_status(__status__)
 */
      /* Both exit paths restore the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22120
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeGpusReadyState(&is_accepting_work)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return is_accepting_work
 * 
 */
  /* check_status returns 1 when it raised (status != success). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22120, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22121
 *         __status__ = nvmlSystemGetConfComputeGpusReadyState(&is_accepting_work)
 *     check_status(__status__)
 *     return is_accepting_work             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_is_accepting_work;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22109
 * 
 * 
 * cpdef unsigned int system_get_conf_compute_gpus_ready_state() except? 0:             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPUs ready state.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_gpus_ready_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_281system_get_conf_compute_gpus_ready_state(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_280system_get_conf_compute_gpus_ready_state, "system_get_conf_compute_gpus_ready_state() -> unsigned int\n\nGet Conf Computing GPUs ready state.\n\nReturns:\n    unsigned int: Returns GPU current work accepting state, NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE.\n\n.. seealso:: `nvmlSystemGetConfComputeGpusReadyState`");
/* Method-table entry; METH_NOARGS matches the zero-argument cpdef signature. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_281system_get_conf_compute_gpus_ready_state = {"system_get_conf_compute_gpus_ready_state", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_281system_get_conf_compute_gpus_ready_state, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_280system_get_conf_compute_gpus_ready_state};
/* Thin wrapper: delegates to the impl function; returns new ref or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_281system_get_conf_compute_gpus_ready_state(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_conf_compute_gpus_ready_state (wrapper)", 0);
  /* NOTE(review): macro does not evaluate its arguments under METH_NOARGS —
   * Cython codegen artifact, left as-is. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_280system_get_conf_compute_gpus_ready_state(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl half: calls the C-level cpdef body (declared `except? 0`, so a 0
 * result must be disambiguated with PyErr_Occurred) and boxes the unsigned
 * int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_280system_get_conf_compute_gpus_ready_state(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_gpus_ready_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is a legal return value; only treat it as an error when an exception
   * is actually pending (the `except? 0` convention). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_gpus_ready_state(1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22109, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_gpus_ready_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22124
 * 
 * 
 * cpdef object device_get_conf_compute_protected_memory_usage(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing protected memory usage.
 * 
*/

/* Forward declaration of the Python wrapper (fastcall or tuple+dict ABI,
 * selected at compile time). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_283device_get_conf_compute_protected_memory_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef
 * `device_get_conf_compute_protected_memory_usage(device)`: builds a Memory
 * wrapper, fills its embedded nvmlMemory_t via
 * nvmlDeviceGetConfComputeProtectedMemoryUsage (GIL released; `device` is an
 * intptr_t reinterpreted as an NVML device handle), raises through
 * check_status() on failure, and returns the populated wrapper. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_protected_memory_usage(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *__pyx_v_memory_py = 0;
  nvmlMemory_t *__pyx_v_memory;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_protected_memory_usage", 0);

  /* "cuda/bindings/_nvml.pyx":22135
 *     .. seealso:: `nvmlDeviceGetConfComputeProtectedMemoryUsage`
 *     """
 *     cdef Memory memory_py = Memory()             # <<<<<<<<<<<<<<
 *     cdef nvmlMemory_t *memory = <nvmlMemory_t *><intptr_t>(memory_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the Memory type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22135, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_memory_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22136
 *     """
 *     cdef Memory memory_py = Memory()
 *     cdef nvmlMemory_t *memory = <nvmlMemory_t *><intptr_t>(memory_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeProtectedMemoryUsage(<Device>device, memory)
 */
  /* Struct pointer remains valid while memory_py is held below. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory *)__pyx_v_memory_py->__pyx_vtab)->_get_ptr(__pyx_v_memory_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22136, __pyx_L1_error)
  __pyx_v_memory = ((nvmlMemory_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22137
 *     cdef Memory memory_py = Memory()
 *     cdef nvmlMemory_t *memory = <nvmlMemory_t *><intptr_t>(memory_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeProtectedMemoryUsage(<Device>device, memory)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22138
 *     cdef nvmlMemory_t *memory = <nvmlMemory_t *><intptr_t>(memory_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeProtectedMemoryUsage(<Device>device, memory)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return memory_py
 */
        /* NOTE(review): sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol; exception check re-acquires the GIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeProtectedMemoryUsage(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_memory); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22138, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22137
 *     cdef Memory memory_py = Memory()
 *     cdef nvmlMemory_t *memory = <nvmlMemory_t *><intptr_t>(memory_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeProtectedMemoryUsage(<Device>device, memory)
 *     check_status(__status__)
 */
      /* Both exit paths restore the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22139
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeProtectedMemoryUsage(<Device>device, memory)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return memory_py
 * 
 */
  /* check_status returns 1 when it raised (status != success). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22139, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22140
 *         __status__ = nvmlDeviceGetConfComputeProtectedMemoryUsage(<Device>device, memory)
 *     check_status(__status__)
 *     return memory_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_memory_py);
  __pyx_r = ((PyObject *)__pyx_v_memory_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22124
 * 
 * 
 * cpdef object device_get_conf_compute_protected_memory_usage(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing protected memory usage.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_protected_memory_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_memory_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Declarations (proto, docstring, method-table entry) for the wrapper whose
 * definition follows; it parses one `device` argument via the fastcall ABI. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_283device_get_conf_compute_protected_memory_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_282device_get_conf_compute_protected_memory_usage, "device_get_conf_compute_protected_memory_usage(intptr_t device)\n\nGet Conf Computing protected memory usage.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlMemory_t: Reference in which to return the memory information.\n\n.. seealso:: `nvmlDeviceGetConfComputeProtectedMemoryUsage`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_283device_get_conf_compute_protected_memory_usage = {"device_get_conf_compute_protected_memory_usage", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_283device_get_conf_compute_protected_memory_usage, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_282device_get_conf_compute_protected_memory_usage};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_283device_get_conf_compute_protected_memory_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_conf_compute_protected_memory_usage (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22124, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22124, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_conf_compute_protected_memory_usage", 0) < (0)) __PYX_ERR(0, 22124, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_protected_memory_usage", 1, 1, 1, i); __PYX_ERR(0, 22124, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22124, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22124, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_protected_memory_usage", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22124, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_protected_memory_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_282device_get_conf_compute_protected_memory_usage(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Plain-Python dispatch for device_get_conf_compute_protected_memory_usage:
 * forwards to the C-level cpdef implementation with skip_dispatch=1 and
 * returns the resulting object, or NULL (with the exception already set)
 * if the implementation failed. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_282device_get_conf_compute_protected_memory_usage(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  PyObject *result_obj = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_protected_memory_usage", 0);
  /* A NULL result means a Python exception is already set by the impl. */
  result_obj = __pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_protected_memory_usage(__pyx_v_device, 1);
  if (unlikely(!result_obj)) __PYX_ERR(0, 22124, __pyx_L_fail)
  __Pyx_GOTREF(result_obj);
  __pyx_r = result_obj;
  result_obj = NULL;
  goto __pyx_L_done;

  /* error path: record traceback context and return NULL */
  __pyx_L_fail:;
  __Pyx_XDECREF(result_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_protected_memory_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L_done:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22143
 * 
 * 
 * cpdef object device_get_conf_compute_gpu_certificate(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPU certificate details.
 * 
*/

/* Forward declaration of the Python-level fastcall wrapper (defined below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_285device_get_conf_compute_gpu_certificate(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_conf_compute_gpu_certificate.
 * Allocates a ConfComputeGpuCertificate Python wrapper object, obtains the
 * address of its embedded nvmlConfComputeGpuCertificate_t struct via
 * _get_ptr(), calls the NVML binding with the GIL released so the struct is
 * filled in place, validates the status with check_status(), and returns the
 * populated wrapper object.  Returns NULL (0) with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_certificate(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *__pyx_v_gpu_cert_py = 0;
  nvmlConfComputeGpuCertificate_t *__pyx_v_gpu_cert;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_gpu_certificate", 0);

  /* "cuda/bindings/_nvml.pyx":22154
 *     .. seealso:: `nvmlDeviceGetConfComputeGpuCertificate`
 *     """
 *     cdef ConfComputeGpuCertificate gpu_cert_py = ConfComputeGpuCertificate()             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGpuCertificate_t *gpu_cert = <nvmlConfComputeGpuCertificate_t *><intptr_t>(gpu_cert_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the output wrapper type with a zero-argument vectorcall
   * (the __pyx_t_3 offset trick selects the no-self call form). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22154, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_gpu_cert_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22155
 *     """
 *     cdef ConfComputeGpuCertificate gpu_cert_py = ConfComputeGpuCertificate()
 *     cdef nvmlConfComputeGpuCertificate_t *gpu_cert = <nvmlConfComputeGpuCertificate_t *><intptr_t>(gpu_cert_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeGpuCertificate(<Device>device, gpu_cert)
 */
  /* _get_ptr() yields the address (as intptr_t) of the C struct owned by the
   * wrapper object; NVML will write the certificate data into it directly. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)__pyx_v_gpu_cert_py->__pyx_vtab)->_get_ptr(__pyx_v_gpu_cert_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22155, __pyx_L1_error)
  __pyx_v_gpu_cert = ((nvmlConfComputeGpuCertificate_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22156
 *     cdef ConfComputeGpuCertificate gpu_cert_py = ConfComputeGpuCertificate()
 *     cdef nvmlConfComputeGpuCertificate_t *gpu_cert = <nvmlConfComputeGpuCertificate_t *><intptr_t>(gpu_cert_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeGpuCertificate(<Device>device, gpu_cert)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22157
 *     cdef nvmlConfComputeGpuCertificate_t *gpu_cert = <nvmlConfComputeGpuCertificate_t *><intptr_t>(gpu_cert_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeGpuCertificate(<Device>device, gpu_cert)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gpu_cert_py
 */
        /* NVML call runs with the GIL released; the sentinel status value
         * (presumably set when the NVML symbol failed to load -- inferred
         * from the name, TODO confirm) triggers an exception check that
         * temporarily reacquires the GIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuCertificate(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_gpu_cert); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22157, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22156
 *     cdef ConfComputeGpuCertificate gpu_cert_py = ConfComputeGpuCertificate()
 *     cdef nvmlConfComputeGpuCertificate_t *gpu_cert = <nvmlConfComputeGpuCertificate_t *><intptr_t>(gpu_cert_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeGpuCertificate(<Device>device, gpu_cert)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22158
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeGpuCertificate(<Device>device, gpu_cert)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return gpu_cert_py
 * 
 */
  /* check_status returns 1 (with a Python exception set) on a non-success
   * NVML status, in which case we jump to the error path. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22158, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22159
 *         __status__ = nvmlDeviceGetConfComputeGpuCertificate(<Device>device, gpu_cert)
 *     check_status(__status__)
 *     return gpu_cert_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: hand back a new reference to the populated wrapper object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_gpu_cert_py);
  __pyx_r = ((PyObject *)__pyx_v_gpu_cert_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22143
 * 
 * 
 * cpdef object device_get_conf_compute_gpu_certificate(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPU certificate details.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_gpu_certificate", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_gpu_cert_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Fastcall wrapper for device_get_conf_compute_gpu_certificate: parses the
 * single positional/keyword argument `device` as an integer, converts it to
 * intptr_t, and forwards to the plain-Python dispatch function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_285device_get_conf_compute_gpu_certificate(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_284device_get_conf_compute_gpu_certificate, "device_get_conf_compute_gpu_certificate(intptr_t device)\n\nGet Conf Computing GPU certificate details.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlConfComputeGpuCertificate_t: Reference in which to return the gpu certificate information.\n\n.. seealso:: `nvmlDeviceGetConfComputeGpuCertificate`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_285device_get_conf_compute_gpu_certificate = {"device_get_conf_compute_gpu_certificate", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_285device_get_conf_compute_gpu_certificate, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_284device_get_conf_compute_gpu_certificate};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_285device_get_conf_compute_gpu_certificate(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_conf_compute_gpu_certificate (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); on GCC/Clang
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields 0 or 1,
     * so "unlikely(len) < 0" was always false and the negative-length error
     * path was dead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22143, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22143, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_conf_compute_gpu_certificate", 0) < (0)) __PYX_ERR(0, 22143, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_gpu_certificate", 1, 1, 1, i); __PYX_ERR(0, 22143, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22143, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22143, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_gpu_certificate", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22143, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_gpu_certificate", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_284device_get_conf_compute_gpu_certificate(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Plain-Python dispatch for device_get_conf_compute_gpu_certificate:
 * forwards to the C-level cpdef implementation with skip_dispatch=1 and
 * returns the resulting object, or NULL (with the exception already set)
 * if the implementation failed. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_284device_get_conf_compute_gpu_certificate(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  PyObject *result_obj = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_gpu_certificate", 0);
  /* A NULL result means a Python exception is already set by the impl. */
  result_obj = __pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_certificate(__pyx_v_device, 1);
  if (unlikely(!result_obj)) __PYX_ERR(0, 22143, __pyx_L_fail)
  __Pyx_GOTREF(result_obj);
  __pyx_r = result_obj;
  result_obj = NULL;
  goto __pyx_L_done;

  /* error path: record traceback context and return NULL */
  __pyx_L_fail:;
  __Pyx_XDECREF(result_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_gpu_certificate", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L_done:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22162
 * 
 * 
 * cpdef object device_get_conf_compute_gpu_attestation_report(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPU attestation report.
 * 
*/

/* Forward declaration of the Python-level fastcall wrapper (defined below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_287device_get_conf_compute_gpu_attestation_report(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of device_get_conf_compute_gpu_attestation_report.
 * Allocates a ConfComputeGpuAttestationReport Python wrapper object, obtains
 * the address of its embedded nvmlConfComputeGpuAttestationReport_t struct via
 * _get_ptr(), calls the NVML binding with the GIL released so the struct is
 * filled in place, validates the status with check_status(), and returns the
 * populated wrapper object.  Returns NULL (0) with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_attestation_report(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *__pyx_v_gpu_atst_report_py = 0;
  nvmlConfComputeGpuAttestationReport_t *__pyx_v_gpu_atst_report;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_gpu_attestation_report", 0);

  /* "cuda/bindings/_nvml.pyx":22173
 *     .. seealso:: `nvmlDeviceGetConfComputeGpuAttestationReport`
 *     """
 *     cdef ConfComputeGpuAttestationReport gpu_atst_report_py = ConfComputeGpuAttestationReport()             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = <nvmlConfComputeGpuAttestationReport_t *><intptr_t>(gpu_atst_report_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the output wrapper type with a zero-argument vectorcall
   * (the __pyx_t_3 offset trick selects the no-self call form). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22173, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_gpu_atst_report_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22174
 *     """
 *     cdef ConfComputeGpuAttestationReport gpu_atst_report_py = ConfComputeGpuAttestationReport()
 *     cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = <nvmlConfComputeGpuAttestationReport_t *><intptr_t>(gpu_atst_report_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(<Device>device, gpu_atst_report)
 */
  /* _get_ptr() yields the address (as intptr_t) of the C struct owned by the
   * wrapper object; NVML will write the attestation report into it directly. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)__pyx_v_gpu_atst_report_py->__pyx_vtab)->_get_ptr(__pyx_v_gpu_atst_report_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22174, __pyx_L1_error)
  __pyx_v_gpu_atst_report = ((nvmlConfComputeGpuAttestationReport_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22175
 *     cdef ConfComputeGpuAttestationReport gpu_atst_report_py = ConfComputeGpuAttestationReport()
 *     cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = <nvmlConfComputeGpuAttestationReport_t *><intptr_t>(gpu_atst_report_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(<Device>device, gpu_atst_report)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22176
 *     cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = <nvmlConfComputeGpuAttestationReport_t *><intptr_t>(gpu_atst_report_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(<Device>device, gpu_atst_report)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gpu_atst_report_py
 */
        /* NVML call runs with the GIL released; the sentinel status value
         * (presumably set when the NVML symbol failed to load -- inferred
         * from the name, TODO confirm) triggers an exception check that
         * temporarily reacquires the GIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuAttestationReport(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_gpu_atst_report); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22176, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22175
 *     cdef ConfComputeGpuAttestationReport gpu_atst_report_py = ConfComputeGpuAttestationReport()
 *     cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = <nvmlConfComputeGpuAttestationReport_t *><intptr_t>(gpu_atst_report_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(<Device>device, gpu_atst_report)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22177
 *     with nogil:
 *         __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(<Device>device, gpu_atst_report)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return gpu_atst_report_py
 * 
 */
  /* check_status returns 1 (with a Python exception set) on a non-success
   * NVML status, in which case we jump to the error path. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22177, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22178
 *         __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(<Device>device, gpu_atst_report)
 *     check_status(__status__)
 *     return gpu_atst_report_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: hand back a new reference to the populated wrapper object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_gpu_atst_report_py);
  __pyx_r = ((PyObject *)__pyx_v_gpu_atst_report_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22162
 * 
 * 
 * cpdef object device_get_conf_compute_gpu_attestation_report(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPU attestation report.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_gpu_attestation_report", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_gpu_atst_report_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Fastcall wrapper for device_get_conf_compute_gpu_attestation_report: parses
 * the single positional/keyword argument `device` as an integer, converts it
 * to intptr_t, and forwards to the plain-Python dispatch function below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_287device_get_conf_compute_gpu_attestation_report(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_286device_get_conf_compute_gpu_attestation_report, "device_get_conf_compute_gpu_attestation_report(intptr_t device)\n\nGet Conf Computing GPU attestation report.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlConfComputeGpuAttestationReport_t: Reference in which to return the gpu attestation report.\n\n.. seealso:: `nvmlDeviceGetConfComputeGpuAttestationReport`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_287device_get_conf_compute_gpu_attestation_report = {"device_get_conf_compute_gpu_attestation_report", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_287device_get_conf_compute_gpu_attestation_report, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_286device_get_conf_compute_gpu_attestation_report};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_287device_get_conf_compute_gpu_attestation_report(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_conf_compute_gpu_attestation_report (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); on GCC/Clang
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields 0 or 1,
     * so "unlikely(len) < 0" was always false and the negative-length error
     * path was dead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22162, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22162, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_conf_compute_gpu_attestation_report", 0) < (0)) __PYX_ERR(0, 22162, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_gpu_attestation_report", 1, 1, 1, i); __PYX_ERR(0, 22162, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22162, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22162, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_conf_compute_gpu_attestation_report", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22162, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_gpu_attestation_report", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_286device_get_conf_compute_gpu_attestation_report(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Plain-Python dispatch for device_get_conf_compute_gpu_attestation_report:
 * forwards to the C-level cpdef implementation with skip_dispatch=1 and
 * returns the resulting object, or NULL (with the exception already set)
 * if the implementation failed. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_286device_get_conf_compute_gpu_attestation_report(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  PyObject *result_obj = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_conf_compute_gpu_attestation_report", 0);
  /* A NULL result means a Python exception is already set by the impl. */
  result_obj = __pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_attestation_report(__pyx_v_device, 1);
  if (unlikely(!result_obj)) __PYX_ERR(0, 22162, __pyx_L_fail)
  __Pyx_GOTREF(result_obj);
  __pyx_r = result_obj;
  result_obj = NULL;
  goto __pyx_L_done;

  /* error path: record traceback context and return NULL */
  __pyx_L_fail:;
  __Pyx_XDECREF(result_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_conf_compute_gpu_attestation_report", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L_done:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22181
 * 
 * 
 * cpdef object system_get_conf_compute_key_rotation_threshold_info():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing key rotation threshold detail.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_289system_get_conf_compute_key_rotation_threshold_info(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C entry point for cpdef `system_get_conf_compute_key_rotation_threshold_info()`:
 * allocates a ConfComputeGetKeyRotationThresholdInfo_v1 Python wrapper object,
 * stamps its NVML versioned-struct header, calls
 * nvmlSystemGetConfComputeKeyRotationThresholdInfo with the GIL released,
 * raises via check_status() on failure, and returns the populated wrapper. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_key_rotation_threshold_info(CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *__pyx_v_p_key_rotation_thr_info_py = 0;
  nvmlConfComputeGetKeyRotationThresholdInfo_t *__pyx_v_p_key_rotation_thr_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_key_rotation_threshold_info", 0);

  /* "cuda/bindings/_nvml.pyx":22189
 *     .. seealso:: `nvmlSystemGetConfComputeKeyRotationThresholdInfo`
 *     """
 *     cdef ConfComputeGetKeyRotationThresholdInfo_v1 p_key_rotation_thr_info_py = ConfComputeGetKeyRotationThresholdInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_t *p_key_rotation_thr_info = <nvmlConfComputeGetKeyRotationThresholdInfo_t *><intptr_t>(p_key_rotation_thr_info_py._get_ptr())
 *     p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24)
*/
  /* Instantiate the Python wrapper class that owns the underlying NVML
   * struct storage (vectorcall with zero arguments). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22189, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_key_rotation_thr_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22190
 *     """
 *     cdef ConfComputeGetKeyRotationThresholdInfo_v1 p_key_rotation_thr_info_py = ConfComputeGetKeyRotationThresholdInfo_v1()
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_t *p_key_rotation_thr_info = <nvmlConfComputeGetKeyRotationThresholdInfo_t *><intptr_t>(p_key_rotation_thr_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* Obtain the raw struct pointer held by the wrapper (returned as an
   * integer address via the class's _get_ptr vtab slot). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)__pyx_v_p_key_rotation_thr_info_py->__pyx_vtab)->_get_ptr(__pyx_v_p_key_rotation_thr_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22190, __pyx_L1_error)
  __pyx_v_p_key_rotation_thr_info = ((nvmlConfComputeGetKeyRotationThresholdInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22191
 *     cdef ConfComputeGetKeyRotationThresholdInfo_v1 p_key_rotation_thr_info_py = ConfComputeGetKeyRotationThresholdInfo_v1()
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_t *p_key_rotation_thr_info = <nvmlConfComputeGetKeyRotationThresholdInfo_t *><intptr_t>(p_key_rotation_thr_info_py._get_ptr())
 *     p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info)
*/
  /* NVML versioned-struct convention: low 24 bits = struct size,
   * bits 24+ = API version (here v1, i.e. 1 << 24 == 0x1000000). */
  __pyx_v_p_key_rotation_thr_info->version = ((sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":22192
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_t *p_key_rotation_thr_info = <nvmlConfComputeGetKeyRotationThresholdInfo_t *><intptr_t>(p_key_rotation_thr_info_py._get_ptr())
 *     p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22193
 *     p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_key_rotation_thr_info_py
*/
        /* The sentinel return value signals that the lazy library loader
         * itself failed; in that case a Python exception is already set. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeKeyRotationThresholdInfo(__pyx_v_p_key_rotation_thr_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22193, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22192
 *     cdef nvmlConfComputeGetKeyRotationThresholdInfo_t *p_key_rotation_thr_info = <nvmlConfComputeGetKeyRotationThresholdInfo_t *><intptr_t>(p_key_rotation_thr_info_py._get_ptr())
 *     p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22194
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_key_rotation_thr_info_py
 * 
*/
  /* check_status raises NVMLError (returns 1) for any non-success code. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22194, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22195
 *         __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info)
 *     check_status(__status__)
 *     return p_key_rotation_thr_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_key_rotation_thr_info_py);
  __pyx_r = ((PyObject *)__pyx_v_p_key_rotation_thr_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22181
 * 
 * 
 * cpdef object system_get_conf_compute_key_rotation_threshold_info():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing key rotation threshold detail.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_key_rotation_threshold_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_key_rotation_thr_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS CPython wrapper: no argument parsing needed, simply forwards
 * to the def-body shim and propagates its result (NULL on error). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_289system_get_conf_compute_key_rotation_threshold_info(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_288system_get_conf_compute_key_rotation_threshold_info, "system_get_conf_compute_key_rotation_threshold_info()\n\nGet Conf Computing key rotation threshold detail.\n\nReturns:\n    nvmlConfComputeGetKeyRotationThresholdInfo_v1_t: Reference in which to return the key rotation threshold data.\n\n.. seealso:: `nvmlSystemGetConfComputeKeyRotationThresholdInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_289system_get_conf_compute_key_rotation_threshold_info = {"system_get_conf_compute_key_rotation_threshold_info", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_289system_get_conf_compute_key_rotation_threshold_info, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_288system_get_conf_compute_key_rotation_threshold_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_289system_get_conf_compute_key_rotation_threshold_info(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_conf_compute_key_rotation_threshold_info (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_288system_get_conf_compute_key_rotation_threshold_info(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-body shim for `system_get_conf_compute_key_rotation_threshold_info`:
 * delegates to the cpdef C entry point (skip-dispatch = 1) and returns its
 * new reference, or NULL after recording a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_288system_get_conf_compute_key_rotation_threshold_info(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_key_rotation_threshold_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_key_rotation_threshold_info(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_key_rotation_threshold_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22198
 * 
 * 
 * cpdef device_set_conf_compute_unprotected_mem_size(intptr_t device, unsigned long long size_ki_b):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing Unprotected Memory Size.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_291device_set_conf_compute_unprotected_mem_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for cpdef `device_set_conf_compute_unprotected_mem_size`:
 * calls nvmlDeviceSetConfComputeUnprotectedMemSize with the GIL released,
 * raises via check_status() on a non-success return, else returns None. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_conf_compute_unprotected_mem_size(intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_size_ki_b, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_conf_compute_unprotected_mem_size", 0);

  /* "cuda/bindings/_nvml.pyx":22207
 *     .. seealso:: `nvmlDeviceSetConfComputeUnprotectedMemSize`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetConfComputeUnprotectedMemSize(<Device>device, size_ki_b)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22208
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetConfComputeUnprotectedMemSize(<Device>device, size_ki_b)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals that the lazy library loader
         * itself failed; in that case a Python exception is already set. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetConfComputeUnprotectedMemSize(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_size_ki_b); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22208, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22207
 *     .. seealso:: `nvmlDeviceSetConfComputeUnprotectedMemSize`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetConfComputeUnprotectedMemSize(<Device>device, size_ki_b)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22209
 *     with nogil:
 *         __status__ = nvmlDeviceSetConfComputeUnprotectedMemSize(<Device>device, size_ki_b)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises NVMLError (returns 1) for any non-success code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22209, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22198
 * 
 * 
 * cpdef device_set_conf_compute_unprotected_mem_size(intptr_t device, unsigned long long size_ki_b):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing Unprotected Memory Size.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_conf_compute_unprotected_mem_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS CPython wrapper for
 * `device_set_conf_compute_unprotected_mem_size(device, size_ki_b)`:
 * unpacks/validates the two positional-or-keyword arguments, converts them
 * to intptr_t / unsigned long long, and forwards to the def-body shim.
 * Returns NULL with an exception set on any parsing or conversion error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_291device_set_conf_compute_unprotected_mem_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_290device_set_conf_compute_unprotected_mem_size, "device_set_conf_compute_unprotected_mem_size(intptr_t device, unsigned long long size_ki_b)\n\nSet Conf Computing Unprotected Memory Size.\n\nArgs:\n    device (intptr_t): Device Handle.\n    size_ki_b (unsigned long long): Unprotected Memory size to be set in KiB.\n\n.. seealso:: `nvmlDeviceSetConfComputeUnprotectedMemSize`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_291device_set_conf_compute_unprotected_mem_size = {"device_set_conf_compute_unprotected_mem_size", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_291device_set_conf_compute_unprotected_mem_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_290device_set_conf_compute_unprotected_mem_size};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_291device_set_conf_compute_unprotected_mem_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned PY_LONG_LONG __pyx_v_size_ki_b;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_conf_compute_unprotected_mem_size (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_size_ki_b,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. Under GCC/Clang
     * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)` whose value is
     * 0 or 1, so the `< 0` comparison was always false and a negative
     * (error) kwargs length was silently ignored. Parenthesize the whole
     * comparison so the error is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22198, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional args first, then merge
       * keyword values and verify both required arguments arrived. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22198, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22198, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_conf_compute_unprotected_mem_size", 0) < (0)) __PYX_ERR(0, 22198, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_conf_compute_unprotected_mem_size", 1, 2, 2, i); __PYX_ERR(0, 22198, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22198, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22198, __pyx_L3_error)
    }
    /* Convert the Python objects to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22198, __pyx_L3_error)
    __pyx_v_size_ki_b = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v_size_ki_b == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22198, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_conf_compute_unprotected_mem_size", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22198, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_conf_compute_unprotected_mem_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_290device_set_conf_compute_unprotected_mem_size(__pyx_self, __pyx_v_device, __pyx_v_size_ki_b);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-body shim for `device_set_conf_compute_unprotected_mem_size`:
 * forwards the unpacked C arguments to the cpdef C entry point
 * (skip-dispatch = 1) and returns its new reference (None on success),
 * or NULL after recording a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_290device_set_conf_compute_unprotected_mem_size(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_size_ki_b) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_conf_compute_unprotected_mem_size", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_conf_compute_unprotected_mem_size(__pyx_v_device, __pyx_v_size_ki_b, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_conf_compute_unprotected_mem_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22212
 * 
 * 
 * cpdef system_set_conf_compute_gpus_ready_state(unsigned int is_accepting_work):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing GPUs ready state.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_293system_set_conf_compute_gpus_ready_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for cpdef `system_set_conf_compute_gpus_ready_state`:
 * calls nvmlSystemSetConfComputeGpusReadyState with the GIL released,
 * raises via check_status() on a non-success return, else returns None. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_gpus_ready_state(unsigned int __pyx_v_is_accepting_work, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_set_conf_compute_gpus_ready_state", 0);

  /* "cuda/bindings/_nvml.pyx":22220
 *     .. seealso:: `nvmlSystemSetConfComputeGpusReadyState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemSetConfComputeGpusReadyState(is_accepting_work)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22221
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemSetConfComputeGpusReadyState(is_accepting_work)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals that the lazy library loader
         * itself failed; in that case a Python exception is already set. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeGpusReadyState(__pyx_v_is_accepting_work); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22221, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22220
 *     .. seealso:: `nvmlSystemSetConfComputeGpusReadyState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemSetConfComputeGpusReadyState(is_accepting_work)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22222
 *     with nogil:
 *         __status__ = nvmlSystemSetConfComputeGpusReadyState(is_accepting_work)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises NVMLError (returns 1) for any non-success code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22222, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22212
 * 
 * 
 * cpdef system_set_conf_compute_gpus_ready_state(unsigned int is_accepting_work):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing GPUs ready state.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_conf_compute_gpus_ready_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS CPython wrapper for
 * `system_set_conf_compute_gpus_ready_state(is_accepting_work)`:
 * unpacks/validates the single positional-or-keyword argument, converts it
 * to unsigned int, and forwards to the def-body shim. Returns NULL with an
 * exception set on any parsing or conversion error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_293system_set_conf_compute_gpus_ready_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_292system_set_conf_compute_gpus_ready_state, "system_set_conf_compute_gpus_ready_state(unsigned int is_accepting_work)\n\nSet Conf Computing GPUs ready state.\n\nArgs:\n    is_accepting_work (unsigned int): GPU accepting new work, NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE.\n\n.. seealso:: `nvmlSystemSetConfComputeGpusReadyState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_293system_set_conf_compute_gpus_ready_state = {"system_set_conf_compute_gpus_ready_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_293system_set_conf_compute_gpus_ready_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_292system_set_conf_compute_gpus_ready_state};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_293system_set_conf_compute_gpus_ready_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_is_accepting_work;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_set_conf_compute_gpus_ready_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_is_accepting_work,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. Under GCC/Clang
     * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)` whose value is
     * 0 or 1, so the `< 0` comparison was always false and a negative
     * (error) kwargs length was silently ignored. Parenthesize the whole
     * comparison so the error is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22212, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional arg first, then merge keyword
       * values and verify the required argument arrived. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22212, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_set_conf_compute_gpus_ready_state", 0) < (0)) __PYX_ERR(0, 22212, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_set_conf_compute_gpus_ready_state", 1, 1, 1, i); __PYX_ERR(0, 22212, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22212, __pyx_L3_error)
    }
    /* Convert the Python object to the C argument type. */
    __pyx_v_is_accepting_work = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_is_accepting_work == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22212, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_set_conf_compute_gpus_ready_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22212, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_conf_compute_gpus_ready_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_292system_set_conf_compute_gpus_ready_state(__pyx_self, __pyx_v_is_accepting_work);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-body shim for `system_set_conf_compute_gpus_ready_state`:
 * forwards the unpacked C argument to the cpdef C entry point
 * (skip-dispatch = 1) and returns its new reference (None on success),
 * or NULL after recording a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_292system_set_conf_compute_gpus_ready_state(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_is_accepting_work) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_set_conf_compute_gpus_ready_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_gpus_ready_state(__pyx_v_is_accepting_work, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_conf_compute_gpus_ready_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22225
 * 
 * 
 * cpdef system_set_conf_compute_key_rotation_threshold_info(intptr_t p_key_rotation_thr_info):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing key rotation threshold.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_295system_set_conf_compute_key_rotation_threshold_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for cpdef `system_set_conf_compute_key_rotation_threshold_info`:
 * reinterprets the caller-supplied integer address as a pointer to an
 * nvmlConfComputeSetKeyRotationThresholdInfo_t (caller owns the struct and
 * must pass a valid, correctly versioned address), calls
 * nvmlSystemSetConfComputeKeyRotationThresholdInfo with the GIL released,
 * raises via check_status() on a non-success return, else returns None. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_key_rotation_threshold_info(intptr_t __pyx_v_p_key_rotation_thr_info, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_set_conf_compute_key_rotation_threshold_info", 0);

  /* "cuda/bindings/_nvml.pyx":22233
 *     .. seealso:: `nvmlSystemSetConfComputeKeyRotationThresholdInfo`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemSetConfComputeKeyRotationThresholdInfo(<nvmlConfComputeSetKeyRotationThresholdInfo_t*>p_key_rotation_thr_info)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22234
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemSetConfComputeKeyRotationThresholdInfo(<nvmlConfComputeSetKeyRotationThresholdInfo_t*>p_key_rotation_thr_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals that the lazy library loader
         * itself failed; in that case a Python exception is already set. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeKeyRotationThresholdInfo(((nvmlConfComputeSetKeyRotationThresholdInfo_t *)__pyx_v_p_key_rotation_thr_info)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22234, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22233
 *     .. seealso:: `nvmlSystemSetConfComputeKeyRotationThresholdInfo`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemSetConfComputeKeyRotationThresholdInfo(<nvmlConfComputeSetKeyRotationThresholdInfo_t*>p_key_rotation_thr_info)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22235
 *     with nogil:
 *         __status__ = nvmlSystemSetConfComputeKeyRotationThresholdInfo(<nvmlConfComputeSetKeyRotationThresholdInfo_t*>p_key_rotation_thr_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises NVMLError (returns 1) for any non-success code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22235, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22225
 * 
 * 
 * cpdef system_set_conf_compute_key_rotation_threshold_info(intptr_t p_key_rotation_thr_info):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing key rotation threshold.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_conf_compute_key_rotation_threshold_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for
 * `system_set_conf_compute_key_rotation_threshold_info`.
 * Parses exactly one argument, `p_key_rotation_thr_info` (a pointer-sized
 * integer), from either the fastcall or the tuple/dict calling convention,
 * then dispatches to the cpdef implementation (__pyx_pf_...294...).
 * NOTE: generated code — do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_295system_set_conf_compute_key_rotation_threshold_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_294system_set_conf_compute_key_rotation_threshold_info, "system_set_conf_compute_key_rotation_threshold_info(intptr_t p_key_rotation_thr_info)\n\nSet Conf Computing key rotation threshold.\n\nArgs:\n    p_key_rotation_thr_info (intptr_t): Reference to the key rotation threshold data.\n\n.. seealso:: `nvmlSystemSetConfComputeKeyRotationThresholdInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_295system_set_conf_compute_key_rotation_threshold_info = {"system_set_conf_compute_key_rotation_threshold_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_295system_set_conf_compute_key_rotation_threshold_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_294system_set_conf_compute_key_rotation_threshold_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_295system_set_conf_compute_key_rotation_threshold_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_p_key_rotation_thr_info;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the parsed argument objects;
   * all entries are XDECREF'd on both the success and error paths below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_set_conf_compute_key_rotation_threshold_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_p_key_rotation_thr_info,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the macro around
     * the operand rather than the comparison; this matches the generator's
     * output for all wrappers in this file — confirm against upstream Cython. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22225, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: take positionals first, then let
       * __Pyx_ParseKeywords fill the remaining slots by name. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22225, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_set_conf_compute_key_rotation_threshold_info", 0) < (0)) __PYX_ERR(0, 22225, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_set_conf_compute_key_rotation_threshold_info", 1, 1, 1, i); __PYX_ERR(0, 22225, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22225, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with a pending exception signals
     * conversion failure. */
    __pyx_v_p_key_rotation_thr_info = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_p_key_rotation_thr_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22225, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_set_conf_compute_key_rotation_threshold_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22225, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_conf_compute_key_rotation_threshold_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_294system_set_conf_compute_key_rotation_threshold_info(__pyx_self, __pyx_v_p_key_rotation_thr_info);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-level body for
 * `system_set_conf_compute_key_rotation_threshold_info`: forwards the parsed
 * intptr_t to the cpdef C implementation (__pyx_f_...) with skip_dispatch=1
 * and returns its result (Py_None on success, NULL with an exception set on
 * failure). Generated code — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_294system_set_conf_compute_key_rotation_threshold_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_p_key_rotation_thr_info) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_set_conf_compute_key_rotation_threshold_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* skip_dispatch=1: call the C implementation directly, bypassing any
   * Python-level override lookup. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_key_rotation_threshold_info(__pyx_v_p_key_rotation_thr_info, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22225, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_conf_compute_key_rotation_threshold_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22238
 * 
 * 
 * cpdef object system_get_conf_compute_settings():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System Settings.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_297system_get_conf_compute_settings(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* Cython-generated cpdef implementation of `system_get_conf_compute_settings`:
 * allocates a SystemConfComputeSettings_v1 Python wrapper object, stamps the
 * versioned struct header, calls nvmlSystemGetConfComputeSettings with the
 * GIL released, raises via check_status on a non-success status, and returns
 * the populated wrapper. Generated code — do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_settings(CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *__pyx_v_settings_py = 0;
  nvmlSystemConfComputeSettings_t *__pyx_v_settings;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_settings", 0);

  /* "cuda/bindings/_nvml.pyx":22246
 *     .. seealso:: `nvmlSystemGetConfComputeSettings`
 *     """
 *     cdef SystemConfComputeSettings_v1 settings_py = SystemConfComputeSettings_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlSystemConfComputeSettings_t *settings = <nvmlSystemConfComputeSettings_t *><intptr_t>(settings_py._get_ptr())
 *     settings.version = sizeof(nvmlSystemConfComputeSettings_v1_t) | (1 << 24)
 */
  /* Instantiate the Python wrapper via a zero-argument vectorcall on its type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22246, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_settings_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22247
 *     """
 *     cdef SystemConfComputeSettings_v1 settings_py = SystemConfComputeSettings_v1()
 *     cdef nvmlSystemConfComputeSettings_t *settings = <nvmlSystemConfComputeSettings_t *><intptr_t>(settings_py._get_ptr())             # <<<<<<<<<<<<<<
 *     settings.version = sizeof(nvmlSystemConfComputeSettings_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* Borrow the wrapper's underlying C struct pointer; the wrapper object owns
   * the storage, so it must stay alive across the NVML call (it does — it is
   * only DECREF'd at function exit). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)__pyx_v_settings_py->__pyx_vtab)->_get_ptr(__pyx_v_settings_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22247, __pyx_L1_error)
  __pyx_v_settings = ((nvmlSystemConfComputeSettings_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22248
 *     cdef SystemConfComputeSettings_v1 settings_py = SystemConfComputeSettings_v1()
 *     cdef nvmlSystemConfComputeSettings_t *settings = <nvmlSystemConfComputeSettings_t *><intptr_t>(settings_py._get_ptr())
 *     settings.version = sizeof(nvmlSystemConfComputeSettings_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeSettings(settings)
 */
  /* NVML versioned-struct header: low bits carry sizeof(struct), bits 24+
   * carry the API version (here 1; 0x1000000 == 1 << 24). */
  __pyx_v_settings->version = ((sizeof(nvmlSystemConfComputeSettings_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":22249
 *     cdef nvmlSystemConfComputeSettings_t *settings = <nvmlSystemConfComputeSettings_t *><intptr_t>(settings_py._get_ptr())
 *     settings.version = sizeof(nvmlSystemConfComputeSettings_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeSettings(settings)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22250
 *     settings.version = sizeof(nvmlSystemConfComputeSettings_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeSettings(settings)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return settings_py
 */
        /* The sentinel INTERNAL_LOADING_ERROR status with a pending Python
         * exception (checked under a temporarily re-acquired GIL) means the
         * NVML symbol failed to load; propagate that exception. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeSettings(__pyx_v_settings); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22250, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22249
 *     cdef nvmlSystemConfComputeSettings_t *settings = <nvmlSystemConfComputeSettings_t *><intptr_t>(settings_py._get_ptr())
 *     settings.version = sizeof(nvmlSystemConfComputeSettings_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetConfComputeSettings(settings)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits of the nogil block re-acquire the GIL before continuing. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22251
 *     with nogil:
 *         __status__ = nvmlSystemGetConfComputeSettings(settings)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return settings_py
 * 
 */
  /* check_status raises (returns 1 with an exception set) on non-success. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22251, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22252
 *         __status__ = nvmlSystemGetConfComputeSettings(settings)
 *     check_status(__status__)
 *     return settings_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_settings_py);
  __pyx_r = ((PyObject *)__pyx_v_settings_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22238
 * 
 * 
 * cpdef object system_get_conf_compute_settings():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System Settings.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_settings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_settings_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated METH_NOARGS entry point for
 * `system_get_conf_compute_settings`. No argument parsing is needed; it
 * simply forwards to the Python-level body. Generated code — do not edit. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_297system_get_conf_compute_settings(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_296system_get_conf_compute_settings, "system_get_conf_compute_settings()\n\nGet Conf Computing System Settings.\n\nReturns:\n    nvmlSystemConfComputeSettings_v1_t: System CC settings.\n\n.. seealso:: `nvmlSystemGetConfComputeSettings`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_297system_get_conf_compute_settings = {"system_get_conf_compute_settings", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_297system_get_conf_compute_settings, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_296system_get_conf_compute_settings};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_297system_get_conf_compute_settings(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_conf_compute_settings (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this NOARGS
   * wrapper; __Pyx_KwValues_VARARGS is presumably a macro that discards its
   * arguments at preprocessing time (standard Cython codegen idiom) — confirm
   * against the macro definition earlier in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_296system_get_conf_compute_settings(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-level body for `system_get_conf_compute_settings`:
 * calls the cpdef C implementation directly (skip_dispatch=1) and returns the
 * resulting SystemConfComputeSettings_v1 wrapper, or NULL with an exception
 * set. Generated code — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_296system_get_conf_compute_settings(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_conf_compute_settings", 0);
  __Pyx_XDECREF(__pyx_r);
  /* skip_dispatch=1: bypass Python-level override lookup. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_settings(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_conf_compute_settings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22255
 * 
 * 
 * cpdef char device_get_gsp_firmware_version(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve GSP firmware version.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_299device_get_gsp_firmware_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated cpdef implementation of `device_get_gsp_firmware_version`
 * (declared `except? 0` in the .pyx): calls
 * nvmlDeviceGetGspFirmwareVersion(device, &version) with the GIL released,
 * raises via check_status on a non-success status, and returns the single
 * `char` written by the call.
 * NOTE(review): `version` is a single char on the stack passed by address;
 * if the underlying NVML entry point has string-buffer semantics this would
 * overflow — confirm against the cy_nvml wrapper's signature. Generated code
 * — do not edit by hand; regenerate from cuda/bindings/_nvml.pyx. */
static char __pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_version(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  char __pyx_v_version;
  nvmlReturn_t __pyx_v___status__;
  char __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22267
 *     """
 *     cdef char version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGspFirmwareVersion(<Device>device, &version)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22268
 *     cdef char version
 *     with nogil:
 *         __status__ = nvmlDeviceGetGspFirmwareVersion(<Device>device, &version)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return version
 */
        /* Sentinel INTERNAL_LOADING_ERROR with a pending exception (checked
         * under a re-acquired GIL) means the NVML symbol failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareVersion(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_version)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22268, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22267
 *     """
 *     cdef char version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGspFirmwareVersion(<Device>device, &version)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits of the nogil block re-acquire the GIL. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22269
 *     with nogil:
 *         __status__ = nvmlDeviceGetGspFirmwareVersion(<Device>device, &version)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return version
 * 
 */
  /* check_status raises (returns 1 with an exception set) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22269, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22270
 *         __status__ = nvmlDeviceGetGspFirmwareVersion(<Device>device, &version)
 *     check_status(__status__)
 *     return version             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_version;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22255
 * 
 * 
 * cpdef char device_get_gsp_firmware_version(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve GSP firmware version.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gsp_firmware_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  /* `except? 0` convention: 0 with an exception set signals an error (0 is
   * also a legal return value, hence the PyErr_Occurred() re-check at call
   * sites). */
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for `device_get_gsp_firmware_version`.
 * Parses exactly one argument, `device` (a pointer-sized integer handle),
 * from either calling convention, then dispatches to the Python-level body
 * (__pyx_pf_...298...). Generated code — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_299device_get_gsp_firmware_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_298device_get_gsp_firmware_version, "device_get_gsp_firmware_version(intptr_t device) -> char\n\nRetrieve GSP firmware version.\n\nArgs:\n    device (intptr_t): Device handle.\n\nReturns:\n    char: The retrieved GSP firmware version.\n\n.. seealso:: `nvmlDeviceGetGspFirmwareVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_299device_get_gsp_firmware_version = {"device_get_gsp_firmware_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_299device_get_gsp_firmware_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_298device_get_gsp_firmware_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_299device_get_gsp_firmware_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the parsed argument objects. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gsp_firmware_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22255, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22255, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gsp_firmware_version", 0) < (0)) __PYX_ERR(0, 22255, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gsp_firmware_version", 1, 1, 1, i); __PYX_ERR(0, 22255, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22255, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with a pending exception signals
     * conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22255, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gsp_firmware_version", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22255, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gsp_firmware_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_298device_get_gsp_firmware_version(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-level body for `device_get_gsp_firmware_version`:
 * calls the cpdef C implementation (declared `except? 0`), checks the
 * 0-with-exception error convention, and boxes the resulting char as a
 * Python int. Generated code — do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_298device_get_gsp_firmware_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  char __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gsp_firmware_version", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0`: a 0 return is only an error if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_version(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((char)0) && PyErr_Occurred())) __PYX_ERR(0, 22255, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_char(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gsp_firmware_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22273
 * 
 * 
 * cpdef tuple device_get_gsp_firmware_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve GSP firmware mode.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_301device_get_gsp_firmware_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated cpdef implementation of `device_get_gsp_firmware_mode`:
 * calls nvmlDeviceGetGspFirmwareMode(device, &is_enabled, &default_mode) with
 * the GIL released, raises via check_status on a non-success status, and
 * returns the two unsigned ints as a Python 2-tuple
 * (is_enabled, default_mode). Generated code — do not edit by hand;
 * regenerate from cuda/bindings/_nvml.pyx. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_is_enabled;
  unsigned int __pyx_v_default_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gsp_firmware_mode", 0);

  /* "cuda/bindings/_nvml.pyx":22289
 *     cdef unsigned int is_enabled
 *     cdef unsigned int default_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGspFirmwareMode(<Device>device, &is_enabled, &default_mode)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22290
 *     cdef unsigned int default_mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetGspFirmwareMode(<Device>device, &is_enabled, &default_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (is_enabled, default_mode)
 */
        /* Sentinel INTERNAL_LOADING_ERROR with a pending exception (checked
         * under a re-acquired GIL) means the NVML symbol failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_is_enabled), (&__pyx_v_default_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22290, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22289
 *     cdef unsigned int is_enabled
 *     cdef unsigned int default_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGspFirmwareMode(<Device>device, &is_enabled, &default_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Both exits of the nogil block re-acquire the GIL. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22291
 *     with nogil:
 *         __status__ = nvmlDeviceGetGspFirmwareMode(<Device>device, &is_enabled, &default_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (is_enabled, default_mode)
 * 
 */
  /* check_status raises (returns 1 with an exception set) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22291, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22292
 *         __status__ = nvmlDeviceGetGspFirmwareMode(<Device>device, &is_enabled, &default_mode)
 *     check_status(__status__)
 *     return (is_enabled, default_mode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both out-parameters and steal the references into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_is_enabled); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_default_mode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 22292, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 22292, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22273
 * 
 * 
 * cpdef tuple device_get_gsp_firmware_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve GSP firmware mode.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gsp_firmware_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for `device_get_gsp_firmware_mode`.
 * Parses exactly one argument, `device` (a pointer-sized integer handle),
 * from either calling convention, then dispatches to the Python-level body
 * (__pyx_pf_...300...). Generated code — do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_301device_get_gsp_firmware_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_300device_get_gsp_firmware_mode, "device_get_gsp_firmware_mode(intptr_t device) -> tuple\n\nRetrieve GSP firmware mode.\n\nArgs:\n    device (intptr_t): Device handle.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Pointer to specify if GSP firmware is enabled.\n    - unsigned int: Pointer to specify if GSP firmware is supported by default on ``device``.\n\n.. seealso:: `nvmlDeviceGetGspFirmwareMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_301device_get_gsp_firmware_mode = {"device_get_gsp_firmware_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_301device_get_gsp_firmware_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_300device_get_gsp_firmware_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_301device_get_gsp_firmware_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the parsed argument objects. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gsp_firmware_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22273, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22273, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gsp_firmware_mode", 0) < (0)) __PYX_ERR(0, 22273, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gsp_firmware_mode", 1, 1, 1, i); __PYX_ERR(0, 22273, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22273, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with a pending exception signals
     * conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22273, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gsp_firmware_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22273, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gsp_firmware_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_300device_get_gsp_firmware_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub backing the Python wrapper for the cpdef function
 * device_get_gsp_firmware_mode: forwards to the C-level implementation
 * with __pyx_skip_dispatch=1 (no Python-override lookup) and returns the
 * owned result object, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_300device_get_gsp_firmware_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gsp_firmware_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; a NULL return signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22273, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gsp_firmware_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22295
 * 
 * 
 * cpdef object device_get_sram_ecc_error_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get SRAM ECC error status of this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_303device_get_sram_ecc_error_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * device_get_sram_ecc_error_status(device):
 *   1. constructs an EccSramErrorStatus_v1 Python wrapper object,
 *   2. obtains the raw nvmlEccSramErrorStatus_t* it owns via _get_ptr(),
 *   3. stamps the versioned-struct header (size | version<<24),
 *   4. calls nvmlDeviceGetSramEccErrorStatus with the GIL released,
 *   5. raises via check_status() on a non-success NVML return code,
 *   6. returns the (populated) wrapper object.
 * Returns NULL with a Python exception set on any failure path. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_sram_ecc_error_status(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *__pyx_v_status_py = 0;
  nvmlEccSramErrorStatus_t *__pyx_v_status;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_sram_ecc_error_status", 0);

  /* "cuda/bindings/_nvml.pyx":22306
 *     .. seealso:: `nvmlDeviceGetSramEccErrorStatus`
 *     """
 *     cdef EccSramErrorStatus_v1 status_py = EccSramErrorStatus_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramErrorStatus_t *status = <nvmlEccSramErrorStatus_t *><intptr_t>(status_py._get_ptr())
 *     status.version = sizeof(nvmlEccSramErrorStatus_v1_t) | (1 << 24)
*/
  /* Instantiate the EccSramErrorStatus_v1 wrapper through the zero-argument
   * vectorcall path (__pyx_t_3 is the argument-offset used by the
   * PY_VECTORCALL_ARGUMENTS_OFFSET optimization). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22306, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_status_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22307
 *     """
 *     cdef EccSramErrorStatus_v1 status_py = EccSramErrorStatus_v1()
 *     cdef nvmlEccSramErrorStatus_t *status = <nvmlEccSramErrorStatus_t *><intptr_t>(status_py._get_ptr())             # <<<<<<<<<<<<<<
 *     status.version = sizeof(nvmlEccSramErrorStatus_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* Borrow the wrapper's internal struct pointer; the wrapper object keeps
   * the storage alive for the duration of this function. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)__pyx_v_status_py->__pyx_vtab)->_get_ptr(__pyx_v_status_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22307, __pyx_L1_error)
  __pyx_v_status = ((nvmlEccSramErrorStatus_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22308
 *     cdef EccSramErrorStatus_v1 status_py = EccSramErrorStatus_v1()
 *     cdef nvmlEccSramErrorStatus_t *status = <nvmlEccSramErrorStatus_t *><intptr_t>(status_py._get_ptr())
 *     status.version = sizeof(nvmlEccSramErrorStatus_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSramEccErrorStatus(<Device>device, status)
*/
  /* NVML versioned-struct header: low bytes = struct size, byte 3 = API
   * version (1); 0x1000000 is the pre-folded constant for (1 << 24). */
  __pyx_v_status->version = ((sizeof(nvmlEccSramErrorStatus_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":22309
 *     cdef nvmlEccSramErrorStatus_t *status = <nvmlEccSramErrorStatus_t *><intptr_t>(status_py._get_ptr())
 *     status.version = sizeof(nvmlEccSramErrorStatus_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSramEccErrorStatus(<Device>device, status)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22310
 *     status.version = sizeof(nvmlEccSramErrorStatus_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetSramEccErrorStatus(<Device>device, status)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return status_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramEccErrorStatus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_status); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22310, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22309
 *     cdef nvmlEccSramErrorStatus_t *status = <nvmlEccSramErrorStatus_t *><intptr_t>(status_py._get_ptr())
 *     status.version = sizeof(nvmlEccSramErrorStatus_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSramEccErrorStatus(<Device>device, status)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22311
 *     with nogil:
 *         __status__ = nvmlDeviceGetSramEccErrorStatus(<Device>device, status)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return status_py
 * 
*/
  /* check_status raises the mapped Python exception (returns 1) for any
   * non-success NVML return code. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22311, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22312
 *         __status__ = nvmlDeviceGetSramEccErrorStatus(<Device>device, status)
 *     check_status(__status__)
 *     return status_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_status_py);
  __pyx_r = ((PyObject *)__pyx_v_status_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22295
 * 
 * 
 * cpdef object device_get_sram_ecc_error_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get SRAM ECC error status of this device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_sram_ecc_error_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_status_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_303device_get_sram_ecc_error_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the Python-visible wrapper of
 * device_get_sram_ecc_error_status. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_302device_get_sram_ecc_error_status, "device_get_sram_ecc_error_status(intptr_t device)\n\nGet SRAM ECC error status of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlEccSramErrorStatus_v1_t: Returns SRAM ECC error status.\n\n.. seealso:: `nvmlDeviceGetSramEccErrorStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_303device_get_sram_ecc_error_status = {"device_get_sram_ecc_error_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_303device_get_sram_ecc_error_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_302device_get_sram_ecc_error_status};
/* Python wrapper: unpacks the single positional/keyword argument "device",
 * converts it to intptr_t, and forwards to the dispatch stub.
 * Returns NULL with a Python exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_303device_get_sram_ecc_error_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_sram_ecc_error_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: the original `unlikely(__pyx_kwds_len) < 0`
     * compared the result of __builtin_expect(!!(x), 0) — always 0 or 1 —
     * against 0, so a negative (error) count could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22295, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22295, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_sram_ecc_error_status", 0) < (0)) __PYX_ERR(0, 22295, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_sram_ecc_error_status", 1, 1, 1, i); __PYX_ERR(0, 22295, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22295, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22295, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_sram_ecc_error_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22295, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_sram_ecc_error_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_302device_get_sram_ecc_error_status(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub backing the Python wrapper for the cpdef function
 * device_get_sram_ecc_error_status: forwards to the C-level implementation
 * with __pyx_skip_dispatch=1 and returns the owned result object, or NULL
 * with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_302device_get_sram_ecc_error_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_sram_ecc_error_status", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; a NULL return signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_sram_ecc_error_status(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_sram_ecc_error_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22315
 * 
 * 
 * cpdef int device_get_accounting_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries the state of per process accounting mode.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_305device_get_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of
 * `cpdef int device_get_accounting_mode(intptr_t device) except? -1`:
 * calls nvmlDeviceGetAccountingMode with the GIL released, raises via
 * check_status() on a non-success NVML return code, and returns the
 * enable-state enum as an int. Per the `except? -1` contract, -1 with
 * a pending Python exception signals an error (a plain -1 return must
 * be disambiguated with PyErr_Occurred() by callers). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22327
 *     """
 *     cdef _EnableState mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingMode(<Device>device, &mode)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call;
   * `mode` is an out-parameter filled by the driver. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22328
 *     cdef _EnableState mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingMode(<Device>device, &mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>mode
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22328, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22327
 *     """
 *     cdef _EnableState mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingMode(<Device>device, &mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22329
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingMode(<Device>device, &mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>mode
 * 
*/
  /* check_status raises the mapped Python exception (returns 1) for any
   * non-success NVML return code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22329, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22330
 *         __status__ = nvmlDeviceGetAccountingMode(<Device>device, &mode)
 *     check_status(__status__)
 *     return <int>mode             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((int)__pyx_v_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22315
 * 
 * 
 * cpdef int device_get_accounting_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries the state of per process accounting mode.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_305device_get_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the Python-visible wrapper of
 * device_get_accounting_mode. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_304device_get_accounting_mode, "device_get_accounting_mode(intptr_t device) -> int\n\nQueries the state of per process accounting mode.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the current accounting mode.\n\n.. seealso:: `nvmlDeviceGetAccountingMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_305device_get_accounting_mode = {"device_get_accounting_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_305device_get_accounting_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_304device_get_accounting_mode};
/* Python wrapper: unpacks the single positional/keyword argument "device",
 * converts it to intptr_t, and forwards to the dispatch stub.
 * Returns NULL with a Python exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_305device_get_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_accounting_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: the original `unlikely(__pyx_kwds_len) < 0`
     * compared the result of __builtin_expect(!!(x), 0) — always 0 or 1 —
     * against 0, so a negative (error) count could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22315, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22315, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_accounting_mode", 0) < (0)) __PYX_ERR(0, 22315, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_accounting_mode", 1, 1, 1, i); __PYX_ERR(0, 22315, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22315, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22315, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_accounting_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22315, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_304device_get_accounting_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub backing the Python wrapper for the cpdef function
 * device_get_accounting_mode: calls the C-level implementation (which
 * returns an int with the `except? -1` error convention), boxes the
 * result into a Python int, and returns it, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_304device_get_accounting_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_accounting_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is an ambiguous sentinel: only an error if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22315, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22315, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22333
 * 
 * 
 * cpdef object device_get_accounting_stats(intptr_t device, unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Queries process's accounting stats.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_307device_get_accounting_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * device_get_accounting_stats(device, pid):
 *   1. constructs an AccountingStats Python wrapper object,
 *   2. obtains the raw nvmlAccountingStats_t* it owns via _get_ptr(),
 *   3. calls nvmlDeviceGetAccountingStats with the GIL released,
 *   4. raises via check_status() on a non-success NVML return code,
 *   5. returns the (populated) wrapper object.
 * Returns NULL with a Python exception set on any failure path. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_stats(intptr_t __pyx_v_device, unsigned int __pyx_v_pid, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_stats_py = 0;
  nvmlAccountingStats_t *__pyx_v_stats;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_accounting_stats", 0);

  /* "cuda/bindings/_nvml.pyx":22345
 *     .. seealso:: `nvmlDeviceGetAccountingStats`
 *     """
 *     cdef AccountingStats stats_py = AccountingStats()             # <<<<<<<<<<<<<<
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:
*/
  /* Instantiate the AccountingStats wrapper through the zero-argument
   * vectorcall path (__pyx_t_3 is the argument-offset used by the
   * PY_VECTORCALL_ARGUMENTS_OFFSET optimization). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22345, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_stats_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22346
 *     """
 *     cdef AccountingStats stats_py = AccountingStats()
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingStats(<Device>device, pid, stats)
*/
  /* Borrow the wrapper's internal struct pointer; the wrapper object keeps
   * the storage alive for the duration of this function. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_stats_py->__pyx_vtab)->_get_ptr(__pyx_v_stats_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22346, __pyx_L1_error)
  __pyx_v_stats = ((nvmlAccountingStats_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22347
 *     cdef AccountingStats stats_py = AccountingStats()
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingStats(<Device>device, pid, stats)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22348
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingStats(<Device>device, pid, stats)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return stats_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingStats(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_pid, __pyx_v_stats); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22348, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22347
 *     cdef AccountingStats stats_py = AccountingStats()
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingStats(<Device>device, pid, stats)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22349
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingStats(<Device>device, pid, stats)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return stats_py
 * 
*/
  /* check_status raises the mapped Python exception (returns 1) for any
   * non-success NVML return code. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22349, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22350
 *         __status__ = nvmlDeviceGetAccountingStats(<Device>device, pid, stats)
 *     check_status(__status__)
 *     return stats_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_stats_py);
  __pyx_r = ((PyObject *)__pyx_v_stats_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22333
 * 
 * 
 * cpdef object device_get_accounting_stats(intptr_t device, unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Queries process's accounting stats.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_stats_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_307device_get_accounting_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for the Python-visible wrapper of
 * device_get_accounting_stats. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_306device_get_accounting_stats, "device_get_accounting_stats(intptr_t device, unsigned int pid)\n\nQueries process's accounting stats.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    pid (unsigned int): Process Id of the target process to query stats for.\n\nReturns:\n    nvmlAccountingStats_t: Reference in which to return the process's accounting stats.\n\n.. seealso:: `nvmlDeviceGetAccountingStats`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_307device_get_accounting_stats = {"device_get_accounting_stats", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_307device_get_accounting_stats, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_306device_get_accounting_stats};
/* Python wrapper: unpacks the positional/keyword arguments "device" and
 * "pid", converts them to intptr_t / unsigned int, and forwards to the
 * dispatch stub. Returns NULL with a Python exception set on bad args. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_307device_get_accounting_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_pid;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_accounting_stats (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_pid,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: the original `unlikely(__pyx_kwds_len) < 0`
     * compared the result of __builtin_expect(!!(x), 0) — always 0 or 1 —
     * against 0, so a negative (error) count could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22333, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22333, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22333, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_accounting_stats", 0) < (0)) __PYX_ERR(0, 22333, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_accounting_stats", 1, 2, 2, i); __PYX_ERR(0, 22333, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22333, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22333, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22333, __pyx_L3_error)
    __pyx_v_pid = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_pid == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22333, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_accounting_stats", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22333, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_306device_get_accounting_stats(__pyx_self, __pyx_v_device, __pyx_v_pid);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body behind the Python wrapper for
 * device_get_accounting_stats: forwards to the shared cpdef C entry point
 * with __pyx_skip_dispatch=1 (we already arrived via the Python wrapper,
 * so re-dispatching back into Python must be skipped). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_306device_get_accounting_stats(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_pid) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_accounting_stats", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef entry point; it returns a new reference, or
   * NULL with an exception set (mapped to _nvml.pyx line 22333). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_stats(__pyx_v_device, __pyx_v_pid, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22353
 * 
 * 
 * cpdef object device_get_accounting_pids(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Queries list of processes that can be queried for accounting stats. The list of processes returned can be in running or terminated state.
 * 
*/

/* Forward declaration of the Python-level wrapper for
 * device_get_accounting_pids (the wrapper itself is emitted below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_309device_get_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point of the cpdef function device_get_accounting_pids:
 * two-phase NVML query — first call nvmlDeviceGetAccountingPids with a
 * NULL buffer to learn the pid count, then call it again to fill a
 * freshly allocated Cython view.array of unsigned int.  Returns the
 * array as a new Python reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_pids(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_pids = 0;
  unsigned int *__pyx_v_pids_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_accounting_pids", 0);

  /* "cuda/bindings/_nvml.pyx":22361
 *     .. seealso:: `nvmlDeviceGetAccountingPids`
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)
*/
  /* count[0] = 0: in/out element count for the size query below. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":22362
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
*/
  /* Phase 1: size-only query, run with the GIL released.  The
   * _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel indicates the NVML
   * shared library failed to load; __Pyx_ErrOccurredWithGIL then checks
   * for a pending Python exception. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22363
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if count[0] == 0:
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingPids(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22363, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":22362
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22364
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
*/
  /* NOTE(review): check_status_size (vs plain check_status below)
   * presumably tolerates NVML's "insufficient size" status, which the
   * NULL-buffer count query legitimately returns — confirm against its
   * definition elsewhere in this file. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 22364, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22365
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":22366
 *     check_status_size(__status__)
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
*/
    /* No pids: view.array cannot be built with shape (0,), so allocate a
     * one-element array and return an empty [:0] slice of it. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 22366, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 22366, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 22366, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 22366, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 22366, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22366, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 22366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":22365
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":22367
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:
*/
  /* Allocate the result buffer: a C-contiguous view.array of count[0]
   * unsigned ints (struct format "I"), built via keyword vectorcall. */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 22367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 22367, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 22367, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 22367, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 22367, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 22367, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 22367, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 22367, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_pids = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":22368
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, pids_ptr)
*/
  __pyx_v_pids_ptr = ((unsigned int *)__pyx_v_pids->data);

  /* "cuda/bindings/_nvml.pyx":22369
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)
*/
  /* Phase 2: repeat the NVML call, this time with the real buffer. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22370
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, pids_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pids
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingPids(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((unsigned int *)__pyx_v_count), __pyx_v_pids_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22370, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":22369
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22371
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pids
 * 
*/
  /* Strict status check this time: any NVML error raises. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 22371, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22372
 *         __status__ = nvmlDeviceGetAccountingPids(<Device>device, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)
 *     return pids             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pids);
  __pyx_r = ((PyObject *)__pyx_v_pids);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22353
 * 
 * 
 * cpdef object device_get_accounting_pids(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Queries list of processes that can be queried for accounting stats. The list of processes returned can be in running or terminated state.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pids);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL wrapper for device_get_accounting_pids: parses the single
 * positional-or-keyword argument "device", converts it to intptr_t, and
 * forwards to the _308 implementation body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_309device_get_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_308device_get_accounting_pids, "device_get_accounting_pids(intptr_t device)\n\nQueries list of processes that can be queried for accounting stats. The list of processes returned can be in running or terminated state.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetAccountingPids`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_309device_get_accounting_pids = {"device_get_accounting_pids", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_309device_get_accounting_pids, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_308device_get_accounting_pids};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_309device_get_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_accounting_pids (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` applies unlikely() to
     * the value, not the comparison; under GCC/Clang's !!-normalizing
     * unlikely() this branch can never fire.  Upstream Cython codegen
     * quirk — harmless, left as generated. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22353, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22353, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_accounting_pids", 0) < (0)) __PYX_ERR(0, 22353, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_accounting_pids", 1, 1, 1, i); __PYX_ERR(0, 22353, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22353, __pyx_L3_error)
    }
    /* intptr_t conversion via PyLong_AsSsize_t — assumes Py_ssize_t and
     * intptr_t share a representation (true on flat-pointer platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22353, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_accounting_pids", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22353, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_308device_get_accounting_pids(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body behind the Python wrapper for
 * device_get_accounting_pids: forwards to the shared cpdef C entry point
 * with __pyx_skip_dispatch=1 (already dispatched via the wrapper). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_308device_get_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_accounting_pids", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Returns a new reference to the pid array, or NULL on error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_pids(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22375
 * 
 * 
 * cpdef unsigned int device_get_accounting_buffer_size(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Returns the number of processes that the circular buffer with accounting pids can hold.
 * 
*/

/* Forward declaration of the Python-level wrapper for
 * device_get_accounting_buffer_size (emitted below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_311device_get_accounting_buffer_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point of the cpdef function device_get_accounting_buffer_size
 * (declared "except? 0"): queries NVML for the accounting circular-buffer
 * capacity.  Returns the capacity, or 0 with a Python exception set on
 * failure — callers disambiguate a genuine 0 via PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_buffer_size(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_buffer_size;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22387
 *     """
 *     cdef unsigned int buffer_size
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingBufferSize(<Device>device, &buffer_size)
 *     check_status(__status__)
*/
  /* NVML call executed with the GIL released; the
   * _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel flags a library-load
   * failure with a possibly pending Python exception. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22388
 *     cdef unsigned int buffer_size
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingBufferSize(<Device>device, &buffer_size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return buffer_size
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingBufferSize(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_buffer_size)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22388, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22387
 *     """
 *     cdef unsigned int buffer_size
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAccountingBufferSize(<Device>device, &buffer_size)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22389
 *     with nogil:
 *         __status__ = nvmlDeviceGetAccountingBufferSize(<Device>device, &buffer_size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return buffer_size
 * 
*/
  /* Raise a Python exception for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22389, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22390
 *         __status__ = nvmlDeviceGetAccountingBufferSize(<Device>device, &buffer_size)
 *     check_status(__status__)
 *     return buffer_size             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_buffer_size;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22375
 * 
 * 
 * cpdef unsigned int device_get_accounting_buffer_size(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Returns the number of processes that the circular buffer with accounting pids can hold.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_buffer_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  /* Error sentinel for the "except? 0" contract. */
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL wrapper for device_get_accounting_buffer_size: parses the
 * single positional-or-keyword argument "device", converts it to
 * intptr_t, and forwards to the _310 implementation body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_311device_get_accounting_buffer_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_310device_get_accounting_buffer_size, "device_get_accounting_buffer_size(intptr_t device) -> unsigned int\n\nReturns the number of processes that the circular buffer with accounting pids can hold.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference in which to provide the size (in number of elements) of the circular buffer for accounting stats.\n\n.. seealso:: `nvmlDeviceGetAccountingBufferSize`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_311device_get_accounting_buffer_size = {"device_get_accounting_buffer_size", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_311device_get_accounting_buffer_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_310device_get_accounting_buffer_size};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_311device_get_accounting_buffer_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_accounting_buffer_size (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` applies unlikely() to
     * the value, not the comparison; under GCC/Clang's !!-normalizing
     * unlikely() this branch can never fire.  Upstream Cython codegen
     * quirk — harmless, left as generated. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22375, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22375, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_accounting_buffer_size", 0) < (0)) __PYX_ERR(0, 22375, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_accounting_buffer_size", 1, 1, 1, i); __PYX_ERR(0, 22375, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22375, __pyx_L3_error)
    }
    /* intptr_t conversion via PyLong_AsSsize_t — assumes Py_ssize_t and
     * intptr_t share a representation (true on flat-pointer platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22375, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_accounting_buffer_size", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22375, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_buffer_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_310device_get_accounting_buffer_size(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body behind the Python wrapper for
 * device_get_accounting_buffer_size: calls the cpdef C entry point and
 * boxes the unsigned int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_310device_get_accounting_buffer_size(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_accounting_buffer_size", 0);
  __Pyx_XDECREF(__pyx_r);
  /* "except? 0" contract: a return of 0 only signals an error when a
   * Python exception is also pending, hence the PyErr_Occurred() check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_buffer_size(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22375, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_accounting_buffer_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22393
 * 
 * 
 * cpdef object device_get_retired_pages(intptr_t device, int cause):             # <<<<<<<<<<<<<<
 *     """Returns the list of retired pages by source, including pages that are pending retirement The address information provided from this API is the hardware address of the page that was retired. Note that this does not match the virtual address used in CUDA, but will match the address information in Xid 63.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_313device_get_retired_pages(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_retired_pages(device, cause)`.
 * Standard NVML two-call size-query pattern: the first
 * nvmlDeviceGetRetiredPages call passes a NULL address buffer to obtain the
 * retired-page count, then a contiguous cython.view.array of
 * `unsigned long long` ("Q" format) is allocated and filled by a second
 * call.  Both driver calls run with the GIL released; errors unwind through
 * the goto labels below, re-acquiring the GIL first. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages(intptr_t __pyx_v_device, int __pyx_v_cause, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_page_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_addresses = 0;
  unsigned PY_LONG_LONG *__pyx_v_addresses_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_retired_pages", 0);

  /* "cuda/bindings/_nvml.pyx":22402
 *     .. seealso:: `nvmlDeviceGetRetiredPages`
 *     """
 *     cdef unsigned int[1] page_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)
 */
  /* Cython initialises the 1-element stack array via a temporary + memcpy. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_page_count[0]), __pyx_t_1, sizeof(__pyx_v_page_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":22403
 *     """
 *     cdef unsigned int[1] page_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22404
 *     cdef unsigned int[1] page_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if page_count[0] == 0:
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to act
         * as the "Python exception raised" sentinel for the cy_nvml shim;
         * the GIL is briefly re-acquired to check PyErr. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__PageRetirementCause)__pyx_v_cause), ((unsigned int *)__pyx_v_page_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22404, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":22403
 *     """
 *     cdef unsigned int[1] page_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22405
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if page_count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 */
  /* check_status_size returns 1 when it has raised a Python exception. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 22405, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22406
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)
 *     check_status_size(__status__)
 *     if page_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_page_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":22407
 *     check_status_size(__status__)
 *     if page_count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef unsigned long long *addresses_ptr = <unsigned long long *>(addresses.data)
 */
    /* No retired pages: allocate a 1-element "Q" array and slice it to
     * length 0 so the return type matches the non-empty case. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned PY_LONG_LONG))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22407, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 22407, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 22407, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 22407, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_Q, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 22407, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 22407, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22407, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 22407, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":22406
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL)
 *     check_status_size(__status__)
 *     if page_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":22408
 *     if page_count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned long long *addresses_ptr = <unsigned long long *>(addresses.data)
 *     with nogil:
 */
  /* Allocate the page_count[0]-element output buffer for the second call. */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_page_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 22408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 22408, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned PY_LONG_LONG))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 22408, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 22408, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 22408, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_Q, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 22408, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 22408, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 22408, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_addresses = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":22409
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef unsigned long long *addresses_ptr = <unsigned long long *>(addresses.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, addresses_ptr)
 */
  __pyx_v_addresses_ptr = ((unsigned PY_LONG_LONG *)__pyx_v_addresses->data);

  /* "cuda/bindings/_nvml.pyx":22410
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef unsigned long long *addresses_ptr = <unsigned long long *>(addresses.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, addresses_ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22411
 *     cdef unsigned long long *addresses_ptr = <unsigned long long *>(addresses.data)
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, addresses_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return addresses
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__PageRetirementCause)__pyx_v_cause), ((unsigned int *)__pyx_v_page_count), __pyx_v_addresses_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22411, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":22410
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef unsigned long long *addresses_ptr = <unsigned long long *>(addresses.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, addresses_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22412
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, addresses_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return addresses
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 22412, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22413
 *         __status__ = nvmlDeviceGetRetiredPages(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, addresses_ptr)
 *     check_status(__status__)
 *     return addresses             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_addresses);
  __pyx_r = ((PyObject *)__pyx_v_addresses);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22393
 * 
 * 
 * cpdef object device_get_retired_pages(intptr_t device, int cause):             # <<<<<<<<<<<<<<
 *     """Returns the list of retired pages by source, including pages that are pending retirement The address information provided from this API is the hardware address of the page that was retired. Note that this does not match the virtual address used in CUDA, but will match the address information in Xid 63.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_addresses);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_313device_get_retired_pages(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_312device_get_retired_pages, "device_get_retired_pages(intptr_t device, int cause)\n\nReturns the list of retired pages by source, including pages that are pending retirement The address information provided from this API is the hardware address of the page that was retired. Note that this does not match the virtual address used in CUDA, but will match the address information in Xid 63.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    cause (PageRetirementCause): Filter page addresses by cause of retirement.\n\n.. seealso:: `nvmlDeviceGetRetiredPages`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_313device_get_retired_pages = {"device_get_retired_pages", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_313device_get_retired_pages, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_312device_get_retired_pages};
/* Python-callable wrapper for `device_get_retired_pages`: unpacks the two
 * required arguments (device, cause) from positional/keyword input under
 * either the METH_FASTCALL or tuple-args calling convention and forwards to
 * the pf_ implementation.  Signature and error behaviour are unchanged. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_313device_get_retired_pages(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_cause;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_retired_pages (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_cause,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely().  The generated form
     * `unlikely(__pyx_kwds_len) < 0` expands to
     * `__builtin_expect(!!(len), 0) < 0`, which is 0 or 1 compared with 0 --
     * always false -- so a -1 error return from __Pyx_NumKwargs_FASTCALL
     * was silently swallowed. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22393, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22393, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_retired_pages", 0) < (0)) __PYX_ERR(0, 22393, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_retired_pages", 1, 2, 2, i); __PYX_ERR(0, 22393, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22393, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22393, __pyx_L3_error)
    }
    /* intptr_t device is converted via PyLong_AsSsize_t; -1 doubles as the
     * error sentinel, disambiguated by PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22393, __pyx_L3_error)
    __pyx_v_cause = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_cause == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22393, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_retired_pages", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22393, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_312device_get_retired_pages(__pyx_self, __pyx_v_device, __pyx_v_cause);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch body for `device_get_retired_pages`: calls the
 * cpdef C implementation with skip_dispatch=1 and returns its object result
 * (a view.array of retired-page addresses) directly. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_312device_get_retired_pages(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_cause) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_retired_pages", 0);
  __Pyx_XDECREF(__pyx_r);
  /* An object-returning cfunc signals error with NULL. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages(__pyx_v_device, __pyx_v_cause, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22416
 * 
 * 
 * cpdef int device_get_retired_pages_pending_status(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Check if any pages are pending retirement and need a reboot to fully retire.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_315device_get_retired_pages_pending_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef
 * `device_get_retired_pages_pending_status(device) except? -1`: queries
 * nvmlDeviceGetRetiredPagesPendingStatus with the GIL released, raises via
 * check_status on failure, and returns the _EnableState as an int.
 * -1 is the (ambiguous, `except?`) error sentinel. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_pending_status(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_is_pending;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22428
 *     """
 *     cdef _EnableState is_pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPagesPendingStatus(<Device>device, &is_pending)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22429
 *     cdef _EnableState is_pending
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPagesPendingStatus(<Device>device, &is_pending)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>is_pending
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to act
         * as the "Python exception raised" sentinel for the cy_nvml shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPagesPendingStatus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_is_pending)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22429, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22428
 *     """
 *     cdef _EnableState is_pending
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPagesPendingStatus(<Device>device, &is_pending)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22430
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPagesPendingStatus(<Device>device, &is_pending)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>is_pending
 * 
 */
  /* check_status returns 1 when it has raised a Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22430, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22431
 *         __status__ = nvmlDeviceGetRetiredPagesPendingStatus(<Device>device, &is_pending)
 *     check_status(__status__)
 *     return <int>is_pending             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_is_pending);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22416
 * 
 * 
 * cpdef int device_get_retired_pages_pending_status(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Check if any pages are pending retirement and need a reboot to fully retire.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages_pending_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_315device_get_retired_pages_pending_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_314device_get_retired_pages_pending_status, "device_get_retired_pages_pending_status(intptr_t device) -> int\n\nCheck if any pages are pending retirement and need a reboot to fully retire.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the pending status.\n\n.. seealso:: `nvmlDeviceGetRetiredPagesPendingStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_315device_get_retired_pages_pending_status = {"device_get_retired_pages_pending_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_315device_get_retired_pages_pending_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_314device_get_retired_pages_pending_status};
/* Python-callable wrapper for `device_get_retired_pages_pending_status`:
 * unpacks the single required `device` argument under either calling
 * convention and forwards to the pf_ implementation.  Signature and error
 * behaviour are unchanged. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_315device_get_retired_pages_pending_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_retired_pages_pending_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely().  The generated form
     * `unlikely(__pyx_kwds_len) < 0` expands to
     * `__builtin_expect(!!(len), 0) < 0`, which is 0 or 1 compared with 0 --
     * always false -- so a -1 error return from __Pyx_NumKwargs_FASTCALL
     * was silently swallowed. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22416, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22416, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_retired_pages_pending_status", 0) < (0)) __PYX_ERR(0, 22416, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_retired_pages_pending_status", 1, 1, 1, i); __PYX_ERR(0, 22416, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22416, __pyx_L3_error)
    }
    /* intptr_t device is converted via PyLong_AsSsize_t; -1 doubles as the
     * error sentinel, disambiguated by PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22416, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_retired_pages_pending_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22416, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages_pending_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_314device_get_retired_pages_pending_status(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch body for `device_get_retired_pages_pending_status`:
 * invokes the cpdef C implementation (skip_dispatch=1) and boxes the int
 * pending-status result as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_314device_get_retired_pages_pending_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_retired_pages_pending_status", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is the cfunc's `except? -1` sentinel: only an error when
   * PyErr_Occurred() is also set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_pending_status(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22416, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages_pending_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22434
 * 
 * 
 * cpdef tuple device_get_remapped_rows(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get number of remapped rows. The number of rows reported will be based on the cause of the remapping. isPending indicates whether or not there are pending remappings. A reset will be required to actually remap the row. failureOccurred will be set if a row remapping ever failed in the past. A pending remapping won't affect future work on the GPU since error-containment and dynamic page blacklisting will take care of that.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_317device_get_remapped_rows(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef device_get_remapped_rows().
 * Releases the GIL around nvmlDeviceGetRemappedRows, re-acquires it,
 * validates the NVML status via check_status(), then builds and returns a
 * new 4-tuple of Python ints:
 *   (corr_rows, unc_rows, is_pending, failure_occurred)
 * Returns NULL (0) with a Python exception set on any failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_remapped_rows(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_corr_rows;
  unsigned int __pyx_v_unc_rows;
  unsigned int __pyx_v_is_pending;
  unsigned int __pyx_v_failure_occurred;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_remapped_rows", 0);

  /* "cuda/bindings/_nvml.pyx":22454
 *     cdef unsigned int is_pending
 *     cdef unsigned int failure_occurred
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRemappedRows(<Device>device, &corr_rows, &unc_rows, &is_pending, &failure_occurred)
 *     check_status(__status__)
*/
  /* GIL released for the duration of the driver call; the special
   * _NVMLRETURN_T_INTERNAL_LOADING_ERROR status signals that the binding
   * itself failed to load the symbol (checked with the GIL temporarily
   * re-acquired by __Pyx_ErrOccurredWithGIL). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22455
 *     cdef unsigned int failure_occurred
 *     with nogil:
 *         __status__ = nvmlDeviceGetRemappedRows(<Device>device, &corr_rows, &unc_rows, &is_pending, &failure_occurred)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (corr_rows, unc_rows, is_pending, failure_occurred)
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRemappedRows(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_corr_rows), (&__pyx_v_unc_rows), (&__pyx_v_is_pending), (&__pyx_v_failure_occurred)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22455, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22454
 *     cdef unsigned int is_pending
 *     cdef unsigned int failure_occurred
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRemappedRows(<Device>device, &corr_rows, &unc_rows, &is_pending, &failure_occurred)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22456
 *     with nogil:
 *         __status__ = nvmlDeviceGetRemappedRows(<Device>device, &corr_rows, &unc_rows, &is_pending, &failure_occurred)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (corr_rows, unc_rows, is_pending, failure_occurred)
 * 
*/
  /* check_status() raises the mapped Python exception for non-success
   * NVML status codes (returns 1 on error by convention here). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22456, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22457
 *         __status__ = nvmlDeviceGetRemappedRows(<Device>device, &corr_rows, &unc_rows, &is_pending, &failure_occurred)
 *     check_status(__status__)
 *     return (corr_rows, unc_rows, is_pending, failure_occurred)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Box the four out-parameters and pack them into the result tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_corr_rows); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_unc_rows); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyLong_From_unsigned_int(__pyx_v_is_pending); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int(__pyx_v_failure_occurred); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 22457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = PyTuple_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 22457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 22457, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 22457, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_5) != (0)) __PYX_ERR(0, 22457, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_6) != (0)) __PYX_ERR(0, 22457, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_t_6 = 0;
  __pyx_r = ((PyObject*)__pyx_t_7);
  __pyx_t_7 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22434
 * 
 * 
 * cpdef tuple device_get_remapped_rows(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get number of remapped rows. The number of rows reported will be based on the cause of the remapping. isPending indicates whether or not there are pending remappings. A reset will be required to actually remap the row. failureOccurred will be set if a row remapping ever failed in the past. A pending remapping won't affect future work on the GPU since error-containment and dynamic page blacklisting will take care of that.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_remapped_rows", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_317device_get_remapped_rows(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_316device_get_remapped_rows, "device_get_remapped_rows(intptr_t device) -> tuple\n\nGet number of remapped rows. The number of rows reported will be based on the cause of the remapping. isPending indicates whether or not there are pending remappings. A reset will be required to actually remap the row. failureOccurred will be set if a row remapping ever failed in the past. A pending remapping won't affect future work on the GPU since error-containment and dynamic page blacklisting will take care of that.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 4-tuple containing:\n\n    - unsigned int: Reference for number of rows remapped due to correctable errors.\n    - unsigned int: Reference for number of rows remapped due to uncorrectable errors.\n    - unsigned int: Reference for whether or not remappings are pending.\n    - unsigned int: Reference that is set when a remapping has failed in the past.\n\n.. seealso:: `nvmlDeviceGetRemappedRows`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_317device_get_remapped_rows = {"device_get_remapped_rows", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_317device_get_remapped_rows, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_316device_get_remapped_rows};
/* METH_FASTCALL|METH_KEYWORDS entry point: unpacks the single `device`
 * argument (positional or keyword), converts it to intptr_t, and forwards
 * to the impl slot __pyx_pf_..._316device_get_remapped_rows.
 * Returns NULL with an exception set on bad arguments or downstream error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_317device_get_remapped_rows(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_remapped_rows (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0`, i.e.
     * `(!!(len)) < 0`, which is always false and silently disabled the
     * negative-length error check.  The comparison belongs inside
     * unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22434, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22434, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_remapped_rows", 0) < (0)) __PYX_ERR(0, 22434, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_remapped_rows", 1, 1, 1, i); __PYX_ERR(0, 22434, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22434, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t parsed via PyLong_AsSsize_t; assumes
     * Py_ssize_t and intptr_t share a representation on supported
     * platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22434, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_remapped_rows", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22434, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_remapped_rows", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_316device_get_remapped_rows(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level impl slot for device_get_remapped_rows: delegates to the
 * cpdef C implementation (skip_dispatch=1) and returns its tuple directly.
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_316device_get_remapped_rows(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_remapped_rows", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_remapped_rows(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_remapped_rows", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22460
 * 
 * 
 * cpdef object device_get_row_remapper_histogram(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the row remapper histogram. Returns the remap availability for each bank on the GPU.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_319device_get_row_remapper_histogram(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef device_get_row_remapper_histogram().
 * Allocates a RowRemapperHistogramValues Python wrapper object, obtains the
 * address of its internal nvmlRowRemapperHistogramValues_t buffer via
 * _get_ptr(), fills it with nvmlDeviceGetRowRemapperHistogram while the GIL
 * is released, validates the status, and returns the wrapper object.
 * Returns NULL (0) with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_row_remapper_histogram(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *__pyx_v_values_py = 0;
  nvmlRowRemapperHistogramValues_t *__pyx_v_values;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_row_remapper_histogram", 0);

  /* "cuda/bindings/_nvml.pyx":22471
 *     .. seealso:: `nvmlDeviceGetRowRemapperHistogram`
 *     """
 *     cdef RowRemapperHistogramValues values_py = RowRemapperHistogramValues()             # <<<<<<<<<<<<<<
 *     cdef nvmlRowRemapperHistogramValues_t *values = <nvmlRowRemapperHistogramValues_t *><intptr_t>(values_py._get_ptr())
 *     with nogil:
*/
  /* Instantiate the Python-side result holder via vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22471, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_values_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22472
 *     """
 *     cdef RowRemapperHistogramValues values_py = RowRemapperHistogramValues()
 *     cdef nvmlRowRemapperHistogramValues_t *values = <nvmlRowRemapperHistogramValues_t *><intptr_t>(values_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetRowRemapperHistogram(<Device>device, values)
*/
  /* _get_ptr() returns the buffer address as intptr_t; the C call below
   * writes into storage owned by values_py, so the wrapper must stay alive
   * for the duration of the call (guaranteed: we hold a reference). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)__pyx_v_values_py->__pyx_vtab)->_get_ptr(__pyx_v_values_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22472, __pyx_L1_error)
  __pyx_v_values = ((nvmlRowRemapperHistogramValues_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22473
 *     cdef RowRemapperHistogramValues values_py = RowRemapperHistogramValues()
 *     cdef nvmlRowRemapperHistogramValues_t *values = <nvmlRowRemapperHistogramValues_t *><intptr_t>(values_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRowRemapperHistogram(<Device>device, values)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22474
 *     cdef nvmlRowRemapperHistogramValues_t *values = <nvmlRowRemapperHistogramValues_t *><intptr_t>(values_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetRowRemapperHistogram(<Device>device, values)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return values_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRowRemapperHistogram(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_values); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22474, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22473
 *     cdef RowRemapperHistogramValues values_py = RowRemapperHistogramValues()
 *     cdef nvmlRowRemapperHistogramValues_t *values = <nvmlRowRemapperHistogramValues_t *><intptr_t>(values_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRowRemapperHistogram(<Device>device, values)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22475
 *     with nogil:
 *         __status__ = nvmlDeviceGetRowRemapperHistogram(<Device>device, values)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return values_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22475, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22476
 *         __status__ = nvmlDeviceGetRowRemapperHistogram(<Device>device, values)
 *     check_status(__status__)
 *     return values_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_values_py);
  __pyx_r = ((PyObject *)__pyx_v_values_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22460
 * 
 * 
 * cpdef object device_get_row_remapper_histogram(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the row remapper histogram. Returns the remap availability for each bank on the GPU.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_row_remapper_histogram", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_values_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_319device_get_row_remapper_histogram(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_318device_get_row_remapper_histogram, "device_get_row_remapper_histogram(intptr_t device)\n\nGet the row remapper histogram. Returns the remap availability for each bank on the GPU.\n\nArgs:\n    device (intptr_t): Device handle.\n\nReturns:\n    nvmlRowRemapperHistogramValues_t: Histogram values.\n\n.. seealso:: `nvmlDeviceGetRowRemapperHistogram`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_319device_get_row_remapper_histogram = {"device_get_row_remapper_histogram", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_319device_get_row_remapper_histogram, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_318device_get_row_remapper_histogram};
/* METH_FASTCALL|METH_KEYWORDS entry point: unpacks the single `device`
 * argument (positional or keyword), converts it to intptr_t, and forwards
 * to the impl slot __pyx_pf_..._318device_get_row_remapper_histogram.
 * Returns NULL with an exception set on bad arguments or downstream error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_319device_get_row_remapper_histogram(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_row_remapper_histogram (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0`, i.e.
     * `(!!(len)) < 0`, which is always false and silently disabled the
     * negative-length error check.  The comparison belongs inside
     * unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22460, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22460, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_row_remapper_histogram", 0) < (0)) __PYX_ERR(0, 22460, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_row_remapper_histogram", 1, 1, 1, i); __PYX_ERR(0, 22460, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22460, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t parsed via PyLong_AsSsize_t; assumes
     * Py_ssize_t and intptr_t share a representation on supported
     * platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22460, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_row_remapper_histogram", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22460, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_row_remapper_histogram", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_318device_get_row_remapper_histogram(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level impl slot for device_get_row_remapper_histogram: delegates
 * to the cpdef C implementation (skip_dispatch=1) and returns its wrapper
 * object directly.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_318device_get_row_remapper_histogram(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_row_remapper_histogram", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_row_remapper_histogram(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22460, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_row_remapper_histogram", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22479
 * 
 * 
 * cpdef unsigned int device_get_architecture(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get architecture for device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_321device_get_architecture(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef device_get_architecture() (declared
 * `except? 0` in the .pyx): releases the GIL around
 * nvmlDeviceGetArchitecture, validates the status, and returns the
 * architecture value cast to unsigned int.  On error it returns 0 with a
 * Python exception set; since 0 may also be a legitimate value, callers
 * must pair a 0 result with a PyErr_Occurred() check (see the pf slot). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_architecture(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlDeviceArchitecture_t __pyx_v_arch;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22491
 *     """
 *     cdef nvmlDeviceArchitecture_t arch
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetArchitecture(<Device>device, &arch)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22492
 *     cdef nvmlDeviceArchitecture_t arch
 *     with nogil:
 *         __status__ = nvmlDeviceGetArchitecture(<Device>device, &arch)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <unsigned int>arch
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetArchitecture(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_arch)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22492, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22491
 *     """
 *     cdef nvmlDeviceArchitecture_t arch
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetArchitecture(<Device>device, &arch)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22493
 *     with nogil:
 *         __status__ = nvmlDeviceGetArchitecture(<Device>device, &arch)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <unsigned int>arch
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22493, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22494
 *         __status__ = nvmlDeviceGetArchitecture(<Device>device, &arch)
 *     check_status(__status__)
 *     return <unsigned int>arch             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((unsigned int)__pyx_v_arch);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22479
 * 
 * 
 * cpdef unsigned int device_get_architecture(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get architecture for device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_architecture", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_321device_get_architecture(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_320device_get_architecture, "device_get_architecture(intptr_t device) -> unsigned int\n\nGet architecture for device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Reference where architecture is returned, if call successful. Set to NVML_DEVICE_ARCH_* upon success.\n\n.. seealso:: `nvmlDeviceGetArchitecture`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_321device_get_architecture = {"device_get_architecture", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_321device_get_architecture, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_320device_get_architecture};
/* METH_FASTCALL|METH_KEYWORDS entry point: unpacks the single `device`
 * argument (positional or keyword), converts it to intptr_t, and forwards
 * to the impl slot __pyx_pf_..._320device_get_architecture.
 * Returns NULL with an exception set on bad arguments or downstream error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_321device_get_architecture(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_architecture (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0`, i.e.
     * `(!!(len)) < 0`, which is always false and silently disabled the
     * negative-length error check.  The comparison belongs inside
     * unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22479, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22479, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_architecture", 0) < (0)) __PYX_ERR(0, 22479, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_architecture", 1, 1, 1, i); __PYX_ERR(0, 22479, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22479, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t parsed via PyLong_AsSsize_t; assumes
     * Py_ssize_t and intptr_t share a representation on supported
     * platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22479, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_architecture", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22479, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_architecture", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_320device_get_architecture(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level impl slot for device_get_architecture.  Delegates to the
 * cpdef C implementation (skip_dispatch=1) and boxes the unsigned int
 * result as a Python int.  Because the C function is `except? 0`, a 0
 * return only indicates an error when PyErr_Occurred() is also true. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_320device_get_architecture(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_architecture", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_architecture(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22479, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_architecture", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22497
 * 
 * 
 * cpdef object device_get_clk_mon_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the frequency monitor fault status for the device.
 * 
*/

/* Forward declaration of the METH_FASTCALL-aware Python wrapper, defined below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_323device_get_clk_mon_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `device_get_clk_mon_status`: constructs a
 * Python-level ClkMonStatus holder, passes its underlying
 * nvmlClkMonStatus_t buffer to NVML with the GIL released, raises via
 * check_status() on failure, and returns the populated holder. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_clk_mon_status(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *__pyx_v_status_py = 0;
  nvmlClkMonStatus_t *__pyx_v_status;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_clk_mon_status", 0);

  /* "cuda/bindings/_nvml.pyx":22508
 *     .. seealso:: `nvmlDeviceGetClkMonStatus`
 *     """
 *     cdef ClkMonStatus status_py = ClkMonStatus()             # <<<<<<<<<<<<<<
 *     cdef nvmlClkMonStatus_t *status = <nvmlClkMonStatus_t *><intptr_t>(status_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the ClkMonStatus wrapper via vectorcall with no arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22508, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_status_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22509
 *     """
 *     cdef ClkMonStatus status_py = ClkMonStatus()
 *     cdef nvmlClkMonStatus_t *status = <nvmlClkMonStatus_t *><intptr_t>(status_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetClkMonStatus(<Device>device, status)
 */
  /* Borrow the wrapper's internal buffer address; the wrapper owns the memory. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonStatus *)__pyx_v_status_py->__pyx_vtab)->_get_ptr(__pyx_v_status_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22509, __pyx_L1_error)
  __pyx_v_status = ((nvmlClkMonStatus_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22510
 *     cdef ClkMonStatus status_py = ClkMonStatus()
 *     cdef nvmlClkMonStatus_t *status = <nvmlClkMonStatus_t *><intptr_t>(status_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClkMonStatus(<Device>device, status)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22511
 *     cdef nvmlClkMonStatus_t *status = <nvmlClkMonStatus_t *><intptr_t>(status_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetClkMonStatus(<Device>device, status)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return status_py
 */
        /* The internal-loading-error sentinel means the NVML shim may have
         * raised a Python exception; re-acquire the GIL to check. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClkMonStatus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_status); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22511, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22510
 *     cdef ClkMonStatus status_py = ClkMonStatus()
 *     cdef nvmlClkMonStatus_t *status = <nvmlClkMonStatus_t *><intptr_t>(status_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetClkMonStatus(<Device>device, status)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22512
 *     with nogil:
 *         __status__ = nvmlDeviceGetClkMonStatus(<Device>device, status)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return status_py
 * 
 */
  /* check_status returns 1 when it raised a Python exception. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22512, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22513
 *         __status__ = nvmlDeviceGetClkMonStatus(<Device>device, status)
 *     check_status(__status__)
 *     return status_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_status_py);
  __pyx_r = ((PyObject *)__pyx_v_status_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22497
 * 
 * 
 * cpdef object device_get_clk_mon_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the frequency monitor fault status for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clk_mon_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_status_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering the fastcall
 * wrapper below as the module-level `device_get_clk_mon_status`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_323device_get_clk_mon_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_322device_get_clk_mon_status, "device_get_clk_mon_status(intptr_t device)\n\nRetrieves the frequency monitor fault status for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlClkMonStatus_t: Reference in which to return the clkmon fault status.\n\n.. seealso:: `nvmlDeviceGetClkMonStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_323device_get_clk_mon_status = {"device_get_clk_mon_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_323device_get_clk_mon_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_322device_get_clk_mon_status};
/* Python-callable wrapper for `device_get_clk_mon_status(device)`: unpacks
 * positional/keyword arguments, converts `device` to intptr_t, and forwards
 * to the dispatch slot.  Raises TypeError on bad arity/keywords. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_323device_get_clk_mon_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_clk_mon_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison was outside the macro — `unlikely(x) < 0` is
     * always false because unlikely() expands to __builtin_expect(!!(x), 0),
     * which only yields 0 or 1.  Move `< 0` inside so a negative keyword
     * count (an error from __Pyx_NumKwargs_FASTCALL) is actually caught. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22497, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22497, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_clk_mon_status", 0) < (0)) __PYX_ERR(0, 22497, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_clk_mon_status", 1, 1, 1, i); __PYX_ERR(0, 22497, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22497, __pyx_L3_error)
    }
    /* NOTE(review): conversion goes through PyLong_AsSsize_t — assumes
     * sizeof(Py_ssize_t) == sizeof(intptr_t), true on all supported targets. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22497, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_clk_mon_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22497, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clk_mon_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_322device_get_clk_mon_status(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch slot for device_get_clk_mon_status: forwards to the
 * C-level cpdef implementation, which already returns an owned PyObject*. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_322device_get_clk_mon_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_clk_mon_status", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_clk_mon_status(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_clk_mon_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22516
 * 
 * 
 * cpdef object device_get_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and process ID.
 * 
*/

/* Forward declaration of the METH_FASTCALL-aware Python wrapper, defined below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_325device_get_process_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `device_get_process_utilization`.  Two-phase
 * NVML query: first call with a NULL buffer to learn the sample count, then
 * allocate a ProcessUtilizationSample holder of that size and call again to
 * fill it.  Both driver calls run with the GIL released.  If the reported
 * count is zero the empty holder is returned without a second call. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_process_utilization(intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_process_samples_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v_utilization = 0;
  nvmlProcessUtilizationSample_t *__pyx_v_utilization_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_process_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":22525
 *     .. seealso:: `nvmlDeviceGetProcessUtilization`
 *     """
 *     cdef unsigned int[1] process_samples_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, NULL, <unsigned int*>process_samples_count, last_seen_time_stamp)
 */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_process_samples_count[0]), __pyx_t_1, sizeof(__pyx_v_process_samples_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":22526
 *     """
 *     cdef unsigned int[1] process_samples_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, NULL, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status_size(__status__)
 */
  /* Phase 1: size query (NULL buffer) with the GIL released. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22527
 *     cdef unsigned int[1] process_samples_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, NULL, <unsigned int*>process_samples_count, last_seen_time_stamp)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef ProcessUtilizationSample utilization = ProcessUtilizationSample(process_samples_count[0])
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), NULL, ((unsigned int *)__pyx_v_process_samples_count), __pyx_v_last_seen_time_stamp); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22527, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":22526
 *     """
 *     cdef unsigned int[1] process_samples_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, NULL, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22528
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, NULL, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef ProcessUtilizationSample utilization = ProcessUtilizationSample(process_samples_count[0])
 *     cdef nvmlProcessUtilizationSample_t *utilization_ptr = <nvmlProcessUtilizationSample_t *><intptr_t>(utilization._get_ptr())
 */
  /* check_status_size presumably tolerates the size-query status (e.g.
   * insufficient-size) that check_status would reject — TODO confirm against
   * its definition elsewhere in this module. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 22528, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22529
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, NULL, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status_size(__status__)
 *     cdef ProcessUtilizationSample utilization = ProcessUtilizationSample(process_samples_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessUtilizationSample_t *utilization_ptr = <nvmlProcessUtilizationSample_t *><intptr_t>(utilization._get_ptr())
 *     if process_samples_count[0] == 0:
 */
  /* Allocate a wrapper sized to the sample count NVML just reported. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_process_samples_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 22529, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22529, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_utilization = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":22530
 *     check_status_size(__status__)
 *     cdef ProcessUtilizationSample utilization = ProcessUtilizationSample(process_samples_count[0])
 *     cdef nvmlProcessUtilizationSample_t *utilization_ptr = <nvmlProcessUtilizationSample_t *><intptr_t>(utilization._get_ptr())             # <<<<<<<<<<<<<<
 *     if process_samples_count[0] == 0:
 *         return utilization
 */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v_utilization->__pyx_vtab)->_get_ptr(__pyx_v_utilization); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22530, __pyx_L1_error)
  __pyx_v_utilization_ptr = ((nvmlProcessUtilizationSample_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":22531
 *     cdef ProcessUtilizationSample utilization = ProcessUtilizationSample(process_samples_count[0])
 *     cdef nvmlProcessUtilizationSample_t *utilization_ptr = <nvmlProcessUtilizationSample_t *><intptr_t>(utilization._get_ptr())
 *     if process_samples_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return utilization
 *     with nogil:
 */
  /* No samples since last_seen_time_stamp: skip phase 2, return empty holder. */
  __pyx_t_9 = ((__pyx_v_process_samples_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":22532
 *     cdef nvmlProcessUtilizationSample_t *utilization_ptr = <nvmlProcessUtilizationSample_t *><intptr_t>(utilization._get_ptr())
 *     if process_samples_count[0] == 0:
 *         return utilization             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, utilization_ptr, <unsigned int*>process_samples_count, last_seen_time_stamp)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_utilization);
    __pyx_r = ((PyObject *)__pyx_v_utilization);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":22531
 *     cdef ProcessUtilizationSample utilization = ProcessUtilizationSample(process_samples_count[0])
 *     cdef nvmlProcessUtilizationSample_t *utilization_ptr = <nvmlProcessUtilizationSample_t *><intptr_t>(utilization._get_ptr())
 *     if process_samples_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return utilization
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":22533
 *     if process_samples_count[0] == 0:
 *         return utilization
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, utilization_ptr, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status(__status__)
 */
  /* Phase 2: fill the allocated buffer, again with the GIL released. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22534
 *         return utilization
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, utilization_ptr, <unsigned int*>process_samples_count, last_seen_time_stamp)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return utilization
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_utilization_ptr, ((unsigned int *)__pyx_v_process_samples_count), __pyx_v_last_seen_time_stamp); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22534, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":22533
 *     if process_samples_count[0] == 0:
 *         return utilization
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, utilization_ptr, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22535
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, utilization_ptr, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return utilization
 * 
 */
  /* check_status returns 1 when it raised a Python exception. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 22535, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22536
 *         __status__ = nvmlDeviceGetProcessUtilization(<Device>device, utilization_ptr, <unsigned int*>process_samples_count, last_seen_time_stamp)
 *     check_status(__status__)
 *     return utilization             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_utilization);
  __pyx_r = ((PyObject *)__pyx_v_utilization);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22516
 * 
 * 
 * cpdef object device_get_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and process ID.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_process_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_utilization);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering the fastcall
 * wrapper below as the module-level `device_get_process_utilization`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_325device_get_process_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_324device_get_process_utilization, "device_get_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp)\n\nRetrieves the current utilization and process ID.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    last_seen_time_stamp (unsigned long long): Pointer to caller-supplied buffer in which guest process utilization samples are returned.\n\n.. seealso:: `nvmlDeviceGetProcessUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_325device_get_process_utilization = {"device_get_process_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_325device_get_process_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_324device_get_process_utilization};
/* Python-callable wrapper for `device_get_process_utilization(device,
 * last_seen_time_stamp)`: unpacks positional/keyword arguments, converts
 * them to intptr_t / unsigned long long, and forwards to the dispatch slot.
 * Raises TypeError on bad arity/keywords. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_325device_get_process_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_process_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison was outside the macro — `unlikely(x) < 0` is
     * always false because unlikely() expands to __builtin_expect(!!(x), 0),
     * which only yields 0 or 1.  Move `< 0` inside so a negative keyword
     * count (an error from __Pyx_NumKwargs_FASTCALL) is actually caught. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22516, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22516, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22516, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_process_utilization", 0) < (0)) __PYX_ERR(0, 22516, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_process_utilization", 1, 2, 2, i); __PYX_ERR(0, 22516, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22516, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22516, __pyx_L3_error)
    }
    /* NOTE(review): conversion goes through PyLong_AsSsize_t — assumes
     * sizeof(Py_ssize_t) == sizeof(intptr_t), true on all supported targets. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22516, __pyx_L3_error)
    __pyx_v_last_seen_time_stamp = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v_last_seen_time_stamp == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22516, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_process_utilization", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22516, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_process_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_324device_get_process_utilization(__pyx_self, __pyx_v_device, __pyx_v_last_seen_time_stamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch slot for device_get_process_utilization: forwards to
 * the C-level cpdef implementation, which already returns an owned PyObject*. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_324device_get_process_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_process_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_process_utilization(__pyx_v_device, __pyx_v_last_seen_time_stamp, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_process_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22539
 * 
 * 
 * cpdef object device_get_platform_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get platform information of this device.
 * 
*/

/* Forward declaration of the METH_FASTCALL-aware Python wrapper for
 * device_get_platform_info (definition follows later in the file). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_327device_get_platform_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function `device_get_platform_info`.
 * Allocates a PlatformInfo_v2 Python wrapper object, sets the `version`
 * field of the nvmlPlatformInfo_t struct it owns, calls
 * nvmlDeviceGetPlatformInfo with the GIL released, checks the NVML status,
 * and returns the wrapper (new reference) or NULL with an exception set.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_platform_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *__pyx_v_platform_info_py = 0;
  nvmlPlatformInfo_t *__pyx_v_platform_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_platform_info", 0);

  /* "cuda/bindings/_nvml.pyx":22550
 *     .. seealso:: `nvmlDeviceGetPlatformInfo`
 *     """
 *     cdef PlatformInfo_v2 platform_info_py = PlatformInfo_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlPlatformInfo_t *platform_info = <nvmlPlatformInfo_t *><intptr_t>(platform_info_py._get_ptr())
 *     platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)
*/
  /* Instantiate PlatformInfo_v2() via a zero-argument vectorcall on the type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22550, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_platform_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22551
 *     """
 *     cdef PlatformInfo_v2 platform_info_py = PlatformInfo_v2()
 *     cdef nvmlPlatformInfo_t *platform_info = <nvmlPlatformInfo_t *><intptr_t>(platform_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)
 *     with nogil:
*/
  /* _get_ptr() yields the address of the struct owned by the Python wrapper;
   * the struct's lifetime is tied to platform_info_py. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)__pyx_v_platform_info_py->__pyx_vtab)->_get_ptr(__pyx_v_platform_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22551, __pyx_L1_error)
  __pyx_v_platform_info = ((nvmlPlatformInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22552
 *     cdef PlatformInfo_v2 platform_info_py = PlatformInfo_v2()
 *     cdef nvmlPlatformInfo_t *platform_info = <nvmlPlatformInfo_t *><intptr_t>(platform_info_py._get_ptr())
 *     platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPlatformInfo(<Device>device, platform_info)
*/
  /* 0x2000000 == (2 << 24): NVML versioned-struct encoding (v2 of the struct). */
  __pyx_v_platform_info->version = ((sizeof(nvmlPlatformInfo_v2_t)) | 0x2000000);

  /* "cuda/bindings/_nvml.pyx":22553
 *     cdef nvmlPlatformInfo_t *platform_info = <nvmlPlatformInfo_t *><intptr_t>(platform_info_py._get_ptr())
 *     platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPlatformInfo(<Device>device, platform_info)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22554
 *     platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetPlatformInfo(<Device>device, platform_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return platform_info_py
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR combined with
         * __Pyx_ErrOccurredWithGIL() detects a failed lazy load of the NVML
         * symbol with a Python error already set. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPlatformInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_platform_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22554, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22553
 *     cdef nvmlPlatformInfo_t *platform_info = <nvmlPlatformInfo_t *><intptr_t>(platform_info_py._get_ptr())
 *     platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPlatformInfo(<Device>device, platform_info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: reacquire the GIL before unwinding. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22555
 *     with nogil:
 *         __status__ = nvmlDeviceGetPlatformInfo(<Device>device, platform_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return platform_info_py
 * 
*/
  /* check_status (defined elsewhere in this module) returns 1 when it has
   * flagged the NVML status as an error. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22555, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22556
 *         __status__ = nvmlDeviceGetPlatformInfo(<Device>device, platform_info)
 *     check_status(__status__)
 *     return platform_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_platform_info_py);
  __pyx_r = ((PyObject *)__pyx_v_platform_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22539
 * 
 * 
 * cpdef object device_get_platform_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get platform information of this device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_platform_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_platform_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper for `device_get_platform_info`: parses one positional-or-
 * keyword argument (`device`), converts it to intptr_t, and forwards to the
 * __pyx_pf_ thunk.
 *
 * FIX(review): the kwargs-length error check below originally read
 * `if (unlikely(__pyx_kwds_len) < 0)`.  Since `unlikely(x)` expands to
 * `__builtin_expect(!!(x), 0)` and therefore yields only 0 or 1, that
 * comparison was always false, making the negative-length error path dead
 * code.  The parenthesis is moved so the comparison is inside the macro. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_327device_get_platform_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_326device_get_platform_info, "device_get_platform_info(intptr_t device)\n\nGet platform information of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlPlatformInfo_v2_t: Pointer to the caller-provided structure of nvmlPlatformInfo_t.\n\n.. seealso:: `nvmlDeviceGetPlatformInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_327device_get_platform_info = {"device_get_platform_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_327device_get_platform_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_326device_get_platform_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_327device_get_platform_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_platform_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: compare inside the macro so a negative
     * (error) length from __Pyx_NumKwargs_FASTCALL is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22539, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22539, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_platform_info", 0) < (0)) __PYX_ERR(0, 22539, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_platform_info", 1, 1, 1, i); __PYX_ERR(0, 22539, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22539, __pyx_L3_error)
    }
    /* Convert the `device` argument (a Python int holding a handle) to intptr_t. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22539, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_platform_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22539, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_platform_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_326device_get_platform_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level thunk for `device_get_platform_info`: forwards to the C
 * implementation with __pyx_skip_dispatch=1 and, on a NULL result, records
 * this frame in the traceback and propagates the pending exception.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_326device_get_platform_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_platform_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C implementation. */
  result = __pyx_f_4cuda_8bindings_5_nvml_device_get_platform_info(__pyx_v_device, 1);
  if (unlikely(!result)) __PYX_ERR(0, 22539, __pyx_L1_error)
  __Pyx_GOTREF(result);
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: add traceback context and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_platform_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22559
 * 
 * 
 * cpdef unit_set_led_state(intptr_t unit, int color):             # <<<<<<<<<<<<<<
 *     """Set the LED state for the unit. The LED can be either green (0) or amber (1).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_329unit_set_led_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function `unit_set_led_state`.
 * Calls nvmlUnitSetLedState with the GIL released, then validates the NVML
 * return code via check_status.  Returns None on success, NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_set_led_state(intptr_t __pyx_v_unit, int __pyx_v_color, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_set_led_state", 0);

  /* "cuda/bindings/_nvml.pyx":22568
 *     .. seealso:: `nvmlUnitSetLedState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitSetLedState(<Unit>unit, <_LedColor>color)
 *     check_status(__status__)
*/
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22569
 *     """
 *     with nogil:
 *         __status__ = nvmlUnitSetLedState(<Unit>unit, <_LedColor>color)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR + a pending Python
         * error indicates the NVML symbol failed to load lazily. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitSetLedState(((__pyx_t_4cuda_8bindings_5_nvml_Unit)__pyx_v_unit), ((__pyx_t_4cuda_8bindings_5_nvml__LedColor)__pyx_v_color)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22569, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22568
 *     .. seealso:: `nvmlUnitSetLedState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitSetLedState(<Unit>unit, <_LedColor>color)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: reacquire the GIL before unwinding. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22570
 *     with nogil:
 *         __status__ = nvmlUnitSetLedState(<Unit>unit, <_LedColor>color)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 when it has flagged the status as an error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22570, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22559
 * 
 * 
 * cpdef unit_set_led_state(intptr_t unit, int color):             # <<<<<<<<<<<<<<
 *     """Set the LED state for the unit. The LED can be either green (0) or amber (1).
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_set_led_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper for `unit_set_led_state`: parses two positional-or-keyword
 * arguments (`unit`, `color`), converts them to intptr_t / int, and forwards
 * to the __pyx_pf_ thunk.
 *
 * FIX(review): the kwargs-length error check originally read
 * `if (unlikely(__pyx_kwds_len) < 0)`; `unlikely(x)` yields only 0 or 1, so
 * the comparison was always false and the error path was dead code.  The
 * parenthesis is moved so the comparison is inside the macro. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_329unit_set_led_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_328unit_set_led_state, "unit_set_led_state(intptr_t unit, int color)\n\nSet the LED state for the unit. The LED can be either green (0) or amber (1).\n\nArgs:\n    unit (intptr_t): The identifier of the target unit.\n    color (LedColor): The target LED color.\n\n.. seealso:: `nvmlUnitSetLedState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_329unit_set_led_state = {"unit_set_led_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_329unit_set_led_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_328unit_set_led_state};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_329unit_set_led_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  int __pyx_v_color;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_set_led_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,&__pyx_mstate_global->__pyx_n_u_color,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: detect a negative (error) kwargs length. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22559, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22559, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22559, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_set_led_state", 0) < (0)) __PYX_ERR(0, 22559, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_set_led_state", 1, 2, 2, i); __PYX_ERR(0, 22559, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22559, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22559, __pyx_L3_error)
    }
    /* Convert the parsed Python ints to the C argument types. */
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22559, __pyx_L3_error)
    __pyx_v_color = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_color == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22559, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_set_led_state", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22559, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_set_led_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_328unit_set_led_state(__pyx_self, __pyx_v_unit, __pyx_v_color);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level thunk for `unit_set_led_state`: forwards to the C
 * implementation with __pyx_skip_dispatch=1 and, on a NULL result, records
 * this frame in the traceback and propagates the pending exception.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_328unit_set_led_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit, int __pyx_v_color) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_set_led_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C implementation. */
  result = __pyx_f_4cuda_8bindings_5_nvml_unit_set_led_state(__pyx_v_unit, __pyx_v_color, 1);
  if (unlikely(!result)) __PYX_ERR(0, 22559, __pyx_L1_error)
  __Pyx_GOTREF(result);
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: add traceback context and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_set_led_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22573
 * 
 * 
 * cpdef device_set_persistence_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Set the persistence mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_331device_set_persistence_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function `device_set_persistence_mode`.
 * Calls nvmlDeviceSetPersistenceMode with the GIL released, then validates
 * the NVML return code via check_status.  Returns None on success, NULL
 * with a Python exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_persistence_mode(intptr_t __pyx_v_device, int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_persistence_mode", 0);

  /* "cuda/bindings/_nvml.pyx":22582
 *     .. seealso:: `nvmlDeviceSetPersistenceMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPersistenceMode(<Device>device, <_EnableState>mode)
 *     check_status(__status__)
*/
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22583
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetPersistenceMode(<Device>device, <_EnableState>mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR + a pending Python
         * error indicates the NVML symbol failed to load lazily. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPersistenceMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22583, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22582
 *     .. seealso:: `nvmlDeviceSetPersistenceMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPersistenceMode(<Device>device, <_EnableState>mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: reacquire the GIL before unwinding. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22584
 *     with nogil:
 *         __status__ = nvmlDeviceSetPersistenceMode(<Device>device, <_EnableState>mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 when it has flagged the status as an error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22584, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22573
 * 
 * 
 * cpdef device_set_persistence_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Set the persistence mode for the device.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_persistence_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper for `device_set_persistence_mode`: parses two positional-
 * or-keyword arguments (`device`, `mode`), converts them to intptr_t / int,
 * and forwards to the __pyx_pf_ thunk.
 *
 * FIX(review): the kwargs-length error check originally read
 * `if (unlikely(__pyx_kwds_len) < 0)`; `unlikely(x)` yields only 0 or 1, so
 * the comparison was always false and the error path was dead code.  The
 * parenthesis is moved so the comparison is inside the macro. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_331device_set_persistence_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_330device_set_persistence_mode, "device_set_persistence_mode(intptr_t device, int mode)\n\nSet the persistence mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    mode (EnableState): The target persistence mode.\n\n.. seealso:: `nvmlDeviceSetPersistenceMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_331device_set_persistence_mode = {"device_set_persistence_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_331device_set_persistence_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_330device_set_persistence_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_331device_set_persistence_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_persistence_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: detect a negative (error) kwargs length. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22573, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22573, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22573, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_persistence_mode", 0) < (0)) __PYX_ERR(0, 22573, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_persistence_mode", 1, 2, 2, i); __PYX_ERR(0, 22573, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22573, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22573, __pyx_L3_error)
    }
    /* Convert the parsed Python ints to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22573, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22573, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_persistence_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22573, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_persistence_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_330device_set_persistence_mode(__pyx_self, __pyx_v_device, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level thunk for `device_set_persistence_mode`: forwards to the C
 * implementation with __pyx_skip_dispatch=1 and, on a NULL result, records
 * this frame in the traceback and propagates the pending exception.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_330device_set_persistence_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_persistence_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C implementation. */
  result = __pyx_f_4cuda_8bindings_5_nvml_device_set_persistence_mode(__pyx_v_device, __pyx_v_mode, 1);
  if (unlikely(!result)) __PYX_ERR(0, 22573, __pyx_L1_error)
  __Pyx_GOTREF(result);
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: add traceback context and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_persistence_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22587
 * 
 * 
 * cpdef device_set_compute_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Set the compute mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_333device_set_compute_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function `device_set_compute_mode`.
 * Calls nvmlDeviceSetComputeMode with the GIL released, then validates the
 * NVML return code via check_status.  Returns None on success, NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_compute_mode(intptr_t __pyx_v_device, int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_compute_mode", 0);

  /* "cuda/bindings/_nvml.pyx":22596
 *     .. seealso:: `nvmlDeviceSetComputeMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetComputeMode(<Device>device, <_ComputeMode>mode)
 *     check_status(__status__)
*/
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22597
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetComputeMode(<Device>device, <_ComputeMode>mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR + a pending Python
         * error indicates the NVML symbol failed to load lazily. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetComputeMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__ComputeMode)__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22597, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22596
 *     .. seealso:: `nvmlDeviceSetComputeMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetComputeMode(<Device>device, <_ComputeMode>mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: reacquire the GIL before unwinding. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22598
 *     with nogil:
 *         __status__ = nvmlDeviceSetComputeMode(<Device>device, <_ComputeMode>mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 when it has flagged the status as an error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22598, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22587
 * 
 * 
 * cpdef device_set_compute_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Set the compute mode for the device.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_compute_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_333device_set_compute_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_332device_set_compute_mode, "device_set_compute_mode(intptr_t device, int mode)\n\nSet the compute mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    mode (ComputeMode): The target compute mode.\n\n.. seealso:: `nvmlDeviceSetComputeMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_333device_set_compute_mode = {"device_set_compute_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_333device_set_compute_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_332device_set_compute_mode};
/* Python-callable wrapper for device_set_compute_mode(device, mode).
 * Unpacks positional/keyword arguments (FASTCALL or tuple protocol depending
 * on CYTHON_METH_FASTCALL), converts them to C types, and forwards to the
 * __pyx_pf_ implementation.
 *
 * Fix vs. generated code: the keyword-count error check read
 * `if (unlikely(__pyx_kwds_len) < 0)`. Since unlikely(x) expands to
 * __builtin_expect(!!(x), 0), which yields only 0 or 1, that comparison was
 * always false and a negative (error) result from __Pyx_NumKwargs_FASTCALL
 * was silently ignored. The comparison now sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_333device_set_compute_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Borrowed-or-new references to the two arguments; released on every exit path. */
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_compute_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form
     * `unlikely(__pyx_kwds_len) < 0` could never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22587, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect the positionals given, then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22587, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22587, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_compute_mode", 0) < (0)) __PYX_ERR(0, 22587, __pyx_L3_error)
      /* Any slot still empty means a required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_compute_mode", 1, 2, 2, i); __PYX_ERR(0, 22587, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22587, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22587, __pyx_L3_error)
    }
    /* Convert: device handle arrives as a Python int holding a pointer value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22587, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22587, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_compute_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22587, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_compute_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_332device_set_compute_mode(__pyx_self, __pyx_v_device, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for the device_set_compute_mode wrapper: forwards the
 * already-converted C arguments to the cpdef C entry point
 * (__pyx_f_..., with __pyx_skip_dispatch=1 to bypass Python-level dispatch)
 * and converts a NULL result into a traceback + NULL return. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_332device_set_compute_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_compute_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL from the cpdef function signals a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_compute_mode(__pyx_v_device, __pyx_v_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_compute_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22601
 * 
 * 
 * cpdef device_set_ecc_mode(intptr_t device, int ecc):             # <<<<<<<<<<<<<<
 *     """Set the ECC mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_335device_set_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for cpdef device_set_ecc_mode(intptr_t device, int ecc).
 * Releases the GIL (PyEval_SaveThread) around the nvmlDeviceSetEccMode call,
 * restores it afterwards, then raises via check_status() if the NVML return
 * code indicates failure. Returns Py_None on success, 0/NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_ecc_mode(intptr_t __pyx_v_device, int __pyx_v_ecc, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_ecc_mode", 0);

  /* "cuda/bindings/_nvml.pyx":22610
 *     .. seealso:: `nvmlDeviceSetEccMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetEccMode(<Device>device, <_EnableState>ecc)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* GIL released here; re-acquired in the finally block below on both the
       * normal and error paths. No Python API may be touched in between. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22611
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetEccMode(<Device>device, <_EnableState>ecc)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* NOTE(review): the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel plus
         * a pending Python error appears to signal a failure inside the
         * cy_nvml dispatch layer itself — confirm against cy_nvml. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetEccMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_ecc)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22611, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22610
 *     .. seealso:: `nvmlDeviceSetEccMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetEccMode(<Device>device, <_EnableState>ecc)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22612
 *     with nogil:
 *         __status__ = nvmlDeviceSetEccMode(<Device>device, <_EnableState>ecc)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22612, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22601
 * 
 * 
 * cpdef device_set_ecc_mode(intptr_t device, int ecc):             # <<<<<<<<<<<<<<
 *     """Set the ECC mode for the device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_335device_set_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_334device_set_ecc_mode, "device_set_ecc_mode(intptr_t device, int ecc)\n\nSet the ECC mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    ecc (EnableState): The target ECC mode.\n\n.. seealso:: `nvmlDeviceSetEccMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_335device_set_ecc_mode = {"device_set_ecc_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_335device_set_ecc_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_334device_set_ecc_mode};
/* Python-callable wrapper for device_set_ecc_mode(device, ecc).
 * Unpacks positional/keyword arguments, converts them to C types, and
 * forwards to the __pyx_pf_ implementation.
 *
 * Fix vs. generated code: the keyword-count error check read
 * `if (unlikely(__pyx_kwds_len) < 0)`; unlikely() yields only 0/1, so the
 * check was dead and a negative (error) result from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_335device_set_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_ecc;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_ecc_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_ecc,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form could
     * never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22601, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22601, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22601, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_ecc_mode", 0) < (0)) __PYX_ERR(0, 22601, __pyx_L3_error)
      /* Any slot still empty means a required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_ecc_mode", 1, 2, 2, i); __PYX_ERR(0, 22601, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22601, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22601, __pyx_L3_error)
    }
    /* Convert: device handle arrives as a Python int holding a pointer value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22601, __pyx_L3_error)
    __pyx_v_ecc = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_ecc == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22601, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_ecc_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22601, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_334device_set_ecc_mode(__pyx_self, __pyx_v_device, __pyx_v_ecc);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for the device_set_ecc_mode wrapper: forwards the
 * converted C arguments to the cpdef C entry point (__pyx_skip_dispatch=1)
 * and converts a NULL result into a traceback + NULL return. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_334device_set_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_ecc) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_ecc_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL from the cpdef function signals a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_ecc_mode(__pyx_v_device, __pyx_v_ecc, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22601, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22615
 * 
 * 
 * cpdef device_clear_ecc_error_counts(intptr_t device, int counter_type):             # <<<<<<<<<<<<<<
 *     """Clear the ECC error and other memory error counts for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_337device_clear_ecc_error_counts(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for cpdef device_clear_ecc_error_counts(intptr_t device,
 * int counter_type). Releases the GIL around the
 * nvmlDeviceClearEccErrorCounts call, restores it afterwards, then raises
 * via check_status() on a bad NVML status. Returns Py_None on success,
 * 0/NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_ecc_error_counts(intptr_t __pyx_v_device, int __pyx_v_counter_type, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_ecc_error_counts", 0);

  /* "cuda/bindings/_nvml.pyx":22624
 *     .. seealso:: `nvmlDeviceClearEccErrorCounts`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearEccErrorCounts(<Device>device, <_EccCounterType>counter_type)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* GIL released here; re-acquired in the finally block below on both the
       * normal and error paths. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22625
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceClearEccErrorCounts(<Device>device, <_EccCounterType>counter_type)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* NOTE(review): the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel plus
         * a pending Python error appears to signal a failure in the cy_nvml
         * dispatch layer itself — confirm against cy_nvml. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearEccErrorCounts(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EccCounterType)__pyx_v_counter_type)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22625, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22624
 *     .. seealso:: `nvmlDeviceClearEccErrorCounts`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearEccErrorCounts(<Device>device, <_EccCounterType>counter_type)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22626
 *     with nogil:
 *         __status__ = nvmlDeviceClearEccErrorCounts(<Device>device, <_EccCounterType>counter_type)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22626, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22615
 * 
 * 
 * cpdef device_clear_ecc_error_counts(intptr_t device, int counter_type):             # <<<<<<<<<<<<<<
 *     """Clear the ECC error and other memory error counts for the device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_ecc_error_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_337device_clear_ecc_error_counts(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_336device_clear_ecc_error_counts, "device_clear_ecc_error_counts(intptr_t device, int counter_type)\n\nClear the ECC error and other memory error counts for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    counter_type (EccCounterType): Flag that indicates which type of errors should be cleared.\n\n.. seealso:: `nvmlDeviceClearEccErrorCounts`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_337device_clear_ecc_error_counts = {"device_clear_ecc_error_counts", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_337device_clear_ecc_error_counts, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_336device_clear_ecc_error_counts};
/* Python-callable wrapper for device_clear_ecc_error_counts(device,
 * counter_type). Unpacks positional/keyword arguments, converts them to C
 * types, and forwards to the __pyx_pf_ implementation.
 *
 * Fix vs. generated code: the keyword-count error check read
 * `if (unlikely(__pyx_kwds_len) < 0)`; unlikely() yields only 0/1, so the
 * check was dead and a negative (error) result from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_337device_clear_ecc_error_counts(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_counter_type;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_clear_ecc_error_counts (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_counter_type,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form could
     * never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22615, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22615, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22615, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_clear_ecc_error_counts", 0) < (0)) __PYX_ERR(0, 22615, __pyx_L3_error)
      /* Any slot still empty means a required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_clear_ecc_error_counts", 1, 2, 2, i); __PYX_ERR(0, 22615, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22615, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22615, __pyx_L3_error)
    }
    /* Convert: device handle arrives as a Python int holding a pointer value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22615, __pyx_L3_error)
    __pyx_v_counter_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_counter_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22615, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_clear_ecc_error_counts", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22615, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_ecc_error_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_336device_clear_ecc_error_counts(__pyx_self, __pyx_v_device, __pyx_v_counter_type);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for the device_clear_ecc_error_counts wrapper:
 * forwards the converted C arguments to the cpdef C entry point
 * (__pyx_skip_dispatch=1) and converts a NULL result into a traceback +
 * NULL return. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_336device_clear_ecc_error_counts(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_counter_type) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_ecc_error_counts", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL from the cpdef function signals a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_clear_ecc_error_counts(__pyx_v_device, __pyx_v_counter_type, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_ecc_error_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22629
 * 
 * 
 * cpdef device_set_driver_model(intptr_t device, int driver_model, unsigned int flags):             # <<<<<<<<<<<<<<
 *     """Set the driver model for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_339device_set_driver_model(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for cpdef device_set_driver_model(intptr_t device,
 * int driver_model, unsigned int flags). Releases the GIL around the
 * nvmlDeviceSetDriverModel call, restores it afterwards, then raises via
 * check_status() on a bad NVML status. Returns Py_None on success, 0/NULL
 * on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_driver_model(intptr_t __pyx_v_device, int __pyx_v_driver_model, unsigned int __pyx_v_flags, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_driver_model", 0);

  /* "cuda/bindings/_nvml.pyx":22639
 *     .. seealso:: `nvmlDeviceSetDriverModel`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDriverModel(<Device>device, <_DriverModel>driver_model, flags)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* GIL released here; re-acquired in the finally block below on both the
       * normal and error paths. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22640
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetDriverModel(<Device>device, <_DriverModel>driver_model, flags)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* NOTE(review): the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel plus
         * a pending Python error appears to signal a failure in the cy_nvml
         * dispatch layer itself — confirm against cy_nvml. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDriverModel(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__DriverModel)__pyx_v_driver_model), __pyx_v_flags); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22640, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22639
 *     .. seealso:: `nvmlDeviceSetDriverModel`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDriverModel(<Device>device, <_DriverModel>driver_model, flags)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22641
 *     with nogil:
 *         __status__ = nvmlDeviceSetDriverModel(<Device>device, <_DriverModel>driver_model, flags)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22641, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22629
 * 
 * 
 * cpdef device_set_driver_model(intptr_t device, int driver_model, unsigned int flags):             # <<<<<<<<<<<<<<
 *     """Set the driver model for the device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_driver_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_339device_set_driver_model(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_338device_set_driver_model, "device_set_driver_model(intptr_t device, int driver_model, unsigned int flags)\n\nSet the driver model for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    driver_model (DriverModel): The target driver model.\n    flags (unsigned int): Flags that change the default behavior.\n\n.. seealso:: `nvmlDeviceSetDriverModel`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_339device_set_driver_model = {"device_set_driver_model", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_339device_set_driver_model, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_338device_set_driver_model};
/* Python-callable wrapper for device_set_driver_model(device, driver_model,
 * flags). Unpacks positional/keyword arguments, converts them to C types,
 * and forwards to the __pyx_pf_ implementation.
 *
 * Fix vs. generated code: the keyword-count error check read
 * `if (unlikely(__pyx_kwds_len) < 0)`; unlikely() yields only 0/1, so the
 * check was dead and a negative (error) result from
 * __Pyx_NumKwargs_FASTCALL was silently ignored. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_339device_set_driver_model(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_driver_model;
  unsigned int __pyx_v_flags;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_driver_model (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_driver_model,&__pyx_mstate_global->__pyx_n_u_flags,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form could
     * never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22629, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22629, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_driver_model", 0) < (0)) __PYX_ERR(0, 22629, __pyx_L3_error)
      /* Any slot still empty means a required argument was not supplied. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_driver_model", 1, 3, 3, i); __PYX_ERR(0, 22629, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22629, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22629, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22629, __pyx_L3_error)
    }
    /* Convert: device handle arrives as a Python int holding a pointer value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22629, __pyx_L3_error)
    __pyx_v_driver_model = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_driver_model == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22629, __pyx_L3_error)
    __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22629, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_driver_model", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22629, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_driver_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_338device_set_driver_model(__pyx_self, __pyx_v_device, __pyx_v_driver_model, __pyx_v_flags);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-call implementation shim for device_set_driver_model.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Invoked by the argument-unpacking wrapper above with already-converted C
 * arguments; forwards to the C-level cpdef function
 * __pyx_f_4cuda_8bindings_5_nvml_device_set_driver_model with
 * __pyx_skip_dispatch=1 (skip Python-level override lookup) and returns its
 * result (a new reference), or NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_338device_set_driver_model(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_driver_model, unsigned int __pyx_v_flags) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_driver_model", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; a NULL return means a Python exception
   * is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_driver_model(__pyx_v_device, __pyx_v_driver_model, __pyx_v_flags, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Transfer ownership of the result reference to the return slot. */
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop any partial result and record this frame in the
   * Python traceback before returning NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_driver_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22644
 * 
 * 
 * cpdef device_set_gpu_locked_clocks(intptr_t device, unsigned int min_gpu_clock_m_hz, unsigned int max_gpu_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Set clocks that device will lock to.
 * 
*/

/* Forward declaration of the Python wrapper (fastcall or tuple/dict calling
 * convention depending on CYTHON_METH_FASTCALL). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_341device_set_gpu_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of cpdef device_set_gpu_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Releases the GIL, calls nvmlDeviceSetGpuLockedClocks(device, min, max)
 * through the cy_nvml shim, reacquires the GIL, then raises via
 * check_status() if NVML reported an error.  Returns a new reference to
 * None on success, NULL with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_locked_clocks(intptr_t __pyx_v_device, unsigned int __pyx_v_min_gpu_clock_m_hz, unsigned int __pyx_v_max_gpu_clock_m_hz, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_gpu_locked_clocks", 0);

  /* "cuda/bindings/_nvml.pyx":22654
 *     .. seealso:: `nvmlDeviceSetGpuLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetGpuLockedClocks(<Device>device, min_gpu_clock_m_hz, max_gpu_clock_m_hz)
 *     check_status(__status__)
*/
  /* Expansion of `with nogil:` -- drop the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22655
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetGpuLockedClocks(<Device>device, min_gpu_clock_m_hz, max_gpu_clock_m_hz)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The cy_nvml shim returns the _NVMLRETURN_T_INTERNAL_LOADING_ERROR
         * sentinel (with a Python exception set) when the NVML library could
         * not be loaded; __Pyx_ErrOccurredWithGIL briefly reacquires the GIL
         * to check for that exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuLockedClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_min_gpu_clock_m_hz, __pyx_v_max_gpu_clock_m_hz); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22655, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22654
 *     .. seealso:: `nvmlDeviceSetGpuLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetGpuLockedClocks(<Device>device, min_gpu_clock_m_hz, max_gpu_clock_m_hz)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22656
 *     with nogil:
 *         __status__ = nvmlDeviceSetGpuLockedClocks(<Device>device, min_gpu_clock_m_hz, max_gpu_clock_m_hz)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises the appropriate Python exception for a non-success
   * NVML status; it signals that by returning 1. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22656, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22644
 * 
 * 
 * cpdef device_set_gpu_locked_clocks(intptr_t device, unsigned int min_gpu_clock_m_hz, unsigned int max_gpu_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Set clocks that device will lock to.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_gpu_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * Python-visible wrapper for device_set_gpu_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Unpacks exactly three arguments (device, min_gpu_clock_m_hz,
 * max_gpu_clock_m_hz) from positional args and/or keywords, converts them to
 * C types, and delegates to the __pyx_pf_ implementation below.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_341device_set_gpu_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_340device_set_gpu_locked_clocks, "device_set_gpu_locked_clocks(intptr_t device, unsigned int min_gpu_clock_m_hz, unsigned int max_gpu_clock_m_hz)\n\nSet clocks that device will lock to.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    min_gpu_clock_m_hz (unsigned int): Requested minimum gpu clock in MHz.\n    max_gpu_clock_m_hz (unsigned int): Requested maximum gpu clock in MHz.\n\n.. seealso:: `nvmlDeviceSetGpuLockedClocks`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_341device_set_gpu_locked_clocks = {"device_set_gpu_locked_clocks", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_341device_set_gpu_locked_clocks, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_340device_set_gpu_locked_clocks};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_341device_set_gpu_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_min_gpu_clock_m_hz;
  unsigned int __pyx_v_max_gpu_clock_m_hz;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Borrowed-or-owned argument slots (owned when __Pyx_ArgRef_FASTCALL
   * returns a new reference); always XDECREF'd on exit. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_gpu_locked_clocks (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_min_gpu_clock_m_hz,&__pyx_mstate_global->__pyx_n_u_max_gpu_clock_m_hz,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the macro
     * around the operand only; if `unlikely` maps to __builtin_expect(!!(x),0)
     * this compares a 0/1 value to 0 and is never true -- confirm against the
     * `unlikely` definition in the preamble (generator-emitted pattern). */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22644, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first (falling
       * through from the highest given index), then fill the rest from
       * keywords and verify none are missing. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22644, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22644, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22644, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_gpu_locked_clocks", 0) < (0)) __PYX_ERR(0, 22644, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_gpu_locked_clocks", 1, 3, 3, i); __PYX_ERR(0, 22644, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22644, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22644, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22644, __pyx_L3_error)
    }
    /* Convert Python ints to C values; each converter's -1 sentinel is
     * disambiguated from a real -1 by PyErr_Occurred().  (`device` is
     * converted via PyLong_AsSsize_t -- assumes Py_ssize_t and intptr_t have
     * the same width on supported platforms.) */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22644, __pyx_L3_error)
    __pyx_v_min_gpu_clock_m_hz = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_min_gpu_clock_m_hz == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22644, __pyx_L3_error)
    __pyx_v_max_gpu_clock_m_hz = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_max_gpu_clock_m_hz == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22644, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_gpu_locked_clocks", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22644, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any argument references, record the traceback
   * frame, and propagate the exception. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_gpu_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_340device_set_gpu_locked_clocks(__pyx_self, __pyx_v_device, __pyx_v_min_gpu_clock_m_hz, __pyx_v_max_gpu_clock_m_hz);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-call implementation shim for device_set_gpu_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Forwards the converted C arguments to the C-level cpdef function with
 * __pyx_skip_dispatch=1 and returns its result (a new reference), or NULL
 * with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_340device_set_gpu_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_min_gpu_clock_m_hz, unsigned int __pyx_v_max_gpu_clock_m_hz) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_gpu_locked_clocks", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL means an exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_locked_clocks(__pyx_v_device, __pyx_v_min_gpu_clock_m_hz, __pyx_v_max_gpu_clock_m_hz, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_gpu_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22659
 * 
 * 
 * cpdef device_reset_gpu_locked_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Resets the gpu clock to the default value.
 * 
*/

/* Forward declaration of the Python wrapper (fastcall or tuple/dict calling
 * convention depending on CYTHON_METH_FASTCALL). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_343device_reset_gpu_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of cpdef device_reset_gpu_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Releases the GIL, calls nvmlDeviceResetGpuLockedClocks(device) through the
 * cy_nvml shim, reacquires the GIL, then raises via check_status() if NVML
 * reported an error.  Returns a new reference to None on success, NULL with
 * an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_reset_gpu_locked_clocks(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_reset_gpu_locked_clocks", 0);

  /* "cuda/bindings/_nvml.pyx":22667
 *     .. seealso:: `nvmlDeviceResetGpuLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceResetGpuLockedClocks(<Device>device)
 *     check_status(__status__)
*/
  /* Expansion of `with nogil:` -- drop the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22668
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceResetGpuLockedClocks(<Device>device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel status plus a pending Python exception indicates the NVML
         * shared library failed to load inside the cy_nvml shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetGpuLockedClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22668, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22667
 *     .. seealso:: `nvmlDeviceResetGpuLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceResetGpuLockedClocks(<Device>device)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22669
 *     with nogil:
 *         __status__ = nvmlDeviceResetGpuLockedClocks(<Device>device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises for a non-success NVML status (returns 1). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22669, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22659
 * 
 * 
 * cpdef device_reset_gpu_locked_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Resets the gpu clock to the default value.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_gpu_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * Python-visible wrapper for device_reset_gpu_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Unpacks exactly one argument (device) from positional args and/or
 * keywords, converts it to intptr_t, and delegates to the __pyx_pf_
 * implementation below.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_343device_reset_gpu_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_342device_reset_gpu_locked_clocks, "device_reset_gpu_locked_clocks(intptr_t device)\n\nResets the gpu clock to the default value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceResetGpuLockedClocks`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_343device_reset_gpu_locked_clocks = {"device_reset_gpu_locked_clocks", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_343device_reset_gpu_locked_clocks, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_342device_reset_gpu_locked_clocks};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_343device_reset_gpu_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Single argument slot; always XDECREF'd on exit. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_reset_gpu_locked_clocks (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the macro
     * around the operand only -- see the matching note on the other wrappers;
     * confirm against the `unlikely` definition in the preamble. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22659, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22659, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_reset_gpu_locked_clocks", 0) < (0)) __PYX_ERR(0, 22659, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_reset_gpu_locked_clocks", 1, 1, 1, i); __PYX_ERR(0, 22659, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22659, __pyx_L3_error)
    }
    /* Convert to C; -1 sentinel disambiguated via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22659, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_reset_gpu_locked_clocks", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22659, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release argument references and propagate. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_gpu_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_342device_reset_gpu_locked_clocks(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-call implementation shim for device_reset_gpu_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Forwards the converted device handle to the C-level cpdef function with
 * __pyx_skip_dispatch=1 and returns its result (a new reference), or NULL
 * with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_342device_reset_gpu_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_reset_gpu_locked_clocks", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL means an exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_reset_gpu_locked_clocks(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_gpu_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22672
 * 
 * 
 * cpdef device_set_memory_locked_clocks(intptr_t device, unsigned int min_mem_clock_m_hz, unsigned int max_mem_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Set memory clocks that device will lock to.
 * 
*/

/* Forward declaration of the Python wrapper (fastcall or tuple/dict calling
 * convention depending on CYTHON_METH_FASTCALL). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_345device_set_memory_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of cpdef device_set_memory_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Releases the GIL, calls nvmlDeviceSetMemoryLockedClocks(device, min, max)
 * through the cy_nvml shim, reacquires the GIL, then raises via
 * check_status() if NVML reported an error.  Returns a new reference to
 * None on success, NULL with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_memory_locked_clocks(intptr_t __pyx_v_device, unsigned int __pyx_v_min_mem_clock_m_hz, unsigned int __pyx_v_max_mem_clock_m_hz, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_memory_locked_clocks", 0);

  /* "cuda/bindings/_nvml.pyx":22682
 *     .. seealso:: `nvmlDeviceSetMemoryLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetMemoryLockedClocks(<Device>device, min_mem_clock_m_hz, max_mem_clock_m_hz)
 *     check_status(__status__)
*/
  /* Expansion of `with nogil:` -- drop the GIL around the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22683
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetMemoryLockedClocks(<Device>device, min_mem_clock_m_hz, max_mem_clock_m_hz)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel status plus a pending Python exception indicates the NVML
         * shared library failed to load inside the cy_nvml shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMemoryLockedClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_min_mem_clock_m_hz, __pyx_v_max_mem_clock_m_hz); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22683, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22682
 *     .. seealso:: `nvmlDeviceSetMemoryLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetMemoryLockedClocks(<Device>device, min_mem_clock_m_hz, max_mem_clock_m_hz)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22684
 *     with nogil:
 *         __status__ = nvmlDeviceSetMemoryLockedClocks(<Device>device, min_mem_clock_m_hz, max_mem_clock_m_hz)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises for a non-success NVML status (returns 1). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22684, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22672
 * 
 * 
 * cpdef device_set_memory_locked_clocks(intptr_t device, unsigned int min_mem_clock_m_hz, unsigned int max_mem_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Set memory clocks that device will lock to.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_memory_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * Python-visible wrapper for device_set_memory_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Unpacks exactly three arguments (device, min_mem_clock_m_hz,
 * max_mem_clock_m_hz) from positional args and/or keywords, converts them to
 * C types, and delegates to the __pyx_pf_ implementation below.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_345device_set_memory_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_344device_set_memory_locked_clocks, "device_set_memory_locked_clocks(intptr_t device, unsigned int min_mem_clock_m_hz, unsigned int max_mem_clock_m_hz)\n\nSet memory clocks that device will lock to.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    min_mem_clock_m_hz (unsigned int): Requested minimum memory clock in MHz.\n    max_mem_clock_m_hz (unsigned int): Requested maximum memory clock in MHz.\n\n.. seealso:: `nvmlDeviceSetMemoryLockedClocks`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_345device_set_memory_locked_clocks = {"device_set_memory_locked_clocks", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_345device_set_memory_locked_clocks, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_344device_set_memory_locked_clocks};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_345device_set_memory_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_min_mem_clock_m_hz;
  unsigned int __pyx_v_max_mem_clock_m_hz;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Argument slots; always XDECREF'd on exit. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_memory_locked_clocks (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_min_mem_clock_m_hz,&__pyx_mstate_global->__pyx_n_u_max_mem_clock_m_hz,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes the macro
     * around the operand only -- see the matching note on the other wrappers;
     * confirm against the `unlikely` definition in the preamble. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22672, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call path. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22672, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22672, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22672, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_memory_locked_clocks", 0) < (0)) __PYX_ERR(0, 22672, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_memory_locked_clocks", 1, 3, 3, i); __PYX_ERR(0, 22672, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22672, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22672, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22672, __pyx_L3_error)
    }
    /* Convert to C; -1 sentinels disambiguated via PyErr_Occurred(). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22672, __pyx_L3_error)
    __pyx_v_min_mem_clock_m_hz = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_min_mem_clock_m_hz == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22672, __pyx_L3_error)
    __pyx_v_max_mem_clock_m_hz = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_max_mem_clock_m_hz == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22672, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_memory_locked_clocks", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22672, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release argument references and propagate. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_memory_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_344device_set_memory_locked_clocks(__pyx_self, __pyx_v_device, __pyx_v_min_mem_clock_m_hz, __pyx_v_max_mem_clock_m_hz);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-call implementation shim for device_set_memory_locked_clocks.
 *
 * Generated by Cython -- do not edit by hand; regenerate from
 * cuda/bindings/_nvml.pyx instead.
 *
 * Forwards the converted C arguments to the C-level cpdef function with
 * __pyx_skip_dispatch=1 and returns its result (a new reference), or NULL
 * with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_344device_set_memory_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_min_mem_clock_m_hz, unsigned int __pyx_v_max_mem_clock_m_hz) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_memory_locked_clocks", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL means an exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_memory_locked_clocks(__pyx_v_device, __pyx_v_min_mem_clock_m_hz, __pyx_v_max_mem_clock_m_hz, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22672, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_memory_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22687
 * 
 * 
 * cpdef device_reset_memory_locked_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Resets the memory clock to the default value.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_347device_reset_memory_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_reset_memory_locked_clocks(device):
 * releases the GIL, calls the nvmlDeviceResetMemoryLockedClocks shim, then
 * re-acquires the GIL and raises via check_status() on a non-success status.
 * Returns None on success, NULL (0) with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_reset_memory_locked_clocks(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_reset_memory_locked_clocks", 0);

  /* "cuda/bindings/_nvml.pyx":22695
 *     .. seealso:: `nvmlDeviceResetMemoryLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceResetMemoryLockedClocks(<Device>device)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22696
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceResetMemoryLockedClocks(<Device>device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel appears to signal
         * that the lazy-loading shim failed; __Pyx_ErrOccurredWithGIL then
         * checks for a pending Python exception (taking the GIL to do so)
         * before jumping to the error path — NOTE(review): semantics inferred
         * from the shim naming, confirm against cy_nvml. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetMemoryLockedClocks(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22696, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22695
 *     .. seealso:: `nvmlDeviceResetMemoryLockedClocks`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceResetMemoryLockedClocks(<Device>device)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state before
       * continuing; the error path re-routes to __pyx_L1_error afterwards. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22697
 *     with nogil:
 *         __status__ = nvmlDeviceResetMemoryLockedClocks(<Device>device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22697, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22687
 * 
 * 
 * cpdef device_reset_memory_locked_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Resets the memory clock to the default value.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_memory_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_347device_reset_memory_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_346device_reset_memory_locked_clocks, "device_reset_memory_locked_clocks(intptr_t device)\n\nResets the memory clock to the default value.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceResetMemoryLockedClocks`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_347device_reset_memory_locked_clocks = {"device_reset_memory_locked_clocks", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_347device_reset_memory_locked_clocks, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_346device_reset_memory_locked_clocks};
/* Argument-unpacking wrapper for device_reset_memory_locked_clocks(device).
 * Accepts METH_FASTCALL (or legacy tuple) positional arguments plus keywords,
 * converts the single 'device' argument to intptr_t, and dispatches to the
 * impl function. Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_347device_reset_memory_locked_clocks(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_reset_memory_locked_clocks (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison must be inside unlikely(). 'unlikely(x) < 0' expands to
     * '__builtin_expect(!!(x), 0) < 0', which normalizes x to 0/1 and can
     * never be negative, so a failing __Pyx_NumKwargs_FASTCALL() (negative
     * return with an exception set) was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22687, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22687, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_reset_memory_locked_clocks", 0) < (0)) __PYX_ERR(0, 22687, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_reset_memory_locked_clocks", 1, 1, 1, i); __PYX_ERR(0, 22687, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22687, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t, same width here). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22687, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_reset_memory_locked_clocks", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22687, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_memory_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_346device_reset_memory_locked_clocks(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible impl shim for device_reset_memory_locked_clocks: forwards
 * the unpacked device handle to the cpdef C implementation and returns the
 * resulting Python object (None on success, NULL with an exception set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_346device_reset_memory_locked_clocks(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_reset_memory_locked_clocks", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Trailing 1 = __pyx_skip_dispatch: call the C implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_reset_memory_locked_clocks(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_memory_locked_clocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22700
 * 
 * 
 * cpdef device_set_auto_boosted_clocks_enabled(intptr_t device, int enabled):             # <<<<<<<<<<<<<<
 *     """Try to set the current state of Auto Boosted clocks on a device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_349device_set_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_auto_boosted_clocks_enabled(device,
 * enabled): releases the GIL, calls the nvmlDeviceSetAutoBoostedClocksEnabled
 * shim with the handle and the enable-state enum, then re-acquires the GIL
 * and raises via check_status() on a non-success status. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_auto_boosted_clocks_enabled(intptr_t __pyx_v_device, int __pyx_v_enabled, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_auto_boosted_clocks_enabled", 0);

  /* "cuda/bindings/_nvml.pyx":22709
 *     .. seealso:: `nvmlDeviceSetAutoBoostedClocksEnabled`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22710
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel plus a GIL-protected exception check
         * routes shim-load failures to the error path before status checking. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAutoBoostedClocksEnabled(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_enabled)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22710, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22709
 *     .. seealso:: `nvmlDeviceSetAutoBoostedClocksEnabled`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22711
 *     with nogil:
 *         __status__ = nvmlDeviceSetAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22711, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22700
 * 
 * 
 * cpdef device_set_auto_boosted_clocks_enabled(intptr_t device, int enabled):             # <<<<<<<<<<<<<<
 *     """Try to set the current state of Auto Boosted clocks on a device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_349device_set_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_348device_set_auto_boosted_clocks_enabled, "device_set_auto_boosted_clocks_enabled(intptr_t device, int enabled)\n\nTry to set the current state of Auto Boosted clocks on a device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    enabled (EnableState): What state to try to set Auto Boosted clocks of the target device to.\n\n.. seealso:: `nvmlDeviceSetAutoBoostedClocksEnabled`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_349device_set_auto_boosted_clocks_enabled = {"device_set_auto_boosted_clocks_enabled", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_349device_set_auto_boosted_clocks_enabled, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_348device_set_auto_boosted_clocks_enabled};
/* Argument-unpacking wrapper for device_set_auto_boosted_clocks_enabled(
 * device, enabled). Accepts METH_FASTCALL (or legacy tuple) positionals plus
 * keywords, converts 'device' to intptr_t and 'enabled' to int, then
 * dispatches to the impl. Returns NULL with an exception set on bad args. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_349device_set_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_enabled;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_auto_boosted_clocks_enabled (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_enabled,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison must be inside unlikely(). 'unlikely(x) < 0' expands to
     * '__builtin_expect(!!(x), 0) < 0', which normalizes x to 0/1 and can
     * never be negative, so a failing __Pyx_NumKwargs_FASTCALL() (negative
     * return with an exception set) was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22700, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22700, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22700, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_auto_boosted_clocks_enabled", 0) < (0)) __PYX_ERR(0, 22700, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_auto_boosted_clocks_enabled", 1, 2, 2, i); __PYX_ERR(0, 22700, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22700, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22700, __pyx_L3_error)
    }
    /* Convert Python ints to the C parameter types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22700, __pyx_L3_error)
    __pyx_v_enabled = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_enabled == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22700, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_auto_boosted_clocks_enabled", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22700, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_348device_set_auto_boosted_clocks_enabled(__pyx_self, __pyx_v_device, __pyx_v_enabled);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible impl shim for device_set_auto_boosted_clocks_enabled:
 * forwards the unpacked C arguments to the cpdef C implementation and
 * returns the resulting Python object (None on success, NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_348device_set_auto_boosted_clocks_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_enabled) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_auto_boosted_clocks_enabled", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Trailing 1 = __pyx_skip_dispatch: call the C implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_auto_boosted_clocks_enabled(__pyx_v_device, __pyx_v_enabled, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22714
 * 
 * 
 * cpdef device_set_default_auto_boosted_clocks_enabled(intptr_t device, int enabled, unsigned int flags):             # <<<<<<<<<<<<<<
 *     """Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will return to when no compute running processes (e.g. CUDA application which have an active context) are running.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_351device_set_default_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_default_auto_boosted_clocks_enabled(
 * device, enabled, flags): releases the GIL, calls the
 * nvmlDeviceSetDefaultAutoBoostedClocksEnabled shim, then re-acquires the
 * GIL and raises via check_status() on a non-success status. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_default_auto_boosted_clocks_enabled(intptr_t __pyx_v_device, int __pyx_v_enabled, unsigned int __pyx_v_flags, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_default_auto_boosted_clocks_enabled", 0);

  /* "cuda/bindings/_nvml.pyx":22724
 *     .. seealso:: `nvmlDeviceSetDefaultAutoBoostedClocksEnabled`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDefaultAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled, flags)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22725
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetDefaultAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled, flags)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel plus a GIL-protected exception check
         * routes shim-load failures to the error path before status checking. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultAutoBoostedClocksEnabled(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_enabled), __pyx_v_flags); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22725, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22724
 *     .. seealso:: `nvmlDeviceSetDefaultAutoBoostedClocksEnabled`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDefaultAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled, flags)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22726
 *     with nogil:
 *         __status__ = nvmlDeviceSetDefaultAutoBoostedClocksEnabled(<Device>device, <_EnableState>enabled, flags)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22726, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22714
 * 
 * 
 * cpdef device_set_default_auto_boosted_clocks_enabled(intptr_t device, int enabled, unsigned int flags):             # <<<<<<<<<<<<<<
 *     """Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will return to when no compute running processes (e.g. CUDA application which have an active context) are running.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_default_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_351device_set_default_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_350device_set_default_auto_boosted_clocks_enabled, "device_set_default_auto_boosted_clocks_enabled(intptr_t device, int enabled, unsigned int flags)\n\nTry to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will return to when no compute running processes (e.g. CUDA application which have an active context) are running.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    enabled (EnableState): What state to try to set default Auto Boosted clocks of the target device to.\n    flags (unsigned int): Flags that change the default behavior. Currently Unused.\n\n.. seealso:: `nvmlDeviceSetDefaultAutoBoostedClocksEnabled`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_351device_set_default_auto_boosted_clocks_enabled = {"device_set_default_auto_boosted_clocks_enabled", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_351device_set_default_auto_boosted_clocks_enabled, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_350device_set_default_auto_boosted_clocks_enabled};
/* Argument-unpacking wrapper for device_set_default_auto_boosted_clocks_enabled(
 * device, enabled, flags). Accepts METH_FASTCALL (or legacy tuple)
 * positionals plus keywords, converts the three arguments to their C types,
 * then dispatches to the impl. Returns NULL with an exception set on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_351device_set_default_auto_boosted_clocks_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_enabled;
  unsigned int __pyx_v_flags;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_default_auto_boosted_clocks_enabled (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_enabled,&__pyx_mstate_global->__pyx_n_u_flags,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison must be inside unlikely(). 'unlikely(x) < 0' expands to
     * '__builtin_expect(!!(x), 0) < 0', which normalizes x to 0/1 and can
     * never be negative, so a failing __Pyx_NumKwargs_FASTCALL() (negative
     * return with an exception set) was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22714, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect positionals first, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22714, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22714, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22714, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_default_auto_boosted_clocks_enabled", 0) < (0)) __PYX_ERR(0, 22714, __pyx_L3_error)
      /* Ensure every required argument was supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_default_auto_boosted_clocks_enabled", 1, 3, 3, i); __PYX_ERR(0, 22714, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22714, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22714, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22714, __pyx_L3_error)
    }
    /* Convert Python ints to the C parameter types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22714, __pyx_L3_error)
    __pyx_v_enabled = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_enabled == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22714, __pyx_L3_error)
    __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22714, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_default_auto_boosted_clocks_enabled", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22714, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_default_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_350device_set_default_auto_boosted_clocks_enabled(__pyx_self, __pyx_v_device, __pyx_v_enabled, __pyx_v_flags);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible impl shim for device_set_default_auto_boosted_clocks_enabled:
 * forwards the unpacked C arguments to the cpdef C implementation and
 * returns the resulting Python object (None on success, NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_350device_set_default_auto_boosted_clocks_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_enabled, unsigned int __pyx_v_flags) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_default_auto_boosted_clocks_enabled", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Trailing 1 = __pyx_skip_dispatch: call the C implementation directly. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_default_auto_boosted_clocks_enabled(__pyx_v_device, __pyx_v_enabled, __pyx_v_flags, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_default_auto_boosted_clocks_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22729
 * 
 * 
 * cpdef device_set_default_fan_speed_v2(intptr_t device, unsigned int fan):             # <<<<<<<<<<<<<<
 *     """Sets the speed of the fan control policy to default.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_353device_set_default_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_default_fan_speed_v2(device, fan):
 * releases the GIL, calls the nvmlDeviceSetDefaultFanSpeed_v2 shim with the
 * device handle and fan index, then re-acquires the GIL and raises via
 * check_status() on a non-success status. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_default_fan_speed_v2(intptr_t __pyx_v_device, unsigned int __pyx_v_fan, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_default_fan_speed_v2", 0);

  /* "cuda/bindings/_nvml.pyx":22738
 *     .. seealso:: `nvmlDeviceSetDefaultFanSpeed_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDefaultFanSpeed_v2(<Device>device, fan)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22739
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetDefaultFanSpeed_v2(<Device>device, fan)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel plus a GIL-protected exception check
         * routes shim-load failures to the error path before status checking. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultFanSpeed_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22739, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22738
 *     .. seealso:: `nvmlDeviceSetDefaultFanSpeed_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetDefaultFanSpeed_v2(<Device>device, fan)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22740
 *     with nogil:
 *         __status__ = nvmlDeviceSetDefaultFanSpeed_v2(<Device>device, fan)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22740, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22729
 * 
 * 
 * cpdef device_set_default_fan_speed_v2(intptr_t device, unsigned int fan):             # <<<<<<<<<<<<<<
 *     """Sets the speed of the fan control policy to default.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_default_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_353device_set_default_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_352device_set_default_fan_speed_v2, "device_set_default_fan_speed_v2(intptr_t device, unsigned int fan)\n\nSets the speed of the fan control policy to default.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    fan (unsigned int): The index of the fan, starting at zero.\n\n.. seealso:: `nvmlDeviceSetDefaultFanSpeed_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_353device_set_default_fan_speed_v2 = {"device_set_default_fan_speed_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_353device_set_default_fan_speed_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_352device_set_default_fan_speed_v2};
/*
 * CPython entry point for device_set_default_fan_speed_v2(device, fan).
 * Unpacks positional and keyword arguments into C `intptr_t`/`unsigned int`
 * values and forwards them to the __pyx_pf_ dispatcher.  Returns NULL with a
 * Python exception set on any parse/conversion failure.
 *
 * FIX(review): the keyword-count error check was generated as
 * `unlikely(__pyx_kwds_len) < 0`; since `unlikely(x)` expands to
 * `__builtin_expect(!!(x), 0)` the comparison is against 0/1 and can never
 * be true, so a negative (error) return from __Pyx_NumKwargs_FASTCALL was
 * silently ignored.  Corrected to `unlikely(__pyx_kwds_len < 0)`.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_353device_set_default_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_fan;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_default_fan_speed_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_fan,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected check: a negative length signals an error from the kwargs probe. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22729, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22729, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22729, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_default_fan_speed_v2", 0) < (0)) __PYX_ERR(0, 22729, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_default_fan_speed_v2", 1, 2, 2, i); __PYX_ERR(0, 22729, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22729, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22729, __pyx_L3_error)
    }
    /* Convert the Python objects to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22729, __pyx_L3_error)
    __pyx_v_fan = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_fan == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22729, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_default_fan_speed_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22729, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_default_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_352device_set_default_fan_speed_v2(__pyx_self, __pyx_v_device, __pyx_v_fan);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin dispatcher for the `def` wrapper of device_set_default_fan_speed_v2:
 * invokes the cpdef C implementation with skip_dispatch=1 and, on failure
 * (NULL return with a Python exception set), records a traceback frame and
 * returns NULL to the caller. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_352device_set_default_fan_speed_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan) {
  PyObject *__pyx_r = NULL;
  PyObject *call_result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_default_fan_speed_v2", 0);
  call_result = __pyx_f_4cuda_8bindings_5_nvml_device_set_default_fan_speed_v2(__pyx_v_device, __pyx_v_fan, 1);
  if (unlikely(!call_result)) __PYX_ERR(0, 22729, __pyx_L1_error)
  __Pyx_GOTREF(call_result);
  /* Hand the owned reference straight through as the return value. */
  __pyx_r = call_result;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_default_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22743
 * 
 * 
 * cpdef device_set_fan_control_policy(intptr_t device, unsigned int fan, unsigned int policy):             # <<<<<<<<<<<<<<
 *     """Sets current fan control policy.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_355device_set_fan_control_policy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C implementation of cpdef device_set_fan_control_policy(device, fan, policy).
 * Releases the GIL, calls the NVML driver entry nvmlDeviceSetFanControlPolicy
 * with the raw handle/indices cast to their NVML types, re-acquires the GIL,
 * and then raises via check_status() if the call did not succeed.
 * Returns Py_None on success, NULL (with exception set) on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_fan_control_policy(intptr_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_policy, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_fan_control_policy", 0);

  /* "cuda/bindings/_nvml.pyx":22753
 *     .. seealso:: `nvmlDeviceSetFanControlPolicy`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetFanControlPolicy(<Device>device, fan, <nvmlFanControlPolicy_t>policy)
 *     check_status(__status__)
 */
  {
      /* Release the GIL for the duration of the driver call (cpdef `with nogil`). */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22754
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetFanControlPolicy(<Device>device, fan, <nvmlFanControlPolicy_t>policy)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel only counts as a failure when a Python
         * exception is actually pending (checked with the GIL re-taken). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanControlPolicy(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan, ((nvmlFanControlPolicy_t)__pyx_v_policy)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22754, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22753
 *     .. seealso:: `nvmlDeviceSetFanControlPolicy`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetFanControlPolicy(<Device>device, fan, <nvmlFanControlPolicy_t>policy)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22755
 *     with nogil:
 *         __status__ = nvmlDeviceSetFanControlPolicy(<Device>device, fan, <nvmlFanControlPolicy_t>policy)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Map a non-success NVML status to a raised Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22755, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22743
 * 
 * 
 * cpdef device_set_fan_control_policy(intptr_t device, unsigned int fan, unsigned int policy):             # <<<<<<<<<<<<<<
 *     """Sets current fan control policy.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_fan_control_policy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_355device_set_fan_control_policy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_354device_set_fan_control_policy, "device_set_fan_control_policy(intptr_t device, unsigned int fan, unsigned int policy)\n\nSets current fan control policy.\n\nArgs:\n    device (intptr_t): The identifier of the target ``device``.\n    fan (unsigned int): The index of the fan, starting at zero.\n    policy (unsigned int): The fan control ``policy`` to set.\n\n.. seealso:: `nvmlDeviceSetFanControlPolicy`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_355device_set_fan_control_policy = {"device_set_fan_control_policy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_355device_set_fan_control_policy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_354device_set_fan_control_policy};
/*
 * CPython entry point for device_set_fan_control_policy(device, fan, policy).
 * Unpacks positional/keyword arguments, converts them to C values and
 * forwards to the __pyx_pf_ dispatcher.
 *
 * FIX(review): `unlikely(__pyx_kwds_len) < 0` compared the 0/1 result of
 * __builtin_expect against 0 (always false), masking a negative error return
 * from __Pyx_NumKwargs_FASTCALL.  Corrected to `unlikely(__pyx_kwds_len < 0)`.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_355device_set_fan_control_policy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_fan;
  unsigned int __pyx_v_policy;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_fan_control_policy (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_fan,&__pyx_mstate_global->__pyx_n_u_policy,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected check: a negative length signals an error from the kwargs probe. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22743, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22743, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22743, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22743, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_fan_control_policy", 0) < (0)) __PYX_ERR(0, 22743, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_fan_control_policy", 1, 3, 3, i); __PYX_ERR(0, 22743, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22743, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22743, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22743, __pyx_L3_error)
    }
    /* Convert the Python objects to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22743, __pyx_L3_error)
    __pyx_v_fan = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_fan == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22743, __pyx_L3_error)
    __pyx_v_policy = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_policy == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22743, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_fan_control_policy", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22743, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_fan_control_policy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_354device_set_fan_control_policy(__pyx_self, __pyx_v_device, __pyx_v_fan, __pyx_v_policy);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin dispatcher for the `def` wrapper of device_set_fan_control_policy:
 * invokes the cpdef C implementation with skip_dispatch=1 and, on failure
 * (NULL return with a Python exception set), records a traceback frame and
 * returns NULL to the caller. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_354device_set_fan_control_policy(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_policy) {
  PyObject *__pyx_r = NULL;
  PyObject *call_result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_fan_control_policy", 0);
  call_result = __pyx_f_4cuda_8bindings_5_nvml_device_set_fan_control_policy(__pyx_v_device, __pyx_v_fan, __pyx_v_policy, 1);
  if (unlikely(!call_result)) __PYX_ERR(0, 22743, __pyx_L1_error)
  __Pyx_GOTREF(call_result);
  /* Hand the owned reference straight through as the return value. */
  __pyx_r = call_result;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_fan_control_policy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22758
 * 
 * 
 * cpdef device_set_temperature_threshold(intptr_t device, int threshold_type, intptr_t temp):             # <<<<<<<<<<<<<<
 *     """Sets the temperature threshold for the GPU with the specified threshold type in degrees C.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_357device_set_temperature_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C implementation of cpdef device_set_temperature_threshold(device,
 * threshold_type, temp).  Releases the GIL, calls the NVML driver entry
 * nvmlDeviceSetTemperatureThreshold, re-acquires the GIL, and raises via
 * check_status() if the call did not succeed.
 * NOTE(review): `temp` is an intptr_t reinterpreted as `int *` — the caller
 * must pass the address of a valid int holding the threshold value.
 * Returns Py_None on success, NULL (with exception set) on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_temperature_threshold(intptr_t __pyx_v_device, int __pyx_v_threshold_type, intptr_t __pyx_v_temp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_temperature_threshold", 0);

  /* "cuda/bindings/_nvml.pyx":22768
 *     .. seealso:: `nvmlDeviceSetTemperatureThreshold`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, <int*>temp)
 *     check_status(__status__)
 */
  {
      /* Release the GIL for the duration of the driver call (cpdef `with nogil`). */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22769
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, <int*>temp)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel only counts as a failure when a Python
         * exception is actually pending (checked with the GIL re-taken). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetTemperatureThreshold(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__TemperatureThresholds)__pyx_v_threshold_type), ((int *)__pyx_v_temp)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22769, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22768
 *     .. seealso:: `nvmlDeviceSetTemperatureThreshold`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, <int*>temp)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22770
 *     with nogil:
 *         __status__ = nvmlDeviceSetTemperatureThreshold(<Device>device, <_TemperatureThresholds>threshold_type, <int*>temp)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Map a non-success NVML status to a raised Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22770, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22758
 * 
 * 
 * cpdef device_set_temperature_threshold(intptr_t device, int threshold_type, intptr_t temp):             # <<<<<<<<<<<<<<
 *     """Sets the temperature threshold for the GPU with the specified threshold type in degrees C.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_temperature_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_357device_set_temperature_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_356device_set_temperature_threshold, "device_set_temperature_threshold(intptr_t device, int threshold_type, intptr_t temp)\n\nSets the temperature threshold for the GPU with the specified threshold type in degrees C.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    threshold_type (TemperatureThresholds): The type of threshold value to be set.\n    temp (intptr_t): Reference which hold the value to be set.\n\n.. seealso:: `nvmlDeviceSetTemperatureThreshold`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_357device_set_temperature_threshold = {"device_set_temperature_threshold", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_357device_set_temperature_threshold, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_356device_set_temperature_threshold};
/*
 * CPython entry point for device_set_temperature_threshold(device,
 * threshold_type, temp).  Unpacks positional/keyword arguments, converts
 * them to C values and forwards to the __pyx_pf_ dispatcher.
 *
 * FIX(review): `unlikely(__pyx_kwds_len) < 0` compared the 0/1 result of
 * __builtin_expect against 0 (always false), masking a negative error return
 * from __Pyx_NumKwargs_FASTCALL.  Corrected to `unlikely(__pyx_kwds_len < 0)`.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_357device_set_temperature_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_threshold_type;
  intptr_t __pyx_v_temp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_temperature_threshold (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_threshold_type,&__pyx_mstate_global->__pyx_n_u_temp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Corrected check: a negative length signals an error from the kwargs probe. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22758, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22758, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22758, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22758, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_temperature_threshold", 0) < (0)) __PYX_ERR(0, 22758, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_temperature_threshold", 1, 3, 3, i); __PYX_ERR(0, 22758, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22758, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22758, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22758, __pyx_L3_error)
    }
    /* Convert the Python objects to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22758, __pyx_L3_error)
    __pyx_v_threshold_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_threshold_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22758, __pyx_L3_error)
    __pyx_v_temp = PyLong_AsSsize_t(values[2]); if (unlikely((__pyx_v_temp == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22758, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_temperature_threshold", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22758, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_temperature_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_356device_set_temperature_threshold(__pyx_self, __pyx_v_device, __pyx_v_threshold_type, __pyx_v_temp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin dispatcher for the `def` wrapper of device_set_temperature_threshold:
 * invokes the cpdef C implementation with skip_dispatch=1 and, on failure
 * (NULL return with a Python exception set), records a traceback frame and
 * returns NULL to the caller. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_356device_set_temperature_threshold(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_threshold_type, intptr_t __pyx_v_temp) {
  PyObject *__pyx_r = NULL;
  PyObject *call_result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_temperature_threshold", 0);
  call_result = __pyx_f_4cuda_8bindings_5_nvml_device_set_temperature_threshold(__pyx_v_device, __pyx_v_threshold_type, __pyx_v_temp, 1);
  if (unlikely(!call_result)) __PYX_ERR(0, 22758, __pyx_L1_error)
  __Pyx_GOTREF(call_result);
  /* Hand the owned reference straight through as the return value. */
  __pyx_r = call_result;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_temperature_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22773
 * 
 * 
 * cpdef device_set_power_management_limit(intptr_t device, unsigned int limit):             # <<<<<<<<<<<<<<
 *     """Set new power limit of this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_359device_set_power_management_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C implementation of cpdef device_set_power_management_limit(device, limit).
 * Releases the GIL, calls the NVML driver entry
 * nvmlDeviceSetPowerManagementLimit with the raw handle and limit value,
 * re-acquires the GIL, and raises via check_status() if the call did not
 * succeed.  Returns Py_None on success, NULL (with exception set) on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit(intptr_t __pyx_v_device, unsigned int __pyx_v_limit, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_power_management_limit", 0);

  /* "cuda/bindings/_nvml.pyx":22782
 *     .. seealso:: `nvmlDeviceSetPowerManagementLimit`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPowerManagementLimit(<Device>device, limit)
 *     check_status(__status__)
 */
  {
      /* Release the GIL for the duration of the driver call (cpdef `with nogil`). */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22783
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetPowerManagementLimit(<Device>device, limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel only counts as a failure when a Python
         * exception is actually pending (checked with the GIL re-taken). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_limit); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22783, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22782
 *     .. seealso:: `nvmlDeviceSetPowerManagementLimit`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPowerManagementLimit(<Device>device, limit)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22784
 *     with nogil:
 *         __status__ = nvmlDeviceSetPowerManagementLimit(<Device>device, limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Map a non-success NVML status to a raised Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22784, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22773
 * 
 * 
 * cpdef device_set_power_management_limit(intptr_t device, unsigned int limit):             # <<<<<<<<<<<<<<
 *     """Set new power limit of this device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_management_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the CPython entry point for
   device_set_power_management_limit.  The parameter list is selected at
   compile time: METH_FASTCALL (C array + count) vs. classic tuple/dict. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_359device_set_power_management_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_358device_set_power_management_limit, "device_set_power_management_limit(intptr_t device, unsigned int limit)\n\nSet new power limit of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    limit (unsigned int): Power management limit in milliwatts to set.\n\n.. seealso:: `nvmlDeviceSetPowerManagementLimit`");
/* Method-table entry binding the wrapper and docstring under the Python name
   "device_set_power_management_limit". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_359device_set_power_management_limit = {"device_set_power_management_limit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_359device_set_power_management_limit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_358device_set_power_management_limit};
/* Python wrapper: unpacks (device, limit) from positional/keyword arguments,
   converts them to C types and forwards to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_359device_set_power_management_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_limit;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_power_management_limit (wrapper)", 0);
  /* Tuple calling convention only: recover the positional-argument count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_limit,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22773, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals first via the
         fall-through switch, then merge keywords and check both required
         arguments ended up bound. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22773, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_power_management_limit", 0) < (0)) __PYX_ERR(0, 22773, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_power_management_limit", 1, 2, 2, i); __PYX_ERR(0, 22773, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional call with exactly 2 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22773, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22773, __pyx_L3_error)
    }
    /* NOTE(review): device is converted with PyLong_AsSsize_t but stored in an
       intptr_t — assumes Py_ssize_t and intptr_t share a width on supported
       platforms (holds in practice; confirm for exotic targets). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22773, __pyx_L3_error)
    __pyx_v_limit = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_limit == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22773, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_power_management_limit", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22773, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_management_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_358device_set_power_management_limit(__pyx_self, __pyx_v_device, __pyx_v_limit);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation glue for the Python-level function: dispatches straight to
   the cpdef C function (last arg 1 = skip Python-level dispatch). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_358device_set_power_management_limit(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_limit) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_power_management_limit", 0);
  __Pyx_XDECREF(__pyx_r);
  /* The C impl returns a new reference (None on success) or NULL on error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit(__pyx_v_device, __pyx_v_limit, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_management_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22787
 * 
 * 
 * cpdef device_set_gpu_operation_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Sets new GOM. See ``nvmlGpuOperationMode_t`` for details.
 * 
*/

/* Forward declaration of the Python wrapper for device_set_gpu_operation_mode. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_361device_set_gpu_operation_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_gpu_operation_mode: calls
   nvmlDeviceSetGpuOperationMode with the GIL released, then raises via
   check_status() on a non-success code.  Returns None on success. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_operation_mode(intptr_t __pyx_v_device, int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_gpu_operation_mode", 0);

  /* "cuda/bindings/_nvml.pyx":22796
 *     .. seealso:: `nvmlDeviceSetGpuOperationMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetGpuOperationMode(<Device>device, <_GpuOperationMode>mode)
 *     check_status(__status__)
*/
  {
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22797
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetGpuOperationMode(<Device>device, <_GpuOperationMode>mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* NOTE(review): the sentinel status combined with a pending Python
           exception appears to signal a symbol-loading failure inside the
           cy_nvml shim — confirm against cy_nvml's implementation. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuOperationMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__GpuOperationMode)__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22797, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22796
 *     .. seealso:: `nvmlDeviceSetGpuOperationMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetGpuOperationMode(<Device>device, <_GpuOperationMode>mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also reacquires the GIL before jumping to L1. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22798
 *     with nogil:
 *         __status__ = nvmlDeviceSetGpuOperationMode(<Device>device, <_GpuOperationMode>mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status() returns 1 (with an exception set) for error codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22798, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22787
 * 
 * 
 * cpdef device_set_gpu_operation_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Sets new GOM. See ``nvmlGpuOperationMode_t`` for details.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_gpu_operation_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for device_set_gpu_operation_mode: unpacks
   (device, mode), converts to C types, forwards to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_361device_set_gpu_operation_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_360device_set_gpu_operation_mode, "device_set_gpu_operation_mode(intptr_t device, int mode)\n\nSets new GOM. See ``nvmlGpuOperationMode_t`` for details.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    mode (GpuOperationMode): Target GOM.\n\n.. seealso:: `nvmlDeviceSetGpuOperationMode`");
/* Method-table entry binding the wrapper under the Python name
   "device_set_gpu_operation_mode". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_361device_set_gpu_operation_mode = {"device_set_gpu_operation_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_361device_set_gpu_operation_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_360device_set_gpu_operation_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_361device_set_gpu_operation_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_gpu_operation_mode (wrapper)", 0);
  /* Tuple calling convention only: recover the positional-argument count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22787, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: positionals first, then keywords,
         then verify both required arguments are bound. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_gpu_operation_mode", 0) < (0)) __PYX_ERR(0, 22787, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_gpu_operation_mode", 1, 2, 2, i); __PYX_ERR(0, 22787, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional call with exactly 2 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22787, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22787, __pyx_L3_error)
    }
    /* NOTE(review): device conversion assumes Py_ssize_t width == intptr_t. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22787, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22787, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_gpu_operation_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22787, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_gpu_operation_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_360device_set_gpu_operation_mode(__pyx_self, __pyx_v_device, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation glue for the Python-level function: dispatches straight to
   the cpdef C function (last arg 1 = skip Python-level dispatch). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_360device_set_gpu_operation_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_gpu_operation_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* The C impl returns a new reference (None on success) or NULL on error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_operation_mode(__pyx_v_device, __pyx_v_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_gpu_operation_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22801
 * 
 * 
 * cpdef device_set_api_restriction(intptr_t device, int api_type, int is_restricted):             # <<<<<<<<<<<<<<
 *     """Changes the root/admin restructions on certain APIs. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See ``nvmlDeviceGetAPIRestriction`` to query the current restriction settings.
 * 
*/

/* Forward declaration of the Python wrapper for device_set_api_restriction. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_363device_set_api_restriction(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_api_restriction: calls
   nvmlDeviceSetAPIRestriction with the GIL released, then raises via
   check_status() on a non-success code.  Returns None on success. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_api_restriction(intptr_t __pyx_v_device, int __pyx_v_api_type, int __pyx_v_is_restricted, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_api_restriction", 0);

  /* "cuda/bindings/_nvml.pyx":22811
 *     .. seealso:: `nvmlDeviceSetAPIRestriction`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, <_EnableState>is_restricted)
 *     check_status(__status__)
*/
  {
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22812
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, <_EnableState>is_restricted)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* NOTE(review): the sentinel status plus a pending Python exception
           appears to signal a symbol-loading failure in the cy_nvml shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAPIRestriction(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__RestrictedAPI)__pyx_v_api_type), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_is_restricted)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22812, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22811
 *     .. seealso:: `nvmlDeviceSetAPIRestriction`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, <_EnableState>is_restricted)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also reacquires the GIL before jumping to L1. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22813
 *     with nogil:
 *         __status__ = nvmlDeviceSetAPIRestriction(<Device>device, <_RestrictedAPI>api_type, <_EnableState>is_restricted)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status() returns 1 (with an exception set) for error codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22813, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22801
 * 
 * 
 * cpdef device_set_api_restriction(intptr_t device, int api_type, int is_restricted):             # <<<<<<<<<<<<<<
 *     """Changes the root/admin restructions on certain APIs. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See ``nvmlDeviceGetAPIRestriction`` to query the current restriction settings.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_api_restriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for device_set_api_restriction: unpacks
   (device, api_type, is_restricted), converts to C types, forwards to the
   impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_363device_set_api_restriction(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_362device_set_api_restriction, "device_set_api_restriction(intptr_t device, int api_type, int is_restricted)\n\nChanges the root/admin restructions on certain APIs. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See ``nvmlDeviceGetAPIRestriction`` to query the current restriction settings.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    api_type (RestrictedAPI): Target API type for this operation.\n    is_restricted (EnableState): The target restriction.\n\n.. seealso:: `nvmlDeviceSetAPIRestriction`");
/* Method-table entry binding the wrapper under the Python name
   "device_set_api_restriction". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_363device_set_api_restriction = {"device_set_api_restriction", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_363device_set_api_restriction, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_362device_set_api_restriction};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_363device_set_api_restriction(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_api_type;
  int __pyx_v_is_restricted;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_api_restriction (wrapper)", 0);
  /* Tuple calling convention only: recover the positional-argument count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_api_type,&__pyx_mstate_global->__pyx_n_u_is_restricted,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22801, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: positionals first, then keywords,
         then verify all three required arguments are bound. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22801, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22801, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22801, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_api_restriction", 0) < (0)) __PYX_ERR(0, 22801, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_api_restriction", 1, 3, 3, i); __PYX_ERR(0, 22801, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional call with exactly 3 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22801, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22801, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22801, __pyx_L3_error)
    }
    /* NOTE(review): device conversion assumes Py_ssize_t width == intptr_t. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22801, __pyx_L3_error)
    __pyx_v_api_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_api_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22801, __pyx_L3_error)
    __pyx_v_is_restricted = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_is_restricted == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22801, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_api_restriction", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22801, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected in values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_api_restriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_362device_set_api_restriction(__pyx_self, __pyx_v_device, __pyx_v_api_type, __pyx_v_is_restricted);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation glue for the Python-level function: dispatches straight to
   the cpdef C function (last arg 1 = skip Python-level dispatch). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_362device_set_api_restriction(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_api_type, int __pyx_v_is_restricted) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_api_restriction", 0);
  __Pyx_XDECREF(__pyx_r);
  /* The C impl returns a new reference (None on success) or NULL on error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_api_restriction(__pyx_v_device, __pyx_v_api_type, __pyx_v_is_restricted, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_api_restriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22816
 * 
 * 
 * cpdef device_set_fan_speed_v2(intptr_t device, unsigned int fan, unsigned int speed):             # <<<<<<<<<<<<<<
 *     """Sets the speed of a specified fan.
 * 
*/

/* Forward declaration of the Python wrapper for device_set_fan_speed_v2. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_365device_set_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_fan_speed_v2: calls
   nvmlDeviceSetFanSpeed_v2 with the GIL released, then raises via
   check_status() on a non-success code.  Returns None on success. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_fan_speed_v2(intptr_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_speed, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_fan_speed_v2", 0);

  /* "cuda/bindings/_nvml.pyx":22826
 *     .. seealso:: `nvmlDeviceSetFanSpeed_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetFanSpeed_v2(<Device>device, fan, speed)
 *     check_status(__status__)
*/
  {
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22827
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetFanSpeed_v2(<Device>device, fan, speed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* NOTE(review): the sentinel status plus a pending Python exception
           appears to signal a symbol-loading failure in the cy_nvml shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanSpeed_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_fan, __pyx_v_speed); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22827, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22826
 *     .. seealso:: `nvmlDeviceSetFanSpeed_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetFanSpeed_v2(<Device>device, fan, speed)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also reacquires the GIL before jumping to L1. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22828
 *     with nogil:
 *         __status__ = nvmlDeviceSetFanSpeed_v2(<Device>device, fan, speed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status() returns 1 (with an exception set) for error codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22828, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22816
 * 
 * 
 * cpdef device_set_fan_speed_v2(intptr_t device, unsigned int fan, unsigned int speed):             # <<<<<<<<<<<<<<
 *     """Sets the speed of a specified fan.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_365device_set_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_364device_set_fan_speed_v2, "device_set_fan_speed_v2(intptr_t device, unsigned int fan, unsigned int speed)\n\nSets the speed of a specified fan.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    fan (unsigned int): The index of the fan, starting at zero.\n    speed (unsigned int): The target speed of the fan [0-100] in % of max speed.\n\n.. seealso:: `nvmlDeviceSetFanSpeed_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_365device_set_fan_speed_v2 = {"device_set_fan_speed_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_365device_set_fan_speed_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_364device_set_fan_speed_v2};
/* CPython-visible wrapper for `device_set_fan_speed_v2`: unpacks the three
 * arguments (device, fan, speed) from positional and/or keyword form,
 * converts them to C values, and dispatches to
 * __pyx_pf_4cuda_8bindings_5_nvml_364device_set_fan_speed_v2.
 * Returns NULL with a Python exception set on any parsing/conversion error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_365device_set_fan_speed_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_fan;
  unsigned int __pyx_v_speed;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_fan_speed_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_fan,&__pyx_mstate_global->__pyx_n_u_speed,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` expands to `(!!(len)) < 0`, which is
     * always false, so a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22816, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22816, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_fan_speed_v2", 0) < (0)) __PYX_ERR(0, 22816, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_fan_speed_v2", 1, 3, 3, i); __PYX_ERR(0, 22816, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22816, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22816, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22816, __pyx_L3_error)
    }
    /* Convert the borrowed Python objects to C values; -1 alone is ambiguous,
     * so PyErr_Occurred() disambiguates a real error from a legal value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22816, __pyx_L3_error)
    __pyx_v_fan = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_fan == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22816, __pyx_L3_error)
    __pyx_v_speed = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_speed == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22816, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_fan_speed_v2", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22816, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_364device_set_fan_speed_v2(__pyx_self, __pyx_v_device, __pyx_v_fan, __pyx_v_speed);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for the `cpdef` function `device_set_fan_speed_v2`: forwards
 * the already-converted C arguments to the C-level implementation
 * __pyx_f_4cuda_8bindings_5_nvml_device_set_fan_speed_v2 with
 * __pyx_skip_dispatch=1 and propagates its result or exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_364device_set_fan_speed_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_speed) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_fan_speed_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; a NULL result means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_fan_speed_v2(__pyx_v_device, __pyx_v_fan, __pyx_v_speed, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_fan_speed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22831
 * 
 * 
 * cpdef device_set_accounting_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Enables or disables per process accounting.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_367device_set_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `device_set_accounting_mode`: releases the GIL,
 * calls nvmlDeviceSetAccountingMode(device, mode), then feeds the returned
 * nvmlReturn_t through check_status(), which raises on error.
 * Returns Py_None on success, NULL (0) with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_accounting_mode(intptr_t __pyx_v_device, int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_accounting_mode", 0);

  /* "cuda/bindings/_nvml.pyx":22840
 *     .. seealso:: `nvmlDeviceSetAccountingMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetAccountingMode(<Device>device, <_EnableState>mode)
 *     check_status(__status__)
 */
  /* `with nogil` block: the GIL is released around the NVML call and
   * restored on both the normal and the error path below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22841
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetAccountingMode(<Device>device, <_EnableState>mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAccountingMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22841, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22840
 *     .. seealso:: `nvmlDeviceSetAccountingMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetAccountingMode(<Device>device, <_EnableState>mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22842
 *     with nogil:
 *         __status__ = nvmlDeviceSetAccountingMode(<Device>device, <_EnableState>mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22842, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22831
 * 
 * 
 * cpdef device_set_accounting_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Enables or disables per process accounting.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_367device_set_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_366device_set_accounting_mode, "device_set_accounting_mode(intptr_t device, int mode)\n\nEnables or disables per process accounting.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    mode (EnableState): The target accounting mode.\n\n.. seealso:: `nvmlDeviceSetAccountingMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_367device_set_accounting_mode = {"device_set_accounting_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_367device_set_accounting_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_366device_set_accounting_mode};
/* CPython-visible wrapper for `device_set_accounting_mode`: unpacks
 * (device, mode) from positional and/or keyword arguments, converts them to
 * C values, and dispatches to
 * __pyx_pf_4cuda_8bindings_5_nvml_366device_set_accounting_mode.
 * Returns NULL with a Python exception set on any parsing/conversion error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_367device_set_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_accounting_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` expands to `(!!(len)) < 0`, which is
     * always false, so a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22831, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22831, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22831, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_accounting_mode", 0) < (0)) __PYX_ERR(0, 22831, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_accounting_mode", 1, 2, 2, i); __PYX_ERR(0, 22831, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22831, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22831, __pyx_L3_error)
    }
    /* Convert to C values; -1 alone is ambiguous, so PyErr_Occurred()
     * disambiguates a real error from a legal value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22831, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22831, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_accounting_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22831, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_366device_set_accounting_mode(__pyx_self, __pyx_v_device, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for the `cpdef` function `device_set_accounting_mode`:
 * forwards the already-converted C arguments to the C-level implementation
 * __pyx_f_4cuda_8bindings_5_nvml_device_set_accounting_mode with
 * __pyx_skip_dispatch=1 and propagates its result or exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_366device_set_accounting_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_accounting_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; a NULL result means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_accounting_mode(__pyx_v_device, __pyx_v_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22831, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22845
 * 
 * 
 * cpdef device_clear_accounting_pids(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Clears accounting information about all processes that have already terminated.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_369device_clear_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `device_clear_accounting_pids`: releases the GIL,
 * calls nvmlDeviceClearAccountingPids(device), then feeds the returned
 * nvmlReturn_t through check_status(), which raises on error.
 * Returns Py_None on success, NULL (0) with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_accounting_pids(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_accounting_pids", 0);

  /* "cuda/bindings/_nvml.pyx":22853
 *     .. seealso:: `nvmlDeviceClearAccountingPids`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearAccountingPids(<Device>device)
 *     check_status(__status__)
 */
  /* `with nogil` block: the GIL is released around the NVML call and
   * restored on both the normal and the error path below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22854
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceClearAccountingPids(<Device>device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearAccountingPids(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22854, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22853
 *     .. seealso:: `nvmlDeviceClearAccountingPids`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearAccountingPids(<Device>device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22855
 *     with nogil:
 *         __status__ = nvmlDeviceClearAccountingPids(<Device>device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22855, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22845
 * 
 * 
 * cpdef device_clear_accounting_pids(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Clears accounting information about all processes that have already terminated.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_369device_clear_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_368device_clear_accounting_pids, "device_clear_accounting_pids(intptr_t device)\n\nClears accounting information about all processes that have already terminated.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceClearAccountingPids`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_369device_clear_accounting_pids = {"device_clear_accounting_pids", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_369device_clear_accounting_pids, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_368device_clear_accounting_pids};
/* CPython-visible wrapper for `device_clear_accounting_pids`: unpacks the
 * single (device) argument from positional or keyword form, converts it to a
 * C value, and dispatches to
 * __pyx_pf_4cuda_8bindings_5_nvml_368device_clear_accounting_pids.
 * Returns NULL with a Python exception set on any parsing/conversion error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_369device_clear_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_clear_accounting_pids (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` expands to `(!!(len)) < 0`, which is
     * always false, so a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22845, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22845, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_clear_accounting_pids", 0) < (0)) __PYX_ERR(0, 22845, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_clear_accounting_pids", 1, 1, 1, i); __PYX_ERR(0, 22845, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22845, __pyx_L3_error)
    }
    /* Convert to a C value; -1 alone is ambiguous, so PyErr_Occurred()
     * disambiguates a real error from a legal value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22845, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_clear_accounting_pids", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 22845, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_368device_clear_accounting_pids(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatch stub for the `cpdef` function `device_clear_accounting_pids`:
 * forwards the already-converted C argument to the C-level implementation
 * __pyx_f_4cuda_8bindings_5_nvml_device_clear_accounting_pids with
 * __pyx_skip_dispatch=1 and propagates its result or exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_368device_clear_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_accounting_pids", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; a NULL result means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_clear_accounting_pids(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22858
 * 
 * 
 * cpdef device_set_power_management_limit_v2(intptr_t device, intptr_t power_value):             # <<<<<<<<<<<<<<
 *     """Set new power limit of this device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_371device_set_power_management_limit_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `device_set_power_management_limit_v2`: releases
 * the GIL and calls nvmlDeviceSetPowerManagementLimit_v2.  Note that
 * `power_value` is an integer reinterpreted as a `nvmlPowerValue_v2_t *`
 * pointer (the caller passes the struct's address), then the returned
 * nvmlReturn_t is fed through check_status(), which raises on error.
 * Returns Py_None on success, NULL (0) with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit_v2(intptr_t __pyx_v_device, intptr_t __pyx_v_power_value, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_power_management_limit_v2", 0);

  /* "cuda/bindings/_nvml.pyx":22867
 *     .. seealso:: `nvmlDeviceSetPowerManagementLimit_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPowerManagementLimit_v2(<Device>device, <nvmlPowerValue_v2_t*>power_value)
 *     check_status(__status__)
 */
  /* `with nogil` block: the GIL is released around the NVML call and
   * restored on both the normal and the error path below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22868
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetPowerManagementLimit_v2(<Device>device, <nvmlPowerValue_v2_t*>power_value)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlPowerValue_v2_t *)__pyx_v_power_value)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22868, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22867
 *     .. seealso:: `nvmlDeviceSetPowerManagementLimit_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPowerManagementLimit_v2(<Device>device, <nvmlPowerValue_v2_t*>power_value)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22869
 *     with nogil:
 *         __status__ = nvmlDeviceSetPowerManagementLimit_v2(<Device>device, <nvmlPowerValue_v2_t*>power_value)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22869, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22858
 * 
 * 
 * cpdef device_set_power_management_limit_v2(intptr_t device, intptr_t power_value):             # <<<<<<<<<<<<<<
 *     """Set new power limit of this device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_management_limit_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_371device_set_power_management_limit_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_370device_set_power_management_limit_v2, "device_set_power_management_limit_v2(intptr_t device, intptr_t power_value)\n\nSet new power limit of this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    power_value (intptr_t): Power management limit in milliwatts to set.\n\n.. seealso:: `nvmlDeviceSetPowerManagementLimit_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_371device_set_power_management_limit_v2 = {"device_set_power_management_limit_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_371device_set_power_management_limit_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_370device_set_power_management_limit_v2};
/* CPython-visible wrapper for `device_set_power_management_limit_v2`: unpacks
 * (device, power_value) from positional and/or keyword arguments, converts
 * them to C values, and dispatches to
 * __pyx_pf_4cuda_8bindings_5_nvml_370device_set_power_management_limit_v2.
 * Returns NULL with a Python exception set on any parsing/conversion error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_371device_set_power_management_limit_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_power_value;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_power_management_limit_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_power_value,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` expands to `(!!(len)) < 0`, which is
     * always false, so a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22858, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22858, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_power_management_limit_v2", 0) < (0)) __PYX_ERR(0, 22858, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_power_management_limit_v2", 1, 2, 2, i); __PYX_ERR(0, 22858, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22858, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22858, __pyx_L3_error)
    }
    /* Convert to C values; -1 alone is ambiguous, so PyErr_Occurred()
     * disambiguates a real error from a legal value. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22858, __pyx_L3_error)
    __pyx_v_power_value = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_power_value == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22858, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_power_management_limit_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22858, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_management_limit_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_370device_set_power_management_limit_v2(__pyx_self, __pyx_v_device, __pyx_v_power_value);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for Python-level device_set_power_management_limit_v2:
 * forwards the already-converted C arguments to the cpdef C function with
 * skip_dispatch=1 (no Python-level re-dispatch) and returns its PyObject*
 * result, or NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_370device_set_power_management_limit_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_power_value) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_power_management_limit_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL from the cpdef implementation signals a pending Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit_v2(__pyx_v_device, __pyx_v_power_value, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22858, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_management_limit_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22872
 * 
 * 
 * cpdef int device_get_nvlink_state(intptr_t device, unsigned int link) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the state of the device's NvLink for the link specified.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_373device_get_nvlink_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of `cpdef int device_get_nvlink_state(...) except? -1`:
 * releases the GIL, calls nvmlDeviceGetNvLinkState, re-acquires the GIL,
 * raises a Python exception via check_status on a bad status, and returns
 * the _EnableState value as int. A return of -1 may indicate an error
 * (callers must also check PyErr_Occurred, per the `except? -1` contract). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_state(intptr_t __pyx_v_device, unsigned int __pyx_v_link, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_is_active;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22885
 *     """
 *     cdef _EnableState is_active
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkState(<Device>device, link, &is_active)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22886
 *     cdef _EnableState is_active
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkState(<Device>device, link, &is_active)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>is_active
 */
        /* The sentinel status flags a failure to load the NVML symbol itself;
         * in that case a Python exception was set by the loader shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link, (&__pyx_v_is_active)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22886, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22885
 *     """
 *     cdef _EnableState is_active
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkState(<Device>device, link, &is_active)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path must also restore the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22887
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkState(<Device>device, link, &is_active)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>is_active
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22887, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22888
 *         __status__ = nvmlDeviceGetNvLinkState(<Device>device, link, &is_active)
 *     check_status(__status__)
 *     return <int>is_active             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_is_active);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22872
 * 
 * 
 * cpdef int device_get_nvlink_state(intptr_t device, unsigned int link) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the state of the device's NvLink for the link specified.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_373device_get_nvlink_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_372device_get_nvlink_state, "device_get_nvlink_state(intptr_t device, unsigned int link) -> int\n\nRetrieves the state of the device's NvLink for the link specified.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    link (unsigned int): Specifies the NvLink link to be queried.\n\nReturns:\n    int: ``nvmlEnableState_t`` where NVML_FEATURE_ENABLED indicates that the link is active and NVML_FEATURE_DISABLED indicates it is inactive.\n\n.. seealso:: `nvmlDeviceGetNvLinkState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_373device_get_nvlink_state = {"device_get_nvlink_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_373device_get_nvlink_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_372device_get_nvlink_state};
/* Python wrapper for device_get_nvlink_state: unpacks the (device, link)
 * arguments from positional/keyword form, converts them to C types, and
 * dispatches to the implementation shim. Returns a new reference, or NULL
 * with an exception set.
 * FIX: the keyword-count error check read `if (unlikely(__pyx_kwds_len) < 0)`,
 * which compares the 0/1 result of unlikely() (__builtin_expect(!!(x),0))
 * against 0 and therefore can never fire; the `< 0` belongs inside
 * unlikely() so a negative count from __Pyx_NumKwargs_FASTCALL is detected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_373device_get_nvlink_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* `< 0` moved inside unlikely(): the macro yields 0/1, so the old form
     * `unlikely(x) < 0` was always false and the error path was dead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22872, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: some keyword arguments present. Collect positionals,
       * then let __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22872, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22872, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_state", 0) < (0)) __PYX_ERR(0, 22872, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_state", 1, 2, 2, i); __PYX_ERR(0, 22872, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22872, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22872, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22872, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22872, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_state", 1, 2, 2, __pyx_nargs);  __PYX_ERR(0, 22872, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_372device_get_nvlink_state(__pyx_self, __pyx_v_device, __pyx_v_link);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for Python-level device_get_nvlink_state: calls the
 * cpdef C function (skip_dispatch=1), boxes the resulting int into a Python
 * int, and returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_372device_get_nvlink_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is an ambiguous sentinel (`except? -1`): only an error when a Python
   * exception is also pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_state(__pyx_v_device, __pyx_v_link, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22872, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22872, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22891
 * 
 * 
 * cpdef unsigned int device_get_nvlink_version(intptr_t device, unsigned int link) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the device's NvLink for the link specified.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_375device_get_nvlink_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of `cpdef unsigned int device_get_nvlink_version(...) except? 0`:
 * releases the GIL, calls nvmlDeviceGetNvLinkVersion, re-acquires the GIL,
 * raises via check_status on a bad status, and returns the queried version.
 * A return of 0 may indicate an error (callers must also check
 * PyErr_Occurred, per the `except? 0` contract). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_version(intptr_t __pyx_v_device, unsigned int __pyx_v_link, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_version;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22904
 *     """
 *     cdef unsigned int version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkVersion(<Device>device, link, &version)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22905
 *     cdef unsigned int version
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkVersion(<Device>device, link, &version)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return version
 */
        /* The sentinel status flags a failure to load the NVML symbol itself;
         * in that case a Python exception was set by the loader shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkVersion(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link, (&__pyx_v_version)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22905, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22904
 *     """
 *     cdef unsigned int version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkVersion(<Device>device, link, &version)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path must also restore the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22906
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkVersion(<Device>device, link, &version)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return version
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22906, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22907
 *         __status__ = nvmlDeviceGetNvLinkVersion(<Device>device, link, &version)
 *     check_status(__status__)
 *     return version             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_version;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22891
 * 
 * 
 * cpdef unsigned int device_get_nvlink_version(intptr_t device, unsigned int link) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the device's NvLink for the link specified.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_375device_get_nvlink_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_374device_get_nvlink_version, "device_get_nvlink_version(intptr_t device, unsigned int link) -> unsigned int\n\nRetrieves the version of the device's NvLink for the link specified.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    link (unsigned int): Specifies the NvLink link to be queried.\n\nReturns:\n    unsigned int: Requested NvLink version from nvmlNvlinkVersion_t.\n\n.. seealso:: `nvmlDeviceGetNvLinkVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_375device_get_nvlink_version = {"device_get_nvlink_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_375device_get_nvlink_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_374device_get_nvlink_version};
/* Python wrapper for device_get_nvlink_version: unpacks the (device, link)
 * arguments from positional/keyword form, converts them to C types, and
 * dispatches to the implementation shim. Returns a new reference, or NULL
 * with an exception set.
 * FIX: the keyword-count error check read `if (unlikely(__pyx_kwds_len) < 0)`,
 * which compares the 0/1 result of unlikely() (__builtin_expect(!!(x),0))
 * against 0 and therefore can never fire; the `< 0` belongs inside
 * unlikely() so a negative count from __Pyx_NumKwargs_FASTCALL is detected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_375device_get_nvlink_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* `< 0` moved inside unlikely(): the macro yields 0/1, so the old form
     * `unlikely(x) < 0` was always false and the error path was dead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22891, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: some keyword arguments present. Collect positionals,
       * then let __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22891, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22891, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_version", 0) < (0)) __PYX_ERR(0, 22891, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_version", 1, 2, 2, i); __PYX_ERR(0, 22891, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22891, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22891, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22891, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22891, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_version", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22891, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_374device_get_nvlink_version(__pyx_self, __pyx_v_device, __pyx_v_link);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for Python-level device_get_nvlink_version: calls the
 * cpdef C function (skip_dispatch=1), boxes the resulting unsigned int into
 * a Python int, and returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_374device_get_nvlink_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_version", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel (`except? 0`): only an error when a Python
   * exception is also pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_version(__pyx_v_device, __pyx_v_link, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22891, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22910
 * 
 * 
 * cpdef unsigned int device_get_nvlink_capability(intptr_t device, unsigned int link, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the requested capability from the device's NvLink for the link specified Please refer to the ``nvmlNvLinkCapability_t`` structure for the specific caps that can be queried The return value should be treated as a boolean.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_377device_get_nvlink_capability(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of `cpdef unsigned int device_get_nvlink_capability(...) except? 0`:
 * releases the GIL, calls nvmlDeviceGetNvLinkCapability with the capability
 * enum cast from int, re-acquires the GIL, raises via check_status on a bad
 * status, and returns the boolean-like capability result. A return of 0 may
 * indicate an error (callers must also check PyErr_Occurred, per the
 * `except? 0` contract). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_capability(intptr_t __pyx_v_device, unsigned int __pyx_v_link, int __pyx_v_capability, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_cap_result;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22924
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkCapability(<Device>device, link, <_NvLinkCapability>capability, &cap_result)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22925
 *     cdef unsigned int cap_result
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkCapability(<Device>device, link, <_NvLinkCapability>capability, &cap_result)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cap_result
 */
        /* The sentinel status flags a failure to load the NVML symbol itself;
         * in that case a Python exception was set by the loader shim. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkCapability(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link, ((__pyx_t_4cuda_8bindings_5_nvml__NvLinkCapability)__pyx_v_capability), (&__pyx_v_cap_result)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22925, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22924
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkCapability(<Device>device, link, <_NvLinkCapability>capability, &cap_result)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path must also restore the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22926
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkCapability(<Device>device, link, <_NvLinkCapability>capability, &cap_result)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cap_result
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22926, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22927
 *         __status__ = nvmlDeviceGetNvLinkCapability(<Device>device, link, <_NvLinkCapability>capability, &cap_result)
 *     check_status(__status__)
 *     return cap_result             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_cap_result;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22910
 * 
 * 
 * cpdef unsigned int device_get_nvlink_capability(intptr_t device, unsigned int link, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the requested capability from the device's NvLink for the link specified Please refer to the ``nvmlNvLinkCapability_t`` structure for the specific caps that can be queried The return value should be treated as a boolean.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_capability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_377device_get_nvlink_capability(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_376device_get_nvlink_capability, "device_get_nvlink_capability(intptr_t device, unsigned int link, int capability) -> unsigned int\n\nRetrieves the requested capability from the device's NvLink for the link specified Please refer to the ``nvmlNvLinkCapability_t`` structure for the specific caps that can be queried The return value should be treated as a boolean.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    link (unsigned int): Specifies the NvLink link to be queried.\n    capability (NvLinkCapability): Specifies the ``nvmlNvLinkCapability_t`` to be queried.\n\nReturns:\n    unsigned int: A boolean for the queried capability indicating that feature is available.\n\n.. seealso:: `nvmlDeviceGetNvLinkCapability`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_377device_get_nvlink_capability = {"device_get_nvlink_capability", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_377device_get_nvlink_capability, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_376device_get_nvlink_capability};
/* Python wrapper for device_get_nvlink_capability: unpacks the
 * (device, link, capability) arguments from positional/keyword form,
 * converts them to C types, and dispatches to the implementation shim.
 * Returns a new reference, or NULL with an exception set.
 * FIX: the keyword-count error check read `if (unlikely(__pyx_kwds_len) < 0)`,
 * which compares the 0/1 result of unlikely() (__builtin_expect(!!(x),0))
 * against 0 and therefore can never fire; the `< 0` belongs inside
 * unlikely() so a negative count from __Pyx_NumKwargs_FASTCALL is detected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_377device_get_nvlink_capability(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  int __pyx_v_capability;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_capability (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,&__pyx_mstate_global->__pyx_n_u_capability,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* `< 0` moved inside unlikely(): the macro yields 0/1, so the old form
     * `unlikely(x) < 0` was always false and the error path was dead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22910, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: some keyword arguments present. Collect positionals,
       * then let __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22910, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22910, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22910, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_capability", 0) < (0)) __PYX_ERR(0, 22910, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_capability", 1, 3, 3, i); __PYX_ERR(0, 22910, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22910, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22910, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22910, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22910, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22910, __pyx_L3_error)
    __pyx_v_capability = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_capability == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22910, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_capability", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22910, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_capability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_376device_get_nvlink_capability(__pyx_self, __pyx_v_device, __pyx_v_link, __pyx_v_capability);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for Python-level device_get_nvlink_capability: calls
 * the cpdef C function (skip_dispatch=1), boxes the resulting unsigned int
 * into a Python int, and returns a new reference, or NULL with an exception
 * set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_376device_get_nvlink_capability(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link, int __pyx_v_capability) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_capability", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel (`except? 0`): only an error when a Python
   * exception is also pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_capability(__pyx_v_device, __pyx_v_link, __pyx_v_capability, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 22910, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_capability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22930
 * 
 * 
 * cpdef object device_get_nvlink_remote_pci_info_v2(intptr_t device, unsigned int link):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI information for the remote node on a NvLink link Note: pciSubSystemId is not filled in this function and is indeterminate.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_379device_get_nvlink_remote_pci_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C implementation of device_get_nvlink_remote_pci_info_v2.
 * Steps: (1) instantiate a Python-level PciInfo wrapper object,
 * (2) obtain its internal nvmlPciInfo_t* via the vtable _get_ptr(),
 * (3) release the GIL and call nvmlDeviceGetNvLinkRemotePciInfo_v2 to fill
 * that struct in place, (4) reacquire the GIL, raise on a bad status via
 * check_status, and return the populated PciInfo object (new reference).
 * Returns NULL with a Python exception set on any failure path. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_pci_info_v2(intptr_t __pyx_v_device, unsigned int __pyx_v_link, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *__pyx_v_pci_py = 0;
  nvmlPciInfo_t *__pyx_v_pci;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_remote_pci_info_v2", 0);

  /* "cuda/bindings/_nvml.pyx":22942
 *     .. seealso:: `nvmlDeviceGetNvLinkRemotePciInfo_v2`
 *     """
 *     cdef PciInfo pci_py = PciInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:
 */
  /* Vectorcall of the PciInfo type with no arguments: PciInfo(). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22942, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pci_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":22943
 *     """
 *     cdef PciInfo pci_py = PciInfo()
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkRemotePciInfo_v2(<Device>device, link, pci)
 */
  /* _get_ptr() returns the address of the wrapper's backing struct as an
   * intptr_t; cast back to a typed pointer for the NVML call below. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo *)__pyx_v_pci_py->__pyx_vtab)->_get_ptr(__pyx_v_pci_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22943, __pyx_L1_error)
  __pyx_v_pci = ((nvmlPciInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":22944
 *     cdef PciInfo pci_py = PciInfo()
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkRemotePciInfo_v2(<Device>device, link, pci)
 *     check_status(__status__)
 */
  /* GIL released around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22945
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkRemotePciInfo_v2(<Device>device, link, pci)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pci_py
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the binding's sentinel for
         * "symbol could not be loaded"; only then is the GIL briefly
         * reacquired to test for a pending Python exception. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemotePciInfo_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link, __pyx_v_pci); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22945, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":22944
 *     cdef PciInfo pci_py = PciInfo()
 *     cdef nvmlPciInfo_t *pci = <nvmlPciInfo_t *><intptr_t>(pci_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkRemotePciInfo_v2(<Device>device, link, pci)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22946
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkRemotePciInfo_v2(<Device>device, link, pci)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pci_py
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 22946, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22947
 *         __status__ = nvmlDeviceGetNvLinkRemotePciInfo_v2(<Device>device, link, pci)
 *     check_status(__status__)
 *     return pci_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pci_py);
  __pyx_r = ((PyObject *)__pyx_v_pci_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22930
 * 
 * 
 * cpdef object device_get_nvlink_remote_pci_info_v2(intptr_t device, unsigned int link):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI information for the remote node on a NvLink link Note: pciSubSystemId is not filled in this function and is indeterminate.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_remote_pci_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pci_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_379device_get_nvlink_remote_pci_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_378device_get_nvlink_remote_pci_info_v2, "device_get_nvlink_remote_pci_info_v2(intptr_t device, unsigned int link)\n\nRetrieves the PCI information for the remote node on a NvLink link Note: pciSubSystemId is not filled in this function and is indeterminate.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    link (unsigned int): Specifies the NvLink link to be queried.\n\nReturns:\n    nvmlPciInfo_t: ``nvmlPciInfo_t`` of the remote node for the specified link.\n\n.. seealso:: `nvmlDeviceGetNvLinkRemotePciInfo_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_379device_get_nvlink_remote_pci_info_v2 = {"device_get_nvlink_remote_pci_info_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_379device_get_nvlink_remote_pci_info_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_378device_get_nvlink_remote_pci_info_v2};
/* Python-callable wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * device_get_nvlink_remote_pci_info_v2: unpacks the `device` and `link`
 * arguments from positional/keyword form, converts them to C types, and
 * forwards to the pf implementation. Returns NULL with an exception set on
 * bad arguments or downstream failure.
 * Fix vs. generated code: the keyword-count error check was
 * `unlikely(__pyx_kwds_len) < 0`, which is always false because unlikely()
 * collapses its operand to 0/1 (__builtin_expect(!!(x), 0)), silently
 * ignoring a negative (error) return from __Pyx_NumKwargs_FASTCALL. The
 * comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_379device_get_nvlink_remote_pci_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_remote_pci_info_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form
     * `unlikely(__pyx_kwds_len) < 0` was a dead check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22930, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect whatever positionals were
       * given, then let __Pyx_ParseKeywords fill the rest from kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22930, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22930, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_remote_pci_info_v2", 0) < (0)) __PYX_ERR(0, 22930, __pyx_L3_error)
      /* Every required argument must have been supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_remote_pci_info_v2", 1, 2, 2, i); __PYX_ERR(0, 22930, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22930, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22930, __pyx_L3_error)
    }
    /* Convert the Python objects to their C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22930, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22930, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_remote_pci_info_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22930, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_remote_pci_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_378device_get_nvlink_remote_pci_info_v2(__pyx_self, __pyx_v_device, __pyx_v_link);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for device_get_nvlink_remote_pci_info_v2:
 * forwards the converted C arguments to the cpdef C entry point
 * (__pyx_skip_dispatch=1), which already returns a new PyObject* reference
 * (a PciInfo instance) or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_378device_get_nvlink_remote_pci_info_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_remote_pci_info_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_pci_info_v2(__pyx_v_device, __pyx_v_link, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22930, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_remote_pci_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22950
 * 
 * 
 * cpdef unsigned long long device_get_nvlink_error_counter(intptr_t device, unsigned int link, int counter) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the specified error counter value Please refer to ``nvmlNvLinkErrorCounter_t`` for error counters that are available.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_381device_get_nvlink_error_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C implementation of device_get_nvlink_error_counter: releases the
 * GIL, calls nvmlDeviceGetNvLinkErrorCounter to fill a local counter value,
 * then raises via check_status on a non-success code and returns the
 * counter. Uses the `except? 0` convention: a return of 0 is an error only
 * when a Python exception is pending. No RefNanny here — the function
 * creates no Python objects on the success path. */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_error_counter(intptr_t __pyx_v_device, unsigned int __pyx_v_link, int __pyx_v_counter, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_counter_value;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22964
 *     """
 *     cdef unsigned long long counter_value
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkErrorCounter(<Device>device, link, <_NvLinkErrorCounter>counter, &counter_value)
 *     check_status(__status__)
 */
  /* GIL released around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22965
 *     cdef unsigned long long counter_value
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkErrorCounter(<Device>device, link, <_NvLinkErrorCounter>counter, &counter_value)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return counter_value
 */
        /* The loading-error sentinel triggers a GIL-guarded exception probe. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkErrorCounter(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link, ((__pyx_t_4cuda_8bindings_5_nvml__NvLinkErrorCounter)__pyx_v_counter), (&__pyx_v_counter_value)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22965, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22964
 *     """
 *     cdef unsigned long long counter_value
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkErrorCounter(<Device>device, link, <_NvLinkErrorCounter>counter, &counter_value)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22966
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkErrorCounter(<Device>device, link, <_NvLinkErrorCounter>counter, &counter_value)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return counter_value
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22966, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22967
 *         __status__ = nvmlDeviceGetNvLinkErrorCounter(<Device>device, link, <_NvLinkErrorCounter>counter, &counter_value)
 *     check_status(__status__)
 *     return counter_value             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_counter_value;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22950
 * 
 * 
 * cpdef unsigned long long device_get_nvlink_error_counter(intptr_t device, unsigned int link, int counter) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the specified error counter value Please refer to ``nvmlNvLinkErrorCounter_t`` for error counters that are available.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_error_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_381device_get_nvlink_error_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_380device_get_nvlink_error_counter, "device_get_nvlink_error_counter(intptr_t device, unsigned int link, int counter) -> unsigned long long\n\nRetrieves the specified error counter value Please refer to ``nvmlNvLinkErrorCounter_t`` for error counters that are available.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    link (unsigned int): Specifies the NvLink link to be queried.\n    counter (NvLinkErrorCounter): Specifies the NvLink counter to be queried.\n\nReturns:\n    unsigned long long: Returned counter value.\n\n.. seealso:: `nvmlDeviceGetNvLinkErrorCounter`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_381device_get_nvlink_error_counter = {"device_get_nvlink_error_counter", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_381device_get_nvlink_error_counter, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_380device_get_nvlink_error_counter};
/* Python-callable wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * device_get_nvlink_error_counter: unpacks `device`, `link`, and `counter`
 * from positional/keyword form, converts them to C types, and forwards to
 * the pf implementation. Returns NULL with an exception set on bad
 * arguments or downstream failure.
 * Fix vs. generated code: the keyword-count error check was
 * `unlikely(__pyx_kwds_len) < 0`, which is always false because unlikely()
 * collapses its operand to 0/1 (__builtin_expect(!!(x), 0)), silently
 * ignoring a negative (error) return from __Pyx_NumKwargs_FASTCALL. The
 * comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_381device_get_nvlink_error_counter(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  int __pyx_v_counter;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_error_counter (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,&__pyx_mstate_global->__pyx_n_u_counter,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form
     * `unlikely(__pyx_kwds_len) < 0` was a dead check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22950, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect whatever positionals were
       * given, then let __Pyx_ParseKeywords fill the rest from kwargs. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22950, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22950, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22950, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_error_counter", 0) < (0)) __PYX_ERR(0, 22950, __pyx_L3_error)
      /* Every required argument must have been supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_error_counter", 1, 3, 3, i); __PYX_ERR(0, 22950, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22950, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22950, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22950, __pyx_L3_error)
    }
    /* Convert the Python objects to their C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22950, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22950, __pyx_L3_error)
    __pyx_v_counter = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_counter == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22950, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_error_counter", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22950, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_error_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_380device_get_nvlink_error_counter(__pyx_self, __pyx_v_device, __pyx_v_link, __pyx_v_counter);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for device_get_nvlink_error_counter:
 * forwards the converted C arguments to the cpdef C entry point
 * (__pyx_skip_dispatch=1) and boxes the unsigned long long result as a
 * Python int. The C function uses the `except? 0` convention, so a return
 * of 0 is an error only when PyErr_Occurred() is also set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_380device_get_nvlink_error_counter(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link, int __pyx_v_counter) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_error_counter", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 alone is ambiguous under `except? 0`; PyErr_Occurred() disambiguates. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_error_counter(__pyx_v_device, __pyx_v_link, __pyx_v_counter, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 22950, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22950, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_error_counter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22970
 * 
 * 
 * cpdef device_reset_nvlink_error_counters(intptr_t device, unsigned int link):             # <<<<<<<<<<<<<<
 *     """Resets all error counters to zero Please refer to ``nvmlNvLinkErrorCounter_t`` for the list of error counters that are reset.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_383device_reset_nvlink_error_counters(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C implementation of device_reset_nvlink_error_counters: releases
 * the GIL, calls nvmlDeviceResetNvLinkErrorCounters, reacquires the GIL,
 * raises via check_status on a non-success code, and returns None on
 * success (new reference) or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_reset_nvlink_error_counters(intptr_t __pyx_v_device, unsigned int __pyx_v_link, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_reset_nvlink_error_counters", 0);

  /* "cuda/bindings/_nvml.pyx":22979
 *     .. seealso:: `nvmlDeviceResetNvLinkErrorCounters`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceResetNvLinkErrorCounters(<Device>device, link)
 *     check_status(__status__)
 */
  /* GIL released around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22980
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceResetNvLinkErrorCounters(<Device>device, link)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The loading-error sentinel triggers a GIL-guarded exception probe. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetNvLinkErrorCounters(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22980, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22979
 *     .. seealso:: `nvmlDeviceResetNvLinkErrorCounters`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceResetNvLinkErrorCounters(<Device>device, link)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22981
 *     with nogil:
 *         __status__ = nvmlDeviceResetNvLinkErrorCounters(<Device>device, link)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22981, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":22970
 * 
 * 
 * cpdef device_reset_nvlink_error_counters(intptr_t device, unsigned int link):             # <<<<<<<<<<<<<<
 *     """Resets all error counters to zero Please refer to ``nvmlNvLinkErrorCounter_t`` for the list of error counters that are reset.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_nvlink_error_counters", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_383device_reset_nvlink_error_counters(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_382device_reset_nvlink_error_counters, "device_reset_nvlink_error_counters(intptr_t device, unsigned int link)\n\nResets all error counters to zero Please refer to ``nvmlNvLinkErrorCounter_t`` for the list of error counters that are reset.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    link (unsigned int): Specifies the NvLink link to be queried.\n\n.. seealso:: `nvmlDeviceResetNvLinkErrorCounters`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_383device_reset_nvlink_error_counters = {"device_reset_nvlink_error_counters", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_383device_reset_nvlink_error_counters, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_382device_reset_nvlink_error_counters};
/* Python-callable wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * device_reset_nvlink_error_counters: unpacks the `device` and `link`
 * arguments from positional/keyword form, converts them to C types, and
 * forwards to the pf implementation. Returns NULL with an exception set on
 * bad arguments or downstream failure.
 * Fix vs. generated code: the keyword-count error check was
 * `unlikely(__pyx_kwds_len) < 0`, which is always false because unlikely()
 * collapses its operand to 0/1 (__builtin_expect(!!(x), 0)), silently
 * ignoring a negative (error) return from __Pyx_NumKwargs_FASTCALL. The
 * comparison now happens inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_383device_reset_nvlink_error_counters(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_reset_nvlink_error_counters (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the generated form
     * `unlikely(__pyx_kwds_len) < 0` was a dead check. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22970, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect whatever positionals were
       * given, then let __Pyx_ParseKeywords fill the rest from kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22970, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22970, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_reset_nvlink_error_counters", 0) < (0)) __PYX_ERR(0, 22970, __pyx_L3_error)
      /* Every required argument must have been supplied one way or another. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_reset_nvlink_error_counters", 1, 2, 2, i); __PYX_ERR(0, 22970, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22970, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22970, __pyx_L3_error)
    }
    /* Convert the Python objects to their C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22970, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22970, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_reset_nvlink_error_counters", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22970, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_nvlink_error_counters", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_382device_reset_nvlink_error_counters(__pyx_self, __pyx_v_device, __pyx_v_link);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") body for device_reset_nvlink_error_counters.
 * Arguments were already unpacked and converted by the __pyx_pw_ wrapper; this
 * function only forwards to the C (cpdef) implementation — the trailing 1 is
 * presumably the skip_dispatch flag (matches the CYTHON_UNUSED __pyx_skip_dispatch
 * parameter on the callee) — and returns its PyObject* result, or NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_382device_reset_nvlink_error_counters(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_reset_nvlink_error_counters", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; NULL means a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_reset_nvlink_error_counters(__pyx_v_device, __pyx_v_link, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22970, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_reset_nvlink_error_counters", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":22984
 * 
 * 
 * cpdef int device_get_nvlink_remote_device_type(intptr_t device, unsigned int link) except? -1:             # <<<<<<<<<<<<<<
 *     """Get the NVLink device type of the remote device connected over the given link.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_385device_get_nvlink_remote_device_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of `cpdef int device_get_nvlink_remote_device_type(...) except? -1`.
 * On error it returns -1 with a Python exception set; since -1 can also be a
 * legitimate value, callers must additionally consult PyErr_Occurred().
 * The NVML driver call runs with the GIL released; check_status() then raises
 * if NVML reported a non-success status. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_device_type(intptr_t __pyx_v_device, unsigned int __pyx_v_link, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__IntNvLinkDeviceType __pyx_v_p_nv_link_device_type;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":22997
 *     """
 *     cdef _IntNvLinkDeviceType p_nv_link_device_type
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkRemoteDeviceType(<Device>device, link, &p_nv_link_device_type)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; the try/finally shape below restores
   * the thread state on both the success and the error path. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":22998
 *     cdef _IntNvLinkDeviceType p_nv_link_device_type
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkRemoteDeviceType(<Device>device, link, &p_nv_link_device_type)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>p_nv_link_device_type
 */
        /* NOTE(review): the sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol (an exception raised with the GIL re-acquired)
         * — confirm against _NVMLRETURN_T_INTERNAL_LOADING_ERROR's definition. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemoteDeviceType(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_link, (&__pyx_v_p_nv_link_device_type)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22998, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":22997
 *     """
 *     cdef _IntNvLinkDeviceType p_nv_link_device_type
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkRemoteDeviceType(<Device>device, link, &p_nv_link_device_type)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":22999
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkRemoteDeviceType(<Device>device, link, &p_nv_link_device_type)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>p_nv_link_device_type
 * 
 */
  /* check_status() raises the mapped Python exception when __status__ != success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 22999, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23000
 *         __status__ = nvmlDeviceGetNvLinkRemoteDeviceType(<Device>device, link, &p_nv_link_device_type)
 *     check_status(__status__)
 *     return <int>p_nv_link_device_type             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_p_nv_link_device_type);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":22984
 * 
 * 
 * cpdef int device_get_nvlink_remote_device_type(intptr_t device, unsigned int link) except? -1:             # <<<<<<<<<<<<<<
 *     """Get the NVLink device type of the remote device connected over the given link.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_remote_device_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for device_get_nvlink_remote_device_type.
 * Unpacks exactly two arguments (device, link) — positionally or by keyword —
 * converts them to intptr_t / unsigned int, and dispatches to the __pyx_pf_
 * implementation. Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_385device_get_nvlink_remote_device_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_384device_get_nvlink_remote_device_type, "device_get_nvlink_remote_device_type(intptr_t device, unsigned int link) -> int\n\nGet the NVLink device type of the remote device connected over the given link.\n\nArgs:\n    device (intptr_t): The device handle of the target GPU.\n    link (unsigned int): The NVLink link index on the target GPU.\n\nReturns:\n    int: Pointer in which the output remote device type is returned.\n\n.. seealso:: `nvmlDeviceGetNvLinkRemoteDeviceType`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_385device_get_nvlink_remote_device_type = {"device_get_nvlink_remote_device_type", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_385device_get_nvlink_remote_device_type, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_384device_get_nvlink_remote_device_type};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_385device_get_nvlink_remote_device_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_link;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_remote_device_type (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_link,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must live inside unlikely(). With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which only yields 0 or 1,
     * so the original "unlikely(__pyx_kwds_len) < 0" could never be true and a
     * negative (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 22984, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect the positionals present, then let
       * __Pyx_ParseKeywords fill the rest and reject duplicates/unknown names. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_remote_device_type", 0) < (0)) __PYX_ERR(0, 22984, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_remote_device_type", 1, 2, 2, i); __PYX_ERR(0, 22984, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22984, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22984, __pyx_L3_error)
    }
    /* Convert the Python objects to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 22984, __pyx_L3_error)
    __pyx_v_link = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_link == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22984, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_remote_device_type", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 22984, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_remote_device_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_384device_get_nvlink_remote_device_type(__pyx_self, __pyx_v_device, __pyx_v_link);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") body for device_get_nvlink_remote_device_type.
 * Forwards to the C implementation (which signals failure by returning -1 with an
 * exception set — hence the PyErr_Occurred() guard) and boxes the int result
 * into a Python int. Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_384device_get_nvlink_remote_device_type(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_link) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_remote_device_type", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is an ambiguous sentinel ("except? -1"), so PyErr_Occurred() disambiguates. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_device_type(__pyx_v_device, __pyx_v_link, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22984, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_remote_device_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23003
 * 
 * 
 * cpdef device_set_nvlink_device_low_power_threshold(intptr_t device, intptr_t info):             # <<<<<<<<<<<<<<
 *     """Set NvLink Low Power Threshold for device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_387device_set_nvlink_device_low_power_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_set_nvlink_device_low_power_threshold.
 * `info` is a raw pointer (passed as intptr_t) to an nvmlNvLinkPowerThres_t
 * struct owned by the caller. Runs the NVML call with the GIL released, raises
 * via check_status() on a non-success status, and returns Py_None on success
 * (NULL with an exception set on failure). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_device_low_power_threshold(intptr_t __pyx_v_device, intptr_t __pyx_v_info, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_nvlink_device_low_power_threshold", 0);

  /* "cuda/bindings/_nvml.pyx":23012
 *     .. seealso:: `nvmlDeviceSetNvLinkDeviceLowPowerThreshold`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetNvLinkDeviceLowPowerThreshold(<Device>device, <nvmlNvLinkPowerThres_t*>info)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23013
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetNvLinkDeviceLowPowerThreshold(<Device>device, <nvmlNvLinkPowerThres_t*>info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* NOTE(review): the sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol — confirm against the definition of
         * _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvLinkDeviceLowPowerThreshold(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlNvLinkPowerThres_t *)__pyx_v_info)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23013, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23012
 *     .. seealso:: `nvmlDeviceSetNvLinkDeviceLowPowerThreshold`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetNvLinkDeviceLowPowerThreshold(<Device>device, <nvmlNvLinkPowerThres_t*>info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23014
 *     with nogil:
 *         __status__ = nvmlDeviceSetNvLinkDeviceLowPowerThreshold(<Device>device, <nvmlNvLinkPowerThres_t*>info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Raise the mapped Python exception if NVML reported an error status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23014, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23003
 * 
 * 
 * cpdef device_set_nvlink_device_low_power_threshold(intptr_t device, intptr_t info):             # <<<<<<<<<<<<<<
 *     """Set NvLink Low Power Threshold for device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_nvlink_device_low_power_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for device_set_nvlink_device_low_power_threshold.
 * Unpacks exactly two arguments (device, info) — positionally or by keyword —
 * converts both to intptr_t, and dispatches to the __pyx_pf_ implementation.
 * Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_387device_set_nvlink_device_low_power_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_386device_set_nvlink_device_low_power_threshold, "device_set_nvlink_device_low_power_threshold(intptr_t device, intptr_t info)\n\nSet NvLink Low Power Threshold for device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    info (intptr_t): Reference to ``nvmlNvLinkPowerThres_t`` struct input parameters.\n\n.. seealso:: `nvmlDeviceSetNvLinkDeviceLowPowerThreshold`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_387device_set_nvlink_device_low_power_threshold = {"device_set_nvlink_device_low_power_threshold", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_387device_set_nvlink_device_low_power_threshold, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_386device_set_nvlink_device_low_power_threshold};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_387device_set_nvlink_device_low_power_threshold(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_info;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_nvlink_device_low_power_threshold (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_info,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must live inside unlikely(). With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which only yields 0 or 1,
     * so the original "unlikely(__pyx_kwds_len) < 0" could never be true and a
     * negative (error) result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23003, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23003, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23003, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_nvlink_device_low_power_threshold", 0) < (0)) __PYX_ERR(0, 23003, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_nvlink_device_low_power_threshold", 1, 2, 2, i); __PYX_ERR(0, 23003, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23003, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23003, __pyx_L3_error)
    }
    /* Convert the Python objects to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23003, __pyx_L3_error)
    __pyx_v_info = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23003, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_nvlink_device_low_power_threshold", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23003, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_nvlink_device_low_power_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_386device_set_nvlink_device_low_power_threshold(__pyx_self, __pyx_v_device, __pyx_v_info);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") body for device_set_nvlink_device_low_power_threshold.
 * Forwards to the C implementation and returns its PyObject* result
 * (Py_None on success, NULL with an exception set on failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_386device_set_nvlink_device_low_power_threshold(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_info) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_nvlink_device_low_power_threshold", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; NULL means a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_device_low_power_threshold(__pyx_v_device, __pyx_v_info, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23003, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_nvlink_device_low_power_threshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23017
 * 
 * 
 * cpdef system_set_nvlink_bw_mode(unsigned int nvlink_bw_mode):             # <<<<<<<<<<<<<<
 *     """Set the global nvlink bandwith mode.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_389system_set_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef system_set_nvlink_bw_mode.
 * Runs nvmlSystemSetNvlinkBwMode with the GIL released, raises via
 * check_status() on a non-success status, and returns Py_None on success
 * (NULL with an exception set on failure). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_set_nvlink_bw_mode(unsigned int __pyx_v_nvlink_bw_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_set_nvlink_bw_mode", 0);

  /* "cuda/bindings/_nvml.pyx":23025
 *     .. seealso:: `nvmlSystemSetNvlinkBwMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemSetNvlinkBwMode(nvlink_bw_mode)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23026
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemSetNvlinkBwMode(nvlink_bw_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* NOTE(review): the sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol — confirm against the definition of
         * _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetNvlinkBwMode(__pyx_v_nvlink_bw_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23026, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23025
 *     .. seealso:: `nvmlSystemSetNvlinkBwMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemSetNvlinkBwMode(nvlink_bw_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23027
 *     with nogil:
 *         __status__ = nvmlSystemSetNvlinkBwMode(nvlink_bw_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Raise the mapped Python exception if NVML reported an error status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23027, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23017
 * 
 * 
 * cpdef system_set_nvlink_bw_mode(unsigned int nvlink_bw_mode):             # <<<<<<<<<<<<<<
 *     """Set the global nvlink bandwith mode.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_389system_set_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_388system_set_nvlink_bw_mode, "system_set_nvlink_bw_mode(unsigned int nvlink_bw_mode)\n\nSet the global nvlink bandwith mode.\n\nArgs:\n    nvlink_bw_mode (unsigned int): nvlink bandwidth mode.\n\n.. seealso:: `nvmlSystemSetNvlinkBwMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_389system_set_nvlink_bw_mode = {"system_set_nvlink_bw_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_389system_set_nvlink_bw_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_388system_set_nvlink_bw_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_389system_set_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_nvlink_bw_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_set_nvlink_bw_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_nvlink_bw_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23017, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23017, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_set_nvlink_bw_mode", 0) < (0)) __PYX_ERR(0, 23017, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_set_nvlink_bw_mode", 1, 1, 1, i); __PYX_ERR(0, 23017, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23017, __pyx_L3_error)
    }
    __pyx_v_nvlink_bw_mode = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_nvlink_bw_mode == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23017, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_set_nvlink_bw_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23017, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_388system_set_nvlink_bw_mode(__pyx_self, __pyx_v_nvlink_bw_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") body for system_set_nvlink_bw_mode.
 * Forwards to the C implementation and returns its PyObject* result
 * (Py_None on success, NULL with an exception set on failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_388system_set_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_nvlink_bw_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_set_nvlink_bw_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; NULL means a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_set_nvlink_bw_mode(__pyx_v_nvlink_bw_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_set_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23030
 * 
 * 
 * cpdef unsigned int system_get_nvlink_bw_mode() except? 0:             # <<<<<<<<<<<<<<
 *     """Get the global nvlink bandwith mode.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_391system_get_nvlink_bw_mode(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level body of `cpdef unsigned int system_get_nvlink_bw_mode() except? 0`.
 * On error it returns 0 with a Python exception set; since 0 can also be a
 * legitimate mode value, callers must additionally consult PyErr_Occurred().
 * The NVML driver call runs with the GIL released. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_system_get_nvlink_bw_mode(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_nvlink_bw_mode;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23039
 *     """
 *     cdef unsigned int nvlink_bw_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetNvlinkBwMode(&nvlink_bw_mode)
 *     check_status(__status__)
 */
  /* Release the GIL around the driver call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23040
 *     cdef unsigned int nvlink_bw_mode
 *     with nogil:
 *         __status__ = nvmlSystemGetNvlinkBwMode(&nvlink_bw_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return nvlink_bw_mode
 */
        /* NOTE(review): the sentinel comparison presumably detects a failed lazy
         * load of the NVML symbol — confirm against the definition of
         * _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNvlinkBwMode((&__pyx_v_nvlink_bw_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23040, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23039
 *     """
 *     cdef unsigned int nvlink_bw_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetNvlinkBwMode(&nvlink_bw_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23041
 *     with nogil:
 *         __status__ = nvmlSystemGetNvlinkBwMode(&nvlink_bw_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return nvlink_bw_mode
 * 
 */
  /* Raise the mapped Python exception if NVML reported an error status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23041, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23042
 *         __status__ = nvmlSystemGetNvlinkBwMode(&nvlink_bw_mode)
 *     check_status(__status__)
 *     return nvlink_bw_mode             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_nvlink_bw_mode;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23030
 * 
 * 
 * cpdef unsigned int system_get_nvlink_bw_mode() except? 0:             # <<<<<<<<<<<<<<
 *     """Get the global nvlink bandwith mode.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS entry point for system_get_nvlink_bw_mode: no argument parsing is
 * needed, so this dispatches straight to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_391system_get_nvlink_bw_mode(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_390system_get_nvlink_bw_mode, "system_get_nvlink_bw_mode() -> unsigned int\n\nGet the global nvlink bandwith mode.\n\nReturns:\n    unsigned int: reference of nvlink bandwidth mode.\n\n.. seealso:: `nvmlSystemGetNvlinkBwMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_391system_get_nvlink_bw_mode = {"system_get_nvlink_bw_mode", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_391system_get_nvlink_bw_mode, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_390system_get_nvlink_bw_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_391system_get_nvlink_bw_mode(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_nvlink_bw_mode (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this METH_NOARGS
   * signature; this only compiles because __Pyx_KwValues_VARARGS is presumably a
   * macro that ignores its arguments — confirm against the macro definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_390system_get_nvlink_bw_mode(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `system_get_nvlink_bw_mode`: calls the C-level cpdef
 * implementation (with skip_dispatch=1) and boxes the unsigned int result
 * into a Python int.  The cpdef is declared `except? 0`, so a 0 return is
 * only an error when a Python exception is also set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_390system_get_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_nvlink_bw_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is the ambiguous `except?` sentinel: PyErr_Occurred() disambiguates
   * a legitimate 0 result from an error return. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_nvlink_bw_mode(1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23030, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23045
 * 
 * 
 * cpdef object device_get_nvlink_supported_bw_modes(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the supported NvLink Reduced Bandwidth Modes of the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_393device_get_nvlink_supported_bw_modes(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_nvlink_supported_bw_modes`:
 * allocates a NvlinkSupportedBwModes_v1 wrapper object, stamps the version
 * field of the wrapped nvmlNvlinkSupportedBwModes_t struct, invokes
 * nvmlDeviceGetNvlinkSupportedBwModes with the GIL released, and returns the
 * wrapper (or propagates an error via check_status). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_supported_bw_modes(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *__pyx_v_supported_bw_mode_py = 0;
  nvmlNvlinkSupportedBwModes_t *__pyx_v_supported_bw_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_supported_bw_modes", 0);

  /* "cuda/bindings/_nvml.pyx":23056
 *     .. seealso:: `nvmlDeviceGetNvlinkSupportedBwModes`
 *     """
 *     cdef NvlinkSupportedBwModes_v1 supported_bw_mode_py = NvlinkSupportedBwModes_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkSupportedBwModes_t *supported_bw_mode = <nvmlNvlinkSupportedBwModes_t *><intptr_t>(supported_bw_mode_py._get_ptr())
 *     supported_bw_mode.version = sizeof(nvmlNvlinkSupportedBwModes_v1_t) | (1 << 24)
 */
  /* Vectorcall the NvlinkSupportedBwModes_v1 type with no arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23056, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_supported_bw_mode_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23057
 *     """
 *     cdef NvlinkSupportedBwModes_v1 supported_bw_mode_py = NvlinkSupportedBwModes_v1()
 *     cdef nvmlNvlinkSupportedBwModes_t *supported_bw_mode = <nvmlNvlinkSupportedBwModes_t *><intptr_t>(supported_bw_mode_py._get_ptr())             # <<<<<<<<<<<<<<
 *     supported_bw_mode.version = sizeof(nvmlNvlinkSupportedBwModes_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() yields an intptr_t — presumably the address of the wrapped
   * struct (confirm in NvlinkSupportedBwModes_v1's definition). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)__pyx_v_supported_bw_mode_py->__pyx_vtab)->_get_ptr(__pyx_v_supported_bw_mode_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23057, __pyx_L1_error)
  __pyx_v_supported_bw_mode = ((nvmlNvlinkSupportedBwModes_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23058
 *     cdef NvlinkSupportedBwModes_v1 supported_bw_mode_py = NvlinkSupportedBwModes_v1()
 *     cdef nvmlNvlinkSupportedBwModes_t *supported_bw_mode = <nvmlNvlinkSupportedBwModes_t *><intptr_t>(supported_bw_mode_py._get_ptr())
 *     supported_bw_mode.version = sizeof(nvmlNvlinkSupportedBwModes_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvlinkSupportedBwModes(<Device>device, supported_bw_mode)
 */
  /* Version stamp: struct size in the low 24 bits, version 1 in bit 24. */
  __pyx_v_supported_bw_mode->version = ((sizeof(nvmlNvlinkSupportedBwModes_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23059
 *     cdef nvmlNvlinkSupportedBwModes_t *supported_bw_mode = <nvmlNvlinkSupportedBwModes_t *><intptr_t>(supported_bw_mode_py._get_ptr())
 *     supported_bw_mode.version = sizeof(nvmlNvlinkSupportedBwModes_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvlinkSupportedBwModes(<Device>device, supported_bw_mode)
 *     check_status(__status__)
 */
  /* Release the GIL for the duration of the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23060
 *     supported_bw_mode.version = sizeof(nvmlNvlinkSupportedBwModes_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvlinkSupportedBwModes(<Device>device, supported_bw_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return supported_bw_mode_py
 */
        /* The sentinel status signals an error raised inside the cy_nvml shim
         * (GIL is briefly re-acquired just to check for a pending exception). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkSupportedBwModes(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_supported_bw_mode); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23060, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23059
 *     cdef nvmlNvlinkSupportedBwModes_t *supported_bw_mode = <nvmlNvlinkSupportedBwModes_t *><intptr_t>(supported_bw_mode_py._get_ptr())
 *     supported_bw_mode.version = sizeof(nvmlNvlinkSupportedBwModes_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvlinkSupportedBwModes(<Device>device, supported_bw_mode)
 *     check_status(__status__)
 */
      /* Both exit paths re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23061
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvlinkSupportedBwModes(<Device>device, supported_bw_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return supported_bw_mode_py
 * 
 */
  /* check_status signals an error by returning 1 (with an exception set). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23061, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23062
 *         __status__ = nvmlDeviceGetNvlinkSupportedBwModes(<Device>device, supported_bw_mode)
 *     check_status(__status__)
 *     return supported_bw_mode_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_supported_bw_mode_py);
  __pyx_r = ((PyObject *)__pyx_v_supported_bw_mode_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23045
 * 
 * 
 * cpdef object device_get_nvlink_supported_bw_modes(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the supported NvLink Reduced Bandwidth Modes of the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_supported_bw_modes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_supported_bw_mode_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_393device_get_nvlink_supported_bw_modes(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_392device_get_nvlink_supported_bw_modes, "device_get_nvlink_supported_bw_modes(intptr_t device)\n\nGet the supported NvLink Reduced Bandwidth Modes of the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlNvlinkSupportedBwModes_v1_t: Reference to ``nvmlNvlinkSupportedBwModes_t``.\n\n.. seealso:: `nvmlDeviceGetNvlinkSupportedBwModes`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_393device_get_nvlink_supported_bw_modes = {"device_get_nvlink_supported_bw_modes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_393device_get_nvlink_supported_bw_modes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_392device_get_nvlink_supported_bw_modes};
/* Argument-unpacking wrapper: parses the single positional/keyword argument
 * `device` into an intptr_t and forwards to the Python-level body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_393device_get_nvlink_supported_bw_modes(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_supported_bw_modes (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `if (unlikely(__pyx_kwds_len) < 0)` — unlikely() yields 0/1,
     * so the negative-length (error) check could never fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23045, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23045, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_supported_bw_modes", 0) < (0)) __PYX_ERR(0, 23045, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_supported_bw_modes", 1, 1, 1, i); __PYX_ERR(0, 23045, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23045, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23045, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_supported_bw_modes", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23045, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_supported_bw_modes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_392device_get_nvlink_supported_bw_modes(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_nvlink_supported_bw_modes`: forwards the
 * unpacked `device` handle to the C-level cpdef implementation (with
 * skip_dispatch=1) and returns the resulting wrapper object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_392device_get_nvlink_supported_bw_modes(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_supported_bw_modes", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals an error raised inside the implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_supported_bw_modes(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23045, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_supported_bw_modes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23065
 * 
 * 
 * cpdef object device_get_nvlink_bw_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the NvLink Reduced Bandwidth Mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_395device_get_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_nvlink_bw_mode`:
 * allocates a NvlinkGetBwMode_v1 wrapper object, stamps the version field of
 * the wrapped nvmlNvlinkGetBwMode_t struct, invokes nvmlDeviceGetNvlinkBwMode
 * with the GIL released, and returns the wrapper (or propagates an error via
 * check_status). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_bw_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *__pyx_v_get_bw_mode_py = 0;
  nvmlNvlinkGetBwMode_t *__pyx_v_get_bw_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_bw_mode", 0);

  /* "cuda/bindings/_nvml.pyx":23076
 *     .. seealso:: `nvmlDeviceGetNvlinkBwMode`
 *     """
 *     cdef NvlinkGetBwMode_v1 get_bw_mode_py = NvlinkGetBwMode_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlNvlinkGetBwMode_t *get_bw_mode = <nvmlNvlinkGetBwMode_t *><intptr_t>(get_bw_mode_py._get_ptr())
 *     get_bw_mode.version = sizeof(nvmlNvlinkGetBwMode_v1_t) | (1 << 24)
 */
  /* Vectorcall the NvlinkGetBwMode_v1 type with no arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23076, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_get_bw_mode_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23077
 *     """
 *     cdef NvlinkGetBwMode_v1 get_bw_mode_py = NvlinkGetBwMode_v1()
 *     cdef nvmlNvlinkGetBwMode_t *get_bw_mode = <nvmlNvlinkGetBwMode_t *><intptr_t>(get_bw_mode_py._get_ptr())             # <<<<<<<<<<<<<<
 *     get_bw_mode.version = sizeof(nvmlNvlinkGetBwMode_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() yields an intptr_t — presumably the address of the wrapped
   * struct (confirm in NvlinkGetBwMode_v1's definition). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)__pyx_v_get_bw_mode_py->__pyx_vtab)->_get_ptr(__pyx_v_get_bw_mode_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23077, __pyx_L1_error)
  __pyx_v_get_bw_mode = ((nvmlNvlinkGetBwMode_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23078
 *     cdef NvlinkGetBwMode_v1 get_bw_mode_py = NvlinkGetBwMode_v1()
 *     cdef nvmlNvlinkGetBwMode_t *get_bw_mode = <nvmlNvlinkGetBwMode_t *><intptr_t>(get_bw_mode_py._get_ptr())
 *     get_bw_mode.version = sizeof(nvmlNvlinkGetBwMode_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvlinkBwMode(<Device>device, get_bw_mode)
 */
  /* Version stamp: struct size in the low 24 bits, version 1 in bit 24. */
  __pyx_v_get_bw_mode->version = ((sizeof(nvmlNvlinkGetBwMode_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23079
 *     cdef nvmlNvlinkGetBwMode_t *get_bw_mode = <nvmlNvlinkGetBwMode_t *><intptr_t>(get_bw_mode_py._get_ptr())
 *     get_bw_mode.version = sizeof(nvmlNvlinkGetBwMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvlinkBwMode(<Device>device, get_bw_mode)
 *     check_status(__status__)
 */
  /* Release the GIL for the duration of the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23080
 *     get_bw_mode.version = sizeof(nvmlNvlinkGetBwMode_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvlinkBwMode(<Device>device, get_bw_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return get_bw_mode_py
 */
        /* The sentinel status signals an error raised inside the cy_nvml shim
         * (GIL is briefly re-acquired just to check for a pending exception). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkBwMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_get_bw_mode); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23080, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23079
 *     cdef nvmlNvlinkGetBwMode_t *get_bw_mode = <nvmlNvlinkGetBwMode_t *><intptr_t>(get_bw_mode_py._get_ptr())
 *     get_bw_mode.version = sizeof(nvmlNvlinkGetBwMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvlinkBwMode(<Device>device, get_bw_mode)
 *     check_status(__status__)
 */
      /* Both exit paths re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23081
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvlinkBwMode(<Device>device, get_bw_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return get_bw_mode_py
 * 
 */
  /* check_status signals an error by returning 1 (with an exception set). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23081, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23082
 *         __status__ = nvmlDeviceGetNvlinkBwMode(<Device>device, get_bw_mode)
 *     check_status(__status__)
 *     return get_bw_mode_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_get_bw_mode_py);
  __pyx_r = ((PyObject *)__pyx_v_get_bw_mode_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23065
 * 
 * 
 * cpdef object device_get_nvlink_bw_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the NvLink Reduced Bandwidth Mode for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_get_bw_mode_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_395device_get_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_394device_get_nvlink_bw_mode, "device_get_nvlink_bw_mode(intptr_t device)\n\nGet the NvLink Reduced Bandwidth Mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlNvlinkGetBwMode_v1_t: Reference to ``nvmlNvlinkGetBwMode_t``.\n\n.. seealso:: `nvmlDeviceGetNvlinkBwMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_395device_get_nvlink_bw_mode = {"device_get_nvlink_bw_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_395device_get_nvlink_bw_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_394device_get_nvlink_bw_mode};
/* Argument-unpacking wrapper: parses the single positional/keyword argument
 * `device` into an intptr_t and forwards to the Python-level body. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_395device_get_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_bw_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `if (unlikely(__pyx_kwds_len) < 0)` — unlikely() yields 0/1,
     * so the negative-length (error) check could never fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23065, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23065, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_bw_mode", 0) < (0)) __PYX_ERR(0, 23065, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_bw_mode", 1, 1, 1, i); __PYX_ERR(0, 23065, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23065, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23065, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_bw_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23065, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_394device_get_nvlink_bw_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_get_nvlink_bw_mode`: forwards the unpacked
 * `device` handle to the C-level cpdef implementation (with skip_dispatch=1)
 * and returns the resulting wrapper object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_394device_get_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_bw_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals an error raised inside the implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_bw_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23085
 * 
 * 
 * cpdef device_set_nvlink_bw_mode(intptr_t device, intptr_t set_bw_mode):             # <<<<<<<<<<<<<<
 *     """Set the NvLink Reduced Bandwidth Mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_397device_set_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_set_nvlink_bw_mode`:
 * stamps the version field of the caller-supplied nvmlNvlinkSetBwMode_t
 * struct, invokes nvmlDeviceSetNvlinkBwMode with the GIL released, and
 * raises via check_status on a non-success status.  Returns None.
 *
 * BUG FIX: the original generated code boxed the `set_bw_mode` intptr_t into
 * a new PyLong and called __Pyx_PyObject_SetAttrStr(int, "version", ...),
 * which always raises (Python ints have no writable attributes), so the NVML
 * call was unreachable.  The version field is now written directly through
 * the struct pointer, matching the sibling getter functions.  The .pyx source
 * should read: `(<nvmlNvlinkSetBwMode_t*>set_bw_mode).version = ...`. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_bw_mode(intptr_t __pyx_v_device, intptr_t __pyx_v_set_bw_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_nvlink_bw_mode", 0);

  /* "cuda/bindings/_nvml.pyx":23094
 *     set_bw_mode.version = sizeof(nvmlNvlinkSetBwMode_v1_t) | (1 << 24)
 */
  /* Version stamp written through the struct pointer: struct size in the low
   * 24 bits, version 1 in bit 24 (same scheme as the getter functions). */
  ((nvmlNvlinkSetBwMode_t *)__pyx_v_set_bw_mode)->version = ((sizeof(nvmlNvlinkSetBwMode_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23095
 *     with nogil:
 *         __status__ = nvmlDeviceSetNvlinkBwMode(<Device>device, <nvmlNvlinkSetBwMode_t*>set_bw_mode)
 */
  /* Release the GIL for the duration of the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {
        /* The sentinel status signals an error raised inside the cy_nvml shim
         * (GIL is briefly re-acquired just to check for a pending exception). */
        __pyx_t_3 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvlinkBwMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlNvlinkSetBwMode_t *)__pyx_v_set_bw_mode)); if (unlikely(__pyx_t_3 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23096, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_3;
      }
      /* Both exit paths re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23097
 *     check_status(__status__)
 */
  /* check_status signals an error by returning 1 (with an exception set). */
  __pyx_t_4 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_4 == ((int)1))) __PYX_ERR(0, 23097, __pyx_L1_error)

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for device_set_nvlink_bw_mode(device, set_bw_mode).
   Supports both the METH_FASTCALL (vectorcall) and the classic tuple/dict calling
   conventions, unpacks the two required arguments from positionals and/or keywords,
   converts them to intptr_t, and forwards to the implementation shim
   __pyx_pf_..._396device_set_nvlink_bw_mode. Returns NULL with an exception set on
   bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_397device_set_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_396device_set_nvlink_bw_mode, "device_set_nvlink_bw_mode(intptr_t device, intptr_t set_bw_mode)\n\nSet the NvLink Reduced Bandwidth Mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    set_bw_mode (intptr_t): Reference to ``nvmlNvlinkSetBwMode_t``.\n\n.. seealso:: `nvmlDeviceSetNvlinkBwMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_397device_set_nvlink_bw_mode = {"device_set_nvlink_bw_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_397device_set_nvlink_bw_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_396device_set_nvlink_bw_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_397device_set_nvlink_bw_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_set_bw_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Argument slots; each non-NULL entry is released (Py_XDECREF) on every exit path. */
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_nvlink_bw_mode (wrapper)", 0);
  /* Tuple convention only: derive the positional-argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_set_bw_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23085, __pyx_L3_error)
    /* Keywords present: first capture whatever positionals were given (fallthrough
       switch), then merge the keyword arguments and verify both required values
       ended up filled in. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23085, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23085, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_nvlink_bw_mode", 0) < (0)) __PYX_ERR(0, 23085, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_nvlink_bw_mode", 1, 2, 2, i); __PYX_ERR(0, 23085, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional call with exactly 2 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23085, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23085, __pyx_L3_error)
    }
    /* Convert both handles to intptr_t via Py_ssize_t.
       NOTE(review): relies on Py_ssize_t and intptr_t having the same width —
       confirm for any new target ABI. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23085, __pyx_L3_error)
    __pyx_v_set_bw_mode = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_set_bw_mode == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23085, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  /* Wrong number of positional arguments (no keywords involved). */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_nvlink_bw_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23085, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: drop any collected argument references and report. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_396device_set_nvlink_bw_mode(__pyx_self, __pyx_v_device, __pyx_v_set_bw_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for device_set_nvlink_bw_mode: calls the cdef-level function
   with __pyx_skip_dispatch=1 (no Python-level re-dispatch) and returns its new
   PyObject reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_396device_set_nvlink_bw_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_set_bw_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_nvlink_bw_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result means the cdef function raised; fall through to the error path. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_bw_mode(__pyx_v_device, __pyx_v_set_bw_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23085, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_nvlink_bw_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23100
 * 
 * 
 * cpdef intptr_t event_set_create() except? 0:             # <<<<<<<<<<<<<<
 *     """Create an empty set of events. Event set should be freed by ``nvmlEventSetFree``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_399event_set_create(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* cdef-level implementation of event_set_create(): releases the GIL, calls
   nvmlEventSetCreate via the cy_nvml dispatch layer, restores the GIL, raises
   through check_status() on a non-success status, and returns the new event-set
   handle cast to intptr_t. Declared `except? 0` in the .pyx, so 0 is the
   (ambiguous) error value — callers must also check PyErr_Occurred(). */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_event_set_create(CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_EventSet __pyx_v_set;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23109
 *     """
 *     cdef EventSet set
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlEventSetCreate(&set)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23110
 *     cdef EventSet set
 *     with nogil:
 *         __status__ = nvmlEventSetCreate(&set)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>set
*/
        /* The loader's sentinel status only counts as a Python error when an
           exception is actually pending (checked after re-acquiring the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetCreate((&__pyx_v_set)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23110, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23109
 *     """
 *     cdef EventSet set
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlEventSetCreate(&set)
 *     check_status(__status__)
*/
      /* GIL is re-acquired on both the normal and the error exit of the nogil block. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23111
 *     with nogil:
 *         __status__ = nvmlEventSetCreate(&set)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>set
 * 
*/
  /* check_status() returns 1 after setting a Python exception for bad statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23111, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23112
 *         __status__ = nvmlEventSetCreate(&set)
 *     check_status(__status__)
 *     return <intptr_t>set             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((intptr_t)__pyx_v_set);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23100
 * 
 * 
 * cpdef intptr_t event_set_create() except? 0:             # <<<<<<<<<<<<<<
 *     """Create an empty set of events. Event set should be freed by ``nvmlEventSetFree``.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_create", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_NOARGS entry point for event_set_create(); delegates directly to the
   implementation shim. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_399event_set_create(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_398event_set_create, "event_set_create() -> intptr_t\n\nCreate an empty set of events. Event set should be freed by ``nvmlEventSetFree``.\n\nReturns:\n    intptr_t: Reference in which to return the event handle.\n\n.. seealso:: `nvmlEventSetCreate`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_399event_set_create = {"event_set_create", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_399event_set_create, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_398event_set_create};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_399event_set_create(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("event_set_create (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not parameters of this signature;
     this only compiles because __Pyx_KwValues_VARARGS is a macro that does not
     evaluate its arguments — consistent with Cython 3 codegen. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_398event_set_create(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for event_set_create: calls the cdef function and boxes the
   resulting intptr_t handle into a Python int. Because the cdef function is
   `except? 0`, a 0 result is only an error when PyErr_Occurred() is also true. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_398event_set_create(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("event_set_create", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Disambiguate the `except? 0` sentinel: 0 + pending exception == error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_event_set_create(1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 23100, __pyx_L1_error)
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_create", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23115
 * 
 * 
 * cpdef device_register_events(intptr_t device, unsigned long long event_types, intptr_t set):             # <<<<<<<<<<<<<<
 *     """Starts recording of events on a specified devices and add the events to specified ``nvmlEventSet_t``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_401device_register_events(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cdef-level implementation of device_register_events(device, event_types, set):
   releases the GIL, calls nvmlDeviceRegisterEvents with the raw handles cast back
   to their NVML types, restores the GIL, raises via check_status() on a
   non-success status, and returns None on success (NULL on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_register_events(intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_event_types, intptr_t __pyx_v_set, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_register_events", 0);

  /* "cuda/bindings/_nvml.pyx":23125
 *     .. seealso:: `nvmlDeviceRegisterEvents`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceRegisterEvents(<Device>device, event_types, <EventSet>set)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23126
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceRegisterEvents(<Device>device, event_types, <EventSet>set)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel status from the cy_nvml loader only signals an error when a
           Python exception is pending (checked with the GIL re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRegisterEvents(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_event_types, ((__pyx_t_4cuda_8bindings_5_nvml_EventSet)__pyx_v_set)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23126, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23125
 *     .. seealso:: `nvmlDeviceRegisterEvents`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceRegisterEvents(<Device>device, event_types, <EventSet>set)
 *     check_status(__status__)
*/
      /* GIL is re-acquired on both the normal and the error exit of the nogil block. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23127
 *     with nogil:
 *         __status__ = nvmlDeviceRegisterEvents(<Device>device, event_types, <EventSet>set)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status() returns 1 after setting a Python exception for bad statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23127, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23115
 * 
 * 
 * cpdef device_register_events(intptr_t device, unsigned long long event_types, intptr_t set):             # <<<<<<<<<<<<<<
 *     """Starts recording of events on a specified devices and add the events to specified ``nvmlEventSet_t``.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_register_events", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for
   device_register_events(device, event_types, set). Handles both fastcall and
   tuple/dict conventions, unpacks the three required arguments from positionals
   and/or keywords, converts them (intptr_t, unsigned long long, intptr_t), and
   forwards to the implementation shim. Returns NULL with an exception set on bad
   arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_401device_register_events(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_400device_register_events, "device_register_events(intptr_t device, unsigned long long event_types, intptr_t set)\n\nStarts recording of events on a specified devices and add the events to specified ``nvmlEventSet_t``.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    event_types (unsigned long long): Bitmask of ``Event Types`` to record.\n    set (intptr_t): Set to which add new event types.\n\n.. seealso:: `nvmlDeviceRegisterEvents`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_401device_register_events = {"device_register_events", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_401device_register_events, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_400device_register_events};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_401device_register_events(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned PY_LONG_LONG __pyx_v_event_types;
  intptr_t __pyx_v_set;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Argument slots; each non-NULL entry is released (Py_XDECREF) on every exit path. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_register_events (wrapper)", 0);
  /* Tuple convention only: derive the positional-argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_event_types,&__pyx_mstate_global->__pyx_n_u_set,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23115, __pyx_L3_error)
    /* Keywords present: capture given positionals (fallthrough switch), merge the
       keywords, then verify all 3 required values were supplied. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23115, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23115, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23115, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_register_events", 0) < (0)) __PYX_ERR(0, 23115, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_register_events", 1, 3, 3, i); __PYX_ERR(0, 23115, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional call with exactly 3 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23115, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23115, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23115, __pyx_L3_error)
    }
    /* Handle args convert via Py_ssize_t (NOTE(review): assumes Py_ssize_t and
       intptr_t share a width); the bitmask converts as unsigned long long. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23115, __pyx_L3_error)
    __pyx_v_event_types = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v_event_types == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23115, __pyx_L3_error)
    __pyx_v_set = PyLong_AsSsize_t(values[2]); if (unlikely((__pyx_v_set == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23115, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  /* Wrong number of positional arguments (no keywords involved). */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_register_events", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 23115, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: drop any collected argument references and report. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_register_events", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_400device_register_events(__pyx_self, __pyx_v_device, __pyx_v_event_types, __pyx_v_set);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for device_register_events: calls the cdef-level function
   with __pyx_skip_dispatch=1 and returns its new PyObject reference (None on
   success), or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_400device_register_events(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_event_types, intptr_t __pyx_v_set) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_register_events", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result means the cdef function raised; fall through to the error path. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_register_events(__pyx_v_device, __pyx_v_event_types, __pyx_v_set, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_register_events", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23130
 * 
 * 
 * cpdef unsigned long long device_get_supported_event_types(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Returns information about events supported on device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_403device_get_supported_event_types(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cdef-level implementation of device_get_supported_event_types(device): releases
   the GIL, calls nvmlDeviceGetSupportedEventTypes to fill the event-type bitmask,
   restores the GIL, raises via check_status() on a non-success status, and returns
   the bitmask. Declared `except? 0` in the .pyx, so 0 is the (ambiguous) error
   value — callers must also check PyErr_Occurred(). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_event_types(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_event_types;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23142
 *     """
 *     cdef unsigned long long event_types
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedEventTypes(<Device>device, &event_types)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23143
 *     cdef unsigned long long event_types
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedEventTypes(<Device>device, &event_types)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return event_types
*/
        /* Sentinel status from the cy_nvml loader only signals an error when a
           Python exception is pending (checked with the GIL re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedEventTypes(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_event_types)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23143, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23142
 *     """
 *     cdef unsigned long long event_types
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedEventTypes(<Device>device, &event_types)
 *     check_status(__status__)
*/
      /* GIL is re-acquired on both the normal and the error exit of the nogil block. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23144
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedEventTypes(<Device>device, &event_types)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return event_types
 * 
*/
  /* check_status() returns 1 after setting a Python exception for bad statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23144, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23145
 *         __status__ = nvmlDeviceGetSupportedEventTypes(<Device>device, &event_types)
 *     check_status(__status__)
 *     return event_types             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_event_types;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23130
 * 
 * 
 * cpdef unsigned long long device_get_supported_event_types(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Returns information about events supported on device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_event_types", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for device_get_supported_event_types(device).
   Handles both fastcall and tuple/dict conventions, unpacks the single required
   argument from a positional or the `device` keyword, converts it to intptr_t, and
   forwards to the implementation shim. Returns NULL with an exception set on bad
   arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_403device_get_supported_event_types(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_402device_get_supported_event_types, "device_get_supported_event_types(intptr_t device) -> unsigned long long\n\nReturns information about events supported on device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned long long: Reference in which to return bitmask of supported events.\n\n.. seealso:: `nvmlDeviceGetSupportedEventTypes`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_403device_get_supported_event_types = {"device_get_supported_event_types", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_403device_get_supported_event_types, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_402device_get_supported_event_types};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_403device_get_supported_event_types(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Argument slot; released (Py_XDECREF) on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_supported_event_types (wrapper)", 0);
  /* Tuple convention only: derive the positional-argument count from the args tuple. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23130, __pyx_L3_error)
    /* Keywords present: capture the positional if given, merge keywords, then
       verify the single required value was supplied. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23130, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_supported_event_types", 0) < (0)) __PYX_ERR(0, 23130, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_supported_event_types", 1, 1, 1, i); __PYX_ERR(0, 23130, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional call with exactly 1 argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23130, __pyx_L3_error)
    }
    /* Convert the handle via Py_ssize_t (NOTE(review): assumes Py_ssize_t and
       intptr_t share a width on the target ABI). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23130, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  /* Wrong number of positional arguments (no keywords involved). */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_supported_event_types", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23130, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Argument-parsing failure: drop any collected argument references and report. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_event_types", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_402device_get_supported_event_types(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation shim for device_get_supported_event_types: calls the cdef
   function and boxes the resulting bitmask into a Python int. Because the cdef
   function is `except? 0`, a 0 result is only an error when PyErr_Occurred() is
   also true. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_402device_get_supported_event_types(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_event_types", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Disambiguate the `except? 0` sentinel: 0 + pending exception == error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_event_types(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 23130, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23130, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_event_types", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23148
 * 
 * 
 * cpdef object event_set_wait_v2(intptr_t set, unsigned int timeoutms):             # <<<<<<<<<<<<<<
 *     """Waits on events and delivers events.
 * 
*/

/* Forward declaration of the Python wrapper; defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_405event_set_wait_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C ("cpdef") implementation of event_set_wait_v2: allocates a Python-owned
 * EventData wrapper, releases the GIL, blocks in nvmlEventSetWait_v2 on the
 * given event set for up to `timeoutms` milliseconds, re-acquires the GIL,
 * raises on a bad NVML status via check_status, and returns the EventData.
 * NOTE(review): `set` is an intptr_t reinterpreted as an NVML EventSet
 * handle — validity of the handle is the caller's responsibility. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_event_set_wait_v2(intptr_t __pyx_v_set, unsigned int __pyx_v_timeoutms, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *__pyx_v_data_py = 0;
  nvmlEventData_t *__pyx_v_data;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("event_set_wait_v2", 0);

  /* "cuda/bindings/_nvml.pyx":23160
 *     .. seealso:: `nvmlEventSetWait_v2`
 *     """
 *     cdef EventData data_py = EventData()             # <<<<<<<<<<<<<<
 *     cdef nvmlEventData_t *data = <nvmlEventData_t *><intptr_t>(data_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the EventData type to construct the result wrapper. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23160, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_data_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23161
 *     """
 *     cdef EventData data_py = EventData()
 *     cdef nvmlEventData_t *data = <nvmlEventData_t *><intptr_t>(data_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlEventSetWait_v2(<EventSet>set, data, timeoutms)
 */
  /* Borrow the raw nvmlEventData_t* owned by data_py; data_py keeps it alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EventData *)__pyx_v_data_py->__pyx_vtab)->_get_ptr(__pyx_v_data_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23161, __pyx_L1_error)
  __pyx_v_data = ((nvmlEventData_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23162
 *     cdef EventData data_py = EventData()
 *     cdef nvmlEventData_t *data = <nvmlEventData_t *><intptr_t>(data_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlEventSetWait_v2(<EventSet>set, data, timeoutms)
 *     check_status(__status__)
 */
  /* GIL released for the (potentially long) blocking wait; an error inside
   * jumps to __pyx_L4_error, which restores the thread state before __pyx_L1_error. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23163
 *     cdef nvmlEventData_t *data = <nvmlEventData_t *><intptr_t>(data_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlEventSetWait_v2(<EventSet>set, data, timeoutms)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return data_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetWait_v2(((__pyx_t_4cuda_8bindings_5_nvml_EventSet)__pyx_v_set), __pyx_v_data, __pyx_v_timeoutms); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23163, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23162
 *     cdef EventData data_py = EventData()
 *     cdef nvmlEventData_t *data = <nvmlEventData_t *><intptr_t>(data_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlEventSetWait_v2(<EventSet>set, data, timeoutms)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23164
 *     with nogil:
 *         __status__ = nvmlEventSetWait_v2(<EventSet>set, data, timeoutms)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return data_py
 * 
 */
  /* check_status raises the mapped Python exception for non-success statuses. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23164, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23165
 *         __status__ = nvmlEventSetWait_v2(<EventSet>set, data, timeoutms)
 *     check_status(__status__)
 *     return data_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_data_py);
  __pyx_r = ((PyObject *)__pyx_v_data_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23148
 * 
 * 
 * cpdef object event_set_wait_v2(intptr_t set, unsigned int timeoutms):             # <<<<<<<<<<<<<<
 *     """Waits on events and delivers events.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_wait_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_data_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_405event_set_wait_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_404event_set_wait_v2, "event_set_wait_v2(intptr_t set, unsigned int timeoutms)\n\nWaits on events and delivers events.\n\nArgs:\n    set (intptr_t): Reference to set of events to wait on.\n    timeoutms (unsigned int): Maximum amount of wait time in milliseconds for registered event.\n\nReturns:\n    nvmlEventData_t: Reference in which to return event data.\n\n.. seealso:: `nvmlEventSetWait_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_405event_set_wait_v2 = {"event_set_wait_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_405event_set_wait_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_404event_set_wait_v2};
/* CPython entry point for event_set_wait_v2(set, timeoutms): unpacks the two
 * positional/keyword arguments into `values`, converts them to intptr_t /
 * unsigned int, and delegates to the impl function.  All unpacked argument
 * references are released on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_405event_set_wait_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_set;
  unsigned int __pyx_v_timeoutms;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("event_set_wait_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_set,&__pyx_mstate_global->__pyx_n_u_timeoutms,0};
    /* BUGFIX: the comparison must be inside unlikely(); `unlikely(x) < 0`
     * expands to `__builtin_expect(!!(x), 0) < 0`, which is always false and
     * silently ignored a failing __Pyx_NumKwargs_FASTCALL (e.g. PyDict_Size
     * returning -1 with an exception pending). */
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23148, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional args first, then merge keyword args and verify
       * that every required argument was supplied. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23148, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23148, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "event_set_wait_v2", 0) < (0)) __PYX_ERR(0, 23148, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("event_set_wait_v2", 1, 2, 2, i); __PYX_ERR(0, 23148, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23148, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23148, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported platforms). */
    __pyx_v_set = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_set == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23148, __pyx_L3_error)
    __pyx_v_timeoutms = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_timeoutms == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23148, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("event_set_wait_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23148, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_wait_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_404event_set_wait_v2(__pyx_self, __pyx_v_set, __pyx_v_timeoutms);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") entry for event_set_wait_v2: delegates to the cpdef C
 * implementation with skip_dispatch=1 and returns the resulting EventData
 * object (NULL on error, with an exception set). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_404event_set_wait_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_set, unsigned int __pyx_v_timeoutms) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("event_set_wait_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_event_set_wait_v2(__pyx_v_set, __pyx_v_timeoutms, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_wait_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23168
 * 
 * 
 * cpdef event_set_free(intptr_t set):             # <<<<<<<<<<<<<<
 *     """Releases events in the set.
 * 
*/

/* Forward declaration of the Python wrapper; defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_407event_set_free(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C ("cpdef") implementation of event_set_free: releases the GIL, calls
 * nvmlEventSetFree on the handle, re-acquires the GIL, and raises via
 * check_status on a non-success NVML status.  Returns None on success.
 * NOTE(review): `set` is an intptr_t reinterpreted as an NVML EventSet
 * handle — validity is the caller's responsibility. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_event_set_free(intptr_t __pyx_v_set, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("event_set_free", 0);

  /* "cuda/bindings/_nvml.pyx":23176
 *     .. seealso:: `nvmlEventSetFree`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlEventSetFree(<EventSet>set)
 *     check_status(__status__)
 */
  /* GIL released around the NVML call; errors jump to __pyx_L4_error, which
   * restores the thread state before falling through to __pyx_L1_error. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23177
 *     """
 *     with nogil:
 *         __status__ = nvmlEventSetFree(<EventSet>set)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetFree(((__pyx_t_4cuda_8bindings_5_nvml_EventSet)__pyx_v_set)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23177, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23176
 *     .. seealso:: `nvmlEventSetFree`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlEventSetFree(<EventSet>set)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23178
 *     with nogil:
 *         __status__ = nvmlEventSetFree(<EventSet>set)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23178, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23168
 * 
 * 
 * cpdef event_set_free(intptr_t set):             # <<<<<<<<<<<<<<
 *     """Releases events in the set.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_free", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_407event_set_free(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_406event_set_free, "event_set_free(intptr_t set)\n\nReleases events in the set.\n\nArgs:\n    set (intptr_t): Reference to events to be released.\n\n.. seealso:: `nvmlEventSetFree`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_407event_set_free = {"event_set_free", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_407event_set_free, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_406event_set_free};
/* CPython entry point for event_set_free(set): unpacks the single
 * positional/keyword argument, converts it to intptr_t, and delegates to the
 * impl function.  Unpacked argument references are released on all paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_407event_set_free(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_set;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("event_set_free (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_set,0};
    /* BUGFIX: comparison moved inside unlikely(); `unlikely(x) < 0` is
     * `__builtin_expect(!!(x), 0) < 0`, always false, so a failing
     * __Pyx_NumKwargs_FASTCALL (-1 with exception set) went undetected. */
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23168, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23168, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "event_set_free", 0) < (0)) __PYX_ERR(0, 23168, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("event_set_free", 1, 1, 1, i); __PYX_ERR(0, 23168, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23168, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported platforms). */
    __pyx_v_set = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_set == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23168, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("event_set_free", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23168, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_free", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_406event_set_free(__pyx_self, __pyx_v_set);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") entry for event_set_free: delegates to the cpdef C
 * implementation with skip_dispatch=1; returns None on success, NULL (with an
 * exception set) on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_406event_set_free(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_set) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("event_set_free", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_event_set_free(__pyx_v_set, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.event_set_free", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23181
 * 
 * 
 * cpdef system_event_set_create(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Create an empty set of system events. Event set should be freed by ``nvmlSystemEventSetFree``.
 * 
*/

/* Forward declaration of the Python wrapper; defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_409system_event_set_create(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C ("cpdef") implementation of system_event_set_create: releases the GIL,
 * calls nvmlSystemEventSetCreate with the request struct pointed to by
 * `request`, re-acquires the GIL, and raises via check_status on failure.
 * NOTE(review): `request` is an intptr_t reinterpreted as a
 * nvmlSystemEventSetCreateRequest_t*; the caller owns that memory. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_event_set_create(intptr_t __pyx_v_request, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_event_set_create", 0);

  /* "cuda/bindings/_nvml.pyx":23189
 *     .. seealso:: `nvmlSystemEventSetCreate`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemEventSetCreate(<nvmlSystemEventSetCreateRequest_t*>request)
 *     check_status(__status__)
 */
  /* GIL released around the NVML call; errors jump to __pyx_L4_error, which
   * restores the thread state before falling through to __pyx_L1_error. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23190
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemEventSetCreate(<nvmlSystemEventSetCreateRequest_t*>request)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetCreate(((nvmlSystemEventSetCreateRequest_t *)__pyx_v_request)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23190, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23189
 *     .. seealso:: `nvmlSystemEventSetCreate`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemEventSetCreate(<nvmlSystemEventSetCreateRequest_t*>request)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23191
 *     with nogil:
 *         __status__ = nvmlSystemEventSetCreate(<nvmlSystemEventSetCreateRequest_t*>request)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23191, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23181
 * 
 * 
 * cpdef system_event_set_create(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Create an empty set of system events. Event set should be freed by ``nvmlSystemEventSetFree``.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_create", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_409system_event_set_create(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_408system_event_set_create, "system_event_set_create(intptr_t request)\n\nCreate an empty set of system events. Event set should be freed by ``nvmlSystemEventSetFree``.\n\nArgs:\n    request (intptr_t): Reference to nvmlSystemEventSetCreateRequest_t.\n\n.. seealso:: `nvmlSystemEventSetCreate`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_409system_event_set_create = {"system_event_set_create", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_409system_event_set_create, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_408system_event_set_create};
/* CPython entry point for system_event_set_create(request): unpacks the single
 * positional/keyword argument, converts it to intptr_t, and delegates to the
 * impl function.  Unpacked argument references are released on all paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_409system_event_set_create(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_request;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_event_set_create (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_request,0};
    /* BUGFIX: comparison moved inside unlikely(); `unlikely(x) < 0` is
     * `__builtin_expect(!!(x), 0) < 0`, always false, so a failing
     * __Pyx_NumKwargs_FASTCALL (-1 with exception set) went undetected. */
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23181, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23181, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_event_set_create", 0) < (0)) __PYX_ERR(0, 23181, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_event_set_create", 1, 1, 1, i); __PYX_ERR(0, 23181, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23181, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported platforms). */
    __pyx_v_request = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_request == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23181, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_event_set_create", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23181, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_create", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_408system_event_set_create(__pyx_self, __pyx_v_request);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level ("def") entry for system_event_set_create: delegates to the
 * cpdef C implementation with skip_dispatch=1; returns None on success, NULL
 * (with an exception set) on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_408system_event_set_create(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_event_set_create", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_event_set_create(__pyx_v_request, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_create", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23194
 * 
 * 
 * cpdef system_event_set_free(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Releases system event set.
 * 
*/

/* Forward declaration of the Python wrapper; its definition begins below
 * (and continues past this section of the file). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_411system_event_set_free(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C ("cpdef") implementation of system_event_set_free: releases the GIL,
 * calls nvmlSystemEventSetFree with the request struct pointed to by
 * `request`, re-acquires the GIL, and raises via check_status on failure.
 * NOTE(review): `request` is an intptr_t reinterpreted as a
 * nvmlSystemEventSetFreeRequest_t*; the caller owns that memory. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_event_set_free(intptr_t __pyx_v_request, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_event_set_free", 0);

  /* "cuda/bindings/_nvml.pyx":23202
 *     .. seealso:: `nvmlSystemEventSetFree`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemEventSetFree(<nvmlSystemEventSetFreeRequest_t*>request)
 *     check_status(__status__)
 */
  /* GIL released around the NVML call; errors jump to __pyx_L4_error, which
   * restores the thread state before falling through to __pyx_L1_error. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23203
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemEventSetFree(<nvmlSystemEventSetFreeRequest_t*>request)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetFree(((nvmlSystemEventSetFreeRequest_t *)__pyx_v_request)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23203, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23202
 *     .. seealso:: `nvmlSystemEventSetFree`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemEventSetFree(<nvmlSystemEventSetFreeRequest_t*>request)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23204
 *     with nogil:
 *         __status__ = nvmlSystemEventSetFree(<nvmlSystemEventSetFreeRequest_t*>request)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23204, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23194
 * 
 * 
 * cpdef system_event_set_free(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Releases system event set.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_free", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_411system_event_set_free(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_410system_event_set_free, "system_event_set_free(intptr_t request)\n\nReleases system event set.\n\nArgs:\n    request (intptr_t): Reference to nvmlSystemEventSetFreeRequest_t.\n\n.. seealso:: `nvmlSystemEventSetFree`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_411system_event_set_free = {"system_event_set_free", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_411system_event_set_free, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_410system_event_set_free};
/* Python wrapper for `system_event_set_free(request)`: unpacks the single
 * positional-or-keyword argument `request` into an intptr_t and dispatches
 * to the cpdef implementation __pyx_pf_…410system_event_set_free.
 * Returns a new reference (Py_None on success) or NULL with an exception set.
 *
 * Review fix: the keyword-count error check used `unlikely(__pyx_kwds_len) < 0`.
 * Since unlikely(x) expands to __builtin_expect(!!(x), 0), the operand is
 * normalized to 0/1 before the comparison, so the `< 0` branch was dead and a
 * negative (error) result from __Pyx_NumKwargs_FASTCALL went undetected.
 * The comparison now lives inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_411system_event_set_free(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_request;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_event_set_free (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_request,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the previous form
     * `unlikely(__pyx_kwds_len) < 0` could never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23194, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23194, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_event_set_free", 0) < (0)) __PYX_ERR(0, 23194, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_event_set_free", 1, 1, 1, i); __PYX_ERR(0, 23194, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23194, __pyx_L3_error)
    }
    /* NOTE(review): converts via PyLong_AsSsize_t although the target is
     * intptr_t — Cython assumes the two have the same width here; the -1
     * sentinel plus PyErr_Occurred() distinguishes a real -1 from an error. */
    __pyx_v_request = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_request == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23194, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_event_set_free", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23194, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_free", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_410system_event_set_free(__pyx_self, __pyx_v_request);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatch shim for `system_event_set_free`: forwards the unpacked
 * intptr_t `request` to the C-level implementation (skip_dispatch=1, i.e. call
 * the C function directly) and returns its new PyObject* reference, or NULL
 * with a traceback entry appended on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_410system_event_set_free(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_event_set_free", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a Python exception raised inside the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_event_set_free(__pyx_v_request, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_free", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23207
 * 
 * 
 * cpdef system_register_events(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Starts recording of events on system and add the events to specified ``nvmlSystemEventSet_t``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_413system_register_events(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `system_register_events` (_nvml.pyx:23207):
 * releases the GIL, calls nvmlSystemRegisterEvents through the cy_nvml
 * dynamic-loading layer with `request` reinterpreted as a struct pointer,
 * re-acquires the GIL, and turns a non-success NVML status into a Python
 * exception via check_status(). Returns Py_None on success, 0 on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_register_events(intptr_t __pyx_v_request, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_register_events", 0);

  /* "cuda/bindings/_nvml.pyx":23215
 *     .. seealso:: `nvmlSystemRegisterEvents`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemRegisterEvents(<nvmlSystemRegisterEventRequest_t*>request)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23216
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemRegisterEvents(<nvmlSystemRegisterEventRequest_t*>request)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR means the NVML
         * symbol could not be loaded; the error check briefly re-acquires the
         * GIL (ErrOccurredWithGIL) to inspect the pending Python exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemRegisterEvents(((nvmlSystemRegisterEventRequest_t *)__pyx_v_request)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23216, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23215
 *     .. seealso:: `nvmlSystemRegisterEvents`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemRegisterEvents(<nvmlSystemRegisterEventRequest_t*>request)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block restore the thread state before
       * continuing; the error path then jumps to the function's error label. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23217
 *     with nogil:
 *         __status__ = nvmlSystemRegisterEvents(<nvmlSystemRegisterEventRequest_t*>request)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23217, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23207
 * 
 * 
 * cpdef system_register_events(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Starts recording of events on system and add the events to specified ``nvmlSystemEventSet_t``.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_register_events", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_413system_register_events(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_412system_register_events, "system_register_events(intptr_t request)\n\nStarts recording of events on system and add the events to specified ``nvmlSystemEventSet_t``.\n\nArgs:\n    request (intptr_t): Reference to the struct nvmlSystemRegisterEventRequest_t.\n\n.. seealso:: `nvmlSystemRegisterEvents`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_413system_register_events = {"system_register_events", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_413system_register_events, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_412system_register_events};
/* Python wrapper for `system_register_events(request)`: unpacks the single
 * positional-or-keyword argument `request` into an intptr_t and dispatches
 * to the cpdef implementation __pyx_pf_…412system_register_events.
 * Returns a new reference (Py_None on success) or NULL with an exception set.
 *
 * Review fix: `unlikely(__pyx_kwds_len) < 0` was a dead check — unlikely()
 * normalizes its operand to 0/1 via __builtin_expect(!!(x), 0), so the
 * comparison could never fire and a negative (error) result from
 * __Pyx_NumKwargs_FASTCALL was ignored. The comparison is now inside
 * unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_413system_register_events(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_request;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_register_events (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_request,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the previous form
     * `unlikely(__pyx_kwds_len) < 0` could never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23207, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23207, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_register_events", 0) < (0)) __PYX_ERR(0, 23207, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_register_events", 1, 1, 1, i); __PYX_ERR(0, 23207, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23207, __pyx_L3_error)
    }
    /* NOTE(review): PyLong_AsSsize_t feeds an intptr_t — Cython assumes equal
     * width; -1 sentinel plus PyErr_Occurred() disambiguates errors. */
    __pyx_v_request = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_request == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23207, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_register_events", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23207, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_register_events", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_412system_register_events(__pyx_self, __pyx_v_request);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatch shim for `system_register_events`: forwards the unpacked
 * intptr_t `request` to the C-level implementation (skip_dispatch=1) and
 * returns its new PyObject* reference, or NULL with a traceback entry
 * appended on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_412system_register_events(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_register_events", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a Python exception raised inside the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_register_events(__pyx_v_request, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23207, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_register_events", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23220
 * 
 * 
 * cpdef system_event_set_wait(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Waits on system events and delivers events.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_415system_event_set_wait(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `system_event_set_wait` (_nvml.pyx:23220):
 * releases the GIL, calls nvmlSystemEventSetWait through the cy_nvml
 * dynamic-loading layer with `request` reinterpreted as a struct pointer
 * (the call may block waiting for events — hence the nogil section),
 * re-acquires the GIL, and raises via check_status() on a bad NVML status.
 * Returns Py_None on success, 0 on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_event_set_wait(intptr_t __pyx_v_request, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_event_set_wait", 0);

  /* "cuda/bindings/_nvml.pyx":23228
 *     .. seealso:: `nvmlSystemEventSetWait`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemEventSetWait(<nvmlSystemEventSetWaitRequest_t*>request)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23229
 *     """
 *     with nogil:
 *         __status__ = nvmlSystemEventSetWait(<nvmlSystemEventSetWaitRequest_t*>request)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR means the NVML
         * symbol could not be loaded; the error check briefly re-acquires the
         * GIL (ErrOccurredWithGIL) to inspect the pending Python exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetWait(((nvmlSystemEventSetWaitRequest_t *)__pyx_v_request)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23229, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23228
 *     .. seealso:: `nvmlSystemEventSetWait`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemEventSetWait(<nvmlSystemEventSetWaitRequest_t*>request)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block restore the thread state before
       * continuing; the error path then jumps to the function's error label. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23230
 *     with nogil:
 *         __status__ = nvmlSystemEventSetWait(<nvmlSystemEventSetWaitRequest_t*>request)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23230, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23220
 * 
 * 
 * cpdef system_event_set_wait(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Waits on system events and delivers events.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_wait", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_415system_event_set_wait(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_414system_event_set_wait, "system_event_set_wait(intptr_t request)\n\nWaits on system events and delivers events.\n\nArgs:\n    request (intptr_t): Reference in which to nvmlSystemEventSetWaitRequest_t.\n\n.. seealso:: `nvmlSystemEventSetWait`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_415system_event_set_wait = {"system_event_set_wait", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_415system_event_set_wait, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_414system_event_set_wait};
/* Python wrapper for `system_event_set_wait(request)`: unpacks the single
 * positional-or-keyword argument `request` into an intptr_t and dispatches
 * to the cpdef implementation __pyx_pf_…414system_event_set_wait.
 * Returns a new reference (Py_None on success) or NULL with an exception set.
 *
 * Review fix: `unlikely(__pyx_kwds_len) < 0` was a dead check — unlikely()
 * normalizes its operand to 0/1 via __builtin_expect(!!(x), 0), so a
 * negative (error) result from __Pyx_NumKwargs_FASTCALL was ignored.
 * The comparison is now inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_415system_event_set_wait(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_request;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_event_set_wait (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_request,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the previous form
     * `unlikely(__pyx_kwds_len) < 0` could never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23220, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23220, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_event_set_wait", 0) < (0)) __PYX_ERR(0, 23220, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_event_set_wait", 1, 1, 1, i); __PYX_ERR(0, 23220, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23220, __pyx_L3_error)
    }
    /* NOTE(review): PyLong_AsSsize_t feeds an intptr_t — Cython assumes equal
     * width; -1 sentinel plus PyErr_Occurred() disambiguates errors. */
    __pyx_v_request = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_request == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23220, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_event_set_wait", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23220, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_wait", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_414system_event_set_wait(__pyx_self, __pyx_v_request);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatch shim for `system_event_set_wait`: forwards the unpacked
 * intptr_t `request` to the C-level implementation (skip_dispatch=1) and
 * returns its new PyObject* reference, or NULL with a traceback entry
 * appended on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_414system_event_set_wait(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_request) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_event_set_wait", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a Python exception raised inside the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_event_set_wait(__pyx_v_request, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_event_set_wait", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23233
 * 
 * 
 * cpdef device_modify_drain_state(intptr_t pci_info, int new_state):             # <<<<<<<<<<<<<<
 *     """Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before this call is made. Must be called as administrator. For Linux only.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_417device_modify_drain_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `device_modify_drain_state` (_nvml.pyx:23233):
 * releases the GIL, calls nvmlDeviceModifyDrainState through the cy_nvml
 * dynamic-loading layer with `pci_info` reinterpreted as nvmlPciInfo_t* and
 * `new_state` cast to the _EnableState enum, re-acquires the GIL, and raises
 * via check_status() on a bad NVML status. Returns Py_None on success, 0 on
 * error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_modify_drain_state(intptr_t __pyx_v_pci_info, int __pyx_v_new_state, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_modify_drain_state", 0);

  /* "cuda/bindings/_nvml.pyx":23242
 *     .. seealso:: `nvmlDeviceModifyDrainState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceModifyDrainState(<nvmlPciInfo_t*>pci_info, <_EnableState>new_state)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23243
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceModifyDrainState(<nvmlPciInfo_t*>pci_info, <_EnableState>new_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR means the NVML
         * symbol could not be loaded; the error check briefly re-acquires the
         * GIL (ErrOccurredWithGIL) to inspect the pending Python exception. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceModifyDrainState(((nvmlPciInfo_t *)__pyx_v_pci_info), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_new_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23243, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23242
 *     .. seealso:: `nvmlDeviceModifyDrainState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceModifyDrainState(<nvmlPciInfo_t*>pci_info, <_EnableState>new_state)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block restore the thread state before
       * continuing; the error path then jumps to the function's error label. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23244
 *     with nogil:
 *         __status__ = nvmlDeviceModifyDrainState(<nvmlPciInfo_t*>pci_info, <_EnableState>new_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23244, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23233
 * 
 * 
 * cpdef device_modify_drain_state(intptr_t pci_info, int new_state):             # <<<<<<<<<<<<<<
 *     """Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before this call is made. Must be called as administrator. For Linux only.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_modify_drain_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_417device_modify_drain_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_416device_modify_drain_state, "device_modify_drain_state(intptr_t pci_info, int new_state)\n\nModify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before this call is made. Must be called as administrator. For Linux only.\n\nArgs:\n    pci_info (intptr_t): The PCI address of the GPU drain state to be modified.\n    new_state (EnableState): The drain state that should be entered, see ``nvmlEnableState_t``.\n\n.. seealso:: `nvmlDeviceModifyDrainState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_417device_modify_drain_state = {"device_modify_drain_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_417device_modify_drain_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_416device_modify_drain_state};
/* Python wrapper for `device_modify_drain_state(pci_info, new_state)`:
 * unpacks two positional-or-keyword arguments (`pci_info` -> intptr_t,
 * `new_state` -> int) and dispatches to the cpdef implementation
 * __pyx_pf_…416device_modify_drain_state. Returns a new reference
 * (Py_None on success) or NULL with an exception set.
 *
 * Review fix: `unlikely(__pyx_kwds_len) < 0` was a dead check — unlikely()
 * normalizes its operand to 0/1 via __builtin_expect(!!(x), 0), so a
 * negative (error) result from __Pyx_NumKwargs_FASTCALL was ignored.
 * The comparison is now inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_417device_modify_drain_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_pci_info;
  int __pyx_v_new_state;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_modify_drain_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pci_info,&__pyx_mstate_global->__pyx_n_u_new_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); the previous form
     * `unlikely(__pyx_kwds_len) < 0` could never be true. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23233, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23233, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23233, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_modify_drain_state", 0) < (0)) __PYX_ERR(0, 23233, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_modify_drain_state", 1, 2, 2, i); __PYX_ERR(0, 23233, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23233, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23233, __pyx_L3_error)
    }
    /* NOTE(review): PyLong_AsSsize_t feeds an intptr_t — Cython assumes equal
     * width; -1 sentinels plus PyErr_Occurred() disambiguate errors. */
    __pyx_v_pci_info = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_pci_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23233, __pyx_L3_error)
    __pyx_v_new_state = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_new_state == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23233, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_modify_drain_state", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23233, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_modify_drain_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_416device_modify_drain_state(__pyx_self, __pyx_v_pci_info, __pyx_v_new_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatch shim for `device_modify_drain_state`: forwards the unpacked
 * intptr_t `pci_info` and int `new_state` to the C-level implementation
 * (skip_dispatch=1) and returns its new PyObject* reference, or NULL with a
 * traceback entry appended on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_416device_modify_drain_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info, int __pyx_v_new_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_modify_drain_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a Python exception raised inside the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_modify_drain_state(__pyx_v_pci_info, __pyx_v_new_state, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_modify_drain_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23247
 * 
 * 
 * cpdef int device_query_drain_state(intptr_t pci_info) except? -1:             # <<<<<<<<<<<<<<
 *     """Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining state. For Linux only.
 * 
*/

/* Forward declaration of the Python wrapper for device_query_drain_state
 * (the wrapper definition follows the C implementation below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_419device_query_drain_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C body of the cpdef function device_query_drain_state.
 * `pci_info` is an integer carrying a nvmlPciInfo_t pointer; the NVML call
 * runs with the GIL released, check_status turns a bad status into a Python
 * exception, and the queried _EnableState is returned as an int.
 * "except? -1" convention: -1 with a live Python exception means failure. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_query_drain_state(intptr_t __pyx_v_pci_info, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_current_state;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23259
 *     """
 *     cdef _EnableState current_state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceQueryDrainState(<nvmlPciInfo_t*>pci_info, &current_state)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the driver call ("with nogil"). */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23260
 *     cdef _EnableState current_state
 *     with nogil:
 *         __status__ = nvmlDeviceQueryDrainState(<nvmlPciInfo_t*>pci_info, &current_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>current_state
 */
        /* The sentinel status only signals an error when a Python exception
         * was actually raised while loading the NVML symbol; the exception
         * check briefly re-acquires the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceQueryDrainState(((nvmlPciInfo_t *)__pyx_v_pci_info), (&__pyx_v_current_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23260, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23259
 *     """
 *     cdef _EnableState current_state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceQueryDrainState(<nvmlPciInfo_t*>pci_info, &current_state)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and error paths. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23261
 *     with nogil:
 *         __status__ = nvmlDeviceQueryDrainState(<nvmlPciInfo_t*>pci_info, &current_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>current_state
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23261, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23262
 *         __status__ = nvmlDeviceQueryDrainState(<nvmlPciInfo_t*>pci_info, &current_state)
 *     check_status(__status__)
 *     return <int>current_state             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_current_state);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23247
 * 
 * 
 * cpdef int device_query_drain_state(intptr_t pci_info) except? -1:             # <<<<<<<<<<<<<<
 *     """Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining state. For Linux only.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_query_drain_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_419device_query_drain_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_418device_query_drain_state, "device_query_drain_state(intptr_t pci_info) -> int\n\nQuery the drain state of a GPU. This method is used to check if a GPU is in a currently draining state. For Linux only.\n\nArgs:\n    pci_info (intptr_t): The PCI address of the GPU drain state to be queried.\n\nReturns:\n    int: The current drain state for this GPU, see ``nvmlEnableState_t``.\n\n.. seealso:: `nvmlDeviceQueryDrainState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_419device_query_drain_state = {"device_query_drain_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_419device_query_drain_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_418device_query_drain_state};
/* METH_FASTCALL|METH_KEYWORDS wrapper: unpacks the single `pci_info`
 * argument (positional or keyword), converts it to intptr_t, and forwards
 * to __pyx_pf_..._418device_query_drain_state. Returns NULL with a Python
 * exception set on bad arguments or a failed call. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_419device_query_drain_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_pci_info;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_query_drain_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pci_info,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must live inside unlikely(). The original
     * `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
     * __builtin_expect(!!(x), 0) against 0, which is never true, so the
     * negative-length error path was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23247, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23247, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_query_drain_state", 0) < (0)) __PYX_ERR(0, 23247, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_query_drain_state", 1, 1, 1, i); __PYX_ERR(0, 23247, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23247, __pyx_L3_error)
    }
    /* NOTE(review): converts via Py_ssize_t; assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms. */
    __pyx_v_pci_info = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_pci_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23247, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_query_drain_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23247, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_query_drain_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_418device_query_drain_state(__pyx_self, __pyx_v_pci_info);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_query_drain_state: forwards the unpacked
 * argument to the cpdef C implementation (skip_dispatch=1) and boxes the
 * resulting int into a Python integer object. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_418device_query_drain_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_query_drain_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* "except? -1": -1 is only an error if a Python exception is also set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_query_drain_state(__pyx_v_pci_info, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23247, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23247, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_query_drain_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23265
 * 
 * 
 * cpdef device_remove_gpu_v2(intptr_t pci_info, int gpu_state, int link_state):             # <<<<<<<<<<<<<<
 *     """This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver as long as no other processes are attached. If other processes are attached, this call will return NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the only situation where a process can still be attached after :func:`device_modify_drain_state` is called to initiate the draining state is if that process was using, and is still using, a GPU before the call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled prior to this call.
 * 
*/

/* Forward declaration of the Python wrapper for device_remove_gpu_v2
 * (the wrapper definition follows the C implementation below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_421device_remove_gpu_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C body of the cpdef function device_remove_gpu_v2.
 * `pci_info` carries a nvmlPciInfo_t pointer as an integer; gpu_state and
 * link_state are cast to the NVML enum types. The NVML call runs with the
 * GIL released; check_status raises on a non-success status. Returns
 * Py_None on success, NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_remove_gpu_v2(intptr_t __pyx_v_pci_info, int __pyx_v_gpu_state, int __pyx_v_link_state, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_remove_gpu_v2", 0);

  /* "cuda/bindings/_nvml.pyx":23275
 *     .. seealso:: `nvmlDeviceRemoveGpu_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceRemoveGpu_v2(<nvmlPciInfo_t*>pci_info, <_DetachGpuState>gpu_state, <_PcieLinkState>link_state)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the driver call ("with nogil"). */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23276
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceRemoveGpu_v2(<nvmlPciInfo_t*>pci_info, <_DetachGpuState>gpu_state, <_PcieLinkState>link_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel status only signals an error when a Python exception
         * was raised while loading the NVML symbol (checked with the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRemoveGpu_v2(((nvmlPciInfo_t *)__pyx_v_pci_info), ((__pyx_t_4cuda_8bindings_5_nvml__DetachGpuState)__pyx_v_gpu_state), ((__pyx_t_4cuda_8bindings_5_nvml__PcieLinkState)__pyx_v_link_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23276, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23275
 *     .. seealso:: `nvmlDeviceRemoveGpu_v2`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceRemoveGpu_v2(<nvmlPciInfo_t*>pci_info, <_DetachGpuState>gpu_state, <_PcieLinkState>link_state)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and error paths. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23277
 *     with nogil:
 *         __status__ = nvmlDeviceRemoveGpu_v2(<nvmlPciInfo_t*>pci_info, <_DetachGpuState>gpu_state, <_PcieLinkState>link_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23277, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23265
 * 
 * 
 * cpdef device_remove_gpu_v2(intptr_t pci_info, int gpu_state, int link_state):             # <<<<<<<<<<<<<<
 *     """This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver as long as no other processes are attached. If other processes are attached, this call will return NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the only situation where a process can still be attached after :func:`device_modify_drain_state` is called to initiate the draining state is if that process was using, and is still using, a GPU before the call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled prior to this call.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_remove_gpu_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_421device_remove_gpu_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_420device_remove_gpu_v2, "device_remove_gpu_v2(intptr_t pci_info, int gpu_state, int link_state)\n\nThis method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver as long as no other processes are attached. If other processes are attached, this call will return NVML_ERROR_IN_USE and the GPU will be returned to its original \"draining\" state. Note: the only situation where a process can still be attached after :func:`device_modify_drain_state` is called to initiate the draining state is if that process was using, and is still using, a GPU before the call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled prior to this call.\n\nArgs:\n    pci_info (intptr_t): The PCI address of the GPU to be removed.\n    gpu_state (DetachGpuState): Whether the GPU is to be removed, from the OS see ``nvmlDetachGpuState_t``.\n    link_state (PcieLinkState): Requested upstream PCIe link state, see ``nvmlPcieLinkState_t``.\n\n.. seealso:: `nvmlDeviceRemoveGpu_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_421device_remove_gpu_v2 = {"device_remove_gpu_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_421device_remove_gpu_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_420device_remove_gpu_v2};
/* METH_FASTCALL|METH_KEYWORDS wrapper: unpacks the three arguments
 * (pci_info, gpu_state, link_state), converts them to C types, and forwards
 * to __pyx_pf_..._420device_remove_gpu_v2. Returns NULL with a Python
 * exception set on bad arguments or a failed call. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_421device_remove_gpu_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_pci_info;
  int __pyx_v_gpu_state;
  int __pyx_v_link_state;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_remove_gpu_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pci_info,&__pyx_mstate_global->__pyx_n_u_gpu_state,&__pyx_mstate_global->__pyx_n_u_link_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must live inside unlikely(). The original
     * `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
     * __builtin_expect(!!(x), 0) against 0, which is never true, so the
     * negative-length error path was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23265, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23265, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23265, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23265, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_remove_gpu_v2", 0) < (0)) __PYX_ERR(0, 23265, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_remove_gpu_v2", 1, 3, 3, i); __PYX_ERR(0, 23265, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23265, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23265, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23265, __pyx_L3_error)
    }
    /* NOTE(review): pci_info converts via Py_ssize_t; assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms. */
    __pyx_v_pci_info = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_pci_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23265, __pyx_L3_error)
    __pyx_v_gpu_state = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_gpu_state == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23265, __pyx_L3_error)
    __pyx_v_link_state = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_link_state == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23265, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_remove_gpu_v2", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 23265, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_remove_gpu_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_420device_remove_gpu_v2(__pyx_self, __pyx_v_pci_info, __pyx_v_gpu_state, __pyx_v_link_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_remove_gpu_v2: forwards the already-converted
 * arguments to the cpdef C implementation (skip_dispatch=1) and returns its
 * result (Py_None on success, NULL + exception on failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_420device_remove_gpu_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info, int __pyx_v_gpu_state, int __pyx_v_link_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_remove_gpu_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_remove_gpu_v2(__pyx_v_pci_info, __pyx_v_gpu_state, __pyx_v_link_state, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23265, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_remove_gpu_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23280
 * 
 * 
 * cpdef device_discover_gpus(intptr_t pci_info):             # <<<<<<<<<<<<<<
 *     """Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order.
 * 
*/

/* Forward declaration of the Python wrapper for device_discover_gpus
 * (the wrapper definition follows the C implementation below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_423device_discover_gpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C body of the cpdef function device_discover_gpus.
 * `pci_info` carries a nvmlPciInfo_t pointer as an integer. The NVML call
 * runs with the GIL released; check_status raises on a non-success status.
 * Returns Py_None on success, NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_discover_gpus(intptr_t __pyx_v_pci_info, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_discover_gpus", 0);

  /* "cuda/bindings/_nvml.pyx":23288
 *     .. seealso:: `nvmlDeviceDiscoverGpus`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceDiscoverGpus(<nvmlPciInfo_t*>pci_info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the driver call ("with nogil"). */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23289
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceDiscoverGpus(<nvmlPciInfo_t*>pci_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel status only signals an error when a Python exception
         * was raised while loading the NVML symbol (checked with the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceDiscoverGpus(((nvmlPciInfo_t *)__pyx_v_pci_info)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23289, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23288
 *     .. seealso:: `nvmlDeviceDiscoverGpus`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceDiscoverGpus(<nvmlPciInfo_t*>pci_info)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and error paths. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23290
 *     with nogil:
 *         __status__ = nvmlDeviceDiscoverGpus(<nvmlPciInfo_t*>pci_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23290, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23280
 * 
 * 
 * cpdef device_discover_gpus(intptr_t pci_info):             # <<<<<<<<<<<<<<
 *     """Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_discover_gpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_423device_discover_gpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_422device_discover_gpus, "device_discover_gpus(intptr_t pci_info)\n\nRequest the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order.\n\nArgs:\n    pci_info (intptr_t): The PCI tree to be searched. Only the domain, bus, and device fields are used in this call.\n\n.. seealso:: `nvmlDeviceDiscoverGpus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_423device_discover_gpus = {"device_discover_gpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_423device_discover_gpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_422device_discover_gpus};
/* METH_FASTCALL|METH_KEYWORDS wrapper: unpacks the single `pci_info`
 * argument, converts it to intptr_t, and forwards to
 * __pyx_pf_..._422device_discover_gpus. Returns NULL with a Python
 * exception set on bad arguments or a failed call. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_423device_discover_gpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_pci_info;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_discover_gpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pci_info,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must live inside unlikely(). The original
     * `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
     * __builtin_expect(!!(x), 0) against 0, which is never true, so the
     * negative-length error path was dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23280, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23280, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_discover_gpus", 0) < (0)) __PYX_ERR(0, 23280, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_discover_gpus", 1, 1, 1, i); __PYX_ERR(0, 23280, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23280, __pyx_L3_error)
    }
    /* NOTE(review): converts via Py_ssize_t; assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t) on supported platforms. */
    __pyx_v_pci_info = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_pci_info == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23280, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_discover_gpus", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23280, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_discover_gpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_422device_discover_gpus(__pyx_self, __pyx_v_pci_info);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for device_discover_gpus: forwards the unpacked
 * argument to the cpdef C implementation (skip_dispatch=1) and returns its
 * result (Py_None on success, NULL + exception on failure). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_422device_discover_gpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_pci_info) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_discover_gpus", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_discover_gpus(__pyx_v_pci_info, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_discover_gpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23293
 * 
 * 
 * cpdef int device_get_virtualization_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """This method is used to get the virtualization mode corresponding to the GPU.
 * 
*/

/* Forward declaration of the Python wrapper for
 * device_get_virtualization_mode (definition appears later in the file). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_425device_get_virtualization_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of `cpdef int device_get_virtualization_mode(intptr_t device) except? -1`.
 * Releases the GIL around the NVML call, then re-acquires it before
 * check_status() (which may raise).  Returns the virtualization mode as int;
 * on error returns -1 with a Python exception set ("except? -1" means a
 * genuine -1 result is disambiguated via PyErr_Occurred() by callers). */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_virtualization_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__GpuVirtualizationMode __pyx_v_p_virtual_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23305
 *     """
 *     cdef _GpuVirtualizationMode p_virtual_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVirtualizationMode(<Device>device, &p_virtual_mode)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23306
 *     cdef _GpuVirtualizationMode p_virtual_mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetVirtualizationMode(<Device>device, &p_virtual_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>p_virtual_mode
 */
        /* Sentinel status signals the NVML shared library failed to load;
         * the error-check macro re-acquires the GIL to test PyErr_Occurred(). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVirtualizationMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_p_virtual_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23306, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23305
 *     """
 *     cdef _GpuVirtualizationMode p_virtual_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVirtualizationMode(<Device>device, &p_virtual_mode)
 *     check_status(__status__)
 */
      /* Both exits from the nogil block restore the thread state before
       * continuing with GIL-holding code. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23307
 *     with nogil:
 *         __status__ = nvmlDeviceGetVirtualizationMode(<Device>device, &p_virtual_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>p_virtual_mode
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23307, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23308
 *         __status__ = nvmlDeviceGetVirtualizationMode(<Device>device, &p_virtual_mode)
 *     check_status(__status__)
 *     return <int>p_virtual_mode             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_p_virtual_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23293
 * 
 * 
 * cpdef int device_get_virtualization_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """This method is used to get the virtualization mode corresponding to the GPU.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_virtualization_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for device_get_virtualization_mode(device).
 * Parses exactly one positional-or-keyword argument ("device") using the
 * METH_FASTCALL calling convention (or a tuple/dict fallback), converts it
 * to intptr_t, and delegates to the pf impl.  Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_425device_get_virtualization_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_424device_get_virtualization_mode, "device_get_virtualization_mode(intptr_t device) -> int\n\nThis method is used to get the virtualization mode corresponding to the GPU.\n\nArgs:\n    device (intptr_t): Identifier of the target device.\n\nReturns:\n    int: Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_?.\n\n.. seealso:: `nvmlDeviceGetVirtualizationMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_425device_get_virtualization_mode = {"device_get_virtualization_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_425device_get_virtualization_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_424device_get_virtualization_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_425device_get_virtualization_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* borrowed-or-owned arg slots; decref'd on every exit path */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_virtualization_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: compare the length itself inside unlikely(); the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared __builtin_expect's 0/1 result
     * and could never be true, so a negative (error) length went unchecked. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23293, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23293, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_virtualization_mode", 0) < (0)) __PYX_ERR(0, 23293, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_virtualization_mode", 1, 1, 1, i); __PYX_ERR(0, 23293, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23293, __pyx_L3_error)
    }
    /* -1 is both a valid conversion result and the error sentinel, hence the
     * extra PyErr_Occurred() test. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23293, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_virtualization_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23293, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_virtualization_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_424device_get_virtualization_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level impl slot for device_get_virtualization_mode(): calls the
 * cpdef C function (skip_dispatch=1), boxes the int result into a Python
 * int, and returns it.  -1 is only an error if PyErr_Occurred(). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_424device_get_virtualization_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_virtualization_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_virtualization_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23293, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23293, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_virtualization_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23311
 * 
 * 
 * cpdef int device_get_host_vgpu_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries if SR-IOV host operation is supported on a vGPU supported device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_427device_get_host_vgpu_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of `cpdef int device_get_host_vgpu_mode(intptr_t device) except? -1`.
 * Releases the GIL around the NVML query, re-acquires it for check_status()
 * (which may raise), and returns the host vGPU mode as int.  -1 signals an
 * error only in combination with a set Python exception. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_get_host_vgpu_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__HostVgpuMode __pyx_v_p_host_vgpu_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23323
 *     """
 *     cdef _HostVgpuMode p_host_vgpu_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHostVgpuMode(<Device>device, &p_host_vgpu_mode)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23324
 *     cdef _HostVgpuMode p_host_vgpu_mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetHostVgpuMode(<Device>device, &p_host_vgpu_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>p_host_vgpu_mode
 */
        /* Sentinel status means the NVML library itself failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostVgpuMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_p_host_vgpu_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23324, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23323
 *     """
 *     cdef _HostVgpuMode p_host_vgpu_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHostVgpuMode(<Device>device, &p_host_vgpu_mode)
 *     check_status(__status__)
 */
      /* Restore the thread state on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23325
 *     with nogil:
 *         __status__ = nvmlDeviceGetHostVgpuMode(<Device>device, &p_host_vgpu_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>p_host_vgpu_mode
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23325, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23326
 *         __status__ = nvmlDeviceGetHostVgpuMode(<Device>device, &p_host_vgpu_mode)
 *     check_status(__status__)
 *     return <int>p_host_vgpu_mode             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_p_host_vgpu_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23311
 * 
 * 
 * cpdef int device_get_host_vgpu_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries if SR-IOV host operation is supported on a vGPU supported device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_host_vgpu_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for device_get_host_vgpu_mode(device).
 * Parses one positional-or-keyword argument ("device"), converts it to
 * intptr_t, and delegates to the pf impl.  Returns a new reference or
 * NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_427device_get_host_vgpu_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_426device_get_host_vgpu_mode, "device_get_host_vgpu_mode(intptr_t device) -> int\n\nQueries if SR-IOV host operation is supported on a vGPU supported device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    int: Reference in which to return the current vGPU mode.\n\n.. seealso:: `nvmlDeviceGetHostVgpuMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_427device_get_host_vgpu_mode = {"device_get_host_vgpu_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_427device_get_host_vgpu_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_426device_get_host_vgpu_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_427device_get_host_vgpu_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* arg slots, decref'd on every exit path */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_host_vgpu_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: compare the length inside unlikely(); `unlikely(len) < 0` tested
     * __builtin_expect's 0/1 result and was always false (dead error check). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23311, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23311, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_host_vgpu_mode", 0) < (0)) __PYX_ERR(0, 23311, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_host_vgpu_mode", 1, 1, 1, i); __PYX_ERR(0, 23311, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23311, __pyx_L3_error)
    }
    /* -1 doubles as the error sentinel, hence the PyErr_Occurred() test. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23311, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_host_vgpu_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23311, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_host_vgpu_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_426device_get_host_vgpu_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level impl slot for device_get_host_vgpu_mode(): calls the cpdef
 * C function (skip_dispatch=1), boxes the int result, and returns it.
 * -1 is only an error when PyErr_Occurred() is also set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_426device_get_host_vgpu_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_host_vgpu_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_host_vgpu_mode(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23311, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23311, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_host_vgpu_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23329
 * 
 * 
 * cpdef device_set_virtualization_mode(intptr_t device, int virtual_mode):             # <<<<<<<<<<<<<<
 *     """This method is used to set the virtualization mode corresponding to the GPU.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_429device_set_virtualization_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of `cpdef device_set_virtualization_mode(intptr_t device, int virtual_mode)`.
 * Releases the GIL around the NVML setter, re-acquires it for check_status()
 * (which may raise), and returns None on success or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_virtualization_mode(intptr_t __pyx_v_device, int __pyx_v_virtual_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_virtualization_mode", 0);

  /* "cuda/bindings/_nvml.pyx":23338
 *     .. seealso:: `nvmlDeviceSetVirtualizationMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVirtualizationMode(<Device>device, <_GpuVirtualizationMode>virtual_mode)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23339
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetVirtualizationMode(<Device>device, <_GpuVirtualizationMode>virtual_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* Sentinel status means the NVML library itself failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVirtualizationMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__GpuVirtualizationMode)__pyx_v_virtual_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23339, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23338
 *     .. seealso:: `nvmlDeviceSetVirtualizationMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVirtualizationMode(<Device>device, <_GpuVirtualizationMode>virtual_mode)
 *     check_status(__status__)
 */
      /* Restore the thread state on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23340
 *     with nogil:
 *         __status__ = nvmlDeviceSetVirtualizationMode(<Device>device, <_GpuVirtualizationMode>virtual_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23340, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23329
 * 
 * 
 * cpdef device_set_virtualization_mode(intptr_t device, int virtual_mode):             # <<<<<<<<<<<<<<
 *     """This method is used to set the virtualization mode corresponding to the GPU.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_virtualization_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython entry point for device_set_virtualization_mode(device, virtual_mode).
 * Parses two positional-or-keyword arguments, converts them to intptr_t and
 * int respectively, and delegates to the pf impl.  Returns None (new ref)
 * on success or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_429device_set_virtualization_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_428device_set_virtualization_mode, "device_set_virtualization_mode(intptr_t device, int virtual_mode)\n\nThis method is used to set the virtualization mode corresponding to the GPU.\n\nArgs:\n    device (intptr_t): Identifier of the target device.\n    virtual_mode (GpuVirtualizationMode): virtualization mode. One of NVML_GPU_VIRTUALIZATION_?.\n\n.. seealso:: `nvmlDeviceSetVirtualizationMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_429device_set_virtualization_mode = {"device_set_virtualization_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_429device_set_virtualization_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_428device_set_virtualization_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_429device_set_virtualization_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_virtual_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};  /* arg slots, decref'd on every exit path */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_virtualization_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_virtual_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: compare the length inside unlikely(); `unlikely(len) < 0` tested
     * __builtin_expect's 0/1 result and was always false (dead error check). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23329, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23329, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23329, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_virtualization_mode", 0) < (0)) __PYX_ERR(0, 23329, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_virtualization_mode", 1, 2, 2, i); __PYX_ERR(0, 23329, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23329, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23329, __pyx_L3_error)
    }
    /* -1 doubles as the error sentinel for both conversions, hence the
     * PyErr_Occurred() tests. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23329, __pyx_L3_error)
    __pyx_v_virtual_mode = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_virtual_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23329, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_virtualization_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23329, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_virtualization_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_428device_set_virtualization_mode(__pyx_self, __pyx_v_device, __pyx_v_virtual_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level impl slot for device_set_virtualization_mode(): forwards to
 * the cpdef C implementation (skip_dispatch=1) and returns its result
 * (None on success), or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_428device_set_virtualization_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_virtual_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_virtualization_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_virtualization_mode(__pyx_v_device, __pyx_v_virtual_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_virtualization_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23343
 * 
 * 
 * cpdef object device_get_vgpu_heterogeneous_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the vGPU heterogeneous mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_431device_get_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef device_get_vgpu_heterogeneous_mode(intptr_t device).
 * Allocates a VgpuHeterogeneousMode_v1 Python wrapper, stamps its embedded
 * nvmlVgpuHeterogeneousMode_t with the versioned-struct tag
 * (sizeof(v1 struct) | (1 << 24)), calls nvmlDeviceGetVgpuHeterogeneousMode
 * with the GIL released, raises via check_status() on a non-success NVML
 * return code, and returns the populated wrapper object (new reference).
 * Returns NULL with a pending exception on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_heterogeneous_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_p_heterogeneous_mode_py = 0;
  nvmlVgpuHeterogeneousMode_t *__pyx_v_p_heterogeneous_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_heterogeneous_mode", 0);

  /* "cuda/bindings/_nvml.pyx":23354
 *     .. seealso:: `nvmlDeviceGetVgpuHeterogeneousMode`
 *     """
 *     cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
*/
  /* Instantiate the VgpuHeterogeneousMode_v1 wrapper via a zero-argument
   * vectorcall on its type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23354, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_heterogeneous_mode_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23355
 *     """
 *     cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1()
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* _get_ptr() returns the address of the wrapper's backing C struct as an
   * integer; cast it back to the NVML struct pointer. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_p_heterogeneous_mode_py->__pyx_vtab)->_get_ptr(__pyx_v_p_heterogeneous_mode_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23355, __pyx_L1_error)
  __pyx_v_p_heterogeneous_mode = ((nvmlVgpuHeterogeneousMode_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23356
 *     cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1()
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuHeterogeneousMode(<Device>device, p_heterogeneous_mode)
*/
  /* NVML versioned-struct convention: low bits = struct size, version 1 in the
   * upper byte (1 << 24 == 0x1000000). */
  __pyx_v_p_heterogeneous_mode->version = ((sizeof(nvmlVgpuHeterogeneousMode_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23357
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuHeterogeneousMode(<Device>device, p_heterogeneous_mode)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23358
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuHeterogeneousMode(<Device>device, p_heterogeneous_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_heterogeneous_mode_py
*/
        /* The sentinel status _NVMLRETURN_T_INTERNAL_LOADING_ERROR indicates the
         * NVML symbol could not be loaded; in that case an exception may already
         * be pending and is checked with the GIL temporarily re-acquired. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuHeterogeneousMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_p_heterogeneous_mode); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23358, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23357
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuHeterogeneousMode(<Device>device, p_heterogeneous_mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23359
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuHeterogeneousMode(<Device>device, p_heterogeneous_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_heterogeneous_mode_py
 * 
*/
  /* check_status raises the mapped Python exception (returning 1) when the
   * NVML status is not NVML_SUCCESS. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23359, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23360
 *         __status__ = nvmlDeviceGetVgpuHeterogeneousMode(<Device>device, p_heterogeneous_mode)
 *     check_status(__status__)
 *     return p_heterogeneous_mode_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_heterogeneous_mode_py);
  __pyx_r = ((PyObject *)__pyx_v_p_heterogeneous_mode_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23343
 * 
 * 
 * cpdef object device_get_vgpu_heterogeneous_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the vGPU heterogeneous mode for the device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_heterogeneous_mode_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_431device_get_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_430device_get_vgpu_heterogeneous_mode, "device_get_vgpu_heterogeneous_mode(intptr_t device)\n\nGet the vGPU heterogeneous mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlVgpuHeterogeneousMode_v1_t: Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t.\n\n.. seealso:: `nvmlDeviceGetVgpuHeterogeneousMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_431device_get_vgpu_heterogeneous_mode = {"device_get_vgpu_heterogeneous_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_431device_get_vgpu_heterogeneous_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_430device_get_vgpu_heterogeneous_mode};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point) for
 * device_get_vgpu_heterogeneous_mode(device). Unpacks the single positional
 * or keyword argument `device`, converts it to intptr_t, and forwards to the
 * __pyx_pf_ implementation. Returns the result object or NULL on error.
 *
 * FIX(review): the keyword-length error check was written
 *   `if (unlikely(__pyx_kwds_len) < 0)`, but unlikely(x) expands to
 *   __builtin_expect(!!(x), 0), which yields only 0 or 1 — so `< 0` was
 *   always false and a negative (error) result from __Pyx_NumKwargs_FASTCALL
 *   was silently ignored. The comparison now sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_431device_get_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_heterogeneous_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23343, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23343, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_heterogeneous_mode", 0) < (0)) __PYX_ERR(0, 23343, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_heterogeneous_mode", 1, 1, 1, i); __PYX_ERR(0, 23343, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23343, __pyx_L3_error)
    }
    /* intptr_t conversion via PyLong_AsSsize_t (Cython assumes intptr_t and
     * Py_ssize_t share a representation on supported platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23343, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_heterogeneous_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23343, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_430device_get_vgpu_heterogeneous_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for device_get_vgpu_heterogeneous_mode.
 * Delegates to the cpdef C fast-path with __pyx_skip_dispatch=1 and returns
 * the resulting wrapper object, or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_430device_get_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_heterogeneous_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result means a Python exception was raised inside the C fast-path. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_heterogeneous_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23363
 * 
 * 
 * cpdef device_set_vgpu_heterogeneous_mode(intptr_t device, intptr_t p_heterogeneous_mode):             # <<<<<<<<<<<<<<
 *     """Enable or disable vGPU heterogeneous mode for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_433device_set_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef device_set_vgpu_heterogeneous_mode(device,
 * p_heterogeneous_mode). `p_heterogeneous_mode` is a caller-supplied raw
 * pointer (passed as intptr_t) to an nvmlVgpuHeterogeneousMode_t; it is cast
 * and handed directly to the NVML driver call with the GIL released.
 * Raises via check_status() on a non-success status; returns None on success,
 * NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_heterogeneous_mode(intptr_t __pyx_v_device, intptr_t __pyx_v_p_heterogeneous_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_vgpu_heterogeneous_mode", 0);

  /* "cuda/bindings/_nvml.pyx":23372
 *     .. seealso:: `nvmlDeviceSetVgpuHeterogeneousMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVgpuHeterogeneousMode(<Device>device, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)
 *     check_status(__status__)
*/
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23373
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetVgpuHeterogeneousMode(<Device>device, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals a symbol
         * loading failure; the pending-exception check briefly re-takes the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuHeterogeneousMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlVgpuHeterogeneousMode_t const *)__pyx_v_p_heterogeneous_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23373, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23372
 *     .. seealso:: `nvmlDeviceSetVgpuHeterogeneousMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVgpuHeterogeneousMode(<Device>device, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23374
 *     with nogil:
 *         __status__ = nvmlDeviceSetVgpuHeterogeneousMode(<Device>device, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Raises the mapped Python exception (check_status returns 1) on failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23374, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23363
 * 
 * 
 * cpdef device_set_vgpu_heterogeneous_mode(intptr_t device, intptr_t p_heterogeneous_mode):             # <<<<<<<<<<<<<<
 *     """Enable or disable vGPU heterogeneous mode for the device.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_433device_set_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_432device_set_vgpu_heterogeneous_mode, "device_set_vgpu_heterogeneous_mode(intptr_t device, intptr_t p_heterogeneous_mode)\n\nEnable or disable vGPU heterogeneous mode for the device.\n\nArgs:\n    device (intptr_t): Identifier of the target device.\n    p_heterogeneous_mode (intptr_t): Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t.\n\n.. seealso:: `nvmlDeviceSetVgpuHeterogeneousMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_433device_set_vgpu_heterogeneous_mode = {"device_set_vgpu_heterogeneous_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_433device_set_vgpu_heterogeneous_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_432device_set_vgpu_heterogeneous_mode};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point) for
 * device_set_vgpu_heterogeneous_mode(device, p_heterogeneous_mode).
 * Unpacks two positional/keyword arguments, converts both to intptr_t, and
 * forwards to the __pyx_pf_ implementation. Returns None or NULL on error.
 *
 * FIX(review): `if (unlikely(__pyx_kwds_len) < 0)` was a dead check —
 * unlikely(x) is __builtin_expect(!!(x), 0), yielding 0/1, never negative —
 * so an error return from __Pyx_NumKwargs_FASTCALL went undetected. The
 * comparison is now inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_433device_set_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_p_heterogeneous_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_vgpu_heterogeneous_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_p_heterogeneous_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23363, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23363, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23363, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_vgpu_heterogeneous_mode", 0) < (0)) __PYX_ERR(0, 23363, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_vgpu_heterogeneous_mode", 1, 2, 2, i); __PYX_ERR(0, 23363, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23363, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23363, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23363, __pyx_L3_error)
    __pyx_v_p_heterogeneous_mode = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_p_heterogeneous_mode == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23363, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_vgpu_heterogeneous_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23363, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_432device_set_vgpu_heterogeneous_mode(__pyx_self, __pyx_v_device, __pyx_v_p_heterogeneous_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for device_set_vgpu_heterogeneous_mode.
 * Delegates to the cpdef C fast-path with __pyx_skip_dispatch=1 and returns
 * its result (None on success), or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_432device_set_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_p_heterogeneous_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_vgpu_heterogeneous_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result means a Python exception was raised inside the C fast-path. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_heterogeneous_mode(__pyx_v_device, __pyx_v_p_heterogeneous_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23363, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23377
 * 
 * 
 * cpdef object vgpu_instance_get_placement_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the placement ID of active vGPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_435vgpu_instance_get_placement_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef vgpu_instance_get_placement_id(unsigned int
 * vgpu_instance). Allocates a VgpuPlacementId_v1 wrapper, stamps its embedded
 * nvmlVgpuPlacementId_t with the versioned-struct tag
 * (sizeof(v1 struct) | (1 << 24)), calls nvmlVgpuInstanceGetPlacementId with
 * the GIL released, raises via check_status() on failure, and returns the
 * populated wrapper (new reference) or NULL with a pending exception. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_placement_id(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *__pyx_v_p_placement_py = 0;
  nvmlVgpuPlacementId_t *__pyx_v_p_placement;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_placement_id", 0);

  /* "cuda/bindings/_nvml.pyx":23388
 *     .. seealso:: `nvmlVgpuInstanceGetPlacementId`
 *     """
 *     cdef VgpuPlacementId_v1 p_placement_py = VgpuPlacementId_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementId_t *p_placement = <nvmlVgpuPlacementId_t *><intptr_t>(p_placement_py._get_ptr())
 *     p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24)
*/
  /* Instantiate the VgpuPlacementId_v1 wrapper via a zero-argument vectorcall
   * on its type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23388, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_placement_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23389
 *     """
 *     cdef VgpuPlacementId_v1 p_placement_py = VgpuPlacementId_v1()
 *     cdef nvmlVgpuPlacementId_t *p_placement = <nvmlVgpuPlacementId_t *><intptr_t>(p_placement_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* _get_ptr() returns the wrapper's backing C struct address as an integer;
   * cast it back to the NVML struct pointer. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)__pyx_v_p_placement_py->__pyx_vtab)->_get_ptr(__pyx_v_p_placement_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23389, __pyx_L1_error)
  __pyx_v_p_placement = ((nvmlVgpuPlacementId_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23390
 *     cdef VgpuPlacementId_v1 p_placement_py = VgpuPlacementId_v1()
 *     cdef nvmlVgpuPlacementId_t *p_placement = <nvmlVgpuPlacementId_t *><intptr_t>(p_placement_py._get_ptr())
 *     p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetPlacementId(<nvmlVgpuInstance_t>vgpu_instance, p_placement)
*/
  /* NVML versioned-struct convention: size in the low bits, version 1 in the
   * upper byte (1 << 24 == 0x1000000). */
  __pyx_v_p_placement->version = ((sizeof(nvmlVgpuPlacementId_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23391
 *     cdef nvmlVgpuPlacementId_t *p_placement = <nvmlVgpuPlacementId_t *><intptr_t>(p_placement_py._get_ptr())
 *     p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetPlacementId(<nvmlVgpuInstance_t>vgpu_instance, p_placement)
 *     check_status(__status__)
*/
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23392
 *     p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetPlacementId(<nvmlVgpuInstance_t>vgpu_instance, p_placement)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_placement_py
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals a symbol
         * loading failure; the pending-exception check briefly re-takes the GIL. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetPlacementId(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_p_placement); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23392, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23391
 *     cdef nvmlVgpuPlacementId_t *p_placement = <nvmlVgpuPlacementId_t *><intptr_t>(p_placement_py._get_ptr())
 *     p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetPlacementId(<nvmlVgpuInstance_t>vgpu_instance, p_placement)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23393
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetPlacementId(<nvmlVgpuInstance_t>vgpu_instance, p_placement)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_placement_py
 * 
*/
  /* Raises the mapped Python exception (check_status returns 1) on failure. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23393, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23394
 *         __status__ = nvmlVgpuInstanceGetPlacementId(<nvmlVgpuInstance_t>vgpu_instance, p_placement)
 *     check_status(__status__)
 *     return p_placement_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_placement_py);
  __pyx_r = ((PyObject *)__pyx_v_p_placement_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23377
 * 
 * 
 * cpdef object vgpu_instance_get_placement_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the placement ID of active vGPU instance.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_placement_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_placement_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_435vgpu_instance_get_placement_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_434vgpu_instance_get_placement_id, "vgpu_instance_get_placement_id(unsigned int vgpu_instance)\n\nQuery the placement ID of active vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    nvmlVgpuPlacementId_v1_t: Pointer to vGPU placement ID structure ``nvmlVgpuPlacementId_t``.\n\n.. seealso:: `nvmlVgpuInstanceGetPlacementId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_435vgpu_instance_get_placement_id = {"vgpu_instance_get_placement_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_435vgpu_instance_get_placement_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_434vgpu_instance_get_placement_id};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point) for
 * vgpu_instance_get_placement_id(vgpu_instance). Unpacks the single
 * positional/keyword argument, converts it to unsigned int, and forwards to
 * the __pyx_pf_ implementation. Returns the result object or NULL on error.
 *
 * FIX(review): `if (unlikely(__pyx_kwds_len) < 0)` was a dead check —
 * unlikely(x) is __builtin_expect(!!(x), 0), yielding 0/1, never negative —
 * so an error return from __Pyx_NumKwargs_FASTCALL went undetected. The
 * comparison is now inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_435vgpu_instance_get_placement_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_placement_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: `if (unlikely(__pyx_kwds_len) < 0)` — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23377, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23377, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_placement_id", 0) < (0)) __PYX_ERR(0, 23377, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_placement_id", 1, 1, 1, i); __PYX_ERR(0, 23377, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23377, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23377, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_placement_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23377, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_placement_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_434vgpu_instance_get_placement_id(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body for the cpdef function `vgpu_instance_get_placement_id`.
 * Delegates to the C-level implementation (__pyx_f_...) with
 * __pyx_skip_dispatch=1 and returns its PyObject* result.
 * Returns NULL with a traceback entry appended on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_434vgpu_instance_get_placement_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_placement_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C-level cpdef implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_placement_id(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23377, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_placement_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23397
 * 
 * 
 * cpdef object device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Query the supported vGPU placement ID of the vGPU type.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_437device_get_vgpu_type_supported_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * `device_get_vgpu_type_supported_placements`:
 *   1. allocates a VgpuPlacementList_v2 Python wrapper object,
 *   2. obtains its underlying nvmlVgpuPlacementList_t* via _get_ptr(),
 *   3. calls nvmlDeviceGetVgpuTypeSupportedPlacements with the GIL released,
 *   4. raises via check_status() on a non-success status,
 *   5. returns a new reference to the wrapper object (NULL on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_supported_placements(intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_p_placement_list_py = 0;
  nvmlVgpuPlacementList_t *__pyx_v_p_placement_list;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_type_supported_placements", 0);

  /* "cuda/bindings/_nvml.pyx":23409
 *     .. seealso:: `nvmlDeviceGetVgpuTypeSupportedPlacements`
 *     """
 *     cdef VgpuPlacementList_v2 p_placement_list_py = VgpuPlacementList_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementList_t *p_placement_list = <nvmlVgpuPlacementList_t *><intptr_t>(p_placement_list_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate VgpuPlacementList_v2() via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23409, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_placement_list_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23410
 *     """
 *     cdef VgpuPlacementList_v2 p_placement_list_py = VgpuPlacementList_v2()
 *     cdef nvmlVgpuPlacementList_t *p_placement_list = <nvmlVgpuPlacementList_t *><intptr_t>(p_placement_list_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, p_placement_list)
 */
  /* _get_ptr() returns the address of the wrapped C struct as an intptr_t. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_p_placement_list_py->__pyx_vtab)->_get_ptr(__pyx_v_p_placement_list_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23410, __pyx_L1_error)
  __pyx_v_p_placement_list = ((nvmlVgpuPlacementList_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23411
 *     cdef VgpuPlacementList_v2 p_placement_list_py = VgpuPlacementList_v2()
 *     cdef nvmlVgpuPlacementList_t *p_placement_list = <nvmlVgpuPlacementList_t *><intptr_t>(p_placement_list_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, p_placement_list)
 *     check_status(__status__)
 */
  /* The NVML call runs without the GIL; both exit paths restore it. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23412
 *     cdef nvmlVgpuPlacementList_t *p_placement_list = <nvmlVgpuPlacementList_t *><intptr_t>(p_placement_list_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, p_placement_list)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_placement_list_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeSupportedPlacements(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_p_placement_list); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23412, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23411
 *     cdef VgpuPlacementList_v2 p_placement_list_py = VgpuPlacementList_v2()
 *     cdef nvmlVgpuPlacementList_t *p_placement_list = <nvmlVgpuPlacementList_t *><intptr_t>(p_placement_list_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, p_placement_list)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23413
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, p_placement_list)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_placement_list_py
 * 
 */
  /* check_status returns 1 (and sets a Python exception) on NVML failure. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23413, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23414
 *         __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, p_placement_list)
 *     check_status(__status__)
 *     return p_placement_list_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_placement_list_py);
  __pyx_r = ((PyObject *)__pyx_v_p_placement_list_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23397
 * 
 * 
 * cpdef object device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Query the supported vGPU placement ID of the vGPU type.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_type_supported_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_placement_list_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_437device_get_vgpu_type_supported_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_436device_get_vgpu_type_supported_placements, "device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id)\n\nQuery the supported vGPU placement ID of the vGPU type.\n\nArgs:\n    device (intptr_t): Identifier of the target device.\n    vgpu_type_id (unsigned int): Handle to vGPU type. The vGPU type ID.\n\nReturns:\n    nvmlVgpuPlacementList_v2_t: Pointer to the vGPU placement structure ``nvmlVgpuPlacementList_t``.\n\n.. seealso:: `nvmlDeviceGetVgpuTypeSupportedPlacements`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_437device_get_vgpu_type_supported_placements = {"device_get_vgpu_type_supported_placements", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_437device_get_vgpu_type_supported_placements, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_436device_get_vgpu_type_supported_placements};
/* METH_FASTCALL wrapper: unpacks (device, vgpu_type_id) from positional
 * and/or keyword arguments, converts them to intptr_t / unsigned int, and
 * dispatches to the __pyx_pf_... body.  Returns NULL on argument errors. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_437device_get_vgpu_type_supported_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_type_supported_placements (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must be inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of unlikely()
     * with 0 and is therefore always false, silently ignoring a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23397, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23397, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23397, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_type_supported_placements", 0) < (0)) __PYX_ERR(0, 23397, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_type_supported_placements", 1, 2, 2, i); __PYX_ERR(0, 23397, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23397, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23397, __pyx_L3_error)
    }
    /* Convert: device -> intptr_t (via Py_ssize_t), vgpu_type_id -> unsigned int. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23397, __pyx_L3_error)
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23397, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_type_supported_placements", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23397, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_type_supported_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_436device_get_vgpu_type_supported_placements(__pyx_self, __pyx_v_device, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body for the cpdef function
 * `device_get_vgpu_type_supported_placements`: delegates to the C-level
 * implementation with __pyx_skip_dispatch=1 and returns its PyObject*
 * result, or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_436device_get_vgpu_type_supported_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_type_supported_placements", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL from the C implementation means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_supported_placements(__pyx_v_device, __pyx_v_vgpu_type_id, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_type_supported_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23417
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_gsp_heap_size(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static GSP heap size of the vGPU type in bytes.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_439vgpu_type_get_gsp_heap_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function `vgpu_type_get_gsp_heap_size`
 * (declared `except? 0`): calls nvmlVgpuTypeGetGspHeapSize with the GIL
 * released, raises via check_status() on a non-success status, and returns
 * the heap size in bytes.  Returns 0 on error (callers must also check
 * PyErr_Occurred(), since 0 can be a legitimate value). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gsp_heap_size(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_gsp_heap_size;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23429
 *     """
 *     cdef unsigned long long gsp_heap_size
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetGspHeapSize(<nvmlVgpuTypeId_t>vgpu_type_id, &gsp_heap_size)
 *     check_status(__status__)
 */
  /* The NVML call runs without the GIL; both exit paths restore it. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23430
 *     cdef unsigned long long gsp_heap_size
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetGspHeapSize(<nvmlVgpuTypeId_t>vgpu_type_id, &gsp_heap_size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gsp_heap_size
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGspHeapSize(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_gsp_heap_size)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23430, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23429
 *     """
 *     cdef unsigned long long gsp_heap_size
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetGspHeapSize(<nvmlVgpuTypeId_t>vgpu_type_id, &gsp_heap_size)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23431
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetGspHeapSize(<nvmlVgpuTypeId_t>vgpu_type_id, &gsp_heap_size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return gsp_heap_size
 * 
 */
  /* check_status returns 1 (and sets a Python exception) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23431, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23432
 *         __status__ = nvmlVgpuTypeGetGspHeapSize(<nvmlVgpuTypeId_t>vgpu_type_id, &gsp_heap_size)
 *     check_status(__status__)
 *     return gsp_heap_size             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_gsp_heap_size;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23417
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_gsp_heap_size(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static GSP heap size of the vGPU type in bytes.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_gsp_heap_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_439vgpu_type_get_gsp_heap_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_438vgpu_type_get_gsp_heap_size, "vgpu_type_get_gsp_heap_size(unsigned int vgpu_type_id) -> unsigned long long\n\nRetrieve the static GSP heap size of the vGPU type in bytes.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned long long: Reference to return the GSP heap size value.\n\n.. seealso:: `nvmlVgpuTypeGetGspHeapSize`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_439vgpu_type_get_gsp_heap_size = {"vgpu_type_get_gsp_heap_size", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_439vgpu_type_get_gsp_heap_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_438vgpu_type_get_gsp_heap_size};
/* METH_FASTCALL wrapper: unpacks the single `vgpu_type_id` argument
 * (positional or keyword), converts it to unsigned int, and dispatches to
 * the __pyx_pf_... body.  Returns NULL on argument errors. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_439vgpu_type_get_gsp_heap_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_gsp_heap_size (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must be inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` is always false and would ignore a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23417, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23417, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_gsp_heap_size", 0) < (0)) __PYX_ERR(0, 23417, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_gsp_heap_size", 1, 1, 1, i); __PYX_ERR(0, 23417, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23417, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23417, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_gsp_heap_size", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23417, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_gsp_heap_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_438vgpu_type_get_gsp_heap_size(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible body for the cpdef function `vgpu_type_get_gsp_heap_size`:
 * calls the C-level implementation (declared `except? 0`), converts the
 * unsigned long long result to a Python int, and returns it.
 * Returns NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_438vgpu_type_get_gsp_heap_size(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_gsp_heap_size", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0`: a 0 result only signals an error if an exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gsp_heap_size(__pyx_v_vgpu_type_id, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 23417, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_gsp_heap_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23435
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_fb_reservation(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static framebuffer reservation of the vGPU type in bytes.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_441vgpu_type_get_fb_reservation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function `vgpu_type_get_fb_reservation`
 * (declared `except? 0`): calls nvmlVgpuTypeGetFbReservation with the GIL
 * released, raises via check_status() on a non-success status, and returns
 * the framebuffer reservation in bytes.  Returns 0 on error (callers must
 * also check PyErr_Occurred(), since 0 can be a legitimate value). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_fb_reservation(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_fb_reservation;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23447
 *     """
 *     cdef unsigned long long fb_reservation
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetFbReservation(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_reservation)
 *     check_status(__status__)
 */
  /* The NVML call runs without the GIL; both exit paths restore it. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23448
 *     cdef unsigned long long fb_reservation
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetFbReservation(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_reservation)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fb_reservation
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFbReservation(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_fb_reservation)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23448, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23447
 *     """
 *     cdef unsigned long long fb_reservation
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetFbReservation(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_reservation)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23449
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetFbReservation(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_reservation)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fb_reservation
 * 
 */
  /* check_status returns 1 (and sets a Python exception) on NVML failure. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23449, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23450
 *         __status__ = nvmlVgpuTypeGetFbReservation(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_reservation)
 *     check_status(__status__)
 *     return fb_reservation             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_fb_reservation;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23435
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_fb_reservation(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static framebuffer reservation of the vGPU type in bytes.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_fb_reservation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_441vgpu_type_get_fb_reservation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_440vgpu_type_get_fb_reservation, "vgpu_type_get_fb_reservation(unsigned int vgpu_type_id) -> unsigned long long\n\nRetrieve the static framebuffer reservation of the vGPU type in bytes.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned long long: Reference to return the framebuffer reservation.\n\n.. seealso:: `nvmlVgpuTypeGetFbReservation`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_441vgpu_type_get_fb_reservation = {"vgpu_type_get_fb_reservation", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_441vgpu_type_get_fb_reservation, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_440vgpu_type_get_fb_reservation};
/* METH_FASTCALL wrapper: unpacks the single `vgpu_type_id` argument
 * (positional or keyword), converts it to unsigned int, and dispatches to
 * the __pyx_pf_... body.  Returns NULL on argument errors. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_441vgpu_type_get_fb_reservation(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_fb_reservation (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the `< 0` comparison must be inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` is always false and would ignore a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23435, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23435, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_fb_reservation", 0) < (0)) __PYX_ERR(0, 23435, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_fb_reservation", 1, 1, 1, i); __PYX_ERR(0, 23435, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23435, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23435, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_fb_reservation", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23435, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_fb_reservation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_440vgpu_type_get_fb_reservation(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-call implementation for vgpu_type_get_fb_reservation().
 * Receives the already-unboxed C argument from the wrapper, invokes the cpdef
 * C fast-path (skip_dispatch=1, so no Python-override lookup), and boxes the
 * unsigned 64-bit result into a new Python int.  Returns NULL with an exception
 * set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_440vgpu_type_get_fb_reservation(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_fb_reservation", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is a valid return value, so it only signals an error when a Python
   * exception is also pending (PyErr_Occurred). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_fb_reservation(__pyx_v_vgpu_type_id, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 23435, __pyx_L1_error)
  /* Box the C unsigned long long into a Python int (new reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop any temporary reference and record the traceback frame. */
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_fb_reservation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23453
 * 
 * 
 * cpdef object vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the currently used runtime state size of the vGPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_443vgpu_instance_get_runtime_state_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated C fast-path for the cpdef function
 * vgpu_instance_get_runtime_state_size(vgpu_instance).
 * Allocates a Python-owned VgpuRuntimeState_v1 wrapper object, initialises the
 * versioned NVML struct it wraps, calls nvmlVgpuInstanceGetRuntimeStateSize
 * with the GIL released, raises via check_status() on a non-success NVML
 * status, and returns the wrapper object (new reference).  Returns NULL with
 * an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_runtime_state_size(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *__pyx_v_p_state_py = 0;
  nvmlVgpuRuntimeState_t *__pyx_v_p_state;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_runtime_state_size", 0);

  /* "cuda/bindings/_nvml.pyx":23464
 *     .. seealso:: `nvmlVgpuInstanceGetRuntimeStateSize`
 *     """
 *     cdef VgpuRuntimeState_v1 p_state_py = VgpuRuntimeState_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuRuntimeState_t *p_state = <nvmlVgpuRuntimeState_t *><intptr_t>(p_state_py._get_ptr())
 *     p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24)
 */
  /* Zero-argument vectorcall of the VgpuRuntimeState_v1 type to construct the
   * result holder object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23464, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_state_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23465
 *     """
 *     cdef VgpuRuntimeState_v1 p_state_py = VgpuRuntimeState_v1()
 *     cdef nvmlVgpuRuntimeState_t *p_state = <nvmlVgpuRuntimeState_t *><intptr_t>(p_state_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() exposes the address of the wrapped C struct as an intptr_t;
   * cast it back to the NVML struct pointer for the C call below. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)__pyx_v_p_state_py->__pyx_vtab)->_get_ptr(__pyx_v_p_state_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23465, __pyx_L1_error)
  __pyx_v_p_state = ((nvmlVgpuRuntimeState_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23466
 *     cdef VgpuRuntimeState_v1 p_state_py = VgpuRuntimeState_v1()
 *     cdef nvmlVgpuRuntimeState_t *p_state = <nvmlVgpuRuntimeState_t *><intptr_t>(p_state_py._get_ptr())
 *     p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetRuntimeStateSize(<nvmlVgpuInstance_t>vgpu_instance, p_state)
 */
  /* NVML versioned-struct convention: low bytes = struct size, byte 3 =
   * API version (1 << 24 encodes version 1). */
  __pyx_v_p_state->version = ((sizeof(nvmlVgpuRuntimeState_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23467
 *     cdef nvmlVgpuRuntimeState_t *p_state = <nvmlVgpuRuntimeState_t *><intptr_t>(p_state_py._get_ptr())
 *     p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetRuntimeStateSize(<nvmlVgpuInstance_t>vgpu_instance, p_state)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the (potentially blocking) NVML
   * driver call; both exit paths restore the thread state. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23468
 *     p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetRuntimeStateSize(<nvmlVgpuInstance_t>vgpu_instance, p_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_state_py
 */
        /* The sentinel status _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals that
         * the lazy symbol loader failed; confirm with the (GIL-acquiring)
         * exception check before treating it as an error. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetRuntimeStateSize(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_p_state); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23468, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23467
 *     cdef nvmlVgpuRuntimeState_t *p_state = <nvmlVgpuRuntimeState_t *><intptr_t>(p_state_py._get_ptr())
 *     p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetRuntimeStateSize(<nvmlVgpuInstance_t>vgpu_instance, p_state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23469
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetRuntimeStateSize(<nvmlVgpuInstance_t>vgpu_instance, p_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_state_py
 * 
 */
  /* check_status() raises the mapped Python exception (returns 1) when the
   * NVML status is not NVML_SUCCESS. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23469, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23470
 *         __status__ = nvmlVgpuInstanceGetRuntimeStateSize(<nvmlVgpuInstance_t>vgpu_instance, p_state)
 *     check_status(__status__)
 *     return p_state_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_state_py);
  __pyx_r = ((PyObject *)__pyx_v_p_state_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23453
 * 
 * 
 * cpdef object vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the currently used runtime state size of the vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_runtime_state_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owned reference to the wrapper; the returned object (if
   * any) already holds its own reference from the INCREF above. */
  __Pyx_XDECREF((PyObject *)__pyx_v_p_state_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_443vgpu_instance_get_runtime_state_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_442vgpu_instance_get_runtime_state_size, "vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance)\n\nRetrieve the currently used runtime state size of the vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    nvmlVgpuRuntimeState_v1_t: Pointer to the vGPU runtime state's structure ``nvmlVgpuRuntimeState_t``.\n\n.. seealso:: `nvmlVgpuInstanceGetRuntimeStateSize`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_443vgpu_instance_get_runtime_state_size = {"vgpu_instance_get_runtime_state_size", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_443vgpu_instance_get_runtime_state_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_442vgpu_instance_get_runtime_state_size};
/* Cython-generated Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point)
 * for vgpu_instance_get_runtime_state_size(vgpu_instance).
 * Parses one required positional-or-keyword argument ("vgpu_instance"),
 * converts it to unsigned int, and delegates to the __pyx_pf_ implementation.
 * Raises TypeError on a wrong argument count or unknown keyword. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_443vgpu_instance_get_runtime_state_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_runtime_state_size (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; recover the positional count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23453, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keywords present — collect positionals, then merge keyword
       * values and verify every required slot was filled. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23453, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_runtime_state_size", 0) < (0)) __PYX_ERR(0, 23453, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_runtime_state_size", 1, 1, 1, i); __PYX_ERR(0, 23453, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23453, __pyx_L3_error)
    }
    /* Unbox to C unsigned int; (unsigned int)-1 is the conversion-error
     * sentinel, confirmed by PyErr_Occurred(). */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23453, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_runtime_state_size", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23453, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: release any argument references taken above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_runtime_state_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_442vgpu_instance_get_runtime_state_size(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-call implementation for
 * vgpu_instance_get_runtime_state_size(): forwards the unboxed C argument to
 * the cpdef C fast-path (skip_dispatch=1) and returns its PyObject* result
 * (a VgpuRuntimeState_v1 wrapper).  Returns NULL with an exception set on
 * failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_442vgpu_instance_get_runtime_state_size(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_runtime_state_size", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_runtime_state_size(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_runtime_state_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23473
 * 
 * 
 * cpdef device_set_vgpu_capabilities(intptr_t device, int capability, int state):             # <<<<<<<<<<<<<<
 *     """Set the desirable vGPU capability of a device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_445device_set_vgpu_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated C fast-path for the cpdef function
 * device_set_vgpu_capabilities(device, capability, state).
 * Calls nvmlDeviceSetVgpuCapabilities with the GIL released, raises via
 * check_status() on a non-success NVML status, and returns None on success
 * (NULL with an exception set on failure). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_capabilities(intptr_t __pyx_v_device, int __pyx_v_capability, int __pyx_v_state, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_vgpu_capabilities", 0);

  /* "cuda/bindings/_nvml.pyx":23483
 *     .. seealso:: `nvmlDeviceSetVgpuCapabilities`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, <_EnableState>state)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML driver call; both exit
   * paths restore the thread state. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23484
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, <_EnableState>state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* Sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR means the lazy symbol
         * loader failed; confirm with the GIL-acquiring exception check. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuCapabilities(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__DeviceVgpuCapability)__pyx_v_capability), ((__pyx_t_4cuda_8bindings_5_nvml__EnableState)__pyx_v_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23484, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23483
 *     .. seealso:: `nvmlDeviceSetVgpuCapabilities`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, <_EnableState>state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23485
 *     with nogil:
 *         __status__ = nvmlDeviceSetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, <_EnableState>state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status() raises the mapped Python exception (returns 1) when the
   * NVML status is not NVML_SUCCESS. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23485, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23473
 * 
 * 
 * cpdef device_set_vgpu_capabilities(intptr_t device, int capability, int state):             # <<<<<<<<<<<<<<
 *     """Set the desirable vGPU capability of a device.
 * 
 */

  /* function exit code */
  /* cpdef with no explicit return value: success yields None. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_445device_set_vgpu_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_444device_set_vgpu_capabilities, "device_set_vgpu_capabilities(intptr_t device, int capability, int state)\n\nSet the desirable vGPU capability of a device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    capability (DeviceVgpuCapability): Specifies the ``nvmlDeviceVgpuCapability_t`` to be set.\n    state (EnableState): The target capability mode.\n\n.. seealso:: `nvmlDeviceSetVgpuCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_445device_set_vgpu_capabilities = {"device_set_vgpu_capabilities", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_445device_set_vgpu_capabilities, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_444device_set_vgpu_capabilities};
/* Cython-generated Python wrapper (METH_FASTCALL|METH_KEYWORDS entry point)
 * for device_set_vgpu_capabilities(device, capability, state).
 * Parses three required positional-or-keyword arguments, converts them to
 * intptr_t / int / int, and delegates to the __pyx_pf_ implementation.
 * Raises TypeError on a wrong argument count or unknown keyword. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_445device_set_vgpu_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_capability;
  int __pyx_v_state;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_vgpu_capabilities (wrapper)", 0);
  /* Non-FASTCALL builds receive a tuple; recover the positional count. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_capability,&__pyx_mstate_global->__pyx_n_u_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23473, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keywords present — gather positionals (falling through from
       * the highest count), merge keyword values, then verify all slots. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23473, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23473, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23473, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_vgpu_capabilities", 0) < (0)) __PYX_ERR(0, 23473, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_vgpu_capabilities", 1, 3, 3, i); __PYX_ERR(0, 23473, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23473, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23473, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23473, __pyx_L3_error)
    }
    /* Unbox: -1 is each conversion's error sentinel, confirmed by
     * PyErr_Occurred().  intptr_t is converted via PyLong_AsSsize_t. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23473, __pyx_L3_error)
    __pyx_v_capability = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_capability == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23473, __pyx_L3_error)
    __pyx_v_state = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_state == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23473, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_vgpu_capabilities", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 23473, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing error: release any argument references taken above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_444device_set_vgpu_capabilities(__pyx_self, __pyx_v_device, __pyx_v_capability, __pyx_v_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-call implementation for
 * device_set_vgpu_capabilities(): forwards the unboxed C arguments to the
 * cpdef C fast-path (skip_dispatch=1) and returns its result (None on
 * success).  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_444device_set_vgpu_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_capability, int __pyx_v_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_vgpu_capabilities", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_capabilities(__pyx_v_device, __pyx_v_capability, __pyx_v_state, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23488
 * 
 * 
 * cpdef object device_get_grid_licensable_features_v4(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU Software licensable features.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_447device_get_grid_licensable_features_v4(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Cython-generated C fast-path for the cpdef function
 * device_get_grid_licensable_features_v4(device).
 * Allocates a Python-owned GridLicensableFeatures wrapper object, calls
 * nvmlDeviceGetGridLicensableFeatures_v4 with the GIL released to fill the
 * wrapped struct, raises via check_status() on a non-success NVML status,
 * and returns the wrapper object (new reference).  Returns NULL with an
 * exception set on failure.  Unlike the runtime-state variant above, this
 * struct carries no version field to initialise. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_grid_licensable_features_v4(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *__pyx_v_p_grid_licensable_features_py = 0;
  nvmlGridLicensableFeatures_t *__pyx_v_p_grid_licensable_features;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_grid_licensable_features_v4", 0);

  /* "cuda/bindings/_nvml.pyx":23499
 *     .. seealso:: `nvmlDeviceGetGridLicensableFeatures_v4`
 *     """
 *     cdef GridLicensableFeatures p_grid_licensable_features_py = GridLicensableFeatures()             # <<<<<<<<<<<<<<
 *     cdef nvmlGridLicensableFeatures_t *p_grid_licensable_features = <nvmlGridLicensableFeatures_t *><intptr_t>(p_grid_licensable_features_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the GridLicensableFeatures type to construct
   * the result holder object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23499, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_grid_licensable_features_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23500
 *     """
 *     cdef GridLicensableFeatures p_grid_licensable_features_py = GridLicensableFeatures()
 *     cdef nvmlGridLicensableFeatures_t *p_grid_licensable_features = <nvmlGridLicensableFeatures_t *><intptr_t>(p_grid_licensable_features_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGridLicensableFeatures_v4(<Device>device, p_grid_licensable_features)
 */
  /* _get_ptr() exposes the address of the wrapped C struct as an intptr_t;
   * cast it back to the NVML struct pointer for the C call below. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeatures *)__pyx_v_p_grid_licensable_features_py->__pyx_vtab)->_get_ptr(__pyx_v_p_grid_licensable_features_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23500, __pyx_L1_error)
  __pyx_v_p_grid_licensable_features = ((nvmlGridLicensableFeatures_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23501
 *     cdef GridLicensableFeatures p_grid_licensable_features_py = GridLicensableFeatures()
 *     cdef nvmlGridLicensableFeatures_t *p_grid_licensable_features = <nvmlGridLicensableFeatures_t *><intptr_t>(p_grid_licensable_features_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGridLicensableFeatures_v4(<Device>device, p_grid_licensable_features)
 *     check_status(__status__)
 */
  /* `with nogil:` — release the GIL around the NVML driver call; both exit
   * paths restore the thread state. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23502
 *     cdef nvmlGridLicensableFeatures_t *p_grid_licensable_features = <nvmlGridLicensableFeatures_t *><intptr_t>(p_grid_licensable_features_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetGridLicensableFeatures_v4(<Device>device, p_grid_licensable_features)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_grid_licensable_features_py
 */
        /* Sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR means the lazy symbol
         * loader failed; confirm with the GIL-acquiring exception check. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGridLicensableFeatures_v4(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_p_grid_licensable_features); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23502, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23501
 *     cdef GridLicensableFeatures p_grid_licensable_features_py = GridLicensableFeatures()
 *     cdef nvmlGridLicensableFeatures_t *p_grid_licensable_features = <nvmlGridLicensableFeatures_t *><intptr_t>(p_grid_licensable_features_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGridLicensableFeatures_v4(<Device>device, p_grid_licensable_features)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23503
 *     with nogil:
 *         __status__ = nvmlDeviceGetGridLicensableFeatures_v4(<Device>device, p_grid_licensable_features)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_grid_licensable_features_py
 * 
 */
  /* check_status() raises the mapped Python exception (returns 1) when the
   * NVML status is not NVML_SUCCESS. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23503, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23504
 *         __status__ = nvmlDeviceGetGridLicensableFeatures_v4(<Device>device, p_grid_licensable_features)
 *     check_status(__status__)
 *     return p_grid_licensable_features_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_grid_licensable_features_py);
  __pyx_r = ((PyObject *)__pyx_v_p_grid_licensable_features_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23488
 * 
 * 
 * cpdef object device_get_grid_licensable_features_v4(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU Software licensable features.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_grid_licensable_features_v4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owned reference to the wrapper; the returned object (if
   * any) already holds its own reference from the INCREF above. */
  __Pyx_XDECREF((PyObject *)__pyx_v_p_grid_licensable_features_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_447device_get_grid_licensable_features_v4(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_446device_get_grid_licensable_features_v4, "device_get_grid_licensable_features_v4(intptr_t device)\n\nRetrieve the vGPU Software licensable features.\n\nArgs:\n    device (intptr_t): Identifier of the target device.\n\nReturns:\n    nvmlGridLicensableFeatures_t: Pointer to structure in which vGPU software licensable features are returned.\n\n.. seealso:: `nvmlDeviceGetGridLicensableFeatures_v4`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_447device_get_grid_licensable_features_v4 = {"device_get_grid_licensable_features_v4", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_447device_get_grid_licensable_features_v4, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_446device_get_grid_licensable_features_v4};
/*
 * CPython entry point (METH_FASTCALL|METH_KEYWORDS) for
 * device_get_grid_licensable_features_v4.  Unpacks the single argument
 * `device` (positional or keyword) into an intptr_t and delegates to the
 * __pyx_pf_... implementation wrapper.  Returns a new reference, or NULL
 * with a Python exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_447device_get_grid_licensable_features_v4(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_grid_licensable_features_v4 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The previous form
       `unlikely(__pyx_kwds_len) < 0` expands (on GCC/Clang) to
       `__builtin_expect(!!(len), 0) < 0`, which yields 0 or 1 and is never
       negative, so a failed keyword-count query was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23488, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23488, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_grid_licensable_features_v4", 0) < (0)) __PYX_ERR(0, 23488, __pyx_L3_error)
      /* Verify every required argument ended up filled in. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_grid_licensable_features_v4", 1, 1, 1, i); __PYX_ERR(0, 23488, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23488, __pyx_L3_error)
    }
    /* Convert the Python integer to intptr_t (via Py_ssize_t, same width). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23488, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_grid_licensable_features_v4", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23488, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_grid_licensable_features_v4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_446device_get_grid_licensable_features_v4(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_446device_get_grid_licensable_features_v4(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Thin Python-level shim: invoke the cpdef C implementation directly
   * (skip_dispatch=1) and hand its new reference straight back to the
   * caller.  Returns NULL with an exception set on failure. */
  PyObject *retval = NULL;
  PyObject *result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_grid_licensable_features_v4", 0);
  result = __pyx_f_4cuda_8bindings_5_nvml_device_get_grid_licensable_features_v4(__pyx_v_device, 1);
  if (unlikely(!result)) __PYX_ERR(0, 23488, fail)
  __Pyx_GOTREF(result);
  /* Transfer ownership of the single reference to the return slot. */
  retval = result;
  result = NULL;
  goto done;

fail:
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_grid_licensable_features_v4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
done:
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":23507
 * 
 * 
 * cpdef unsigned int get_vgpu_driver_capabilities(int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested vGPU driver capability.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_449get_vgpu_driver_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef function get_vgpu_driver_capabilities.
 * Calls nvmlGetVgpuDriverCapabilities with the GIL released and returns the
 * queried capability value as unsigned int.  Error protocol is Cython's
 * "except? 0": 0 is returned on failure with a Python exception set (0 is
 * also a legal success value, hence the PyErr_Occurred() check at callers).
 * __pyx_skip_dispatch is unused (no Python-level override is possible).
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_get_vgpu_driver_capabilities(int __pyx_v_capability, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_cap_result;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23519
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetVgpuDriverCapabilities(<_VgpuDriverCapability>capability, &cap_result)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML call; it is restored on both
     the normal and the error exit of this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23520
 *     cdef unsigned int cap_result
 *     with nogil:
 *         __status__ = nvmlGetVgpuDriverCapabilities(<_VgpuDriverCapability>capability, &cap_result)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cap_result
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR plus a pending Python error
           signals a failure inside the cy_nvml call shim (presumably a
           library-loading problem — see cy_nvml for the exact contract). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuDriverCapabilities(((__pyx_t_4cuda_8bindings_5_nvml__VgpuDriverCapability)__pyx_v_capability), (&__pyx_v_cap_result)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23520, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23519
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetVgpuDriverCapabilities(<_VgpuDriverCapability>capability, &cap_result)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23521
 *     with nogil:
 *         __status__ = nvmlGetVgpuDriverCapabilities(<_VgpuDriverCapability>capability, &cap_result)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cap_result
 * 
 */
  /* check_status returns 1 when it has raised a Python exception for a
     non-success NVML status code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23521, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23522
 *         __status__ = nvmlGetVgpuDriverCapabilities(<_VgpuDriverCapability>capability, &cap_result)
 *     check_status(__status__)
 *     return cap_result             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_cap_result;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23507
 * 
 * 
 * cpdef unsigned int get_vgpu_driver_capabilities(int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested vGPU driver capability.
 * 
 */

  /* function exit code */
  /* Error exit: 0 is the "except? 0" sentinel; the exception is already set. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_driver_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_449get_vgpu_driver_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_448get_vgpu_driver_capabilities, "get_vgpu_driver_capabilities(int capability) -> unsigned int\n\nRetrieve the requested vGPU driver capability.\n\nArgs:\n    capability (VgpuDriverCapability): Specifies the ``nvmlVgpuDriverCapability_t`` to be queried.\n\nReturns:\n    unsigned int: A boolean for the queried capability indicating that feature is supported.\n\n.. seealso:: `nvmlGetVgpuDriverCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_449get_vgpu_driver_capabilities = {"get_vgpu_driver_capabilities", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_449get_vgpu_driver_capabilities, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_448get_vgpu_driver_capabilities};
/*
 * CPython entry point (METH_FASTCALL|METH_KEYWORDS) for
 * get_vgpu_driver_capabilities.  Unpacks the single argument `capability`
 * (positional or keyword) into an int and delegates to the __pyx_pf_...
 * implementation wrapper.  Returns a new reference, or NULL with a Python
 * exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_449get_vgpu_driver_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  int __pyx_v_capability;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_vgpu_driver_capabilities (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_capability,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The previous form
       `unlikely(__pyx_kwds_len) < 0` expands (on GCC/Clang) to
       `__builtin_expect(!!(len), 0) < 0`, which yields 0 or 1 and is never
       negative, so a failed keyword-count query was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23507, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23507, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "get_vgpu_driver_capabilities", 0) < (0)) __PYX_ERR(0, 23507, __pyx_L3_error)
      /* Verify every required argument ended up filled in. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("get_vgpu_driver_capabilities", 1, 1, 1, i); __PYX_ERR(0, 23507, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23507, __pyx_L3_error)
    }
    __pyx_v_capability = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_capability == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23507, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("get_vgpu_driver_capabilities", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23507, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_driver_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_448get_vgpu_driver_capabilities(__pyx_self, __pyx_v_capability);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_448get_vgpu_driver_capabilities(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_capability) {
  /* Python-object shim: run the cpdef C implementation directly
   * (skip_dispatch=1), then box the unsigned int result as a PyLong.
   * Returns a new reference, or NULL with an exception set. */
  PyObject *retval = NULL;
  PyObject *boxed = NULL;
  unsigned int cap_value;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_vgpu_driver_capabilities", 0);
  cap_value = __pyx_f_4cuda_8bindings_5_nvml_get_vgpu_driver_capabilities(__pyx_v_capability, 1);
  /* 0 is the "except? 0" sentinel: only an error if an exception is pending. */
  if (unlikely(cap_value == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23507, fail)
  boxed = __Pyx_PyLong_From_unsigned_int(cap_value);
  if (unlikely(!boxed)) __PYX_ERR(0, 23507, fail)
  __Pyx_GOTREF(boxed);
  /* Transfer ownership of the single reference to the return slot. */
  retval = boxed;
  boxed = NULL;
  goto done;

fail:
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_driver_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
done:
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":23525
 * 
 * 
 * cpdef unsigned int device_get_vgpu_capabilities(intptr_t device, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested vGPU capability for GPU.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_451device_get_vgpu_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef function device_get_vgpu_capabilities.
 * Calls nvmlDeviceGetVgpuCapabilities with the GIL released for the given
 * device handle and capability enum, and returns the queried value as
 * unsigned int.  Error protocol is Cython's "except? 0": 0 is returned on
 * failure with a Python exception set (0 is also a legal success value).
 * __pyx_skip_dispatch is unused (no Python-level override is possible).
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_capabilities(intptr_t __pyx_v_device, int __pyx_v_capability, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_cap_result;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23538
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, &cap_result)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML call; it is restored on both
     the normal and the error exit of this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23539
 *     cdef unsigned int cap_result
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, &cap_result)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cap_result
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR plus a pending Python error
           signals a failure inside the cy_nvml call shim (presumably a
           library-loading problem — see cy_nvml for the exact contract). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuCapabilities(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__DeviceVgpuCapability)__pyx_v_capability), (&__pyx_v_cap_result)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23539, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23538
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, &cap_result)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23540
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, &cap_result)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cap_result
 * 
 */
  /* check_status returns 1 when it has raised a Python exception for a
     non-success NVML status code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23540, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23541
 *         __status__ = nvmlDeviceGetVgpuCapabilities(<Device>device, <_DeviceVgpuCapability>capability, &cap_result)
 *     check_status(__status__)
 *     return cap_result             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_cap_result;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23525
 * 
 * 
 * cpdef unsigned int device_get_vgpu_capabilities(intptr_t device, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested vGPU capability for GPU.
 * 
 */

  /* function exit code */
  /* Error exit: 0 is the "except? 0" sentinel; the exception is already set. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_451device_get_vgpu_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_450device_get_vgpu_capabilities, "device_get_vgpu_capabilities(intptr_t device, int capability) -> unsigned int\n\nRetrieve the requested vGPU capability for GPU.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    capability (DeviceVgpuCapability): Specifies the ``nvmlDeviceVgpuCapability_t`` to be queried.\n\nReturns:\n    unsigned int: Specifies that the queried capability is supported, and also returns capability's data.\n\n.. seealso:: `nvmlDeviceGetVgpuCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_451device_get_vgpu_capabilities = {"device_get_vgpu_capabilities", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_451device_get_vgpu_capabilities, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_450device_get_vgpu_capabilities};
/*
 * CPython entry point (METH_FASTCALL|METH_KEYWORDS) for
 * device_get_vgpu_capabilities.  Unpacks the two arguments `device`
 * (intptr_t) and `capability` (int), positional or keyword, and delegates
 * to the __pyx_pf_... implementation wrapper.  Returns a new reference, or
 * NULL with a Python exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_451device_get_vgpu_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_capability;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_capabilities (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_capability,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  The previous form
       `unlikely(__pyx_kwds_len) < 0` expands (on GCC/Clang) to
       `__builtin_expect(!!(len), 0) < 0`, which yields 0 or 1 and is never
       negative, so a failed keyword-count query was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23525, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positionals, then parse keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23525, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23525, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_capabilities", 0) < (0)) __PYX_ERR(0, 23525, __pyx_L3_error)
      /* Verify every required argument ended up filled in. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_capabilities", 1, 2, 2, i); __PYX_ERR(0, 23525, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23525, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23525, __pyx_L3_error)
    }
    /* Convert the Python integers to C (device via Py_ssize_t, same width). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23525, __pyx_L3_error)
    __pyx_v_capability = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_capability == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23525, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_capabilities", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23525, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_450device_get_vgpu_capabilities(__pyx_self, __pyx_v_device, __pyx_v_capability);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_450device_get_vgpu_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_capability) {
  /* Python-object shim: run the cpdef C implementation directly
   * (skip_dispatch=1), then box the unsigned int result as a PyLong.
   * Returns a new reference, or NULL with an exception set. */
  PyObject *retval = NULL;
  PyObject *boxed = NULL;
  unsigned int cap_value;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_capabilities", 0);
  cap_value = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_capabilities(__pyx_v_device, __pyx_v_capability, 1);
  /* 0 is the "except? 0" sentinel: only an error if an exception is pending. */
  if (unlikely(cap_value == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23525, fail)
  boxed = __Pyx_PyLong_From_unsigned_int(cap_value);
  if (unlikely(!boxed)) __PYX_ERR(0, 23525, fail)
  __Pyx_GOTREF(boxed);
  /* Transfer ownership of the single reference to the return slot. */
  retval = boxed;
  boxed = NULL;
  goto done;

fail:
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
done:
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":23544
 * 
 * 
 * cpdef str vgpu_type_get_class(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). See ``nvmlConstants.NVML_DEVICE_NAME_BUFFER_SIZE``.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_453vgpu_type_get_class(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef function vgpu_type_get_class.
 * Two-phase NVML string query, both calls made with the GIL released:
 *   1. call nvmlVgpuTypeGetClass with a NULL buffer to learn the required
 *      size (written into size[0]);
 *   2. allocate a writable bytes object of that size and call again to
 *      fill it, then decode the NUL-terminated result into a Python str.
 * Returns a new str reference (empty str when size[0] == 0), or NULL with
 * a Python exception set.  __pyx_skip_dispatch is unused.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_class(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_size[1];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_v__vgpu_type_class_ = 0;
  char *__pyx_v_vgpu_type_class;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  char *__pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_class", 0);

  /* "cuda/bindings/_nvml.pyx":23552
 *     .. seealso:: `nvmlVgpuTypeGetClass`
 *     """
 *     cdef unsigned int[1] size = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)
 */
  /* size = [0]: one-element array used as an in/out length parameter. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_size[0]), __pyx_t_1, sizeof(__pyx_v_size[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":23553
 *     """
 *     cdef unsigned int[1] size = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)
 *     check_status_size(__status__)
 */
  /* Phase 1: size query.  GIL released around the NVML call and restored
     on both the normal and the error exit of this scope. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23554
 *     cdef unsigned int[1] size = [0]
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if size[0] == 0:
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR plus a pending Python error
           signals a failure inside the cy_nvml call shim (presumably a
           library-loading problem — see cy_nvml for the exact contract). */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetClass(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), NULL, ((unsigned int *)__pyx_v_size)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23554, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":23553
 *     """
 *     cdef unsigned int[1] size = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23555
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if size[0] == 0:
 *         return ""
 */
  /* check_status_size returns 1 when it has raised a Python exception for
     the size-query status (a size-specific variant of check_status). */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 23555, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23556
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)
 *     check_status_size(__status__)
 *     if size[0] == 0:             # <<<<<<<<<<<<<<
 *         return ""
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])
 */
  /* Zero required size: nothing to fetch, return the interned empty str. */
  __pyx_t_4 = ((__pyx_v_size[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":23557
 *     check_status_size(__status__)
 *     if size[0] == 0:
 *         return ""             # <<<<<<<<<<<<<<
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])
 *     cdef char* vgpu_type_class = _vgpu_type_class_
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__7);
    __pyx_r = __pyx_mstate_global->__pyx_kp_u__7;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":23556
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, NULL, <unsigned int*>size)
 *     check_status_size(__status__)
 *     if size[0] == 0:             # <<<<<<<<<<<<<<
 *         return ""
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])
 */
  }

  /* "cuda/bindings/_nvml.pyx":23558
 *     if size[0] == 0:
 *         return ""
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])             # <<<<<<<<<<<<<<
 *     cdef char* vgpu_type_class = _vgpu_type_class_
 *     with nogil:
 */
  /* bytes(size[0]) allocates a zero-filled bytes object of size[0] bytes,
     called here through the vectorcall fast path on PyBytes_Type. */
  __pyx_t_6 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_size[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_7};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)(&PyBytes_Type), __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23558, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  __pyx_v__vgpu_type_class_ = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23559
 *         return ""
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])
 *     cdef char* vgpu_type_class = _vgpu_type_class_             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_class, <unsigned int*>size)
 */
  /* Borrow the internal buffer of the freshly allocated bytes object; the
     bytes object itself stays alive in __pyx_v__vgpu_type_class_. */
  __pyx_t_9 = __Pyx_PyBytes_AsWritableString(__pyx_v__vgpu_type_class_); if (unlikely((!__pyx_t_9) && PyErr_Occurred())) __PYX_ERR(0, 23559, __pyx_L1_error)
  __pyx_v_vgpu_type_class = __pyx_t_9;

  /* "cuda/bindings/_nvml.pyx":23560
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])
 *     cdef char* vgpu_type_class = _vgpu_type_class_
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_class, <unsigned int*>size)
 *     check_status(__status__)
 */
  /* Phase 2: fill query into the allocated buffer, again without the GIL. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23561
 *     cdef char* vgpu_type_class = _vgpu_type_class_
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_class, <unsigned int*>size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(vgpu_type_class)
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetClass(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_vgpu_type_class, ((unsigned int *)__pyx_v_size)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23561, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":23560
 *     cdef bytes _vgpu_type_class_ = bytes(size[0])
 *     cdef char* vgpu_type_class = _vgpu_type_class_
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_class, <unsigned int*>size)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23562
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_class, <unsigned int*>size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(vgpu_type_class)
 * 
 */
  /* check_status returns 1 when it has raised a Python exception for a
     non-success NVML status code. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 23562, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23563
 *         __status__ = nvmlVgpuTypeGetClass(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_class, <unsigned int*>size)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(vgpu_type_class)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Decode the NUL-terminated C string into a new Python str (UTF-8). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = PyUnicode_FromString(__pyx_v_vgpu_type_class); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23544
 * 
 * 
 * cpdef str vgpu_type_get_class(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). See ``nvmlConstants.NVML_DEVICE_NAME_BUFFER_SIZE``.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_class", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Shared exit: drop the scratch bytes buffer on both paths. */
  __Pyx_XDECREF(__pyx_v__vgpu_type_class_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython-callable entry point for vgpu_type_get_class.
 * Unpacks exactly one argument ("vgpu_type_id", positional or keyword),
 * converts it to unsigned int, and forwards to the __pyx_pf_ shim.
 * Cython-generated: error paths are goto labels (__pyx_L3_error etc.)
 * that release any argument references collected in values[]. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_453vgpu_type_get_class(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_452vgpu_type_get_class, "vgpu_type_get_class(unsigned int vgpu_type_id) -> str\n\nRetrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). See ``nvmlConstants.NVML_DEVICE_NAME_BUFFER_SIZE``.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\n.. seealso:: `nvmlVgpuTypeGetClass`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_453vgpu_type_get_class = {"vgpu_type_get_class", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_453vgpu_type_get_class, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_452vgpu_type_get_class};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_453vgpu_type_get_class(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* One slot per declared parameter; holds collected argument refs. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_class (wrapper)", 0);
  /* Under the tuple calling convention the arg count must be computed
   * here; the fastcall convention passes __pyx_nargs in directly. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23544, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: gather positionals, then match keywords
       * against __pyx_pyargnames and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23544, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_class", 0) < (0)) __PYX_ERR(0, 23544, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_class", 1, 1, 1, i); __PYX_ERR(0, 23544, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23544, __pyx_L3_error)
    }
    /* Convert the Python object to unsigned int; -1 flags a possible
     * conversion error, confirmed via PyErr_Occurred(). */
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23544, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_class", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23544, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_class", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_452vgpu_type_get_class(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_452vgpu_type_get_class(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  /* Python-level shim for vgpu_type_get_class: delegate to the cpdef C
   * implementation (skip_dispatch=1) and hand back the resulting str. */
  PyObject *class_obj = NULL;
  PyObject *retval = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_class", 0);
  class_obj = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_class(__pyx_v_vgpu_type_id, 1);
  if (unlikely(!class_obj)) __PYX_ERR(0, 23544, fail)
  __Pyx_GOTREF(class_obj);
  /* Transfer ownership of the reference to the return slot. */
  retval = class_obj;
  class_obj = NULL;
  goto done;

fail:
  __Pyx_XDECREF(class_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_class", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
done:
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":23566
 * 
 * 
 * cpdef str vgpu_type_get_name(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU type name.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_455vgpu_type_get_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `cpdef str vgpu_type_get_name(vgpu_type_id)`.
 * Calls nvmlVgpuTypeGetName(vgpu_type_id, buf, &size) with a 64-byte
 * buffer, raises via check_status() on a non-success return code, and
 * returns the NUL-terminated name as a Python str.
 *
 * BUGFIX: the generated code passed ((unsigned int *)__pyx_v_size),
 * i.e. the integer value 64 reinterpreted as a pointer, as NVML's
 * in/out `size` argument (from `<unsigned int*>size` in the .pyx,
 * which should have been `&size`).  NVML writes the string length back
 * through that pointer, so the call dereferenced address 64 (undefined
 * behavior / segfault).  Fixed to pass (&__pyx_v_size), matching the
 * pattern used by sibling bindings such as
 * vgpu_type_get_gpu_instance_profile_id.  The .pyx should be fixed the
 * same way so regeneration preserves this. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_name(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_size;
  char __pyx_v_vgpu_type_name[64];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_name", 0);

  /* cdef unsigned int size = 64 -- buffer capacity handed to NVML. */
  __pyx_v_size = 64;

  /* with nogil: release the GIL around the driver call; the finally
   * blocks below reacquire it on both the normal and error paths. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* __status__ = nvmlVgpuTypeGetName(<nvmlVgpuTypeId_t>vgpu_type_id, vgpu_type_name, &size)
         * NOTE: corrected third argument -- see BUGFIX comment above. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetName(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_vgpu_type_name, (&__pyx_v_size)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23577, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* check_status(__status__) -- raises on non-success NVML codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23578, __pyx_L1_error)

  /* return cpython.PyUnicode_FromString(vgpu_type_name) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_vgpu_type_name); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython-callable entry point for vgpu_type_get_name.
 * Unpacks exactly one argument ("vgpu_type_id", positional or keyword),
 * converts it to unsigned int, and forwards to the __pyx_pf_ shim.
 * Cython-generated: error paths are goto labels that release any
 * argument references collected in values[]. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_455vgpu_type_get_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_454vgpu_type_get_name, "vgpu_type_get_name(unsigned int vgpu_type_id) -> str\n\nRetrieve the vGPU type name.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\n.. seealso:: `nvmlVgpuTypeGetName`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_455vgpu_type_get_name = {"vgpu_type_get_name", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_455vgpu_type_get_name, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_454vgpu_type_get_name};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_455vgpu_type_get_name(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* One slot per declared parameter; holds collected argument refs. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_name (wrapper)", 0);
  /* Under the tuple calling convention the arg count must be computed
   * here; the fastcall convention passes __pyx_nargs in directly. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23566, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: gather positionals, then match keywords
       * against __pyx_pyargnames and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23566, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_name", 0) < (0)) __PYX_ERR(0, 23566, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_name", 1, 1, 1, i); __PYX_ERR(0, 23566, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23566, __pyx_L3_error)
    }
    /* Convert the Python object to unsigned int; -1 flags a possible
     * conversion error, confirmed via PyErr_Occurred(). */
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23566, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_name", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23566, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_454vgpu_type_get_name(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_454vgpu_type_get_name(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  /* Python-level shim for vgpu_type_get_name: delegate to the cpdef C
   * implementation (skip_dispatch=1) and hand back the resulting str. */
  PyObject *name_obj = NULL;
  PyObject *retval = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_name", 0);
  name_obj = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_name(__pyx_v_vgpu_type_id, 1);
  if (unlikely(!name_obj)) __PYX_ERR(0, 23566, fail)
  __Pyx_GOTREF(name_obj);
  /* Transfer ownership of the reference to the return slot. */
  retval = name_obj;
  name_obj = NULL;
  goto done;

fail:
  __Pyx_XDECREF(name_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_name", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
done:
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":23582
 * 
 * 
 * cpdef unsigned int vgpu_type_get_gpu_instance_profile_id(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPU Instance Profile ID for the given vGPU type ID. The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is returned.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_457vgpu_type_get_gpu_instance_profile_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef vgpu_type_get_gpu_instance_profile_id.
 * Releases the GIL, calls nvmlVgpuTypeGetGpuInstanceProfileId with the
 * address of a local out-parameter, raises via check_status() on a
 * non-success code, and returns the profile id.  Declared `except? 0`
 * in the .pyx, so 0 is the (ambiguous) error sentinel checked by the
 * caller together with PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gpu_instance_profile_id(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_gpu_instance_profile_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23594
 *     """
 *     cdef unsigned int gpu_instance_profile_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetGpuInstanceProfileId(<nvmlVgpuTypeId_t>vgpu_type_id, &gpu_instance_profile_id)
 *     check_status(__status__)
 */
  /* with nogil: release the GIL around the driver call; the finally
   * blocks below reacquire it on both the normal and error paths. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23595
 *     cdef unsigned int gpu_instance_profile_id
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetGpuInstanceProfileId(<nvmlVgpuTypeId_t>vgpu_type_id, &gpu_instance_profile_id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gpu_instance_profile_id
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGpuInstanceProfileId(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_gpu_instance_profile_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23595, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23594
 *     """
 *     cdef unsigned int gpu_instance_profile_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetGpuInstanceProfileId(<nvmlVgpuTypeId_t>vgpu_type_id, &gpu_instance_profile_id)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* check_status(__status__) -- raises on non-success NVML codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23596, __pyx_L1_error)

  /* return gpu_instance_profile_id */
  __pyx_r = __pyx_v_gpu_instance_profile_id;
  goto __pyx_L0;

  /* function exit code: on error, 0 is returned as the `except? 0`
   * sentinel with a Python exception set. */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_gpu_instance_profile_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* CPython-callable entry point for vgpu_type_get_gpu_instance_profile_id.
 * Unpacks exactly one argument ("vgpu_type_id", positional or keyword),
 * converts it to unsigned int, and forwards to the __pyx_pf_ shim.
 * Cython-generated: error paths are goto labels that release any
 * argument references collected in values[]. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_457vgpu_type_get_gpu_instance_profile_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_456vgpu_type_get_gpu_instance_profile_id, "vgpu_type_get_gpu_instance_profile_id(unsigned int vgpu_type_id) -> unsigned int\n\nRetrieve the GPU Instance Profile ID for the given vGPU type ID. The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is returned.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned int: GPU Instance Profile ID.\n\n.. seealso:: `nvmlVgpuTypeGetGpuInstanceProfileId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_457vgpu_type_get_gpu_instance_profile_id = {"vgpu_type_get_gpu_instance_profile_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_457vgpu_type_get_gpu_instance_profile_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_456vgpu_type_get_gpu_instance_profile_id};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_457vgpu_type_get_gpu_instance_profile_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* One slot per declared parameter; holds collected argument refs. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_gpu_instance_profile_id (wrapper)", 0);
  /* Under the tuple calling convention the arg count must be computed
   * here; the fastcall convention passes __pyx_nargs in directly. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23582, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: gather positionals, then match keywords
       * against __pyx_pyargnames and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23582, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_gpu_instance_profile_id", 0) < (0)) __PYX_ERR(0, 23582, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_gpu_instance_profile_id", 1, 1, 1, i); __PYX_ERR(0, 23582, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23582, __pyx_L3_error)
    }
    /* Convert the Python object to unsigned int; -1 flags a possible
     * conversion error, confirmed via PyErr_Occurred(). */
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23582, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_gpu_instance_profile_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23582, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any collected references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_gpu_instance_profile_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_456vgpu_type_get_gpu_instance_profile_id(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_456vgpu_type_get_gpu_instance_profile_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  /* Python-level shim: call the cpdef C implementation (declared
   * `except? 0`), detect the error sentinel, and box the unsigned int
   * result as a Python int. */
  unsigned int profile_id;
  PyObject *boxed = NULL;
  PyObject *retval = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_gpu_instance_profile_id", 0);
  profile_id = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gpu_instance_profile_id(__pyx_v_vgpu_type_id, 1);
  /* 0 is the `except? 0` sentinel: only an error if an exception is set. */
  if (unlikely(profile_id == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23582, fail)
  boxed = __Pyx_PyLong_From_unsigned_int(profile_id);
  if (unlikely(!boxed)) __PYX_ERR(0, 23582, fail)
  __Pyx_GOTREF(boxed);
  /* Transfer ownership of the reference to the return slot. */
  retval = boxed;
  boxed = NULL;
  goto done;

fail:
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_gpu_instance_profile_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
done:
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":23600
 * 
 * 
 * cpdef tuple vgpu_type_get_device_id(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the device ID of a vGPU type.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_459vgpu_type_get_device_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `cpdef tuple vgpu_type_get_device_id`.
 * Releases the GIL, calls nvmlVgpuTypeGetDeviceID with the addresses of
 * two local unsigned long long out-parameters, raises via check_status()
 * on a non-success code, and returns a 2-tuple
 * (device_id, subsystem_id) of Python ints. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_device_id(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_device_id;
  unsigned PY_LONG_LONG __pyx_v_subsystem_id;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_device_id", 0);

  /* "cuda/bindings/_nvml.pyx":23616
 *     cdef unsigned long long device_id
 *     cdef unsigned long long subsystem_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetDeviceID(<nvmlVgpuTypeId_t>vgpu_type_id, &device_id, &subsystem_id)
 *     check_status(__status__)
 */
  /* with nogil: release the GIL around the driver call; the finally
   * blocks below reacquire it on both the normal and error paths. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23617
 *     cdef unsigned long long subsystem_id
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetDeviceID(<nvmlVgpuTypeId_t>vgpu_type_id, &device_id, &subsystem_id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (device_id, subsystem_id)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetDeviceID(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_device_id), (&__pyx_v_subsystem_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23617, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23616
 *     cdef unsigned long long device_id
 *     cdef unsigned long long subsystem_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetDeviceID(<nvmlVgpuTypeId_t>vgpu_type_id, &device_id, &subsystem_id)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* check_status(__status__) -- raises on non-success NVML codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23618, __pyx_L1_error)

  /* return (device_id, subsystem_id): box both out-values as Python
   * ints, then steal the references into a fresh 2-tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_device_id); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_subsystem_id); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23619, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 23619, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_device_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_459vgpu_type_get_device_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_458vgpu_type_get_device_id, "vgpu_type_get_device_id(unsigned int vgpu_type_id) -> tuple\n\nRetrieve the device ID of a vGPU type.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned long long: Device ID and vendor ID of the device contained in single 32 bit value.\n    - unsigned long long: Subsystem ID and subsystem vendor ID of the device contained in single 32 bit value.\n\n.. seealso:: `nvmlVgpuTypeGetDeviceID`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_459vgpu_type_get_device_id = {"vgpu_type_get_device_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_459vgpu_type_get_device_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_458vgpu_type_get_device_id};
/* Argument-unpacking wrapper for vgpu_type_get_device_id: accepts the single
 * argument `vgpu_type_id` positionally or by keyword, converts it to
 * unsigned int, then delegates to the __pyx_pf_..._458 implementation shim.
 * On parse/conversion failure it releases any borrowed/held arg refs and
 * returns NULL with a traceback entry added. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_459vgpu_type_get_device_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_device_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: keep `< 0` inside unlikely().  With GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared (0 or 1) < 0 — always false —
     * silently disabling the error check for a failed kwargs-size query. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23600, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23600, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_device_id", 0) < (0)) __PYX_ERR(0, 23600, __pyx_L3_error)
      /* Verify every required argument was supplied (positional or keyword). */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_device_id", 1, 1, 1, i); __PYX_ERR(0, 23600, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23600, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23600, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_device_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23600, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_device_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_458vgpu_type_get_device_id(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level shim for vgpu_type_get_device_id: delegates straight to the
 * C cpdef implementation (skip_dispatch=1 so Python dispatch is bypassed)
 * and hands back the 2-tuple it builds, or NULL on error with a traceback
 * entry appended. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_458vgpu_type_get_device_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_device_id", 0);
  PyObject *device_id_pair = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_device_id(__pyx_v_vgpu_type_id, 1);
  if (unlikely(!device_id_pair)) __PYX_ERR(0, 23600, __pyx_L1_error)
  __Pyx_GOTREF(device_id_pair);
  __pyx_r = device_id_pair;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_device_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23622
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_framebuffer_size(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU framebuffer size in bytes.
 * 
 */

/* Forward declaration of the Python wrapper for this cpdef function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_461vgpu_type_get_framebuffer_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef vgpu_type_get_framebuffer_size: releases the GIL,
 * calls the cy_nvml binding for nvmlVgpuTypeGetFramebufferSize, re-acquires
 * the GIL, and raises via check_status() on a non-success status.
 * Error protocol is `except? 0`: returning 0 with an exception pending
 * signals failure, so callers must also check PyErr_Occurred(). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_framebuffer_size(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_fb_size;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23634
 *     """
 *     cdef unsigned long long fb_size
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetFramebufferSize(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_size)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23635
 *     cdef unsigned long long fb_size
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetFramebufferSize(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fb_size
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is a sentinel status —
         * presumably set by the lazy symbol loader when the NVML entry point
         * could not be resolved (TODO confirm); the pending exception is
         * checked only after re-acquiring the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFramebufferSize(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_fb_size)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23635, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23634
 *     """
 *     cdef unsigned long long fb_size
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetFramebufferSize(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_size)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23636
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetFramebufferSize(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fb_size
 * 
 */
  /* check_status() returns 1 after raising a Python exception for any
   * non-success NVML status code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23636, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23637
 *         __status__ = nvmlVgpuTypeGetFramebufferSize(<nvmlVgpuTypeId_t>vgpu_type_id, &fb_size)
 *     check_status(__status__)
 *     return fb_size             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_fb_size;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23622
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_framebuffer_size(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU framebuffer size in bytes.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_framebuffer_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_461vgpu_type_get_framebuffer_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_460vgpu_type_get_framebuffer_size, "vgpu_type_get_framebuffer_size(unsigned int vgpu_type_id) -> unsigned long long\n\nRetrieve the vGPU framebuffer size in bytes.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned long long: Pointer to framebuffer size in bytes.\n\n.. seealso:: `nvmlVgpuTypeGetFramebufferSize`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_461vgpu_type_get_framebuffer_size = {"vgpu_type_get_framebuffer_size", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_461vgpu_type_get_framebuffer_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_460vgpu_type_get_framebuffer_size};
/* Argument-unpacking wrapper for vgpu_type_get_framebuffer_size: accepts
 * `vgpu_type_id` positionally or by keyword, converts it to unsigned int,
 * then delegates to the __pyx_pf_..._460 implementation shim. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_461vgpu_type_get_framebuffer_size(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_framebuffer_size (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: keep `< 0` inside unlikely().  With GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared (0 or 1) < 0 — always false —
     * silently disabling the error check for a failed kwargs-size query. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23622, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23622, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_framebuffer_size", 0) < (0)) __PYX_ERR(0, 23622, __pyx_L3_error)
      /* Verify every required argument was supplied (positional or keyword). */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_framebuffer_size", 1, 1, 1, i); __PYX_ERR(0, 23622, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23622, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23622, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_framebuffer_size", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23622, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_framebuffer_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_460vgpu_type_get_framebuffer_size(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level shim for vgpu_type_get_framebuffer_size: invoke the C cpdef
 * implementation (skip_dispatch=1) and box the unsigned 64-bit result as a
 * Python int.  Per the `except? 0` protocol, a return of 0 only signals an
 * error when an exception is also pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_460vgpu_type_get_framebuffer_size(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_framebuffer_size", 0);
  unsigned PY_LONG_LONG fb_bytes = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_framebuffer_size(__pyx_v_vgpu_type_id, 1);
  if (unlikely(fb_bytes == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 23622, __pyx_L1_error)
  __pyx_r = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(fb_bytes);
  if (unlikely(!__pyx_r)) __PYX_ERR(0, 23622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_r);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_framebuffer_size", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23640
 * 
 * 
 * cpdef unsigned int vgpu_type_get_num_display_heads(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve count of vGPU's supported display heads.
 * 
 */

/* Forward declaration of the Python wrapper for this cpdef function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_463vgpu_type_get_num_display_heads(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef vgpu_type_get_num_display_heads: releases the GIL,
 * calls the cy_nvml binding for nvmlVgpuTypeGetNumDisplayHeads, re-acquires
 * the GIL, and raises via check_status() on a non-success status.
 * Error protocol is `except? 0`: returning 0 with an exception pending
 * signals failure, so callers must also check PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_num_display_heads(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_num_display_heads;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23652
 *     """
 *     cdef unsigned int num_display_heads
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetNumDisplayHeads(<nvmlVgpuTypeId_t>vgpu_type_id, &num_display_heads)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23653
 *     cdef unsigned int num_display_heads
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetNumDisplayHeads(<nvmlVgpuTypeId_t>vgpu_type_id, &num_display_heads)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return num_display_heads
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is a sentinel status —
         * presumably set by the lazy symbol loader when the NVML entry point
         * could not be resolved (TODO confirm). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetNumDisplayHeads(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_num_display_heads)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23653, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23652
 *     """
 *     cdef unsigned int num_display_heads
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetNumDisplayHeads(<nvmlVgpuTypeId_t>vgpu_type_id, &num_display_heads)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23654
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetNumDisplayHeads(<nvmlVgpuTypeId_t>vgpu_type_id, &num_display_heads)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return num_display_heads
 * 
 */
  /* check_status() returns 1 after raising a Python exception for any
   * non-success NVML status code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23654, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23655
 *         __status__ = nvmlVgpuTypeGetNumDisplayHeads(<nvmlVgpuTypeId_t>vgpu_type_id, &num_display_heads)
 *     check_status(__status__)
 *     return num_display_heads             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_num_display_heads;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23640
 * 
 * 
 * cpdef unsigned int vgpu_type_get_num_display_heads(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve count of vGPU's supported display heads.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_num_display_heads", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_463vgpu_type_get_num_display_heads(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_462vgpu_type_get_num_display_heads, "vgpu_type_get_num_display_heads(unsigned int vgpu_type_id) -> unsigned int\n\nRetrieve count of vGPU's supported display heads.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned int: Pointer to number of display heads.\n\n.. seealso:: `nvmlVgpuTypeGetNumDisplayHeads`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_463vgpu_type_get_num_display_heads = {"vgpu_type_get_num_display_heads", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_463vgpu_type_get_num_display_heads, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_462vgpu_type_get_num_display_heads};
/* Argument-unpacking wrapper for vgpu_type_get_num_display_heads: accepts
 * `vgpu_type_id` positionally or by keyword, converts it to unsigned int,
 * then delegates to the __pyx_pf_..._462 implementation shim. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_463vgpu_type_get_num_display_heads(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_num_display_heads (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: keep `< 0` inside unlikely().  With GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared (0 or 1) < 0 — always false —
     * silently disabling the error check for a failed kwargs-size query. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23640, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23640, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_num_display_heads", 0) < (0)) __PYX_ERR(0, 23640, __pyx_L3_error)
      /* Verify every required argument was supplied (positional or keyword). */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_num_display_heads", 1, 1, 1, i); __PYX_ERR(0, 23640, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23640, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23640, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_num_display_heads", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23640, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_num_display_heads", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_462vgpu_type_get_num_display_heads(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level shim for vgpu_type_get_num_display_heads: invoke the C cpdef
 * implementation (skip_dispatch=1) and box the unsigned result as a Python
 * int.  Per the `except? 0` protocol, a return of 0 only signals an error
 * when an exception is also pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_462vgpu_type_get_num_display_heads(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_num_display_heads", 0);
  unsigned int head_count = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_num_display_heads(__pyx_v_vgpu_type_id, 1);
  if (unlikely(head_count == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23640, __pyx_L1_error)
  __pyx_r = __Pyx_PyLong_From_unsigned_int(head_count);
  if (unlikely(!__pyx_r)) __PYX_ERR(0, 23640, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_r);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_num_display_heads", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23658
 * 
 * 
 * cpdef tuple vgpu_type_get_resolution(unsigned int vgpu_type_id, unsigned int display_ind_ex):             # <<<<<<<<<<<<<<
 *     """Retrieve vGPU display head's maximum supported resolution.
 * 
 */

/* Forward declaration of the Python wrapper for this cpdef function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_465vgpu_type_get_resolution(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef vgpu_type_get_resolution: releases the GIL, calls
 * the cy_nvml binding for nvmlVgpuTypeGetResolution to fetch the head's
 * maximum X/Y pixel dimensions, re-acquires the GIL, raises via
 * check_status() on a non-success status, and returns the (xdim, ydim)
 * 2-tuple as a new reference, or NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_resolution(unsigned int __pyx_v_vgpu_type_id, unsigned int __pyx_v_display_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_xdim;
  unsigned int __pyx_v_ydim;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_resolution", 0);

  /* "cuda/bindings/_nvml.pyx":23675
 *     cdef unsigned int xdim
 *     cdef unsigned int ydim
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetResolution(<nvmlVgpuTypeId_t>vgpu_type_id, display_ind_ex, &xdim, &ydim)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23676
 *     cdef unsigned int ydim
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetResolution(<nvmlVgpuTypeId_t>vgpu_type_id, display_ind_ex, &xdim, &ydim)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (xdim, ydim)
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is a sentinel status —
         * presumably set by the lazy symbol loader when the NVML entry point
         * could not be resolved (TODO confirm). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetResolution(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_display_ind_ex, (&__pyx_v_xdim), (&__pyx_v_ydim)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23676, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23675
 *     cdef unsigned int xdim
 *     cdef unsigned int ydim
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetResolution(<nvmlVgpuTypeId_t>vgpu_type_id, display_ind_ex, &xdim, &ydim)
 *     check_status(__status__)
 */
      /* Both exits of the nogil block re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23677
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetResolution(<nvmlVgpuTypeId_t>vgpu_type_id, display_ind_ex, &xdim, &ydim)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (xdim, ydim)
 * 
 */
  /* check_status() returns 1 after raising a Python exception for any
   * non-success NVML status code. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23677, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23678
 *         __status__ = nvmlVgpuTypeGetResolution(<nvmlVgpuTypeId_t>vgpu_type_id, display_ind_ex, &xdim, &ydim)
 *     check_status(__status__)
 *     return (xdim, ydim)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Box both dimensions and pack them into a fresh 2-tuple; ownership of the
   * boxed ints transfers to the tuple (GIVEREF before SET_ITEM). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_xdim); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ydim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23678, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 23678, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23658
 * 
 * 
 * cpdef tuple vgpu_type_get_resolution(unsigned int vgpu_type_id, unsigned int display_ind_ex):             # <<<<<<<<<<<<<<
 *     """Retrieve vGPU display head's maximum supported resolution.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_resolution", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_465vgpu_type_get_resolution(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_464vgpu_type_get_resolution, "vgpu_type_get_resolution(unsigned int vgpu_type_id, unsigned int display_ind_ex) -> tuple\n\nRetrieve vGPU display head's maximum supported resolution.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n    display_ind_ex (unsigned int): Zero-based index of display head.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Pointer to maximum number of pixels in X dimension.\n    - unsigned int: Pointer to maximum number of pixels in Y dimension.\n\n.. seealso:: `nvmlVgpuTypeGetResolution`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_465vgpu_type_get_resolution = {"vgpu_type_get_resolution", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_465vgpu_type_get_resolution, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_464vgpu_type_get_resolution};
/* Argument-unpacking wrapper for vgpu_type_get_resolution: accepts
 * (vgpu_type_id, display_ind_ex) positionally or by keyword, converts both
 * to unsigned int, then delegates to the __pyx_pf_..._464 shim. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_465vgpu_type_get_resolution(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  unsigned int __pyx_v_display_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_resolution (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,&__pyx_mstate_global->__pyx_n_u_display_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: keep `< 0` inside unlikely().  With GCC/Clang, unlikely(x)
     * expands to __builtin_expect(!!(x), 0), so the previous form
     * `unlikely(__pyx_kwds_len) < 0` compared (0 or 1) < 0 — always false —
     * silently disabling the error check for a failed kwargs-size query. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23658, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23658, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23658, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_resolution", 0) < (0)) __PYX_ERR(0, 23658, __pyx_L3_error)
      /* Verify every required argument was supplied (positional or keyword). */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_resolution", 1, 2, 2, i); __PYX_ERR(0, 23658, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23658, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23658, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23658, __pyx_L3_error)
    __pyx_v_display_ind_ex = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_display_ind_ex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23658, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_resolution", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23658, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_resolution", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_464vgpu_type_get_resolution(__pyx_self, __pyx_v_vgpu_type_id, __pyx_v_display_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound Python implementation for vgpu_type_get_resolution.
 * Arguments were already unboxed to C `unsigned int` by the wrapper; this
 * just forwards them to the cpdef C entry point (passing 1 for
 * __pyx_skip_dispatch) and returns the resulting PyObject*, or NULL with
 * an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_464vgpu_type_get_resolution(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id, unsigned int __pyx_v_display_ind_ex) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_resolution", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C function; NULL return signals a Python error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_resolution(__pyx_v_vgpu_type_id, __pyx_v_display_ind_ex, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_resolution", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23681
 * 
 * 
 * cpdef vgpu_type_get_license(unsigned int vgpu_type_id, intptr_t vgpu_type_license_string, unsigned int size):             # <<<<<<<<<<<<<<
 *     """Retrieve license requirements for a vGPU type.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_467vgpu_type_get_license(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C entry point for vgpu_type_get_license.
 * Releases the GIL (PyEval_SaveThread) around the nvmlVgpuTypeGetLicense
 * call, reacquires it, then raises via check_status() if the NVML status
 * is an error.  Returns Py_None on success, NULL (0) on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_license(unsigned int __pyx_v_vgpu_type_id, intptr_t __pyx_v_vgpu_type_license_string, unsigned int __pyx_v_size, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_license", 0);

  /* "cuda/bindings/_nvml.pyx":23691
 *     .. seealso:: `nvmlVgpuTypeGetLicense`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetLicense(<nvmlVgpuTypeId_t>vgpu_type_id, <char*>vgpu_type_license_string, size)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23692
 *     """
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetLicense(<nvmlVgpuTypeId_t>vgpu_type_id, <char*>vgpu_type_license_string, size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* NOTE: vgpu_type_license_string is a caller-supplied buffer address
         * reinterpreted as char*; the sentinel status value indicates an
         * internal loading error for which a Python exception may be set. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetLicense(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), ((char *)__pyx_v_vgpu_type_license_string), __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23692, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23691
 *     .. seealso:: `nvmlVgpuTypeGetLicense`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetLicense(<nvmlVgpuTypeId_t>vgpu_type_id, <char*>vgpu_type_license_string, size)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23693
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetLicense(<nvmlVgpuTypeId_t>vgpu_type_id, <char*>vgpu_type_license_string, size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 when it raised a Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23693, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23681
 * 
 * 
 * cpdef vgpu_type_get_license(unsigned int vgpu_type_id, intptr_t vgpu_type_license_string, unsigned int size):             # <<<<<<<<<<<<<<
 *     """Retrieve license requirements for a vGPU type.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_license", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_467vgpu_type_get_license(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_466vgpu_type_get_license, "vgpu_type_get_license(unsigned int vgpu_type_id, intptr_t vgpu_type_license_string, unsigned int size)\n\nRetrieve license requirements for a vGPU type.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n    vgpu_type_license_string (intptr_t): Pointer to buffer to return license info.\n    size (unsigned int): Size of ``vgpu_type_license_string`` buffer.\n\n.. seealso:: `nvmlVgpuTypeGetLicense`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_467vgpu_type_get_license = {"vgpu_type_get_license", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_467vgpu_type_get_license, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_466vgpu_type_get_license};
/* Python wrapper for vgpu_type_get_license: parses up to three
 * positional/keyword arguments (vgpu_type_id, vgpu_type_license_string,
 * size), converts them to C integer types, and dispatches to the bound
 * implementation.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_467vgpu_type_get_license(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  intptr_t __pyx_v_vgpu_type_license_string;
  unsigned int __pyx_v_size;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_license (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,&__pyx_mstate_global->__pyx_n_u_vgpu_type_license_string,&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. `unlikely(x)` expands
     * to `__builtin_expect(!!(x), 0)`, which yields 0 or 1, so the `< 0`
     * comparison could never be true and a negative keyword count (error
     * from the keyword-size query) was silently ignored.  The comparison
     * belongs inside the `unlikely()`. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23681, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: pick up the positionals present,
       * then let __Pyx_ParseKeywords fill the rest from keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23681, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23681, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23681, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_license", 0) < (0)) __PYX_ERR(0, 23681, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_license", 1, 3, 3, i); __PYX_ERR(0, 23681, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23681, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23681, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23681, __pyx_L3_error)
    }
    /* Convert the collected PyObject*s to the C argument types. */
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23681, __pyx_L3_error)
    __pyx_v_vgpu_type_license_string = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_vgpu_type_license_string == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23681, __pyx_L3_error)
    __pyx_v_size = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_size == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23681, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_license", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 23681, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_license", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_466vgpu_type_get_license(__pyx_self, __pyx_v_vgpu_type_id, __pyx_v_vgpu_type_license_string, __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound Python implementation for vgpu_type_get_license.
 * Forwards the already-converted C arguments to the cpdef C entry point
 * (passing 1 for __pyx_skip_dispatch) and returns its PyObject* result,
 * or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_466vgpu_type_get_license(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id, intptr_t __pyx_v_vgpu_type_license_string, unsigned int __pyx_v_size) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_license", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C function; NULL return signals a Python error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_license(__pyx_v_vgpu_type_id, __pyx_v_vgpu_type_license_string, __pyx_v_size, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_license", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23696
 * 
 * 
 * cpdef unsigned int vgpu_type_get_frame_rate_limit(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static frame rate limit value of the vGPU type.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_469vgpu_type_get_frame_rate_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C entry point for vgpu_type_get_frame_rate_limit (except? 0).
 * Calls nvmlVgpuTypeGetFrameRateLimit with the GIL released, raises via
 * check_status() on an NVML error, and returns the frame rate limit.
 * On error returns 0 with a Python exception set (0 is also a possible
 * legitimate value; callers must combine it with PyErr_Occurred()). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_frame_rate_limit(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_frame_rate_limit;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23708
 *     """
 *     cdef unsigned int frame_rate_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetFrameRateLimit(<nvmlVgpuTypeId_t>vgpu_type_id, &frame_rate_limit)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23709
 *     cdef unsigned int frame_rate_limit
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetFrameRateLimit(<nvmlVgpuTypeId_t>vgpu_type_id, &frame_rate_limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return frame_rate_limit
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFrameRateLimit(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_frame_rate_limit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23709, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23708
 *     """
 *     cdef unsigned int frame_rate_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetFrameRateLimit(<nvmlVgpuTypeId_t>vgpu_type_id, &frame_rate_limit)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23710
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetFrameRateLimit(<nvmlVgpuTypeId_t>vgpu_type_id, &frame_rate_limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return frame_rate_limit
 * 
*/
  /* check_status returns 1 when it raised a Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23710, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23711
 *         __status__ = nvmlVgpuTypeGetFrameRateLimit(<nvmlVgpuTypeId_t>vgpu_type_id, &frame_rate_limit)
 *     check_status(__status__)
 *     return frame_rate_limit             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_frame_rate_limit;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23696
 * 
 * 
 * cpdef unsigned int vgpu_type_get_frame_rate_limit(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static frame rate limit value of the vGPU type.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_frame_rate_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_469vgpu_type_get_frame_rate_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_468vgpu_type_get_frame_rate_limit, "vgpu_type_get_frame_rate_limit(unsigned int vgpu_type_id) -> unsigned int\n\nRetrieve the static frame rate limit value of the vGPU type.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned int: Reference to return the frame rate limit value.\n\n.. seealso:: `nvmlVgpuTypeGetFrameRateLimit`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_469vgpu_type_get_frame_rate_limit = {"vgpu_type_get_frame_rate_limit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_469vgpu_type_get_frame_rate_limit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_468vgpu_type_get_frame_rate_limit};
/* Python wrapper for vgpu_type_get_frame_rate_limit: parses the single
 * argument (vgpu_type_id), converts it to C `unsigned int`, and dispatches
 * to the bound implementation.  Returns NULL with an exception on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_469vgpu_type_get_frame_rate_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_frame_rate_limit (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. `unlikely(x)` expands
     * to `__builtin_expect(!!(x), 0)`, which yields 0 or 1, so the `< 0`
     * comparison could never fire and a negative keyword count was silently
     * ignored.  The comparison belongs inside the `unlikely()`. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23696, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call path. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23696, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_frame_rate_limit", 0) < (0)) __PYX_ERR(0, 23696, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_frame_rate_limit", 1, 1, 1, i); __PYX_ERR(0, 23696, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23696, __pyx_L3_error)
    }
    /* Convert the collected PyObject* to C `unsigned int`. */
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23696, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_frame_rate_limit", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23696, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_frame_rate_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_468vgpu_type_get_frame_rate_limit(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound Python implementation for vgpu_type_get_frame_rate_limit.
 * Calls the cpdef C entry point (skip_dispatch=1), detects its `except? 0`
 * error convention (0 combined with PyErr_Occurred), and boxes the
 * unsigned int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_468vgpu_type_get_frame_rate_limit(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_frame_rate_limit", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is a possible valid result, so an exception check disambiguates. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_frame_rate_limit(__pyx_v_vgpu_type_id, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23696, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23696, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_frame_rate_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23714
 * 
 * 
 * cpdef unsigned int vgpu_type_get_max_instances(intptr_t device, unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_471vgpu_type_get_max_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C entry point for vgpu_type_get_max_instances (except? 0).
 * Calls nvmlVgpuTypeGetMaxInstances with the GIL released, raises via
 * check_status() on an NVML error, and returns the instance count.
 * On error returns 0 with a Python exception set (0 may also be a valid
 * count; callers must combine it with PyErr_Occurred()). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances(intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_vgpu_instance_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23727
 *     """
 *     cdef unsigned int vgpu_instance_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetMaxInstances(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23728
 *     cdef unsigned int vgpu_instance_count
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstances(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return vgpu_instance_count
*/
        /* `device` is an opaque handle passed as intptr_t and cast to the
         * NVML device handle type. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstances(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_vgpu_instance_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23728, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23727
 *     """
 *     cdef unsigned int vgpu_instance_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetMaxInstances(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23729
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstances(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return vgpu_instance_count
 * 
*/
  /* check_status returns 1 when it raised a Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23729, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23730
 *         __status__ = nvmlVgpuTypeGetMaxInstances(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count)
 *     check_status(__status__)
 *     return vgpu_instance_count             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_vgpu_instance_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23714
 * 
 * 
 * cpdef unsigned int vgpu_type_get_max_instances(intptr_t device, unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_471vgpu_type_get_max_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_470vgpu_type_get_max_instances, "vgpu_type_get_max_instances(intptr_t device, unsigned int vgpu_type_id) -> unsigned int\n\nRetrieve the maximum number of vGPU instances creatable on a device for given vGPU type.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned int: Pointer to get the max number of vGPU instances that can be created on a deicve for given vgpu_type_id.\n\n.. seealso:: `nvmlVgpuTypeGetMaxInstances`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_471vgpu_type_get_max_instances = {"vgpu_type_get_max_instances", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_471vgpu_type_get_max_instances, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_470vgpu_type_get_max_instances};
/* Python wrapper for vgpu_type_get_max_instances: parses up to two
 * positional/keyword arguments (device, vgpu_type_id), converts them to C
 * integer types, and dispatches to the bound implementation.  Returns NULL
 * with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_471vgpu_type_get_max_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`. `unlikely(x)` expands
     * to `__builtin_expect(!!(x), 0)`, which yields 0 or 1, so the `< 0`
     * comparison could never fire and a negative keyword count was silently
     * ignored.  The comparison belongs inside the `unlikely()`. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23714, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: pick up the positionals present,
       * then let __Pyx_ParseKeywords fill the rest from keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23714, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23714, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_max_instances", 0) < (0)) __PYX_ERR(0, 23714, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_max_instances", 1, 2, 2, i); __PYX_ERR(0, 23714, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23714, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23714, __pyx_L3_error)
    }
    /* Convert the collected PyObject*s to the C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 23714, __pyx_L3_error)
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23714, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_max_instances", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23714, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_470vgpu_type_get_max_instances(__pyx_self, __pyx_v_device, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Bound Python implementation for vgpu_type_get_max_instances.
 * Calls the cpdef C entry point (skip_dispatch=1), detects its `except? 0`
 * error convention (0 combined with PyErr_Occurred), and boxes the
 * unsigned int result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_470vgpu_type_get_max_instances(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is a possible valid result, so an exception check disambiguates. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances(__pyx_v_device, __pyx_v_vgpu_type_id, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23714, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23733
 * 
 * 
 * cpdef unsigned int vgpu_type_get_max_instances_per_vm(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances supported per VM for given vGPU type.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_473vgpu_type_get_max_instances_per_vm(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `vgpu_type_get_max_instances_per_vm`.
 * Releases the GIL around the NVML driver call, then re-acquires it and
 * raises via check_status() if the call did not return NVML_SUCCESS.
 * Error-return convention is Cython's `except? 0`: 0 plus a pending
 * Python exception signals failure. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_vm(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_vgpu_instance_count_per_vm;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23745
 *     """
 *     cdef unsigned int vgpu_instance_count_per_vm
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerVm(<nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count_per_vm)
 *     check_status(__status__)
 */
  /* GIL released for the duration of the driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23746
 *     cdef unsigned int vgpu_instance_count_per_vm
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerVm(<nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count_per_vm)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return vgpu_instance_count_per_vm
 */
        /* Sentinel status means the NVML symbol failed to load lazily;
         * __Pyx_ErrOccurredWithGIL briefly re-takes the GIL to check. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerVm(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), (&__pyx_v_vgpu_instance_count_per_vm)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23746, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23745
 *     """
 *     cdef unsigned int vgpu_instance_count_per_vm
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerVm(<nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count_per_vm)
 *     check_status(__status__)
 */
      /* GIL re-acquired on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23747
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerVm(<nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count_per_vm)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return vgpu_instance_count_per_vm
 * 
 */
  /* check_status raises (returns 1) for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23747, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23748
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerVm(<nvmlVgpuTypeId_t>vgpu_type_id, &vgpu_instance_count_per_vm)
 *     check_status(__status__)
 *     return vgpu_instance_count_per_vm             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_vgpu_instance_count_per_vm;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23733
 * 
 * 
 * cpdef unsigned int vgpu_type_get_max_instances_per_vm(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances supported per VM for given vGPU type.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances_per_vm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_473vgpu_type_get_max_instances_per_vm(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_472vgpu_type_get_max_instances_per_vm, "vgpu_type_get_max_instances_per_vm(unsigned int vgpu_type_id) -> unsigned int\n\nRetrieve the maximum number of vGPU instances supported per VM for given vGPU type.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    unsigned int: Pointer to get the max number of vGPU instances supported per VM for given ``vgpu_type_id``.\n\n.. seealso:: `nvmlVgpuTypeGetMaxInstancesPerVm`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_473vgpu_type_get_max_instances_per_vm = {"vgpu_type_get_max_instances_per_vm", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_473vgpu_type_get_max_instances_per_vm, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_472vgpu_type_get_max_instances_per_vm};
/* METH_FASTCALL wrapper for `vgpu_type_get_max_instances_per_vm`: unpacks
 * the single `vgpu_type_id` argument (positional or keyword), converts it
 * to unsigned int, and delegates to the pf_ implementation.  All borrowed
 * `values[]` slots are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_473vgpu_type_get_max_instances_per_vm(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances_per_vm (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); `unlikely(x)` expands
     * to __builtin_expect(!!(x), 0) which normalizes x to 0/1, so the old
     * form `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23733, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23733, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_max_instances_per_vm", 0) < (0)) __PYX_ERR(0, 23733, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_max_instances_per_vm", 1, 1, 1, i); __PYX_ERR(0, 23733, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23733, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23733, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_max_instances_per_vm", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23733, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances_per_vm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_472vgpu_type_get_max_instances_per_vm(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of the cpdef function
 * `vgpu_type_get_max_instances_per_vm`: forwards to the C-level
 * implementation (__pyx_skip_dispatch=1) and boxes the unsigned int
 * result as a Python int.  The "== 0 && PyErr_Occurred()" test
 * implements the C function's `except? 0` error convention. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_472vgpu_type_get_max_instances_per_vm(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances_per_vm", 0);
  /* no-op here (__pyx_r is NULL); kept for generated-code symmetry */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_vm(__pyx_v_vgpu_type_id, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23733, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances_per_vm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23751
 * 
 * 
 * cpdef object vgpu_type_get_bar1_info(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the BAR1 info for given vGPU type.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_475vgpu_type_get_bar1_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `vgpu_type_get_bar1_info`.
 * Allocates a Python-owned VgpuTypeBar1Info_v1 wrapper object, obtains the
 * raw struct pointer it wraps, stamps the struct's version field, then
 * fills it via nvmlVgpuTypeGetBAR1Info with the GIL released.  On success
 * the wrapper object (which owns the filled struct) is returned. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_bar1_info(unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *__pyx_v_bar1info_py = 0;
  nvmlVgpuTypeBar1Info_t *__pyx_v_bar1info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_bar1_info", 0);

  /* "cuda/bindings/_nvml.pyx":23762
 *     .. seealso:: `nvmlVgpuTypeGetBAR1Info`
 *     """
 *     cdef VgpuTypeBar1Info_v1 bar1info_py = VgpuTypeBar1Info_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeBar1Info_t *bar1info = <nvmlVgpuTypeBar1Info_t *><intptr_t>(bar1info_py._get_ptr())
 *     bar1info.version = sizeof(nvmlVgpuTypeBar1Info_v1_t) | (1 << 24)
 */
  /* Zero-argument vectorcall of the VgpuTypeBar1Info_v1 type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23762, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_bar1info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23763
 *     """
 *     cdef VgpuTypeBar1Info_v1 bar1info_py = VgpuTypeBar1Info_v1()
 *     cdef nvmlVgpuTypeBar1Info_t *bar1info = <nvmlVgpuTypeBar1Info_t *><intptr_t>(bar1info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     bar1info.version = sizeof(nvmlVgpuTypeBar1Info_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() returns the address of the struct owned by the wrapper;
   * the raw pointer stays valid as long as bar1info_py is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)__pyx_v_bar1info_py->__pyx_vtab)->_get_ptr(__pyx_v_bar1info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23763, __pyx_L1_error)
  __pyx_v_bar1info = ((nvmlVgpuTypeBar1Info_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23764
 *     cdef VgpuTypeBar1Info_v1 bar1info_py = VgpuTypeBar1Info_v1()
 *     cdef nvmlVgpuTypeBar1Info_t *bar1info = <nvmlVgpuTypeBar1Info_t *><intptr_t>(bar1info_py._get_ptr())
 *     bar1info.version = sizeof(nvmlVgpuTypeBar1Info_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetBAR1Info(<nvmlVgpuTypeId_t>vgpu_type_id, bar1info)
 */
  /* NVML versioned-struct encoding: struct size ORed with version 1 in
   * the top byte (1 << 24 == 0x1000000). */
  __pyx_v_bar1info->version = ((sizeof(nvmlVgpuTypeBar1Info_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":23765
 *     cdef nvmlVgpuTypeBar1Info_t *bar1info = <nvmlVgpuTypeBar1Info_t *><intptr_t>(bar1info_py._get_ptr())
 *     bar1info.version = sizeof(nvmlVgpuTypeBar1Info_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetBAR1Info(<nvmlVgpuTypeId_t>vgpu_type_id, bar1info)
 *     check_status(__status__)
 */
  /* GIL released for the duration of the driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23766
 *     bar1info.version = sizeof(nvmlVgpuTypeBar1Info_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetBAR1Info(<nvmlVgpuTypeId_t>vgpu_type_id, bar1info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return bar1info_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetBAR1Info(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_bar1info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23766, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23765
 *     cdef nvmlVgpuTypeBar1Info_t *bar1info = <nvmlVgpuTypeBar1Info_t *><intptr_t>(bar1info_py._get_ptr())
 *     bar1info.version = sizeof(nvmlVgpuTypeBar1Info_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetBAR1Info(<nvmlVgpuTypeId_t>vgpu_type_id, bar1info)
 *     check_status(__status__)
 */
      /* GIL re-acquired on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23767
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetBAR1Info(<nvmlVgpuTypeId_t>vgpu_type_id, bar1info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return bar1info_py
 * 
 */
  /* check_status raises (returns 1) for any non-success NVML status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23767, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23768
 *         __status__ = nvmlVgpuTypeGetBAR1Info(<nvmlVgpuTypeId_t>vgpu_type_id, bar1info)
 *     check_status(__status__)
 *     return bar1info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_bar1info_py);
  __pyx_r = ((PyObject *)__pyx_v_bar1info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23751
 * 
 * 
 * cpdef object vgpu_type_get_bar1_info(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the BAR1 info for given vGPU type.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_bar1_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_bar1info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_475vgpu_type_get_bar1_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_474vgpu_type_get_bar1_info, "vgpu_type_get_bar1_info(unsigned int vgpu_type_id)\n\nRetrieve the BAR1 info for given vGPU type.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n\nReturns:\n    nvmlVgpuTypeBar1Info_v1_t: Pointer to the vGPU type BAR1 information structure ``nvmlVgpuTypeBar1Info_t``.\n\n.. seealso:: `nvmlVgpuTypeGetBAR1Info`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_475vgpu_type_get_bar1_info = {"vgpu_type_get_bar1_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_475vgpu_type_get_bar1_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_474vgpu_type_get_bar1_info};
/* METH_FASTCALL wrapper for `vgpu_type_get_bar1_info`: unpacks the single
 * `vgpu_type_id` argument (positional or keyword), converts it to
 * unsigned int, and delegates to the pf_ implementation.  All borrowed
 * `values[]` slots are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_475vgpu_type_get_bar1_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_bar1_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); `unlikely(x)` expands
     * to __builtin_expect(!!(x), 0) which normalizes x to 0/1, so the old
     * form `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23751, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23751, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_bar1_info", 0) < (0)) __PYX_ERR(0, 23751, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_bar1_info", 1, 1, 1, i); __PYX_ERR(0, 23751, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23751, __pyx_L3_error)
    }
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23751, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_bar1_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23751, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_bar1_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_474vgpu_type_get_bar1_info(__pyx_self, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of the cpdef function `vgpu_type_get_bar1_info`:
 * forwards to the C-level implementation (__pyx_skip_dispatch=1), which
 * already returns an owned Python object, and passes it through. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_474vgpu_type_get_bar1_info(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_bar1_info", 0);
  /* no-op here (__pyx_r is NULL); kept for generated-code symmetry */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_bar1_info(__pyx_v_vgpu_type_id, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_bar1_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23771
 * 
 * 
 * cpdef str vgpu_instance_get_uuid(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the UUID of a vGPU instance.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_477vgpu_instance_get_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `vgpu_instance_get_uuid`.
 * Fills a fixed 80-byte stack buffer via nvmlVgpuInstanceGetUUID with the
 * GIL released, then decodes the NUL-terminated result into a Python str.
 * NOTE(review): the 80-byte size presumably matches NVML's documented
 * UUID buffer length — confirm against the NVML headers. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_uuid(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_size;
  char __pyx_v_uuid[80];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_uuid", 0);

  /* "cuda/bindings/_nvml.pyx":23779
 *     .. seealso:: `nvmlVgpuInstanceGetUUID`
 *     """
 *     cdef unsigned int size = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] uuid
 *     with nogil:
 */
  __pyx_v_size = 80;

  /* "cuda/bindings/_nvml.pyx":23781
 *     cdef unsigned int size = 80
 *     cdef char[80] uuid
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetUUID(<nvmlVgpuInstance_t>vgpu_instance, uuid, size)
 *     check_status(__status__)
 */
  /* GIL released for the duration of the driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23782
 *     cdef char[80] uuid
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetUUID(<nvmlVgpuInstance_t>vgpu_instance, uuid, size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(uuid)
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetUUID(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_uuid, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23782, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23781
 *     cdef unsigned int size = 80
 *     cdef char[80] uuid
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetUUID(<nvmlVgpuInstance_t>vgpu_instance, uuid, size)
 *     check_status(__status__)
 */
      /* GIL re-acquired on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23783
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetUUID(<nvmlVgpuInstance_t>vgpu_instance, uuid, size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(uuid)
 * 
 */
  /* check_status raises (returns 1) for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23783, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23784
 *         __status__ = nvmlVgpuInstanceGetUUID(<nvmlVgpuInstance_t>vgpu_instance, uuid, size)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(uuid)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_uuid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23784, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23771
 * 
 * 
 * cpdef str vgpu_instance_get_uuid(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the UUID of a vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_477vgpu_instance_get_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_476vgpu_instance_get_uuid, "vgpu_instance_get_uuid(unsigned int vgpu_instance) -> str\n\nRetrieve the UUID of a vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetUUID`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_477vgpu_instance_get_uuid = {"vgpu_instance_get_uuid", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_477vgpu_instance_get_uuid, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_476vgpu_instance_get_uuid};
/* METH_FASTCALL wrapper for `vgpu_instance_get_uuid`: unpacks the single
 * `vgpu_instance` argument (positional or keyword), converts it to
 * unsigned int, and delegates to the pf_ implementation.  All borrowed
 * `values[]` slots are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_477vgpu_instance_get_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_uuid (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); `unlikely(x)` expands
     * to __builtin_expect(!!(x), 0) which normalizes x to 0/1, so the old
     * form `unlikely(__pyx_kwds_len) < 0` was always false and a negative
     * (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23771, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23771, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_uuid", 0) < (0)) __PYX_ERR(0, 23771, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_uuid", 1, 1, 1, i); __PYX_ERR(0, 23771, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23771, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23771, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_uuid", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23771, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_476vgpu_instance_get_uuid(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body of the cpdef function `vgpu_instance_get_uuid`:
 * forwards to the C-level implementation (__pyx_skip_dispatch=1), which
 * already returns an owned Python str, and passes it through. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_476vgpu_instance_get_uuid(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_uuid", 0);
  /* no-op here (__pyx_r is NULL); kept for generated-code symmetry */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_uuid(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23787
 * 
 * 
 * cpdef str vgpu_instance_get_vm_driver_version(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the NVIDIA driver version installed in the VM associated with a vGPU.
 * 
*/

/* Forward declaration of the Python wrapper (definition follows later). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_479vgpu_instance_get_vm_driver_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of vgpu_instance_get_vm_driver_version.
 * Queries NVML for the driver version installed in the VM associated with
 * the given vGPU instance. The NVML call runs with the GIL released; its
 * status code is then validated by check_status(), which raises on any
 * non-success status. On success the NUL-terminated char buffer is decoded
 * into a Python str (new reference); on failure NULL is returned with a
 * Python exception set.
 * NOTE(review): the fixed 80-byte buffer size comes from the generating
 * .pyx source — confirm it matches the size NVML documents for this query. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_driver_version(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length;
  char __pyx_v_version[80];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_vm_driver_version", 0);

  /* "cuda/bindings/_nvml.pyx":23795
 *     .. seealso:: `nvmlVgpuInstanceGetVmDriverVersion`
 *     """
 *     cdef unsigned int length = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] version
 *     with nogil:
 */
  __pyx_v_length = 80;

  /* "cuda/bindings/_nvml.pyx":23797
 *     cdef unsigned int length = 80
 *     cdef char[80] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetVmDriverVersion(<nvmlVgpuInstance_t>vgpu_instance, version, length)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23798
 *     cdef char[80] version
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetVmDriverVersion(<nvmlVgpuInstance_t>vgpu_instance, version, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)
 */
        /* The special status value signals that the NVML symbol failed to load;
         * in that case an exception was set while the GIL was re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmDriverVersion(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23798, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23797
 *     cdef unsigned int length = 80
 *     cdef char[80] version
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetVmDriverVersion(<nvmlVgpuInstance_t>vgpu_instance, version, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23799
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetVmDriverVersion(<nvmlVgpuInstance_t>vgpu_instance, version, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(version)
 * 
 */
  /* check_status returns 1 (with an exception set) for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23799, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23800
 *         __status__ = nvmlVgpuInstanceGetVmDriverVersion(<nvmlVgpuInstance_t>vgpu_instance, version, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): assumes NVML NUL-terminated `version` on success — confirm against NVML docs. */
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_version); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23800, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23787
 * 
 * 
 * cpdef str vgpu_instance_get_vm_driver_version(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the NVIDIA driver version installed in the VM associated with a vGPU.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_vm_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for vgpu_instance_get_vm_driver_version.
 * Parses the single required argument `vgpu_instance` (positional or keyword),
 * converts it to unsigned int, and delegates to the pf-level implementation.
 * All argument references collected in values[] are released on every exit path.
 * FIX(review): the kwargs-length error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; since unlikely(x) collapses to 0/1 the
 * comparison was always false, silently swallowing a failed keyword count. The
 * parenthesis is now placed around the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_479vgpu_instance_get_vm_driver_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_478vgpu_instance_get_vm_driver_version, "vgpu_instance_get_vm_driver_version(unsigned int vgpu_instance) -> str\n\nRetrieve the NVIDIA driver version installed in the VM associated with a vGPU.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetVmDriverVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_479vgpu_instance_get_vm_driver_version = {"vgpu_instance_get_vm_driver_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_479vgpu_instance_get_vm_driver_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_478vgpu_instance_get_vm_driver_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_479vgpu_instance_get_vm_driver_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_vm_driver_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesize the whole comparison so a negative (error) count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23787, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_vm_driver_version", 0) < (0)) __PYX_ERR(0, 23787, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_vm_driver_version", 1, 1, 1, i); __PYX_ERR(0, 23787, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23787, __pyx_L3_error)
    }
    /* Convert the Python argument; -1 is the conversion-error sentinel, disambiguated via PyErr_Occurred. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23787, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_vm_driver_version", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23787, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_vm_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_478vgpu_instance_get_vm_driver_version(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level entry point for vgpu_instance_get_vm_driver_version.
 * Forwards to the cpdef C implementation with __pyx_skip_dispatch=1 (no
 * re-dispatch through the module namespace needed). Returns a new reference
 * to the result str, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_478vgpu_instance_get_vm_driver_version(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_vm_driver_version", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result means the C implementation already set a Python exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_driver_version(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_vm_driver_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23803
 * 
 * 
 * cpdef unsigned long long vgpu_instance_get_fb_usage(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the framebuffer usage in bytes.
 * 
*/

/* Forward declaration of the Python wrapper (definition follows later). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_481vgpu_instance_get_fb_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of vgpu_instance_get_fb_usage.
 * Queries NVML for the framebuffer usage (bytes) of a vGPU instance with the
 * GIL released, validates the status via check_status(), and returns the
 * value. Declared `except? 0` in the .pyx source: 0 is an *ambiguous* error
 * sentinel — C callers must additionally check PyErr_Occurred(). */
static unsigned PY_LONG_LONG __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fb_usage(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned PY_LONG_LONG __pyx_v_fb_usage;
  nvmlReturn_t __pyx_v___status__;
  unsigned PY_LONG_LONG __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23815
 *     """
 *     cdef unsigned long long fb_usage
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFbUsage(<nvmlVgpuInstance_t>vgpu_instance, &fb_usage)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23816
 *     cdef unsigned long long fb_usage
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFbUsage(<nvmlVgpuInstance_t>vgpu_instance, &fb_usage)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fb_usage
 */
        /* The special status value signals that the NVML symbol failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFbUsage(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_fb_usage)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23816, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23815
 *     """
 *     cdef unsigned long long fb_usage
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFbUsage(<nvmlVgpuInstance_t>vgpu_instance, &fb_usage)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23817
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFbUsage(<nvmlVgpuInstance_t>vgpu_instance, &fb_usage)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fb_usage
 * 
 */
  /* check_status returns 1 (with an exception set) for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23817, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23818
 *         __status__ = nvmlVgpuInstanceGetFbUsage(<nvmlVgpuInstance_t>vgpu_instance, &fb_usage)
 *     check_status(__status__)
 *     return fb_usage             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_fb_usage;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23803
 * 
 * 
 * cpdef unsigned long long vgpu_instance_get_fb_usage(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the framebuffer usage in bytes.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fb_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for vgpu_instance_get_fb_usage.
 * Parses the single required argument `vgpu_instance` (positional or keyword),
 * converts it to unsigned int, and delegates to the pf-level implementation.
 * All argument references collected in values[] are released on every exit path.
 * FIX(review): the kwargs-length error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; since unlikely(x) collapses to 0/1 the
 * comparison was always false, silently swallowing a failed keyword count. The
 * parenthesis is now placed around the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_481vgpu_instance_get_fb_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_480vgpu_instance_get_fb_usage, "vgpu_instance_get_fb_usage(unsigned int vgpu_instance) -> unsigned long long\n\nRetrieve the framebuffer usage in bytes.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target instance.\n\nReturns:\n    unsigned long long: Pointer to framebuffer usage in bytes.\n\n.. seealso:: `nvmlVgpuInstanceGetFbUsage`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_481vgpu_instance_get_fb_usage = {"vgpu_instance_get_fb_usage", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_481vgpu_instance_get_fb_usage, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_480vgpu_instance_get_fb_usage};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_481vgpu_instance_get_fb_usage(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_fb_usage (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesize the whole comparison so a negative (error) count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23803, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23803, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_fb_usage", 0) < (0)) __PYX_ERR(0, 23803, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_fb_usage", 1, 1, 1, i); __PYX_ERR(0, 23803, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23803, __pyx_L3_error)
    }
    /* Convert the Python argument; -1 is the conversion-error sentinel, disambiguated via PyErr_Occurred. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23803, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_fb_usage", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23803, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fb_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_480vgpu_instance_get_fb_usage(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level entry point for vgpu_instance_get_fb_usage.
 * Calls the cpdef C implementation (skip_dispatch=1), then boxes the
 * unsigned long long result into a Python int. The `except? 0` contract
 * means a 0 return is only an error when PyErr_Occurred() is also true. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_480vgpu_instance_get_fb_usage(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned PY_LONG_LONG __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_fb_usage", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Ambiguous sentinel: 0 signals an error only if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fb_usage(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((unsigned PY_LONG_LONG)0) && PyErr_Occurred())) __PYX_ERR(0, 23803, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fb_usage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23821
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_license_status(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """[Deprecated].
 * 
*/

/* Forward declaration of the Python wrapper (definition follows later). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_483vgpu_instance_get_license_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of vgpu_instance_get_license_status
 * (deprecated upstream per the .pyx docstring). Queries NVML for the
 * licensing status of a vGPU instance with the GIL released, validates the
 * status via check_status(), and returns the value. Declared `except? 0`:
 * 0 is an *ambiguous* error sentinel — C callers must additionally check
 * PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_status(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_licensed;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23833
 *     """
 *     cdef unsigned int licensed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetLicenseStatus(<nvmlVgpuInstance_t>vgpu_instance, &licensed)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23834
 *     cdef unsigned int licensed
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetLicenseStatus(<nvmlVgpuInstance_t>vgpu_instance, &licensed)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return licensed
 */
        /* The special status value signals that the NVML symbol failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseStatus(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_licensed)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23834, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23833
 *     """
 *     cdef unsigned int licensed
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetLicenseStatus(<nvmlVgpuInstance_t>vgpu_instance, &licensed)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23835
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetLicenseStatus(<nvmlVgpuInstance_t>vgpu_instance, &licensed)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return licensed
 * 
 */
  /* check_status returns 1 (with an exception set) for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23835, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23836
 *         __status__ = nvmlVgpuInstanceGetLicenseStatus(<nvmlVgpuInstance_t>vgpu_instance, &licensed)
 *     check_status(__status__)
 *     return licensed             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_licensed;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23821
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_license_status(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """[Deprecated].
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_license_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for vgpu_instance_get_license_status.
 * Parses the single required argument `vgpu_instance` (positional or keyword),
 * converts it to unsigned int, and delegates to the pf-level implementation.
 * All argument references collected in values[] are released on every exit path.
 * FIX(review): the kwargs-length error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; since unlikely(x) collapses to 0/1 the
 * comparison was always false, silently swallowing a failed keyword count. The
 * parenthesis is now placed around the whole comparison. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_483vgpu_instance_get_license_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_482vgpu_instance_get_license_status, "vgpu_instance_get_license_status(unsigned int vgpu_instance) -> unsigned int\n\n[Deprecated].\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    unsigned int: Reference to return the licensing status.\n\n.. seealso:: `nvmlVgpuInstanceGetLicenseStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_483vgpu_instance_get_license_status = {"vgpu_instance_get_license_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_483vgpu_instance_get_license_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_482vgpu_instance_get_license_status};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_483vgpu_instance_get_license_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_license_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesize the whole comparison so a negative (error) count is actually detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23821, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23821, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_license_status", 0) < (0)) __PYX_ERR(0, 23821, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_license_status", 1, 1, 1, i); __PYX_ERR(0, 23821, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23821, __pyx_L3_error)
    }
    /* Convert the Python argument; -1 is the conversion-error sentinel, disambiguated via PyErr_Occurred. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23821, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_license_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23821, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_license_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_482vgpu_instance_get_license_status(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level entry point for vgpu_instance_get_license_status.
 * Calls the cpdef C implementation (skip_dispatch=1), then boxes the
 * unsigned int result into a Python int. The `except? 0` contract means a
 * 0 return is only an error when PyErr_Occurred() is also true. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_482vgpu_instance_get_license_status(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_license_status", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Ambiguous sentinel: 0 signals an error only if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_status(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23821, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_license_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23839
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_type(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU type of a vGPU instance.
 * 
*/

/* Forward declaration of the Python wrapper (definition continues past this view). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_485vgpu_instance_get_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level (cpdef) implementation of vgpu_instance_get_type.
 * Queries NVML for the vGPU type id of a vGPU instance with the GIL
 * released, validates the status via check_status(), and returns the id
 * widened to unsigned int. Declared `except? 0`: 0 is an *ambiguous* error
 * sentinel — C callers must additionally check PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_type(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlVgpuTypeId_t __pyx_v_vgpu_type_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23851
 *     """
 *     cdef nvmlVgpuTypeId_t vgpu_type_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetType(<nvmlVgpuInstance_t>vgpu_instance, &vgpu_type_id)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23852
 *     cdef nvmlVgpuTypeId_t vgpu_type_id
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetType(<nvmlVgpuInstance_t>vgpu_instance, &vgpu_type_id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <unsigned int>vgpu_type_id
 */
        /* The special status value signals that the NVML symbol failed to load. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetType(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_vgpu_type_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23852, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23851
 *     """
 *     cdef nvmlVgpuTypeId_t vgpu_type_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetType(<nvmlVgpuInstance_t>vgpu_instance, &vgpu_type_id)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23853
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetType(<nvmlVgpuInstance_t>vgpu_instance, &vgpu_type_id)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <unsigned int>vgpu_type_id
 * 
 */
  /* check_status returns 1 (with an exception set) for any non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23853, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23854
 *         __status__ = nvmlVgpuInstanceGetType(<nvmlVgpuInstance_t>vgpu_instance, &vgpu_type_id)
 *     check_status(__status__)
 *     return <unsigned int>vgpu_type_id             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((unsigned int)__pyx_v_vgpu_type_id);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23839
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_type(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU type of a vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Unpacks exactly one argument ("vgpu_instance", positional or keyword),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * NOTE(review): this file is generated by Cython 3.2.2. The fix below
 * (comparison moved inside unlikely()) should be upstreamed to the
 * generator rather than maintained in generated output. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_485vgpu_instance_get_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_484vgpu_instance_get_type, "vgpu_instance_get_type(unsigned int vgpu_instance) -> unsigned int\n\nRetrieve the vGPU type of a vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    unsigned int: Reference to return the vgpuTypeId.\n\n.. seealso:: `nvmlVgpuInstanceGetType`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_485vgpu_instance_get_type = {"vgpu_instance_get_type", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_485vgpu_instance_get_type, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_484vgpu_instance_get_type};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_485vgpu_instance_get_type(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_type (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0`, which is always false
     * because unlikely(x) expands to __builtin_expect(!!(x), 0) and yields
     * 0/1 — the error return of __Pyx_NumKwargs_FASTCALL was never caught. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23839, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23839, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_type", 0) < (0)) __PYX_ERR(0, 23839, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_type", 1, 1, 1, i); __PYX_ERR(0, 23839, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23839, __pyx_L3_error)
    }
    /* Convert the single argument; (unsigned int)-1 doubles as the error
     * sentinel, hence the PyErr_Occurred() confirmation. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23839, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_type", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23839, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_484vgpu_instance_get_type(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for vgpu_instance_get_type: calls the C-level impl with
 * skip_dispatch=1 and boxes the unsigned int result into a PyLong.
 * The cfunc's error convention is "except? 0": a return of 0 is only an
 * error if an exception is also set, hence the PyErr_Occurred() check. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_484vgpu_instance_get_type(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_type", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_type(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23839, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23839, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_type", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23857
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_frame_rate_limit(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the frame rate limit set for the vGPU instance.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_487vgpu_instance_get_frame_rate_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation: releases the GIL, calls
 * nvmlVgpuInstanceGetFrameRateLimit through the cy_nvml shim, then (with the
 * GIL reacquired) raises via check_status() on a non-success status.
 * Error convention is "except? 0" — 0 plus a set exception signals failure. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_frame_rate_limit(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_frame_rate_limit;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23869
 *     """
 *     cdef unsigned int frame_rate_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFrameRateLimit(<nvmlVgpuInstance_t>vgpu_instance, &frame_rate_limit)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23870
 *     cdef unsigned int frame_rate_limit
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFrameRateLimit(<nvmlVgpuInstance_t>vgpu_instance, &frame_rate_limit)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return frame_rate_limit
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR marks a failure to load the
         * NVML symbol itself; that path raised an exception under the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFrameRateLimit(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_frame_rate_limit)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23870, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23869
 *     """
 *     cdef unsigned int frame_rate_limit
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFrameRateLimit(<nvmlVgpuInstance_t>vgpu_instance, &frame_rate_limit)
 *     check_status(__status__)
 */
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23871
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFrameRateLimit(<nvmlVgpuInstance_t>vgpu_instance, &frame_rate_limit)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return frame_rate_limit
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23871, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23872
 *         __status__ = nvmlVgpuInstanceGetFrameRateLimit(<nvmlVgpuInstance_t>vgpu_instance, &frame_rate_limit)
 *     check_status(__status__)
 *     return frame_rate_limit             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_frame_rate_limit;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23857
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_frame_rate_limit(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the frame rate limit set for the vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_frame_rate_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Unpacks exactly one argument ("vgpu_instance", positional or keyword),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * NOTE(review): generated by Cython 3.2.2 — the unlikely() fix below should
 * be upstreamed to the generator. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_487vgpu_instance_get_frame_rate_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_486vgpu_instance_get_frame_rate_limit, "vgpu_instance_get_frame_rate_limit(unsigned int vgpu_instance) -> unsigned int\n\nRetrieve the frame rate limit set for the vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    unsigned int: Reference to return the frame rate limit.\n\n.. seealso:: `nvmlVgpuInstanceGetFrameRateLimit`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_487vgpu_instance_get_frame_rate_limit = {"vgpu_instance_get_frame_rate_limit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_487vgpu_instance_get_frame_rate_limit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_486vgpu_instance_get_frame_rate_limit};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_487vgpu_instance_get_frame_rate_limit(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_frame_rate_limit (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` — always false since
     * unlikely(x) yields 0/1; the negative error return was never caught. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23857, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23857, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_frame_rate_limit", 0) < (0)) __PYX_ERR(0, 23857, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_frame_rate_limit", 1, 1, 1, i); __PYX_ERR(0, 23857, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23857, __pyx_L3_error)
    }
    /* (unsigned int)-1 doubles as conversion-error sentinel. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23857, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_frame_rate_limit", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23857, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_frame_rate_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_486vgpu_instance_get_frame_rate_limit(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for vgpu_instance_get_frame_rate_limit: calls the
 * C-level impl (skip_dispatch=1) and boxes the result into a PyLong.
 * "except? 0" convention: 0 is an error only with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_486vgpu_instance_get_frame_rate_limit(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_frame_rate_limit", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_frame_rate_limit(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23857, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23857, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_frame_rate_limit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23875
 * 
 * 
 * cpdef int vgpu_instance_get_ecc_mode(unsigned int vgpu_instance) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the current ECC mode of vGPU instance.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_489vgpu_instance_get_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation: releases the GIL, calls nvmlVgpuInstanceGetEccMode
 * through the cy_nvml shim, then raises via check_status() on failure.
 * Error convention is "except? -1" — the _EnableState enum result is
 * returned widened to int. */
static int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_ecc_mode(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_ecc_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23887
 *     """
 *     cdef _EnableState ecc_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEccMode(<nvmlVgpuInstance_t>vgpu_instance, &ecc_mode)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23888
 *     cdef _EnableState ecc_mode
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEccMode(<nvmlVgpuInstance_t>vgpu_instance, &ecc_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>ecc_mode
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR marks a failure to load the
         * NVML symbol itself; that path raised an exception under the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEccMode(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_ecc_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23888, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23887
 *     """
 *     cdef _EnableState ecc_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEccMode(<nvmlVgpuInstance_t>vgpu_instance, &ecc_mode)
 *     check_status(__status__)
 */
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23889
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEccMode(<nvmlVgpuInstance_t>vgpu_instance, &ecc_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>ecc_mode
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23889, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23890
 *         __status__ = nvmlVgpuInstanceGetEccMode(<nvmlVgpuInstance_t>vgpu_instance, &ecc_mode)
 *     check_status(__status__)
 *     return <int>ecc_mode             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_ecc_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23875
 * 
 * 
 * cpdef int vgpu_instance_get_ecc_mode(unsigned int vgpu_instance) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the current ECC mode of vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Unpacks exactly one argument ("vgpu_instance", positional or keyword),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * NOTE(review): generated by Cython 3.2.2 — the unlikely() fix below should
 * be upstreamed to the generator. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_489vgpu_instance_get_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_488vgpu_instance_get_ecc_mode, "vgpu_instance_get_ecc_mode(unsigned int vgpu_instance) -> int\n\nRetrieve the current ECC mode of vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n\nReturns:\n    int: Reference in which to return the current ECC mode.\n\n.. seealso:: `nvmlVgpuInstanceGetEccMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_489vgpu_instance_get_ecc_mode = {"vgpu_instance_get_ecc_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_489vgpu_instance_get_ecc_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_488vgpu_instance_get_ecc_mode};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_489vgpu_instance_get_ecc_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_ecc_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` — always false since
     * unlikely(x) yields 0/1; the negative error return was never caught. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23875, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23875, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_ecc_mode", 0) < (0)) __PYX_ERR(0, 23875, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_ecc_mode", 1, 1, 1, i); __PYX_ERR(0, 23875, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23875, __pyx_L3_error)
    }
    /* (unsigned int)-1 doubles as conversion-error sentinel. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23875, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_ecc_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23875, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_488vgpu_instance_get_ecc_mode(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for vgpu_instance_get_ecc_mode: calls the C-level impl
 * (skip_dispatch=1) and boxes the int result into a PyLong.
 * "except? -1" convention: -1 is an error only with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_488vgpu_instance_get_ecc_mode(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_ecc_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_ecc_mode(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23875, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23875, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_ecc_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23893
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_encoder_capacity(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_491vgpu_instance_get_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation: releases the GIL, calls
 * nvmlVgpuInstanceGetEncoderCapacity through the cy_nvml shim, then raises
 * via check_status() on a non-success status. Error convention is
 * "except? 0" — 0 plus a set exception signals failure. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_capacity(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_encoder_capacity;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":23905
 *     """
 *     cdef unsigned int encoder_capacity
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, &encoder_capacity)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23906
 *     cdef unsigned int encoder_capacity
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, &encoder_capacity)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return encoder_capacity
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR marks a failure to load the
         * NVML symbol itself; that path raised an exception under the GIL. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderCapacity(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_encoder_capacity)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23906, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23905
 *     """
 *     cdef unsigned int encoder_capacity
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, &encoder_capacity)
 *     check_status(__status__)
 */
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23907
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, &encoder_capacity)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return encoder_capacity
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23907, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23908
 *         __status__ = nvmlVgpuInstanceGetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, &encoder_capacity)
 *     check_status(__status__)
 *     return encoder_capacity             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_encoder_capacity;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23893
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_encoder_capacity(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Unpacks exactly one argument ("vgpu_instance", positional or keyword),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * NOTE(review): generated by Cython 3.2.2 — the unlikely() fix below should
 * be upstreamed to the generator. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_491vgpu_instance_get_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_490vgpu_instance_get_encoder_capacity, "vgpu_instance_get_encoder_capacity(unsigned int vgpu_instance) -> unsigned int\n\nRetrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    unsigned int: Reference to an unsigned int for the encoder capacity.\n\n.. seealso:: `nvmlVgpuInstanceGetEncoderCapacity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_491vgpu_instance_get_encoder_capacity = {"vgpu_instance_get_encoder_capacity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_491vgpu_instance_get_encoder_capacity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_490vgpu_instance_get_encoder_capacity};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_491vgpu_instance_get_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_capacity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX(review): was `unlikely(__pyx_kwds_len) < 0` — always false since
     * unlikely(x) yields 0/1; the negative error return was never caught. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23893, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23893, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_encoder_capacity", 0) < (0)) __PYX_ERR(0, 23893, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_encoder_capacity", 1, 1, 1, i); __PYX_ERR(0, 23893, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23893, __pyx_L3_error)
    }
    /* (unsigned int)-1 doubles as conversion-error sentinel. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23893, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_encoder_capacity", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23893, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_490vgpu_instance_get_encoder_capacity(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation body for vgpu_instance_get_encoder_capacity:
 * delegates to the C fast-path (__pyx_f_...) and boxes the unsigned int
 * result as a Python int.  NOTE: generated code — the locals __pyx_lineno,
 * __pyx_filename and __pyx_clineno are written by the __PYX_ERR macro and
 * read by __Pyx_AddTraceback, so they must keep these exact names. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_490vgpu_instance_get_encoder_capacity(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;      /* raw C return value of the fast-path call */
  PyObject *__pyx_t_2 = NULL;  /* Python int built from __pyx_t_1 */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_capacity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Second argument 1 is __pyx_skip_dispatch (no Python override lookup).
   * A return value of 0 is only an error if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_capacity(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 23893, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23911
 * 
 * 
 * cpdef vgpu_instance_set_encoder_capacity(unsigned int vgpu_instance, unsigned int encoder_capacity):             # <<<<<<<<<<<<<<
 *     """Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_493vgpu_instance_set_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef vgpu_instance_set_encoder_capacity: calls
 * nvmlVgpuInstanceSetEncoderCapacity with the GIL released, then routes the
 * NVML status through check_status (which raises on failure).  Returns
 * Py_None on success, 0 with an exception set on error.  Generated code:
 * the goto-label error protocol and macro-referenced locals must stay
 * exactly as emitted. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_set_encoder_capacity(unsigned int __pyx_v_vgpu_instance, unsigned int __pyx_v_encoder_capacity, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_set_encoder_capacity", 0);

  /* "cuda/bindings/_nvml.pyx":23920
 *     .. seealso:: `nvmlVgpuInstanceSetEncoderCapacity`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceSetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, encoder_capacity)
 *     check_status(__status__)
*/
  /* `with nogil:` — PyEval_SaveThread releases the GIL for the NVML call;
   * both the normal and the error path restore it below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23921
 *     """
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceSetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, encoder_capacity)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel status only signals an error when a Python exception
         * is also pending (checked after re-acquiring the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceSetEncoderCapacity(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_encoder_capacity); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23921, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23920
 *     .. seealso:: `nvmlVgpuInstanceSetEncoderCapacity`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceSetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, encoder_capacity)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23922
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceSetEncoderCapacity(<nvmlVgpuInstance_t>vgpu_instance, encoder_capacity)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 on error (with an exception set). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23922, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23911
 * 
 * 
 * cpdef vgpu_instance_set_encoder_capacity(unsigned int vgpu_instance, unsigned int encoder_capacity):             # <<<<<<<<<<<<<<
 *     """Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_set_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_493vgpu_instance_set_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_492vgpu_instance_set_encoder_capacity, "vgpu_instance_set_encoder_capacity(unsigned int vgpu_instance, unsigned int encoder_capacity)\n\nSet the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n    encoder_capacity (unsigned int): Unsigned int for the encoder capacity value.\n\n.. seealso:: `nvmlVgpuInstanceSetEncoderCapacity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_493vgpu_instance_set_encoder_capacity = {"vgpu_instance_set_encoder_capacity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_493vgpu_instance_set_encoder_capacity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_492vgpu_instance_set_encoder_capacity};
/* Fastcall Python wrapper for vgpu_instance_set_encoder_capacity: unpacks
 * two positional/keyword arguments (vgpu_instance, encoder_capacity),
 * converts them to unsigned int, and dispatches to the __pyx_pf_ body.
 * All `values[]` references are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_493vgpu_instance_set_encoder_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  unsigned int __pyx_v_encoder_capacity;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_set_encoder_capacity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,&__pyx_mstate_global->__pyx_n_u_encoder_capacity,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  With the GCC/Clang
     * definition `unlikely(x) == __builtin_expect(!!(x), 0)` the old form
     * `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value against 0 and was
     * always false, making the negative-count error path dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23911, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23911, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23911, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_set_encoder_capacity", 0) < (0)) __PYX_ERR(0, 23911, __pyx_L3_error)
      /* Verify every required argument was supplied either positionally or
       * by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_set_encoder_capacity", 1, 2, 2, i); __PYX_ERR(0, 23911, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23911, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23911, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23911, __pyx_L3_error)
    __pyx_v_encoder_capacity = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_encoder_capacity == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23911, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_set_encoder_capacity", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23911, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_set_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_492vgpu_instance_set_encoder_capacity(__pyx_self, __pyx_v_vgpu_instance, __pyx_v_encoder_capacity);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation body for vgpu_instance_set_encoder_capacity:
 * forwards both unsigned ints to the C fast-path (skip_dispatch=1) and
 * returns its PyObject* result (Py_None on success, NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_492vgpu_instance_set_encoder_capacity(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance, unsigned int __pyx_v_encoder_capacity) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;  /* owned result of the fast-path call */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_set_encoder_capacity", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_set_encoder_capacity(__pyx_v_vgpu_instance, __pyx_v_encoder_capacity, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23911, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_set_encoder_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23925
 * 
 * 
 * cpdef tuple vgpu_instance_get_encoder_stats(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the current encoder statistics of a vGPU Instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_495vgpu_instance_get_encoder_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef vgpu_instance_get_encoder_stats: calls
 * nvmlVgpuInstanceGetEncoderStats with the GIL released, checks the status,
 * and returns a 3-tuple (session_count, average_fps, average_latency) of
 * Python ints.  Returns 0 with an exception set on error.  Generated code:
 * labels and macro-referenced locals must keep their exact names. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_stats(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_session_count;    /* out-param filled by NVML */
  unsigned int __pyx_v_average_fps;      /* out-param filled by NVML */
  unsigned int __pyx_v_average_latency;  /* out-param filled by NVML */
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_stats", 0);

  /* "cuda/bindings/_nvml.pyx":23943
 *     cdef unsigned int average_fps
 *     cdef unsigned int average_latency
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderStats(<nvmlVgpuInstance_t>vgpu_instance, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)
*/
  /* `with nogil:` — GIL released around the NVML call; restored on both
   * the normal and the error path of the finally block below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23944
 *     cdef unsigned int average_latency
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderStats(<nvmlVgpuInstance_t>vgpu_instance, &session_count, &average_fps, &average_latency)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (session_count, average_fps, average_latency)
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderStats(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_session_count), (&__pyx_v_average_fps), (&__pyx_v_average_latency)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23944, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":23943
 *     cdef unsigned int average_fps
 *     cdef unsigned int average_latency
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderStats(<nvmlVgpuInstance_t>vgpu_instance, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23945
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderStats(<nvmlVgpuInstance_t>vgpu_instance, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (session_count, average_fps, average_latency)
 * 
*/
  /* check_status returns 1 on error (with an exception set). */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 23945, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23946
 *         __status__ = nvmlVgpuInstanceGetEncoderStats(<nvmlVgpuInstance_t>vgpu_instance, &session_count, &average_fps, &average_latency)
 *     check_status(__status__)
 *     return (session_count, average_fps, average_latency)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Box the three out-params and move their references into a new tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_session_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_average_fps); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyLong_From_unsigned_int(__pyx_v_average_latency); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23946, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23946, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 23946, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5) != (0)) __PYX_ERR(0, 23946, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_r = ((PyObject*)__pyx_t_6);
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23925
 * 
 * 
 * cpdef tuple vgpu_instance_get_encoder_stats(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the current encoder statistics of a vGPU Instance.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_495vgpu_instance_get_encoder_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_494vgpu_instance_get_encoder_stats, "vgpu_instance_get_encoder_stats(unsigned int vgpu_instance) -> tuple\n\nRetrieves the current encoder statistics of a vGPU Instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    A 3-tuple containing:\n\n    - unsigned int: Reference to an unsigned int for count of active encoder sessions.\n    - unsigned int: Reference to an unsigned int for trailing average FPS of all active sessions.\n    - unsigned int: Reference to an unsigned int for encode latency in microseconds.\n\n.. seealso:: `nvmlVgpuInstanceGetEncoderStats`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_495vgpu_instance_get_encoder_stats = {"vgpu_instance_get_encoder_stats", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_495vgpu_instance_get_encoder_stats, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_494vgpu_instance_get_encoder_stats};
/* Fastcall Python wrapper for vgpu_instance_get_encoder_stats: unpacks a
 * single positional/keyword argument (vgpu_instance), converts it to
 * unsigned int, and dispatches to the __pyx_pf_ body.  All `values[]`
 * references are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_495vgpu_instance_get_encoder_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_stats (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: comparison moved inside unlikely().  With the GCC/Clang
     * definition `unlikely(x) == __builtin_expect(!!(x), 0)` the old form
     * `unlikely(__pyx_kwds_len) < 0` compared a 0/1 value against 0 and was
     * always false, making the negative-count error path dead code. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23925, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23925, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_encoder_stats", 0) < (0)) __PYX_ERR(0, 23925, __pyx_L3_error)
      /* Verify the required argument was supplied either positionally or
       * by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_encoder_stats", 1, 1, 1, i); __PYX_ERR(0, 23925, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23925, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23925, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_encoder_stats", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23925, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_494vgpu_instance_get_encoder_stats(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation body for vgpu_instance_get_encoder_stats:
 * forwards to the C fast-path (skip_dispatch=1) and returns its 3-tuple
 * result, or NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_494vgpu_instance_get_encoder_stats(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;  /* owned result of the fast-path call */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_stats", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_stats(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23925, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23949
 * 
 * 
 * cpdef object vgpu_instance_get_encoder_sessions(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves information about all active encoder sessions on a vGPU Instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_497vgpu_instance_get_encoder_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast-path for cpdef vgpu_instance_get_encoder_sessions.  Two-call size
 * query pattern: first call with a NULL buffer to learn the session count,
 * then allocate an EncoderSessionInfo wrapper of that size and call again to
 * fill it.  Returns the EncoderSessionInfo object (possibly empty when the
 * count is 0), or 0 with an exception set on error.  Both NVML calls run
 * with the GIL released.  Generated code: labels and macro-referenced
 * locals must keep their exact names. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_sessions(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_session_count[1];  /* in/out count for both NVML calls */
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v_session_info = 0;
  nvmlEncoderSessionInfo_t *__pyx_v_session_info_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_sessions", 0);

  /* "cuda/bindings/_nvml.pyx":23957
 *     .. seealso:: `nvmlVgpuInstanceGetEncoderSessions`
 *     """
 *     cdef unsigned int[1] session_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
*/
  /* Initialize session_count to {0} via a temporary (Cython's array-literal
   * assignment). */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_session_count[0]), __pyx_t_1, sizeof(__pyx_v_session_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":23958
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
*/
  /* First call (NULL buffer): size query only — GIL released. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23959
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef EncoderSessionInfo session_info = EncoderSessionInfo(session_count[0])
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderSessions(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), ((unsigned int *)__pyx_v_session_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23959, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":23958
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23960
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef EncoderSessionInfo session_info = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_info_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_info._get_ptr())
*/
  /* check_status_size: size-query variant of the status check (returns 1 on
   * error with an exception set). */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 23960, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23961
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 *     cdef EncoderSessionInfo session_info = EncoderSessionInfo(session_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlEncoderSessionInfo_t *session_info_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:
*/
  /* Construct EncoderSessionInfo(session_count[0]) via vectorcall. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_session_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23961, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23961, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_session_info = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":23962
 *     check_status_size(__status__)
 *     cdef EncoderSessionInfo session_info = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_info_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_info._get_ptr())             # <<<<<<<<<<<<<<
 *     if session_count[0] == 0:
 *         return session_info
*/
  /* _get_ptr() returns the wrapper's buffer address as an intptr_t; cast it
   * back to the NVML struct pointer used by the second call. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v_session_info->__pyx_vtab)->_get_ptr(__pyx_v_session_info); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23962, __pyx_L1_error)
  __pyx_v_session_info_ptr = ((nvmlEncoderSessionInfo_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":23963
 *     cdef EncoderSessionInfo session_info = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_info_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_info
 *     with nogil:
*/
  /* No active sessions: skip the second NVML call and return the empty
   * wrapper. */
  __pyx_t_9 = ((__pyx_v_session_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":23964
 *     cdef nvmlEncoderSessionInfo_t *session_info_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:
 *         return session_info             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_session_info);
    __pyx_r = ((PyObject *)__pyx_v_session_info);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":23963
 *     cdef EncoderSessionInfo session_info = EncoderSessionInfo(session_count[0])
 *     cdef nvmlEncoderSessionInfo_t *session_info_ptr = <nvmlEncoderSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_info
 *     with nogil:
*/
  }

  /* "cuda/bindings/_nvml.pyx":23965
 *     if session_count[0] == 0:
 *         return session_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
*/
  /* Second call: fill the allocated buffer — GIL released again. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23966
 *         return session_info
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return session_info
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderSessions(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), ((unsigned int *)__pyx_v_session_count), __pyx_v_session_info_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23966, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":23965
 *     if session_count[0] == 0:
 *         return session_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23967
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return session_info
 * 
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 23967, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23968
 *         __status__ = nvmlVgpuInstanceGetEncoderSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 *     return session_info             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_session_info);
  __pyx_r = ((PyObject *)__pyx_v_session_info);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23949
 * 
 * 
 * cpdef object vgpu_instance_get_encoder_sessions(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves information about all active encoder sessions on a vGPU Instance.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_session_info);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_497vgpu_instance_get_encoder_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_496vgpu_instance_get_encoder_sessions, "vgpu_instance_get_encoder_sessions(unsigned int vgpu_instance)\n\nRetrieves information about all active encoder sessions on a vGPU Instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetEncoderSessions`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_497vgpu_instance_get_encoder_sessions = {"vgpu_instance_get_encoder_sessions", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_497vgpu_instance_get_encoder_sessions, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_496vgpu_instance_get_encoder_sessions};
/* Python wrapper for vgpu_instance_get_encoder_sessions.
 * Unpacks exactly one positional-or-keyword argument ("vgpu_instance"),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * Returns the pf result, or NULL with a Python exception set.
 *
 * BUGFIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; with the usual definition of unlikely()
 * as __builtin_expect(!!(x), 0) its value is always 0 or 1, so the
 * comparison can never be true and a negative count from
 * __Pyx_NumKwargs_FASTCALL would go unreported. The intended check is
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_497vgpu_instance_get_encoder_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_sessions (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: test the *count* for a negative (error) value. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23949, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stash positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23949, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_encoder_sessions", 0) < (0)) __PYX_ERR(0, 23949, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_encoder_sessions", 1, 1, 1, i); __PYX_ERR(0, 23949, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23949, __pyx_L3_error)
    }
    /* Convert the single argument to unsigned int ((unsigned)-1 + pending
     * exception signals a conversion failure). */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23949, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_encoder_sessions", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23949, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any stashed argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_496vgpu_instance_get_encoder_sessions(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level entry body for vgpu_instance_get_encoder_sessions: a thin
 * trampoline that forwards to the cpdef C implementation with
 * skip_dispatch=1 and passes the owned result reference through unchanged.
 * Returns NULL (exception already set by the callee) on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_496vgpu_instance_get_encoder_sessions(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_encoder_sessions", 0);
  /* Delegate to the C implementation; a NULL result means it raised. */
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_sessions(__pyx_v_vgpu_instance, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 23949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  /* Transfer ownership of the reference to the caller. */
  __pyx_r = __pyx_result;
  __pyx_result = NULL;
  goto __pyx_L0;

  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_encoder_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23971
 * 
 * 
 * cpdef object vgpu_instance_get_fbc_stats(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the active frame buffer capture sessions statistics of a vGPU Instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_499vgpu_instance_get_fbc_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function `vgpu_instance_get_fbc_stats`.
 * Allocates a Python-side FBCStats wrapper, reinterprets the integer
 * returned by its _get_ptr() method as a pointer to the wrapped
 * nvmlFBCStats_t storage, releases the GIL while calling
 * nvmlVgpuInstanceGetFBCStats to fill it, runs the status through
 * check_status(), and returns the populated wrapper.
 * On error, returns 0/NULL with a Python exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_stats(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *__pyx_v_fbc_stats_py = 0;
  nvmlFBCStats_t *__pyx_v_fbc_stats;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_fbc_stats", 0);

  /* "cuda/bindings/_nvml.pyx":23982
 *     .. seealso:: `nvmlVgpuInstanceGetFBCStats`
 *     """
 *     cdef FBCStats fbc_stats_py = FBCStats()             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:
 */
  /* Construct FBCStats() with no arguments via the vectorcall helper. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23982, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_fbc_stats_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":23983
 *     """
 *     cdef FBCStats fbc_stats_py = FBCStats()
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCStats(<nvmlVgpuInstance_t>vgpu_instance, fbc_stats)
 */
  /* _get_ptr() yields an intptr_t; reinterpret it as the address of the
   * wrapper's underlying nvmlFBCStats_t buffer (owned by fbc_stats_py). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCStats *)__pyx_v_fbc_stats_py->__pyx_vtab)->_get_ptr(__pyx_v_fbc_stats_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23983, __pyx_L1_error)
  __pyx_v_fbc_stats = ((nvmlFBCStats_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":23984
 *     cdef FBCStats fbc_stats_py = FBCStats()
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFBCStats(<nvmlVgpuInstance_t>vgpu_instance, fbc_stats)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML call; both exit paths below restore it. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":23985
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCStats(<nvmlVgpuInstance_t>vgpu_instance, fbc_stats)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return fbc_stats_py
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a
         * sentinel status meaning the binding failed to load the symbol; it is
         * treated as a Python error only if an exception is pending (checked
         * with the GIL temporarily re-acquired) — confirm in cy_nvml. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCStats(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_fbc_stats); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23985, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":23984
 *     cdef FBCStats fbc_stats_py = FBCStats()
 *     cdef nvmlFBCStats_t *fbc_stats = <nvmlFBCStats_t *><intptr_t>(fbc_stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFBCStats(<nvmlVgpuInstance_t>vgpu_instance, fbc_stats)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: re-acquire the GIL, then propagate. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":23986
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCStats(<nvmlVgpuInstance_t>vgpu_instance, fbc_stats)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return fbc_stats_py
 * 
 */
  /* check_status() returning 1 signals a Python exception has been raised. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 23986, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":23987
 *         __status__ = nvmlVgpuInstanceGetFBCStats(<nvmlVgpuInstance_t>vgpu_instance, fbc_stats)
 *     check_status(__status__)
 *     return fbc_stats_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: return a new reference to the populated wrapper. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_fbc_stats_py);
  __pyx_r = ((PyObject *)__pyx_v_fbc_stats_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23971
 * 
 * 
 * cpdef object vgpu_instance_get_fbc_stats(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the active frame buffer capture sessions statistics of a vGPU Instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop temporaries, record a traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fbc_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_fbc_stats_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_499vgpu_instance_get_fbc_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_498vgpu_instance_get_fbc_stats, "vgpu_instance_get_fbc_stats(unsigned int vgpu_instance)\n\nRetrieves the active frame buffer capture sessions statistics of a vGPU Instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    nvmlFBCStats_t: Reference to nvmlFBCStats_t structure containing NvFBC stats.\n\n.. seealso:: `nvmlVgpuInstanceGetFBCStats`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_499vgpu_instance_get_fbc_stats = {"vgpu_instance_get_fbc_stats", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_499vgpu_instance_get_fbc_stats, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_498vgpu_instance_get_fbc_stats};
/* Python wrapper for vgpu_instance_get_fbc_stats.
 * Unpacks exactly one positional-or-keyword argument ("vgpu_instance"),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * Returns the pf result, or NULL with a Python exception set.
 *
 * BUGFIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; with the usual definition of unlikely()
 * as __builtin_expect(!!(x), 0) its value is always 0 or 1, so the
 * comparison can never be true and a negative count from
 * __Pyx_NumKwargs_FASTCALL would go unreported. The intended check is
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_499vgpu_instance_get_fbc_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_fbc_stats (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: test the *count* for a negative (error) value. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23971, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stash positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23971, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_fbc_stats", 0) < (0)) __PYX_ERR(0, 23971, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_fbc_stats", 1, 1, 1, i); __PYX_ERR(0, 23971, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23971, __pyx_L3_error)
    }
    /* Convert the single argument to unsigned int ((unsigned)-1 + pending
     * exception signals a conversion failure). */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23971, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_fbc_stats", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23971, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any stashed argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fbc_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_498vgpu_instance_get_fbc_stats(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level entry body for vgpu_instance_get_fbc_stats: a thin trampoline
 * that forwards to the cpdef C implementation with skip_dispatch=1 and
 * passes the owned result reference through unchanged.
 * Returns NULL (exception already set by the callee) on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_498vgpu_instance_get_fbc_stats(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_fbc_stats", 0);
  /* Delegate to the C implementation; a NULL result means it raised. */
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_stats(__pyx_v_vgpu_instance, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 23971, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  /* Transfer ownership of the reference to the caller. */
  __pyx_r = __pyx_result;
  __pyx_result = NULL;
  goto __pyx_L0;

  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fbc_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":23990
 * 
 * 
 * cpdef object vgpu_instance_get_fbc_sessions(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active frame buffer capture sessions on a vGPU Instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_501vgpu_instance_get_fbc_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function `vgpu_instance_get_fbc_sessions`.
 * Two-pass NVML query: the first nvmlVgpuInstanceGetFBCSessions call (GIL
 * released, info pointer NULL) obtains the session count; a FBCSessionInfo
 * wrapper sized to that count is then allocated and, unless the count is
 * zero, a second nogil call fills its underlying buffer.
 * Returns the wrapper object, or 0/NULL with a Python exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_sessions(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_session_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v_session_info = 0;
  nvmlFBCSessionInfo_t *__pyx_v_session_info_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_fbc_sessions", 0);

  /* "cuda/bindings/_nvml.pyx":23998
 *     .. seealso:: `nvmlVgpuInstanceGetFBCSessions`
 *     """
 *     cdef unsigned int[1] session_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 */
  /* Zero-initialize the one-element session_count array (Cython's
   * array-literal assignment compiles to a memcpy from a temporary). */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_session_count[0]), __pyx_t_1, sizeof(__pyx_v_session_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":23999
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 */
  /* First pass (GIL released): query only the session count by passing a
   * NULL output buffer. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24000
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a
         * sentinel status for a symbol-loading failure inside the binding;
         * treated as a Python error only if an exception is pending — confirm
         * in cy_nvml. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCSessions(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), ((unsigned int *)__pyx_v_session_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24000, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":23999
 *     """
 *     cdef unsigned int[1] session_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: re-acquire the GIL, then propagate. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24001
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 */
  /* check_status_size() returning 1 signals a raised Python error; it
   * presumably tolerates the insufficient-size status of the count-only
   * query — confirm against its definition. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24001, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24002
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, NULL)
 *     check_status_size(__status__)
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:
 */
  /* Allocate FBCSessionInfo(session_count[0]) via the vectorcall helper. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_session_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 24002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24002, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_session_info = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":24003
 *     check_status_size(__status__)
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())             # <<<<<<<<<<<<<<
 *     if session_count[0] == 0:
 *         return session_info
 */
  /* _get_ptr() yields an intptr_t; reinterpret it as the address of the
   * wrapper's underlying nvmlFBCSessionInfo_t buffer (owned by session_info). */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v_session_info->__pyx_vtab)->_get_ptr(__pyx_v_session_info); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24003, __pyx_L1_error)
  __pyx_v_session_info_ptr = ((nvmlFBCSessionInfo_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":24004
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_info
 *     with nogil:
 */
  /* No active sessions: skip the second NVML call and return the empty
   * wrapper immediately. */
  __pyx_t_9 = ((__pyx_v_session_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":24005
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:
 *         return session_info             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_session_info);
    __pyx_r = ((PyObject *)__pyx_v_session_info);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":24004
 *     cdef FBCSessionInfo session_info = FBCSessionInfo(session_count[0])
 *     cdef nvmlFBCSessionInfo_t *session_info_ptr = <nvmlFBCSessionInfo_t *><intptr_t>(session_info._get_ptr())
 *     if session_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return session_info
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":24006
 *     if session_count[0] == 0:
 *         return session_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 */
  /* Second pass (GIL released): fill the wrapper's buffer with up to
   * session_count[0] entries. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24007
 *         return session_info
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return session_info
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCSessions(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), ((unsigned int *)__pyx_v_session_count), __pyx_v_session_info_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24007, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24006
 *     if session_count[0] == 0:
 *         return session_info
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          /* Error inside the nogil region: re-acquire the GIL, then propagate. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24008
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return session_info
 * 
 */
  /* check_status() returning 1 signals a Python exception has been raised. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24008, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24009
 *         __status__ = nvmlVgpuInstanceGetFBCSessions(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>session_count, session_info_ptr)
 *     check_status(__status__)
 *     return session_info             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: return a new reference to the populated wrapper. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_session_info);
  __pyx_r = ((PyObject *)__pyx_v_session_info);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":23990
 * 
 * 
 * cpdef object vgpu_instance_get_fbc_sessions(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active frame buffer capture sessions on a vGPU Instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: drop temporaries, record a traceback frame, return NULL. */
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fbc_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_session_info);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_501vgpu_instance_get_fbc_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_500vgpu_instance_get_fbc_sessions, "vgpu_instance_get_fbc_sessions(unsigned int vgpu_instance)\n\nRetrieves information about active frame buffer capture sessions on a vGPU Instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetFBCSessions`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_501vgpu_instance_get_fbc_sessions = {"vgpu_instance_get_fbc_sessions", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_501vgpu_instance_get_fbc_sessions, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_500vgpu_instance_get_fbc_sessions};
/* Python wrapper for vgpu_instance_get_fbc_sessions.
 * Unpacks exactly one positional-or-keyword argument ("vgpu_instance"),
 * converts it to unsigned int, and dispatches to the pf implementation.
 * Returns the pf result, or NULL with a Python exception set.
 *
 * BUGFIX(review): the keyword-count error check was written as
 * `unlikely(__pyx_kwds_len) < 0`; with the usual definition of unlikely()
 * as __builtin_expect(!!(x), 0) its value is always 0 or 1, so the
 * comparison can never be true and a negative count from
 * __Pyx_NumKwargs_FASTCALL would go unreported. The intended check is
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_501vgpu_instance_get_fbc_sessions(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_fbc_sessions (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed parenthesization: test the *count* for a negative (error) value. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 23990, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stash positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23990, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_fbc_sessions", 0) < (0)) __PYX_ERR(0, 23990, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_fbc_sessions", 1, 1, 1, i); __PYX_ERR(0, 23990, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional fast path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23990, __pyx_L3_error)
    }
    /* Convert the single argument to unsigned int ((unsigned)-1 + pending
     * exception signals a conversion failure). */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23990, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_fbc_sessions", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23990, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any stashed argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fbc_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_500vgpu_instance_get_fbc_sessions(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for vgpu_instance_get_fbc_sessions: forwards the already
 * converted C argument to the cpdef C implementation (skip_dispatch=1 avoids
 * re-entering the Python dispatch path) and returns the resulting Python
 * object (new reference), or NULL with an exception set on failure.
 * NOTE(review): Cython-generated code — fix behavior in _nvml.pyx, not here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_500vgpu_instance_get_fbc_sessions(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_fbc_sessions", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level implementation; returns a new reference or NULL. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_sessions(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_fbc_sessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24012
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_gpu_instance_id(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPU Instance ID for the given vGPU Instance. The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_503vgpu_instance_get_gpu_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef vgpu_instance_get_gpu_instance_id: releases the
 * GIL, calls nvmlVgpuInstanceGetGpuInstanceId, re-acquires the GIL, raises
 * via check_status() on a bad NVML status, and returns the GPU instance id.
 * Declared `except? 0` in the .pyx, so 0 is the (ambiguous) error sentinel.
 * NOTE(review): Cython-generated code — fix behavior in _nvml.pyx, not here. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_instance_id(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_gpu_instance_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24024
 *     """
 *     cdef unsigned int gpu_instance_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetGpuInstanceId(<nvmlVgpuInstance_t>vgpu_instance, &gpu_instance_id)
 *     check_status(__status__)
*/
  /* `with nogil:` — drop the GIL around the blocking NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24025
 *     cdef unsigned int gpu_instance_id
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuInstanceId(<nvmlVgpuInstance_t>vgpu_instance, &gpu_instance_id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gpu_instance_id
*/
        /* The sentinel status signals the lazy-loading shim failed; in that
         * case a Python exception may already be pending (checked with GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuInstanceId(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_gpu_instance_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24025, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24024
 *     """
 *     cdef unsigned int gpu_instance_id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetGpuInstanceId(<nvmlVgpuInstance_t>vgpu_instance, &gpu_instance_id)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24026
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuInstanceId(<nvmlVgpuInstance_t>vgpu_instance, &gpu_instance_id)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return gpu_instance_id
 * 
*/
  /* Raise the mapped Python exception if NVML returned a failure status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24026, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24027
 *         __status__ = nvmlVgpuInstanceGetGpuInstanceId(<nvmlVgpuInstance_t>vgpu_instance, &gpu_instance_id)
 *     check_status(__status__)
 *     return gpu_instance_id             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_gpu_instance_id;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24012
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_gpu_instance_id(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPU Instance ID for the given vGPU Instance. The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_gpu_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_503vgpu_instance_get_gpu_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_502vgpu_instance_get_gpu_instance_id, "vgpu_instance_get_gpu_instance_id(unsigned int vgpu_instance) -> unsigned int\n\nRetrieve the GPU Instance ID for the given vGPU Instance. The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    unsigned int: GPU Instance ID.\n\n.. seealso:: `nvmlVgpuInstanceGetGpuInstanceId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_503vgpu_instance_get_gpu_instance_id = {"vgpu_instance_get_gpu_instance_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_503vgpu_instance_get_gpu_instance_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_502vgpu_instance_get_gpu_instance_id};
/* Python wrapper for vgpu_instance_get_gpu_instance_id: parses the single
 * "vgpu_instance" positional/keyword argument, converts it to C `unsigned
 * int`, and delegates to the __pyx_pf_ body.  Returns a new reference or
 * NULL with an exception set.
 *
 * Fix: the keyword-length guard read `if (unlikely(__pyx_kwds_len) < 0)`.
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose `!!` result is
 * 0 or 1, so `< 0` was always false and the negative-length error path was
 * dead code.  The parenthesis is moved so the whole comparison sits inside
 * unlikely(), restoring the intended check.
 * NOTE(review): Cython-generated code — this fix should be mirrored upstream
 * (in the generator/.pyx pipeline) or it will be lost on regeneration. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_503vgpu_instance_get_gpu_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_gpu_instance_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24012, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24012, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_gpu_instance_id", 0) < (0)) __PYX_ERR(0, 24012, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_gpu_instance_id", 1, 1, 1, i); __PYX_ERR(0, 24012, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24012, __pyx_L3_error)
    }
    /* Convert the Python int to unsigned int; (unsigned int)-1 doubles as the
     * conversion-error sentinel, disambiguated by PyErr_Occurred(). */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24012, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_gpu_instance_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24012, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_gpu_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_502vgpu_instance_get_gpu_instance_id(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for vgpu_instance_get_gpu_instance_id: calls the C
 * implementation (skip_dispatch=1), then boxes the resulting `unsigned int`
 * into a Python int.  Because the cpdef is declared `except? 0`, a return of
 * 0 is only an error if PyErr_Occurred() confirms a pending exception.
 * NOTE(review): Cython-generated code — fix behavior in _nvml.pyx, not here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_502vgpu_instance_get_gpu_instance_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_gpu_instance_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous error sentinel (`except? 0`): double-check PyErr. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_instance_id(__pyx_v_vgpu_instance, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24012, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24012, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_gpu_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24030
 * 
 * 
 * cpdef str vgpu_instance_get_gpu_pci_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI Id of the given vGPU Instance i.e. the PCI Id of the GPU as seen inside the VM.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_505vgpu_instance_get_gpu_pci_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef vgpu_instance_get_gpu_pci_id.  Uses the standard
 * two-call NVML string pattern: first call with a NULL buffer to learn the
 * required length (status checked with check_status_size, which presumably
 * tolerates the insufficient-size status — TODO confirm in _nvml.pyx), then
 * allocates a Python bytes object of that length as a writable scratch
 * buffer, calls again to fill it, and finally decodes the NUL-terminated
 * C string into a Python str.  Returns "" when NVML reports a zero length.
 * Both NVML calls run with the GIL released.
 * NOTE(review): Cython-generated code — fix behavior in _nvml.pyx, not here. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_pci_id(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_length[1];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_v__vgpu_pci_id_ = 0;
  char *__pyx_v_vgpu_pci_id;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  char *__pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_gpu_pci_id", 0);

  /* "cuda/bindings/_nvml.pyx":24038
 *     .. seealso:: `nvmlVgpuInstanceGetGpuPciId`
 *     """
 *     cdef unsigned int[1] length = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)
*/
  /* length = [0] — one-element array so its address can be passed to NVML. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_length[0]), __pyx_t_1, sizeof(__pyx_v_length[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":24039
 *     """
 *     cdef unsigned int[1] length = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)
 *     check_status_size(__status__)
*/
  /* First NVML call (buffer=NULL) to query the required string length. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24040
 *     cdef unsigned int[1] length = [0]
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if length[0] == 0:
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuPciId(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), NULL, ((unsigned int *)__pyx_v_length)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24040, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24039
 *     """
 *     cdef unsigned int[1] length = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24041
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if length[0] == 0:
 *         return ""
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24041, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24042
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)
 *     check_status_size(__status__)
 *     if length[0] == 0:             # <<<<<<<<<<<<<<
 *         return ""
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])
*/
  __pyx_t_4 = ((__pyx_v_length[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":24043
 *     check_status_size(__status__)
 *     if length[0] == 0:
 *         return ""             # <<<<<<<<<<<<<<
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])
 *     cdef char* vgpu_pci_id = _vgpu_pci_id_
*/
    /* Zero-length result: return the interned empty-string constant. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__7);
    __pyx_r = __pyx_mstate_global->__pyx_kp_u__7;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":24042
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, NULL, <unsigned int*>length)
 *     check_status_size(__status__)
 *     if length[0] == 0:             # <<<<<<<<<<<<<<
 *         return ""
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])
*/
  }

  /* "cuda/bindings/_nvml.pyx":24044
 *     if length[0] == 0:
 *         return ""
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])             # <<<<<<<<<<<<<<
 *     cdef char* vgpu_pci_id = _vgpu_pci_id_
 *     with nogil:
*/
  /* bytes(length[0]) allocates a zero-filled buffer of the reported size. */
  __pyx_t_6 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_length[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_7};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)(&PyBytes_Type), __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24044, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  __pyx_v__vgpu_pci_id_ = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24045
 *         return ""
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])
 *     cdef char* vgpu_pci_id = _vgpu_pci_id_             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, vgpu_pci_id, <unsigned int*>length)
*/
  /* Borrow the bytes object's internal buffer as a writable char*. */
  __pyx_t_9 = __Pyx_PyBytes_AsWritableString(__pyx_v__vgpu_pci_id_); if (unlikely((!__pyx_t_9) && PyErr_Occurred())) __PYX_ERR(0, 24045, __pyx_L1_error)
  __pyx_v_vgpu_pci_id = __pyx_t_9;

  /* "cuda/bindings/_nvml.pyx":24046
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])
 *     cdef char* vgpu_pci_id = _vgpu_pci_id_
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, vgpu_pci_id, <unsigned int*>length)
 *     check_status(__status__)
*/
  /* Second NVML call fills the buffer with the PCI id string. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24047
 *     cdef char* vgpu_pci_id = _vgpu_pci_id_
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, vgpu_pci_id, <unsigned int*>length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(vgpu_pci_id)
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuPciId(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_vgpu_pci_id, ((unsigned int *)__pyx_v_length)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24047, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24046
 *     cdef bytes _vgpu_pci_id_ = bytes(length[0])
 *     cdef char* vgpu_pci_id = _vgpu_pci_id_
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, vgpu_pci_id, <unsigned int*>length)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24048
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, vgpu_pci_id, <unsigned int*>length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(vgpu_pci_id)
 * 
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24048, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24049
 *         __status__ = nvmlVgpuInstanceGetGpuPciId(<nvmlVgpuInstance_t>vgpu_instance, vgpu_pci_id, <unsigned int*>length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(vgpu_pci_id)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Decode up to the first NUL; the temporary bytes buffer is released
   * in the common exit path below. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = PyUnicode_FromString(__pyx_v_vgpu_pci_id); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24049, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24030
 * 
 * 
 * cpdef str vgpu_instance_get_gpu_pci_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI Id of the given vGPU Instance i.e. the PCI Id of the GPU as seen inside the VM.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_gpu_pci_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v__vgpu_pci_id_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_505vgpu_instance_get_gpu_pci_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_504vgpu_instance_get_gpu_pci_id, "vgpu_instance_get_gpu_pci_id(unsigned int vgpu_instance) -> str\n\nRetrieves the PCI Id of the given vGPU Instance i.e. the PCI Id of the GPU as seen inside the VM.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetGpuPciId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_505vgpu_instance_get_gpu_pci_id = {"vgpu_instance_get_gpu_pci_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_505vgpu_instance_get_gpu_pci_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_504vgpu_instance_get_gpu_pci_id};
/* Python wrapper for vgpu_instance_get_gpu_pci_id: parses the single
 * "vgpu_instance" positional/keyword argument, converts it to C `unsigned
 * int`, and delegates to the __pyx_pf_ body.  Returns a new reference or
 * NULL with an exception set.
 *
 * Fix: the keyword-length guard read `if (unlikely(__pyx_kwds_len) < 0)`.
 * `unlikely(x)` expands to `__builtin_expect(!!(x), 0)`, whose `!!` result is
 * 0 or 1, so `< 0` was always false and the negative-length error path was
 * dead code.  The parenthesis is moved so the whole comparison sits inside
 * unlikely(), restoring the intended check.
 * NOTE(review): Cython-generated code — this fix should be mirrored upstream
 * (in the generator/.pyx pipeline) or it will be lost on regeneration. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_505vgpu_instance_get_gpu_pci_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_gpu_pci_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely(); see header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24030, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24030, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_gpu_pci_id", 0) < (0)) __PYX_ERR(0, 24030, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_gpu_pci_id", 1, 1, 1, i); __PYX_ERR(0, 24030, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24030, __pyx_L3_error)
    }
    /* Convert the Python int to unsigned int; (unsigned int)-1 doubles as the
     * conversion-error sentinel, disambiguated by PyErr_Occurred(). */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24030, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_gpu_pci_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24030, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_gpu_pci_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_504vgpu_instance_get_gpu_pci_id(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for vgpu_instance_get_gpu_pci_id: forwards the already
 * converted C argument to the cpdef C implementation (skip_dispatch=1) and
 * returns the resulting Python str (new reference), or NULL with an
 * exception set on failure.
 * NOTE(review): Cython-generated code — fix behavior in _nvml.pyx, not here. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_504vgpu_instance_get_gpu_pci_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_gpu_pci_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level implementation; returns a new reference or NULL. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_pci_id(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_gpu_pci_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24052
 * 
 * 
 * cpdef unsigned int vgpu_type_get_capabilities(unsigned int vgpu_type_id, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested capability for a given vGPU type. Refer to the ``nvmlVgpuCapability_t`` structure for the specific capabilities that can be queried. The return value in ``capResult`` should be treated as a boolean, with a non-zero value indicating that the capability is supported.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_507vgpu_type_get_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef vgpu_type_get_capabilities: releases the GIL,
 * calls nvmlVgpuTypeGetCapabilities for (vgpu_type_id, capability),
 * re-acquires the GIL, raises via check_status() on a bad NVML status, and
 * returns the boolean-valued capability result.  Declared `except? 0` in the
 * .pyx, so 0 is the (ambiguous) error sentinel.
 * NOTE(review): Cython-generated code — fix behavior in _nvml.pyx, not here. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_capabilities(unsigned int __pyx_v_vgpu_type_id, int __pyx_v_capability, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_cap_result;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24065
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetCapabilities(<nvmlVgpuTypeId_t>vgpu_type_id, <_VgpuCapability>capability, &cap_result)
 *     check_status(__status__)
*/
  /* `with nogil:` — drop the GIL around the blocking NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24066
 *     cdef unsigned int cap_result
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetCapabilities(<nvmlVgpuTypeId_t>vgpu_type_id, <_VgpuCapability>capability, &cap_result)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cap_result
*/
        /* The sentinel status signals the lazy-loading shim failed; in that
         * case a Python exception may already be pending (checked with GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetCapabilities(((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), ((__pyx_t_4cuda_8bindings_5_nvml__VgpuCapability)__pyx_v_capability), (&__pyx_v_cap_result)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24066, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24065
 *     """
 *     cdef unsigned int cap_result
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetCapabilities(<nvmlVgpuTypeId_t>vgpu_type_id, <_VgpuCapability>capability, &cap_result)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24067
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetCapabilities(<nvmlVgpuTypeId_t>vgpu_type_id, <_VgpuCapability>capability, &cap_result)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cap_result
 * 
*/
  /* Raise the mapped Python exception if NVML returned a failure status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24067, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24068
 *         __status__ = nvmlVgpuTypeGetCapabilities(<nvmlVgpuTypeId_t>vgpu_type_id, <_VgpuCapability>capability, &cap_result)
 *     check_status(__status__)
 *     return cap_result             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_cap_result;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24052
 * 
 * 
 * cpdef unsigned int vgpu_type_get_capabilities(unsigned int vgpu_type_id, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested capability for a given vGPU type. Refer to the ``nvmlVgpuCapability_t`` structure for the specific capabilities that can be queried. The return value in ``capResult`` should be treated as a boolean, with a non-zero value indicating that the capability is supported.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_507vgpu_type_get_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_506vgpu_type_get_capabilities, "vgpu_type_get_capabilities(unsigned int vgpu_type_id, int capability) -> unsigned int\n\nRetrieve the requested capability for a given vGPU type. Refer to the ``nvmlVgpuCapability_t`` structure for the specific capabilities that can be queried. The return value in ``capResult`` should be treated as a boolean, with a non-zero value indicating that the capability is supported.\n\nArgs:\n    vgpu_type_id (unsigned int): Handle to vGPU type.\n    capability (VgpuCapability): Specifies the ``nvmlVgpuCapability_t`` to be queried.\n\nReturns:\n    unsigned int: A boolean for the queried capability indicating that feature is supported.\n\n.. seealso:: `nvmlVgpuTypeGetCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_507vgpu_type_get_capabilities = {"vgpu_type_get_capabilities", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_507vgpu_type_get_capabilities, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_506vgpu_type_get_capabilities};
/* Python wrapper for vgpu_type_get_capabilities(vgpu_type_id, capability).
 * Unpacks positional/keyword arguments, converts them to C `unsigned int`
 * and `int`, and delegates to the _506 implementation function.
 * Returns a new reference, or NULL with a Python exception set on error.
 * Fix vs. generated code: the keyword-length error check below previously
 * read `unlikely(__pyx_kwds_len) < 0`, which expands to `(!!(len)) < 0`
 * and is therefore always false (dead check); the parenthesis has been
 * moved so a negative length from __Pyx_NumKwargs_FASTCALL is detected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_507vgpu_type_get_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_type_id;
  int __pyx_v_capability;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_capabilities (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,&__pyx_mstate_global->__pyx_n_u_capability,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved inside unlikely() so the negative-length check is live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24052, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positional slots first, then parse keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24052, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24052, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_type_get_capabilities", 0) < (0)) __PYX_ERR(0, 24052, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_type_get_capabilities", 1, 2, 2, i); __PYX_ERR(0, 24052, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24052, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24052, __pyx_L3_error)
    }
    /* Convert Python ints to the C parameter types. */
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24052, __pyx_L3_error)
    __pyx_v_capability = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_capability == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24052, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_type_get_capabilities", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24052, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release any borrowed/new references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_506vgpu_type_get_capabilities(__pyx_self, __pyx_v_vgpu_type_id, __pyx_v_capability);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body for vgpu_type_get_capabilities: calls the C-level
 * cpdef function (with skip_dispatch=1, since dispatch was already resolved)
 * and boxes the resulting unsigned int into a Python int.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_506vgpu_type_get_capabilities(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_type_id, int __pyx_v_capability) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_capabilities", 0);
  __Pyx_XDECREF(__pyx_r);
  /* A return value of 0 is ambiguous (valid result or error), so PyErr_Occurred() disambiguates. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_capabilities(__pyx_v_vgpu_type_id, __pyx_v_capability, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24052, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24052, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24071
 * 
 * 
 * cpdef str vgpu_instance_get_mdev_uuid(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the MDEV UUID of a vGPU instance.
 * 
*/

/* Forward declaration of the Python wrapper for vgpu_instance_get_mdev_uuid. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_509vgpu_instance_get_mdev_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef vgpu_instance_get_mdev_uuid():
 * calls nvmlVgpuInstanceGetMdevUUID with the GIL released, writing the UUID
 * into an 80-byte stack buffer, raises on a bad NVML status via check_status,
 * and returns the UUID as a new Python str.  Returns NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_mdev_uuid(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_size;
  char __pyx_v_mdev_uuid[80];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_mdev_uuid", 0);

  /* "cuda/bindings/_nvml.pyx":24079
 *     .. seealso:: `nvmlVgpuInstanceGetMdevUUID`
 *     """
 *     cdef unsigned int size = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] mdev_uuid
 *     with nogil:
 */
  __pyx_v_size = 80;

  /* "cuda/bindings/_nvml.pyx":24081
 *     cdef unsigned int size = 80
 *     cdef char[80] mdev_uuid
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetMdevUUID(<nvmlVgpuInstance_t>vgpu_instance, mdev_uuid, size)
 *     check_status(__status__)
 */
  {
      /* Release the GIL around the (potentially blocking) NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24082
 *     cdef char[80] mdev_uuid
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetMdevUUID(<nvmlVgpuInstance_t>vgpu_instance, mdev_uuid, size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(mdev_uuid)
 */
        /* The sentinel status marks an internal library-loading failure raised as a Python error. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMdevUUID(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_mdev_uuid, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24082, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24081
 *     cdef unsigned int size = 80
 *     cdef char[80] mdev_uuid
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetMdevUUID(<nvmlVgpuInstance_t>vgpu_instance, mdev_uuid, size)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Re-acquire the GIL on both the normal and the error path. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24083
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetMdevUUID(<nvmlVgpuInstance_t>vgpu_instance, mdev_uuid, size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(mdev_uuid)
 * 
 */
  /* check_status raises the appropriate Python exception for non-success statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24083, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24084
 *         __status__ = nvmlVgpuInstanceGetMdevUUID(<nvmlVgpuInstance_t>vgpu_instance, mdev_uuid, size)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(mdev_uuid)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_mdev_uuid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24084, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24071
 * 
 * 
 * cpdef str vgpu_instance_get_mdev_uuid(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the MDEV UUID of a vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_mdev_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for
 * vgpu_instance_get_mdev_uuid (FASTCALL with fallback signature). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_509vgpu_instance_get_mdev_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_508vgpu_instance_get_mdev_uuid, "vgpu_instance_get_mdev_uuid(unsigned int vgpu_instance) -> str\n\nRetrieve the MDEV UUID of a vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetMdevUUID`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_509vgpu_instance_get_mdev_uuid = {"vgpu_instance_get_mdev_uuid", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_509vgpu_instance_get_mdev_uuid, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_508vgpu_instance_get_mdev_uuid};
/* Python wrapper for vgpu_instance_get_mdev_uuid(vgpu_instance).
 * Unpacks a single positional/keyword argument, converts it to C
 * `unsigned int`, and delegates to the _508 implementation function.
 * Returns a new reference, or NULL with a Python exception set on error.
 * Fix vs. generated code: the keyword-length error check below previously
 * read `unlikely(__pyx_kwds_len) < 0`, which expands to `(!!(len)) < 0`
 * and is therefore always false (dead check); the parenthesis has been
 * moved so a negative length from __Pyx_NumKwargs_FASTCALL is detected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_509vgpu_instance_get_mdev_uuid(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_mdev_uuid (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved inside unlikely() so the negative-length check is live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24071, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positional slot first, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24071, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_mdev_uuid", 0) < (0)) __PYX_ERR(0, 24071, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_mdev_uuid", 1, 1, 1, i); __PYX_ERR(0, 24071, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24071, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24071, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_mdev_uuid", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24071, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_mdev_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_508vgpu_instance_get_mdev_uuid(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body for vgpu_instance_get_mdev_uuid: delegates to the
 * C-level cpdef function (skip_dispatch=1) and returns the resulting str.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_508vgpu_instance_get_mdev_uuid(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_mdev_uuid", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_mdev_uuid(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24071, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_mdev_uuid", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24087
 * 
 * 
 * cpdef object vgpu_type_get_max_instances_per_gpu_instance():             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances per GPU instance for given vGPU type.
 * 
*/

/* Forward declaration of the Python wrapper (METH_NOARGS) for
 * vgpu_type_get_max_instances_per_gpu_instance. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_511vgpu_type_get_max_instances_per_gpu_instance(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level implementation of cpdef vgpu_type_get_max_instances_per_gpu_instance():
 * allocates a VgpuTypeMaxInstance_v1 Python wrapper object, obtains the raw
 * pointer to its embedded nvmlVgpuTypeMaxInstance_t struct, stamps the NVML
 * versioned-struct header (sizeof | version<<24), calls the driver with the
 * GIL released, checks the status, and returns the populated wrapper object.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_gpu_instance(CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *__pyx_v_p_max_instance_py = 0;
  nvmlVgpuTypeMaxInstance_t *__pyx_v_p_max_instance;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances_per_gpu_instance", 0);

  /* "cuda/bindings/_nvml.pyx":24095
 *     .. seealso:: `nvmlVgpuTypeGetMaxInstancesPerGpuInstance`
 *     """
 *     cdef VgpuTypeMaxInstance_v1 p_max_instance_py = VgpuTypeMaxInstance_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeMaxInstance_t *p_max_instance = <nvmlVgpuTypeMaxInstance_t *><intptr_t>(p_max_instance_py._get_ptr())
 *     p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24)
 */
  /* Instantiate VgpuTypeMaxInstance_v1 via the vectorcall protocol (no arguments). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24095, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_max_instance_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24096
 *     """
 *     cdef VgpuTypeMaxInstance_v1 p_max_instance_py = VgpuTypeMaxInstance_v1()
 *     cdef nvmlVgpuTypeMaxInstance_t *p_max_instance = <nvmlVgpuTypeMaxInstance_t *><intptr_t>(p_max_instance_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() yields the address of the wrapper's underlying C struct as an intptr_t. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)__pyx_v_p_max_instance_py->__pyx_vtab)->_get_ptr(__pyx_v_p_max_instance_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24096, __pyx_L1_error)
  __pyx_v_p_max_instance = ((nvmlVgpuTypeMaxInstance_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24097
 *     cdef VgpuTypeMaxInstance_v1 p_max_instance_py = VgpuTypeMaxInstance_v1()
 *     cdef nvmlVgpuTypeMaxInstance_t *p_max_instance = <nvmlVgpuTypeMaxInstance_t *><intptr_t>(p_max_instance_py._get_ptr())
 *     p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance)
 */
  /* NVML versioned-API convention: low bits = struct size, bits 24+ = version (here v1). */
  __pyx_v_p_max_instance->version = ((sizeof(nvmlVgpuTypeMaxInstance_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":24098
 *     cdef nvmlVgpuTypeMaxInstance_t *p_max_instance = <nvmlVgpuTypeMaxInstance_t *><intptr_t>(p_max_instance_py._get_ptr())
 *     p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance)
 *     check_status(__status__)
 */
  {
      /* Release the GIL around the NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24099
 *     p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_max_instance_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerGpuInstance(__pyx_v_p_max_instance); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24099, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24098
 *     cdef nvmlVgpuTypeMaxInstance_t *p_max_instance = <nvmlVgpuTypeMaxInstance_t *><intptr_t>(p_max_instance_py._get_ptr())
 *     p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Re-acquire the GIL on both the normal and the error path. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24100
 *     with nogil:
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_max_instance_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24100, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24101
 *         __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance)
 *     check_status(__status__)
 *     return p_max_instance_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_max_instance_py);
  __pyx_r = ((PyObject *)__pyx_v_p_max_instance_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24087
 * 
 * 
 * cpdef object vgpu_type_get_max_instances_per_gpu_instance():             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances per GPU instance for given vGPU type.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances_per_gpu_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local reference; the caller holds the INCREF'd result. */
  __Pyx_XDECREF((PyObject *)__pyx_v_p_max_instance_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry (METH_NOARGS) for
 * vgpu_type_get_max_instances_per_gpu_instance. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_511vgpu_type_get_max_instances_per_gpu_instance(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_510vgpu_type_get_max_instances_per_gpu_instance, "vgpu_type_get_max_instances_per_gpu_instance()\n\nRetrieve the maximum number of vGPU instances per GPU instance for given vGPU type.\n\nReturns:\n    nvmlVgpuTypeMaxInstance_v1_t: Pointer to the caller-provided structure of nvmlVgpuTypeMaxInstance_t.\n\n.. seealso:: `nvmlVgpuTypeGetMaxInstancesPerGpuInstance`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_511vgpu_type_get_max_instances_per_gpu_instance = {"vgpu_type_get_max_instances_per_gpu_instance", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_511vgpu_type_get_max_instances_per_gpu_instance, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_510vgpu_type_get_max_instances_per_gpu_instance};
/* METH_NOARGS Python wrapper for vgpu_type_get_max_instances_per_gpu_instance:
 * no argument parsing needed; delegates straight to the _510 body.
 * (__Pyx_KwValues_VARARGS is a macro that does not evaluate its arguments
 * in this configuration, so the unused parameter names are never read.) */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_511vgpu_type_get_max_instances_per_gpu_instance(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances_per_gpu_instance (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_510vgpu_type_get_max_instances_per_gpu_instance(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation body for vgpu_type_get_max_instances_per_gpu_instance:
 * delegates to the C-level cpdef function (skip_dispatch=1) and returns the
 * resulting wrapper object.  Returns a new reference, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_510vgpu_type_get_max_instances_per_gpu_instance(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_type_get_max_instances_per_gpu_instance", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_gpu_instance(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24087, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_type_get_max_instances_per_gpu_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24104
 * 
 * 
 * cpdef gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_scheduler):             # <<<<<<<<<<<<<<
 *     """Set vGPU scheduler state for the given GPU instance.
 * 
*/

/* Forward declaration of the Python wrapper for gpu_instance_set_vgpu_scheduler_state. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_513gpu_instance_set_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef gpu_instance_set_vgpu_scheduler_state():
 * reinterprets the two intptr_t arguments as a GpuInstance handle and a
 * pointer to a caller-provided nvmlVgpuSchedulerState_t, calls the driver
 * with the GIL released, and raises on a bad NVML status via check_status.
 * Returns Py_None on success, NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_scheduler_state(intptr_t __pyx_v_gpu_instance, intptr_t __pyx_v_p_scheduler, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_set_vgpu_scheduler_state", 0);

  /* "cuda/bindings/_nvml.pyx":24113
 *     .. seealso:: `nvmlGpuInstanceSetVgpuSchedulerState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceSetVgpuSchedulerState(<GpuInstance>gpu_instance, <nvmlVgpuSchedulerState_t*>p_scheduler)
 *     check_status(__status__)
 */
  {
      /* Release the GIL around the NVML driver call. */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24114
 *     """
 *     with nogil:
 *         __status__ = nvmlGpuInstanceSetVgpuSchedulerState(<GpuInstance>gpu_instance, <nvmlVgpuSchedulerState_t*>p_scheduler)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuSchedulerState(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), ((nvmlVgpuSchedulerState_t *)__pyx_v_p_scheduler)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24114, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24113
 *     .. seealso:: `nvmlGpuInstanceSetVgpuSchedulerState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceSetVgpuSchedulerState(<GpuInstance>gpu_instance, <nvmlVgpuSchedulerState_t*>p_scheduler)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /* Re-acquire the GIL on both the normal and the error path. */
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24115
 *     with nogil:
 *         __status__ = nvmlGpuInstanceSetVgpuSchedulerState(<GpuInstance>gpu_instance, <nvmlVgpuSchedulerState_t*>p_scheduler)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24115, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24104
 * 
 * 
 * cpdef gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_scheduler):             # <<<<<<<<<<<<<<
 *     """Set vGPU scheduler state for the given GPU instance.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_set_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry for
 * gpu_instance_set_vgpu_scheduler_state (FASTCALL with fallback signature). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_513gpu_instance_set_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_512gpu_instance_set_vgpu_scheduler_state, "gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_scheduler)\n\nSet vGPU scheduler state for the given GPU instance.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n    p_scheduler (intptr_t): Pointer to the caller-provided structure of nvmlVgpuSchedulerState_t.\n\n.. seealso:: `nvmlGpuInstanceSetVgpuSchedulerState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_513gpu_instance_set_vgpu_scheduler_state = {"gpu_instance_set_vgpu_scheduler_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_513gpu_instance_set_vgpu_scheduler_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_512gpu_instance_set_vgpu_scheduler_state};
/* Python wrapper for gpu_instance_set_vgpu_scheduler_state(gpu_instance, p_scheduler).
 * Unpacks positional/keyword arguments, converts them to C intptr_t values,
 * and delegates to the _512 implementation function.  Returns a new
 * reference (None on success), or NULL with a Python exception set on error.
 * Fix vs. generated code: the keyword-length error check below previously
 * read `unlikely(__pyx_kwds_len) < 0`, which expands to `(!!(len)) < 0`
 * and is therefore always false (dead check); the parenthesis has been
 * moved so a negative length from __Pyx_NumKwargs_FASTCALL is detected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_513gpu_instance_set_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  intptr_t __pyx_v_p_scheduler;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_set_vgpu_scheduler_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_p_scheduler,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: parenthesis moved inside unlikely() so the negative-length check is live. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24104, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword arguments present: collect positional slots first, then parse keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24104, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24104, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_set_vgpu_scheduler_state", 0) < (0)) __PYX_ERR(0, 24104, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_set_vgpu_scheduler_state", 1, 2, 2, i); __PYX_ERR(0, 24104, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24104, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24104, __pyx_L3_error)
    }
    /* intptr_t conversion goes through PyLong_AsSsize_t (same width on supported platforms). */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24104, __pyx_L3_error)
    __pyx_v_p_scheduler = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_p_scheduler == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24104, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_set_vgpu_scheduler_state", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24104, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: release collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_set_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_512gpu_instance_set_vgpu_scheduler_state(__pyx_self, __pyx_v_gpu_instance, __pyx_v_p_scheduler);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin Python-level body for `gpu_instance_set_vgpu_scheduler_state`:
 * forwards the two already-converted intptr_t arguments to the cpdef C
 * implementation (with __pyx_skip_dispatch=1, i.e. no Python-level override
 * lookup) and returns its result, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_512gpu_instance_set_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, intptr_t __pyx_v_p_scheduler) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_set_vgpu_scheduler_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return from the cpdef function means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_scheduler_state(__pyx_v_gpu_instance, __pyx_v_p_scheduler, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Transfer ownership of the result into __pyx_r. */
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_set_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24118
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_scheduler_state(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler state for the given GPU instance. The information returned in ``nvmlVgpuSchedulerStateInfo_t`` is not relevant if the BEST EFFORT policy is set.
 * 
*/

/* Forward declaration of the Python-callable wrapper (defined further below);
 * the signature depends on whether the METH_FASTCALL/vectorcall convention
 * is available in this build. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_515gpu_instance_get_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C-level implementation of `gpu_instance_get_vgpu_scheduler_state`:
 * allocates a Python-owned VgpuSchedulerStateInfo_v1 wrapper object, stamps
 * the NVML versioned-struct header on its underlying C struct, calls
 * nvmlGpuInstanceGetVgpuSchedulerState with the GIL released, raises on a
 * non-success status via check_status, and returns the wrapper object. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_state(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *__pyx_v_p_scheduler_state_info_py = 0;
  nvmlVgpuSchedulerStateInfo_t *__pyx_v_p_scheduler_state_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_scheduler_state", 0);

  /* "cuda/bindings/_nvml.pyx":24129
 *     .. seealso:: `nvmlGpuInstanceGetVgpuSchedulerState`
 *     """
 *     cdef VgpuSchedulerStateInfo_v1 p_scheduler_state_info_py = VgpuSchedulerStateInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerStateInfo_t *p_scheduler_state_info = <nvmlVgpuSchedulerStateInfo_t *><intptr_t>(p_scheduler_state_info_py._get_ptr())
 *     p_scheduler_state_info.version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24)
*/
  /* Instantiate the Python wrapper type with no arguments via vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24129, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_scheduler_state_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24130
 *     """
 *     cdef VgpuSchedulerStateInfo_v1 p_scheduler_state_info_py = VgpuSchedulerStateInfo_v1()
 *     cdef nvmlVgpuSchedulerStateInfo_t *p_scheduler_state_info = <nvmlVgpuSchedulerStateInfo_t *><intptr_t>(p_scheduler_state_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_scheduler_state_info.version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* _get_ptr() yields (as intptr_t) the address of the C struct owned by the
   * Python wrapper; the wrapper keeps the memory alive for this call. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)__pyx_v_p_scheduler_state_info_py->__pyx_vtab)->_get_ptr(__pyx_v_p_scheduler_state_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24130, __pyx_L1_error)
  __pyx_v_p_scheduler_state_info = ((nvmlVgpuSchedulerStateInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24131
 *     cdef VgpuSchedulerStateInfo_v1 p_scheduler_state_info_py = VgpuSchedulerStateInfo_v1()
 *     cdef nvmlVgpuSchedulerStateInfo_t *p_scheduler_state_info = <nvmlVgpuSchedulerStateInfo_t *><intptr_t>(p_scheduler_state_info_py._get_ptr())
 *     p_scheduler_state_info.version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerState(<GpuInstance>gpu_instance, p_scheduler_state_info)
*/
  /* NVML versioned-struct header: low bits = payload size, byte 3 = version 1
   * (0x1000000 == 1 << 24).  NOTE(review): the size term is
   * sizeof(nvmlVgpuSchedulerState_v1_t), not the Info struct; this mirrors the
   * .pyx source verbatim — confirm against the NVML version macro for
   * nvmlVgpuSchedulerStateInfo_t. */
  __pyx_v_p_scheduler_state_info->version = ((sizeof(nvmlVgpuSchedulerState_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":24132
 *     cdef nvmlVgpuSchedulerStateInfo_t *p_scheduler_state_info = <nvmlVgpuSchedulerStateInfo_t *><intptr_t>(p_scheduler_state_info_py._get_ptr())
 *     p_scheduler_state_info.version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerState(<GpuInstance>gpu_instance, p_scheduler_state_info)
 *     check_status(__status__)
*/
  /* Release the GIL around the driver call (cython `with nogil:` block). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24133
 *     p_scheduler_state_info.version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerState(<GpuInstance>gpu_instance, p_scheduler_state_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_scheduler_state_info_py
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals that the
         * lazy NVML symbol loader failed; in that case an exception may
         * already be set (checked with the GIL re-acquired). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerState(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_p_scheduler_state_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24133, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24132
 *     cdef nvmlVgpuSchedulerStateInfo_t *p_scheduler_state_info = <nvmlVgpuSchedulerStateInfo_t *><intptr_t>(p_scheduler_state_info_py._get_ptr())
 *     p_scheduler_state_info.version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerState(<GpuInstance>gpu_instance, p_scheduler_state_info)
 *     check_status(__status__)
*/
      /* The GIL is always restored, on both the success and error paths. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24134
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerState(<GpuInstance>gpu_instance, p_scheduler_state_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_scheduler_state_info_py
 * 
*/
  /* check_status returns 1 (with a Python exception set) on failure. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24134, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24135
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerState(<GpuInstance>gpu_instance, p_scheduler_state_info)
 *     check_status(__status__)
 *     return p_scheduler_state_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return a new reference to the populated wrapper object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_scheduler_state_info_py);
  __pyx_r = ((PyObject *)__pyx_v_p_scheduler_state_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24118
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_scheduler_state(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler state for the given GPU instance. The information returned in ``nvmlVgpuSchedulerStateInfo_t`` is not relevant if the BEST EFFORT policy is set.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owned reference; __pyx_r (if set) holds its own. */
  __Pyx_XDECREF((PyObject *)__pyx_v_p_scheduler_state_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering the module-level
 * Python function `gpu_instance_get_vgpu_scheduler_state`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_515gpu_instance_get_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_514gpu_instance_get_vgpu_scheduler_state, "gpu_instance_get_vgpu_scheduler_state(intptr_t gpu_instance)\n\nReturns the vGPU scheduler state for the given GPU instance. The information returned in ``nvmlVgpuSchedulerStateInfo_t`` is not relevant if the BEST EFFORT policy is set.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n\nReturns:\n    nvmlVgpuSchedulerStateInfo_v1_t: Reference in which ``pSchedulerStateInfo`` is returned.\n\n.. seealso:: `nvmlGpuInstanceGetVgpuSchedulerState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_515gpu_instance_get_vgpu_scheduler_state = {"gpu_instance_get_vgpu_scheduler_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_515gpu_instance_get_vgpu_scheduler_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_514gpu_instance_get_vgpu_scheduler_state};
/* Python-callable wrapper for `gpu_instance_get_vgpu_scheduler_state`:
 * parses exactly one positional-or-keyword argument (`gpu_instance`),
 * converts it to intptr_t, and delegates to the pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_515gpu_instance_get_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references to the parsed arguments; released on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_scheduler_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24118, __pyx_L3_error)
    /* Slow path: keyword arguments present — merge positionals and keywords. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24118, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_vgpu_scheduler_state", 0) < (0)) __PYX_ERR(0, 24118, __pyx_L3_error)
      /* Verify every required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_scheduler_state", 1, 1, 1, i); __PYX_ERR(0, 24118, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24118, __pyx_L3_error)
    }
    /* Convert to intptr_t; -1 with an exception set signals failure.
     * NOTE(review): conversion goes through PyLong_AsSsize_t, i.e. it assumes
     * Py_ssize_t and intptr_t have the same width — confirm for all targets. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24118, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_scheduler_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24118, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_514gpu_instance_get_vgpu_scheduler_state(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin Python-level body for `gpu_instance_get_vgpu_scheduler_state`:
 * forwards the converted intptr_t handle to the cpdef C implementation
 * (skip_dispatch=1) and returns its result, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_514gpu_instance_get_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_scheduler_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return from the cpdef function means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_state(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24118, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24138
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_scheduler_log(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler logs for the given GPU instance. ``pSchedulerLogInfo`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.
 * 
*/

/* Forward declaration of the Python-callable wrapper (defined further below);
 * signature depends on the METH_FASTCALL/vectorcall calling convention. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_517gpu_instance_get_vgpu_scheduler_log(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C-level implementation of `gpu_instance_get_vgpu_scheduler_log`:
 * allocates a Python-owned VgpuSchedulerLogInfo_v1 wrapper object, stamps
 * the NVML versioned-struct header (sizeof | version 1 << 24), calls
 * nvmlGpuInstanceGetVgpuSchedulerLog with the GIL released, raises on a
 * non-success status via check_status, and returns the wrapper object. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_log(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *__pyx_v_p_scheduler_log_info_py = 0;
  nvmlVgpuSchedulerLogInfo_t *__pyx_v_p_scheduler_log_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_scheduler_log", 0);

  /* "cuda/bindings/_nvml.pyx":24149
 *     .. seealso:: `nvmlGpuInstanceGetVgpuSchedulerLog`
 *     """
 *     cdef VgpuSchedulerLogInfo_v1 p_scheduler_log_info_py = VgpuSchedulerLogInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLogInfo_t *p_scheduler_log_info = <nvmlVgpuSchedulerLogInfo_t *><intptr_t>(p_scheduler_log_info_py._get_ptr())
 *     p_scheduler_log_info.version = sizeof(nvmlVgpuSchedulerLogInfo_v1_t) | (1 << 24)
*/
  /* Instantiate the Python wrapper type with no arguments via vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24149, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_scheduler_log_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24150
 *     """
 *     cdef VgpuSchedulerLogInfo_v1 p_scheduler_log_info_py = VgpuSchedulerLogInfo_v1()
 *     cdef nvmlVgpuSchedulerLogInfo_t *p_scheduler_log_info = <nvmlVgpuSchedulerLogInfo_t *><intptr_t>(p_scheduler_log_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_scheduler_log_info.version = sizeof(nvmlVgpuSchedulerLogInfo_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* _get_ptr() yields (as intptr_t) the address of the C struct owned by the
   * Python wrapper; the wrapper keeps the memory alive for this call. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)__pyx_v_p_scheduler_log_info_py->__pyx_vtab)->_get_ptr(__pyx_v_p_scheduler_log_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24150, __pyx_L1_error)
  __pyx_v_p_scheduler_log_info = ((nvmlVgpuSchedulerLogInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24151
 *     cdef VgpuSchedulerLogInfo_v1 p_scheduler_log_info_py = VgpuSchedulerLogInfo_v1()
 *     cdef nvmlVgpuSchedulerLogInfo_t *p_scheduler_log_info = <nvmlVgpuSchedulerLogInfo_t *><intptr_t>(p_scheduler_log_info_py._get_ptr())
 *     p_scheduler_log_info.version = sizeof(nvmlVgpuSchedulerLogInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerLog(<GpuInstance>gpu_instance, p_scheduler_log_info)
*/
  /* NVML versioned-struct header: low bits = payload size, byte 3 = version 1
   * (0x1000000 == 1 << 24). */
  __pyx_v_p_scheduler_log_info->version = ((sizeof(nvmlVgpuSchedulerLogInfo_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":24152
 *     cdef nvmlVgpuSchedulerLogInfo_t *p_scheduler_log_info = <nvmlVgpuSchedulerLogInfo_t *><intptr_t>(p_scheduler_log_info_py._get_ptr())
 *     p_scheduler_log_info.version = sizeof(nvmlVgpuSchedulerLogInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerLog(<GpuInstance>gpu_instance, p_scheduler_log_info)
 *     check_status(__status__)
*/
  /* Release the GIL around the driver call (cython `with nogil:` block). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24153
 *     p_scheduler_log_info.version = sizeof(nvmlVgpuSchedulerLogInfo_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerLog(<GpuInstance>gpu_instance, p_scheduler_log_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_scheduler_log_info_py
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals that the
         * lazy NVML symbol loader failed; in that case an exception may
         * already be set (checked with the GIL re-acquired). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerLog(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_p_scheduler_log_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24153, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24152
 *     cdef nvmlVgpuSchedulerLogInfo_t *p_scheduler_log_info = <nvmlVgpuSchedulerLogInfo_t *><intptr_t>(p_scheduler_log_info_py._get_ptr())
 *     p_scheduler_log_info.version = sizeof(nvmlVgpuSchedulerLogInfo_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerLog(<GpuInstance>gpu_instance, p_scheduler_log_info)
 *     check_status(__status__)
*/
      /* The GIL is always restored, on both the success and error paths. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24154
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerLog(<GpuInstance>gpu_instance, p_scheduler_log_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_scheduler_log_info_py
 * 
*/
  /* check_status returns 1 (with a Python exception set) on failure. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24154, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24155
 *         __status__ = nvmlGpuInstanceGetVgpuSchedulerLog(<GpuInstance>gpu_instance, p_scheduler_log_info)
 *     check_status(__status__)
 *     return p_scheduler_log_info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return a new reference to the populated wrapper object. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_scheduler_log_info_py);
  __pyx_r = ((PyObject *)__pyx_v_p_scheduler_log_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24138
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_scheduler_log(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler logs for the given GPU instance. ``pSchedulerLogInfo`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_scheduler_log", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owned reference; __pyx_r (if set) holds its own. */
  __Pyx_XDECREF((PyObject *)__pyx_v_p_scheduler_log_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and PyMethodDef entry registering the module-level
 * Python function `gpu_instance_get_vgpu_scheduler_log`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_517gpu_instance_get_vgpu_scheduler_log(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_516gpu_instance_get_vgpu_scheduler_log, "gpu_instance_get_vgpu_scheduler_log(intptr_t gpu_instance)\n\nReturns the vGPU scheduler logs for the given GPU instance. ``pSchedulerLogInfo`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n\nReturns:\n    nvmlVgpuSchedulerLogInfo_v1_t: Reference in which ``pSchedulerLogInfo`` is written.\n\n.. seealso:: `nvmlGpuInstanceGetVgpuSchedulerLog`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_517gpu_instance_get_vgpu_scheduler_log = {"gpu_instance_get_vgpu_scheduler_log", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_517gpu_instance_get_vgpu_scheduler_log, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_516gpu_instance_get_vgpu_scheduler_log};
/* Python-callable wrapper for `gpu_instance_get_vgpu_scheduler_log`:
 * parses exactly one positional-or-keyword argument (`gpu_instance`),
 * converts it to intptr_t, and delegates to the pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_517gpu_instance_get_vgpu_scheduler_log(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references to the parsed arguments; released on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_scheduler_log (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24138, __pyx_L3_error)
    /* Slow path: keyword arguments present — merge positionals and keywords. */
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24138, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_vgpu_scheduler_log", 0) < (0)) __PYX_ERR(0, 24138, __pyx_L3_error)
      /* Verify every required argument was supplied one way or the other. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_scheduler_log", 1, 1, 1, i); __PYX_ERR(0, 24138, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24138, __pyx_L3_error)
    }
    /* Convert to intptr_t; -1 with an exception set signals failure.
     * NOTE(review): conversion goes through PyLong_AsSsize_t, i.e. it assumes
     * Py_ssize_t and intptr_t have the same width — confirm for all targets. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24138, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_scheduler_log", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24138, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any argument references already taken. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_scheduler_log", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_516gpu_instance_get_vgpu_scheduler_log(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin Python-level body for `gpu_instance_get_vgpu_scheduler_log`:
 * forwards the converted intptr_t handle to the cpdef C implementation
 * (skip_dispatch=1) and returns its result, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_516gpu_instance_get_vgpu_scheduler_log(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_scheduler_log", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return from the cpdef function means a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_log(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24138, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_scheduler_log", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24158
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Get the vGPU heterogeneous mode for the GPU instance.
 * 
*/

/* Forward declaration of the Python-callable wrapper for
 * gpu_instance_get_vgpu_heterogeneous_mode; signature depends on the
 * METH_FASTCALL/vectorcall calling convention. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_519gpu_instance_get_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_heterogeneous_mode(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *__pyx_v_p_heterogeneous_mode_py = 0;
  nvmlVgpuHeterogeneousMode_t *__pyx_v_p_heterogeneous_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_heterogeneous_mode", 0);

  /* "cuda/bindings/_nvml.pyx":24169
 *     .. seealso:: `nvmlGpuInstanceGetVgpuHeterogeneousMode`
 *     """
 *     cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
*/
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24169, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_heterogeneous_mode_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24170
 *     """
 *     cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1()
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())             # <<<<<<<<<<<<<<
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:
*/
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)__pyx_v_p_heterogeneous_mode_py->__pyx_vtab)->_get_ptr(__pyx_v_p_heterogeneous_mode_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24170, __pyx_L1_error)
  __pyx_v_p_heterogeneous_mode = ((nvmlVgpuHeterogeneousMode_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24171
 *     cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1()
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, p_heterogeneous_mode)
*/
  __pyx_v_p_heterogeneous_mode->version = ((sizeof(nvmlVgpuHeterogeneousMode_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":24172
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, p_heterogeneous_mode)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24173
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, p_heterogeneous_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_heterogeneous_mode_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuHeterogeneousMode(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_p_heterogeneous_mode); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24173, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24172
 *     cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = <nvmlVgpuHeterogeneousMode_t *><intptr_t>(p_heterogeneous_mode_py._get_ptr())
 *     p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, p_heterogeneous_mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24174
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, p_heterogeneous_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_heterogeneous_mode_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24174, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24175
 *         __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, p_heterogeneous_mode)
 *     check_status(__status__)
 *     return p_heterogeneous_mode_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_heterogeneous_mode_py);
  __pyx_r = ((PyObject *)__pyx_v_p_heterogeneous_mode_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24158
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Get the vGPU heterogeneous mode for the GPU instance.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_heterogeneous_mode_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_519gpu_instance_get_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_518gpu_instance_get_vgpu_heterogeneous_mode, "gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance)\n\nGet the vGPU heterogeneous mode for the GPU instance.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n\nReturns:\n    nvmlVgpuHeterogeneousMode_v1_t: Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t.\n\n.. seealso:: `nvmlGpuInstanceGetVgpuHeterogeneousMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_519gpu_instance_get_vgpu_heterogeneous_mode = {"gpu_instance_get_vgpu_heterogeneous_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_519gpu_instance_get_vgpu_heterogeneous_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_518gpu_instance_get_vgpu_heterogeneous_mode};
/* Fastcall wrapper for gpu_instance_get_vgpu_heterogeneous_mode: parses the
 * single positional-or-keyword argument `gpu_instance` into an intptr_t and
 * dispatches to the pf body. Returns NULL on argument errors or if the body
 * raises. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_519gpu_instance_get_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_heterogeneous_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE: unlikely(x) evaluates to x, so this parenthesization still
     * compares __pyx_kwds_len < 0 as intended (generated-code quirk). */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24158, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — gather positionals, then let
       * __Pyx_ParseKeywords fill the rest and reject duplicates/unknowns. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24158, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_vgpu_heterogeneous_mode", 0) < (0)) __PYX_ERR(0, 24158, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_heterogeneous_mode", 1, 1, 1, i); __PYX_ERR(0, 24158, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24158, __pyx_L3_error)
    }
    /* Convert to intptr_t; -1 with an exception set signals conversion failure. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24158, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_heterogeneous_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24158, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_518gpu_instance_get_vgpu_heterogeneous_mode(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for gpu_instance_get_vgpu_heterogeneous_mode(gpu_instance).
 * Generated cpdef shim: forwards to the C implementation with
 * __pyx_skip_dispatch=1 and returns its result, or NULL with a traceback
 * frame appended on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_518gpu_instance_get_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_heterogeneous_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C entry point; NULL return signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_heterogeneous_mode(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24178
 * 
 * 
 * cpdef gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, intptr_t p_heterogeneous_mode):             # <<<<<<<<<<<<<<
 *     """Enable or disable vGPU heterogeneous mode for the GPU instance.
 * 
*/

/* Forward declaration of the fastcall Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_521gpu_instance_set_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef gpu_instance_set_vgpu_heterogeneous_mode(
 * gpu_instance, p_heterogeneous_mode). `p_heterogeneous_mode` is a raw
 * caller-supplied pointer (as intptr_t) to an nvmlVgpuHeterogeneousMode_t;
 * the NVML call runs with the GIL released, then check_status raises on a
 * non-success code. Returns None on success, NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_heterogeneous_mode(intptr_t __pyx_v_gpu_instance, intptr_t __pyx_v_p_heterogeneous_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_set_vgpu_heterogeneous_mode", 0);

  /* "cuda/bindings/_nvml.pyx":24187
 *     .. seealso:: `nvmlGpuInstanceSetVgpuHeterogeneousMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceSetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24188
 *     """
 *     with nogil:
 *         __status__ = nvmlGpuInstanceSetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the sentinel the lazy-loading
         * shim returns when the NVML library could not be loaded. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuHeterogeneousMode(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), ((nvmlVgpuHeterogeneousMode_t const *)__pyx_v_p_heterogeneous_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24188, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24187
 *     .. seealso:: `nvmlGpuInstanceSetVgpuHeterogeneousMode`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceSetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil region: reacquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24189
 *     with nogil:
 *         __status__ = nvmlGpuInstanceSetVgpuHeterogeneousMode(<GpuInstance>gpu_instance, <const nvmlVgpuHeterogeneousMode_t*>p_heterogeneous_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status raises a Python exception (returning 1) on non-success. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24189, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24178
 * 
 * 
 * cpdef gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, intptr_t p_heterogeneous_mode):             # <<<<<<<<<<<<<<
 *     """Enable or disable vGPU heterogeneous mode for the GPU instance.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_set_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and method-table entry for the fastcall wrapper of
 * gpu_instance_set_vgpu_heterogeneous_mode. The PyMethodDef is registered on
 * the module during init (elsewhere in this file). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_521gpu_instance_set_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_520gpu_instance_set_vgpu_heterogeneous_mode, "gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, intptr_t p_heterogeneous_mode)\n\nEnable or disable vGPU heterogeneous mode for the GPU instance.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n    p_heterogeneous_mode (intptr_t): Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t.\n\n.. seealso:: `nvmlGpuInstanceSetVgpuHeterogeneousMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_521gpu_instance_set_vgpu_heterogeneous_mode = {"gpu_instance_set_vgpu_heterogeneous_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_521gpu_instance_set_vgpu_heterogeneous_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_520gpu_instance_set_vgpu_heterogeneous_mode};
/* Fastcall wrapper for gpu_instance_set_vgpu_heterogeneous_mode: parses the
 * two positional-or-keyword arguments (`gpu_instance`, `p_heterogeneous_mode`)
 * into intptr_t values and dispatches to the pf body. Returns NULL on argument
 * errors or if the body raises. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_521gpu_instance_set_vgpu_heterogeneous_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  intptr_t __pyx_v_p_heterogeneous_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_set_vgpu_heterogeneous_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_p_heterogeneous_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE: unlikely(x) evaluates to x, so this parenthesization still
     * compares __pyx_kwds_len < 0 as intended (generated-code quirk). */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24178, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present — gather positionals, then let
       * __Pyx_ParseKeywords fill the rest and reject duplicates/unknowns. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24178, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24178, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_set_vgpu_heterogeneous_mode", 0) < (0)) __PYX_ERR(0, 24178, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_set_vgpu_heterogeneous_mode", 1, 2, 2, i); __PYX_ERR(0, 24178, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24178, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24178, __pyx_L3_error)
    }
    /* Convert to intptr_t; -1 with an exception set signals conversion failure. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24178, __pyx_L3_error)
    __pyx_v_p_heterogeneous_mode = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_p_heterogeneous_mode == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24178, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_set_vgpu_heterogeneous_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24178, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_set_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_520gpu_instance_set_vgpu_heterogeneous_mode(__pyx_self, __pyx_v_gpu_instance, __pyx_v_p_heterogeneous_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for gpu_instance_set_vgpu_heterogeneous_mode(
 * gpu_instance, p_heterogeneous_mode). Generated cpdef shim: forwards to the
 * C implementation with __pyx_skip_dispatch=1 and returns its result (None on
 * success), or NULL with a traceback frame appended on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_520gpu_instance_set_vgpu_heterogeneous_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, intptr_t __pyx_v_p_heterogeneous_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_set_vgpu_heterogeneous_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C entry point; NULL return signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_heterogeneous_mode(__pyx_v_gpu_instance, __pyx_v_p_heterogeneous_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_set_vgpu_heterogeneous_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24192
 * 
 * 
 * cpdef str device_get_pgpu_metadata_string(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format.
 * 
*/

/* Forward declaration of the fastcall Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_523device_get_pgpu_metadata_string(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_get_pgpu_metadata_string(device) -> str.
 * Two-phase NVML string query:
 *   1) call nvmlDeviceGetPgpuMetadataString with a NULL buffer to learn the
 *      required size (check_status_size tolerates the size-query status);
 *      return "" if the reported size is 0;
 *   2) allocate a zero-filled Python bytes object of that size, call again
 *      with its writable buffer, then decode via PyUnicode_FromString (stops
 *      at the first NUL). Both NVML calls run with the GIL released.
 * Returns the str on success, NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pgpu_metadata_string(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_buffer_size[1];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_v__pgpu_metadata_ = 0;
  char *__pyx_v_pgpu_metadata;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  char *__pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pgpu_metadata_string", 0);

  /* "cuda/bindings/_nvml.pyx":24200
 *     .. seealso:: `nvmlDeviceGetPgpuMetadataString`
 *     """
 *     cdef unsigned int[1] buffer_size = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)
 */
  /* In/out size parameter for the NVML call, initialized to 0. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_buffer_size[0]), __pyx_t_1, sizeof(__pyx_v_buffer_size[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":24201
 *     """
 *     cdef unsigned int[1] buffer_size = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)
 *     check_status_size(__status__)
 */
  /* Phase 1 (size query): release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24202
 *     cdef unsigned int[1] buffer_size = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if buffer_size[0] == 0:
 */
        /* NULL buffer => NVML writes the required length into buffer_size. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPgpuMetadataString(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), NULL, ((unsigned int *)__pyx_v_buffer_size)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24202, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24201
 *     """
 *     cdef unsigned int[1] buffer_size = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil region: reacquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24203
 *     with nogil:
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if buffer_size[0] == 0:
 *         return ""
 */
  /* check_status_size: status check variant used for size queries; raises
   * (returning 1) on a status it does not accept. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24203, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24204
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)
 *     check_status_size(__status__)
 *     if buffer_size[0] == 0:             # <<<<<<<<<<<<<<
 *         return ""
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])
 */
  /* Nothing to fetch: return the interned empty string. */
  __pyx_t_4 = ((__pyx_v_buffer_size[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":24205
 *     check_status_size(__status__)
 *     if buffer_size[0] == 0:
 *         return ""             # <<<<<<<<<<<<<<
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])
 *     cdef char* pgpu_metadata = _pgpu_metadata_
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__7);
    __pyx_r = __pyx_mstate_global->__pyx_kp_u__7;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":24204
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, NULL, <unsigned int*>buffer_size)
 *     check_status_size(__status__)
 *     if buffer_size[0] == 0:             # <<<<<<<<<<<<<<
 *         return ""
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])
 */
  }

  /* "cuda/bindings/_nvml.pyx":24206
 *     if buffer_size[0] == 0:
 *         return ""
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])             # <<<<<<<<<<<<<<
 *     cdef char* pgpu_metadata = _pgpu_metadata_
 *     with nogil:
 */
  /* bytes(n) allocates an n-byte zero-filled buffer to receive the string. */
  __pyx_t_6 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_buffer_size[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_7};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)(&PyBytes_Type), __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24206, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  __pyx_v__pgpu_metadata_ = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24207
 *         return ""
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])
 *     cdef char* pgpu_metadata = _pgpu_metadata_             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, pgpu_metadata, <unsigned int*>buffer_size)
 */
  /* Borrow a writable char* view of the freshly created (unshared) bytes. */
  __pyx_t_9 = __Pyx_PyBytes_AsWritableString(__pyx_v__pgpu_metadata_); if (unlikely((!__pyx_t_9) && PyErr_Occurred())) __PYX_ERR(0, 24207, __pyx_L1_error)
  __pyx_v_pgpu_metadata = __pyx_t_9;

  /* "cuda/bindings/_nvml.pyx":24208
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])
 *     cdef char* pgpu_metadata = _pgpu_metadata_
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, pgpu_metadata, <unsigned int*>buffer_size)
 *     check_status(__status__)
 */
  /* Phase 2 (fill buffer): release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24209
 *     cdef char* pgpu_metadata = _pgpu_metadata_
 *     with nogil:
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, pgpu_metadata, <unsigned int*>buffer_size)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(pgpu_metadata)
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPgpuMetadataString(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_pgpu_metadata, ((unsigned int *)__pyx_v_buffer_size)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24209, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24208
 *     cdef bytes _pgpu_metadata_ = bytes(buffer_size[0])
 *     cdef char* pgpu_metadata = _pgpu_metadata_
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, pgpu_metadata, <unsigned int*>buffer_size)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        /* Error inside the nogil region: reacquire the GIL before unwinding. */
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24210
 *     with nogil:
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, pgpu_metadata, <unsigned int*>buffer_size)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(pgpu_metadata)
 * 
 */
  /* check_status raises a Python exception (returning 1) on non-success. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24210, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24211
 *         __status__ = nvmlDeviceGetPgpuMetadataString(<Device>device, pgpu_metadata, <unsigned int*>buffer_size)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(pgpu_metadata)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Decode as a C string: PyUnicode_FromString reads up to the first NUL
   * (the docstring says the metadata is ascii-encoded). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = PyUnicode_FromString(__pyx_v_pgpu_metadata); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24211, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24192
 * 
 * 
 * cpdef str device_get_pgpu_metadata_string(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pgpu_metadata_string", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v__pgpu_metadata_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Prototype, docstring, and method-table entry for the fastcall wrapper of
 * device_get_pgpu_metadata_string. The PyMethodDef is registered on the
 * module during init (elsewhere in this file). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_523device_get_pgpu_metadata_string(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_522device_get_pgpu_metadata_string, "device_get_pgpu_metadata_string(intptr_t device) -> str\n\nReturns the properties of the physical GPU indicated by the device in an ascii-encoded string format.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\n.. seealso:: `nvmlDeviceGetPgpuMetadataString`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_523device_get_pgpu_metadata_string = {"device_get_pgpu_metadata_string", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_523device_get_pgpu_metadata_string, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_522device_get_pgpu_metadata_string};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_523device_get_pgpu_metadata_string(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pgpu_metadata_string (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24192, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24192, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pgpu_metadata_string", 0) < (0)) __PYX_ERR(0, 24192, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pgpu_metadata_string", 1, 1, 1, i); __PYX_ERR(0, 24192, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24192, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24192, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pgpu_metadata_string", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24192, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pgpu_metadata_string", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_522device_get_pgpu_metadata_string(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "pf" implementation for device_get_pgpu_metadata_string: a thin trampoline
 * that calls the cpdef C function with __pyx_skip_dispatch=1 (no Python-level
 * override lookup) and returns its new reference, or NULL on exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_522device_get_pgpu_metadata_string(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pgpu_metadata_string", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a raised Python exception in the callee. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_pgpu_metadata_string(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  /* Add a traceback frame for this module-level function, then return NULL. */
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pgpu_metadata_string", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24214
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_log(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU Software scheduler logs. ``pSchedulerLog`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_vgpu_scheduler_log. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_525device_get_vgpu_scheduler_log(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_get_vgpu_scheduler_log: allocate a
 * VgpuSchedulerLog wrapper object, hand its internal C struct pointer
 * (obtained via _get_ptr() as an intptr_t) to nvmlDeviceGetVgpuSchedulerLog
 * with the GIL released, raise via check_status() on a non-success status,
 * and return the populated wrapper object. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_log(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *__pyx_v_p_scheduler_log_py = 0;
  nvmlVgpuSchedulerLog_t *__pyx_v_p_scheduler_log;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_log", 0);

  /* "cuda/bindings/_nvml.pyx":24225
 *     .. seealso:: `nvmlDeviceGetVgpuSchedulerLog`
 *     """
 *     cdef VgpuSchedulerLog p_scheduler_log_py = VgpuSchedulerLog()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerLog_t *p_scheduler_log = <nvmlVgpuSchedulerLog_t *><intptr_t>(p_scheduler_log_py._get_ptr())
 *     with nogil:
*/
  /* Instantiate VgpuSchedulerLog() via the vectorcall fast path (no args). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24225, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_scheduler_log_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24226
 *     """
 *     cdef VgpuSchedulerLog p_scheduler_log_py = VgpuSchedulerLog()
 *     cdef nvmlVgpuSchedulerLog_t *p_scheduler_log = <nvmlVgpuSchedulerLog_t *><intptr_t>(p_scheduler_log_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerLog(<Device>device, p_scheduler_log)
*/
  /* _get_ptr() yields the wrapper's underlying buffer address as intptr_t;
   * cast it back to the NVML struct pointer the C API expects. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)__pyx_v_p_scheduler_log_py->__pyx_vtab)->_get_ptr(__pyx_v_p_scheduler_log_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24226, __pyx_L1_error)
  __pyx_v_p_scheduler_log = ((nvmlVgpuSchedulerLog_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24227
 *     cdef VgpuSchedulerLog p_scheduler_log_py = VgpuSchedulerLog()
 *     cdef nvmlVgpuSchedulerLog_t *p_scheduler_log = <nvmlVgpuSchedulerLog_t *><intptr_t>(p_scheduler_log_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuSchedulerLog(<Device>device, p_scheduler_log)
 *     check_status(__status__)
*/
  /* Release the GIL for the duration of the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24228
 *     cdef nvmlVgpuSchedulerLog_t *p_scheduler_log = <nvmlVgpuSchedulerLog_t *><intptr_t>(p_scheduler_log_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerLog(<Device>device, p_scheduler_log)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_scheduler_log_py
*/
        /* The internal-loading-error sentinel means the NVML symbol could not
         * be resolved; the exception check re-acquires the GIL temporarily. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerLog(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_p_scheduler_log); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24228, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24227
 *     cdef VgpuSchedulerLog p_scheduler_log_py = VgpuSchedulerLog()
 *     cdef nvmlVgpuSchedulerLog_t *p_scheduler_log = <nvmlVgpuSchedulerLog_t *><intptr_t>(p_scheduler_log_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuSchedulerLog(<Device>device, p_scheduler_log)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24229
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerLog(<Device>device, p_scheduler_log)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_scheduler_log_py
 * 
*/
  /* check_status() returns 1 when it raised for a non-success status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24229, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24230
 *         __status__ = nvmlDeviceGetVgpuSchedulerLog(<Device>device, p_scheduler_log)
 *     check_status(__status__)
 *     return p_scheduler_log_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Success: return a new reference to the wrapper NVML just filled in. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_scheduler_log_py);
  __pyx_r = ((PyObject *)__pyx_v_p_scheduler_log_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24214
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_log(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU Software scheduler logs. ``pSchedulerLog`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_log", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_scheduler_log_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython-callable wrapper for device_get_vgpu_scheduler_log.
 * Parses exactly one positional-or-keyword argument ("device") under either
 * the METH_FASTCALL or the tuple calling convention, converts it to intptr_t,
 * and forwards to the "pf" implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_525device_get_vgpu_scheduler_log(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_524device_get_vgpu_scheduler_log, "device_get_vgpu_scheduler_log(intptr_t device)\n\nReturns the vGPU Software scheduler logs. ``pSchedulerLog`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.\n\nArgs:\n    device (intptr_t): The identifier of the target ``device``.\n\nReturns:\n    nvmlVgpuSchedulerLog_t: Reference in which ``pSchedulerLog`` is written.\n\n.. seealso:: `nvmlDeviceGetVgpuSchedulerLog`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_525device_get_vgpu_scheduler_log = {"device_get_vgpu_scheduler_log", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_525device_get_vgpu_scheduler_log, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_524device_get_vgpu_scheduler_log};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_525device_get_vgpu_scheduler_log(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_log (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Under the tuple convention the arg count must be read from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison belongs inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` compared __builtin_expect's 0/1 result
     * against 0, so a negative (error) kwargs count was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24214, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24214, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_scheduler_log", 0) < (0)) __PYX_ERR(0, 24214, __pyx_L3_error)
      /* Verify every required argument was supplied positionally or by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_scheduler_log", 1, 1, 1, i); __PYX_ERR(0, 24214, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24214, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (Py_ssize_t-sized on this target). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24214, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_scheduler_log", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24214, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any borrowed-converted argument refs before returning NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_log", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_524device_get_vgpu_scheduler_log(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "pf" implementation for device_get_vgpu_scheduler_log: a thin trampoline
 * that calls the cpdef C function with __pyx_skip_dispatch=1 (no Python-level
 * override lookup) and returns its new reference, or NULL on exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_524device_get_vgpu_scheduler_log(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_log", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a raised Python exception in the callee. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_log(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  /* Add a traceback frame for this module-level function, then return NULL. */
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_log", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24233
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_state(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler state. The information returned in ``nvmlVgpuSchedulerGetState_t`` is not relevant if the BEST EFFORT policy is set.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_vgpu_scheduler_state. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_527device_get_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef device_get_vgpu_scheduler_state: allocate a
 * VgpuSchedulerGetState wrapper object, hand its internal C struct pointer
 * (obtained via _get_ptr() as an intptr_t) to nvmlDeviceGetVgpuSchedulerState
 * with the GIL released, raise via check_status() on a non-success status,
 * and return the populated wrapper object. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_state(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *__pyx_v_p_scheduler_state_py = 0;
  nvmlVgpuSchedulerGetState_t *__pyx_v_p_scheduler_state;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_state", 0);

  /* "cuda/bindings/_nvml.pyx":24244
 *     .. seealso:: `nvmlDeviceGetVgpuSchedulerState`
 *     """
 *     cdef VgpuSchedulerGetState p_scheduler_state_py = VgpuSchedulerGetState()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerGetState_t *p_scheduler_state = <nvmlVgpuSchedulerGetState_t *><intptr_t>(p_scheduler_state_py._get_ptr())
 *     with nogil:
*/
  /* Instantiate VgpuSchedulerGetState() via the vectorcall fast path (no args). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24244, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_scheduler_state_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24245
 *     """
 *     cdef VgpuSchedulerGetState p_scheduler_state_py = VgpuSchedulerGetState()
 *     cdef nvmlVgpuSchedulerGetState_t *p_scheduler_state = <nvmlVgpuSchedulerGetState_t *><intptr_t>(p_scheduler_state_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerState(<Device>device, p_scheduler_state)
*/
  /* _get_ptr() yields the wrapper's underlying buffer address as intptr_t;
   * cast it back to the NVML struct pointer the C API expects. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)__pyx_v_p_scheduler_state_py->__pyx_vtab)->_get_ptr(__pyx_v_p_scheduler_state_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24245, __pyx_L1_error)
  __pyx_v_p_scheduler_state = ((nvmlVgpuSchedulerGetState_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24246
 *     cdef VgpuSchedulerGetState p_scheduler_state_py = VgpuSchedulerGetState()
 *     cdef nvmlVgpuSchedulerGetState_t *p_scheduler_state = <nvmlVgpuSchedulerGetState_t *><intptr_t>(p_scheduler_state_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuSchedulerState(<Device>device, p_scheduler_state)
 *     check_status(__status__)
*/
  /* Release the GIL for the duration of the NVML call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24247
 *     cdef nvmlVgpuSchedulerGetState_t *p_scheduler_state = <nvmlVgpuSchedulerGetState_t *><intptr_t>(p_scheduler_state_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerState(<Device>device, p_scheduler_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_scheduler_state_py
*/
        /* The internal-loading-error sentinel means the NVML symbol could not
         * be resolved; the exception check re-acquires the GIL temporarily. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_p_scheduler_state); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24247, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24246
 *     cdef VgpuSchedulerGetState p_scheduler_state_py = VgpuSchedulerGetState()
 *     cdef nvmlVgpuSchedulerGetState_t *p_scheduler_state = <nvmlVgpuSchedulerGetState_t *><intptr_t>(p_scheduler_state_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuSchedulerState(<Device>device, p_scheduler_state)
 *     check_status(__status__)
*/
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24248
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerState(<Device>device, p_scheduler_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_scheduler_state_py
 * 
*/
  /* check_status() returns 1 when it raised for a non-success status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24248, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24249
 *         __status__ = nvmlDeviceGetVgpuSchedulerState(<Device>device, p_scheduler_state)
 *     check_status(__status__)
 *     return p_scheduler_state_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Success: return a new reference to the wrapper NVML just filled in. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_scheduler_state_py);
  __pyx_r = ((PyObject *)__pyx_v_p_scheduler_state_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24233
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_state(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler state. The information returned in ``nvmlVgpuSchedulerGetState_t`` is not relevant if the BEST EFFORT policy is set.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_scheduler_state_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* CPython-callable wrapper for device_get_vgpu_scheduler_state.
 * Parses exactly one positional-or-keyword argument ("device") under either
 * the METH_FASTCALL or the tuple calling convention, converts it to intptr_t,
 * and forwards to the "pf" implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_527device_get_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_526device_get_vgpu_scheduler_state, "device_get_vgpu_scheduler_state(intptr_t device)\n\nReturns the vGPU scheduler state. The information returned in ``nvmlVgpuSchedulerGetState_t`` is not relevant if the BEST EFFORT policy is set.\n\nArgs:\n    device (intptr_t): The identifier of the target ``device``.\n\nReturns:\n    nvmlVgpuSchedulerGetState_t: Reference in which ``pSchedulerState`` is returned.\n\n.. seealso:: `nvmlDeviceGetVgpuSchedulerState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_527device_get_vgpu_scheduler_state = {"device_get_vgpu_scheduler_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_527device_get_vgpu_scheduler_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_526device_get_vgpu_scheduler_state};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_527device_get_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  /* Under the tuple convention the arg count must be read from the tuple. */
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison belongs inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` compared __builtin_expect's 0/1 result
     * against 0, so a negative (error) kwargs count was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24233, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24233, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_scheduler_state", 0) < (0)) __PYX_ERR(0, 24233, __pyx_L3_error)
      /* Verify every required argument was supplied positionally or by keyword. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_scheduler_state", 1, 1, 1, i); __PYX_ERR(0, 24233, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24233, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (Py_ssize_t-sized on this target). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24233, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_scheduler_state", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24233, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop any borrowed-converted argument refs before returning NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_526device_get_vgpu_scheduler_state(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "pf" implementation for device_get_vgpu_scheduler_state: a thin trampoline
 * that calls the cpdef C function with __pyx_skip_dispatch=1 (no Python-level
 * override lookup) and returns its new reference, or NULL on exception. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_526device_get_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_state", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return signals a raised Python exception in the callee. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_state(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  /* Add a traceback frame for this module-level function, then return NULL. */
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24252
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_capabilities(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler capabilities. The list of supported vGPU schedulers returned in ``nvmlVgpuSchedulerCapabilities_t`` is from the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies if the engine is Graphics type. The other values in ``nvmlVgpuSchedulerCapabilities_t`` are also applicable if the engine is Graphics type. For other engine types, it is BEST EFFORT policy. If ARR is supported and enabled, scheduling frequency and averaging factor are applicable else timeSlice is applicable.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_529device_get_vgpu_scheduler_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C implementation of device_get_vgpu_scheduler_capabilities.
 * Allocates a VgpuSchedulerCapabilities Python wrapper object, hands the
 * wrapper's internal struct pointer (obtained via _get_ptr()) to
 * nvmlDeviceGetVgpuSchedulerCapabilities with the GIL released, raises a
 * Python exception through check_status() on a non-success NVML status, and
 * returns the populated wrapper object. Cython-generated; do not edit. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_capabilities(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *__pyx_v_p_capabilities_py = 0;
  nvmlVgpuSchedulerCapabilities_t *__pyx_v_p_capabilities;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_capabilities", 0);

  /* "cuda/bindings/_nvml.pyx":24263
 *     .. seealso:: `nvmlDeviceGetVgpuSchedulerCapabilities`
 *     """
 *     cdef VgpuSchedulerCapabilities p_capabilities_py = VgpuSchedulerCapabilities()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuSchedulerCapabilities_t *p_capabilities = <nvmlVgpuSchedulerCapabilities_t *><intptr_t>(p_capabilities_py._get_ptr())
 *     with nogil:
*/
  /* Instantiate the VgpuSchedulerCapabilities wrapper via a zero-argument
   * vectorcall on the extension type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24263, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_p_capabilities_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24264
 *     """
 *     cdef VgpuSchedulerCapabilities p_capabilities_py = VgpuSchedulerCapabilities()
 *     cdef nvmlVgpuSchedulerCapabilities_t *p_capabilities = <nvmlVgpuSchedulerCapabilities_t *><intptr_t>(p_capabilities_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerCapabilities(<Device>device, p_capabilities)
*/
  /* Borrow the wrapper's internal struct pointer (owned by p_capabilities_py,
   * which stays alive until the end of this function). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)__pyx_v_p_capabilities_py->__pyx_vtab)->_get_ptr(__pyx_v_p_capabilities_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24264, __pyx_L1_error)
  __pyx_v_p_capabilities = ((nvmlVgpuSchedulerCapabilities_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24265
 *     cdef VgpuSchedulerCapabilities p_capabilities_py = VgpuSchedulerCapabilities()
 *     cdef nvmlVgpuSchedulerCapabilities_t *p_capabilities = <nvmlVgpuSchedulerCapabilities_t *><intptr_t>(p_capabilities_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuSchedulerCapabilities(<Device>device, p_capabilities)
 *     check_status(__status__)
*/
  /* Call into NVML with the GIL released; the finally block below restores
   * the thread state on both the normal and the error path. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24266
 *     cdef nvmlVgpuSchedulerCapabilities_t *p_capabilities = <nvmlVgpuSchedulerCapabilities_t *><intptr_t>(p_capabilities_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerCapabilities(<Device>device, p_capabilities)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return p_capabilities_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerCapabilities(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_p_capabilities); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24266, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24265
 *     cdef VgpuSchedulerCapabilities p_capabilities_py = VgpuSchedulerCapabilities()
 *     cdef nvmlVgpuSchedulerCapabilities_t *p_capabilities = <nvmlVgpuSchedulerCapabilities_t *><intptr_t>(p_capabilities_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuSchedulerCapabilities(<Device>device, p_capabilities)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24267
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuSchedulerCapabilities(<Device>device, p_capabilities)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return p_capabilities_py
 * 
*/
  /* check_status() raises the mapped Python exception for non-zero status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24267, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24268
 *         __status__ = nvmlDeviceGetVgpuSchedulerCapabilities(<Device>device, p_capabilities)
 *     check_status(__status__)
 *     return p_capabilities_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_p_capabilities_py);
  __pyx_r = ((PyObject *)__pyx_v_p_capabilities_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24252
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_capabilities(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler capabilities. The list of supported vGPU schedulers returned in ``nvmlVgpuSchedulerCapabilities_t`` is from the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies if the engine is Graphics type. The other values in ``nvmlVgpuSchedulerCapabilities_t`` are also applicable if the engine is Graphics type. For other engine types, it is BEST EFFORT policy. If ARR is supported and enabled, scheduling frequency and averaging factor are applicable else timeSlice is applicable.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_p_capabilities_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_529device_get_vgpu_scheduler_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_528device_get_vgpu_scheduler_capabilities, "device_get_vgpu_scheduler_capabilities(intptr_t device)\n\nReturns the vGPU scheduler capabilities. The list of supported vGPU schedulers returned in ``nvmlVgpuSchedulerCapabilities_t`` is from the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies if the engine is Graphics type. The other values in ``nvmlVgpuSchedulerCapabilities_t`` are also applicable if the engine is Graphics type. For other engine types, it is BEST EFFORT policy. If ARR is supported and enabled, scheduling frequency and averaging factor are applicable else timeSlice is applicable.\n\nArgs:\n    device (intptr_t): The identifier of the target ``device``.\n\nReturns:\n    nvmlVgpuSchedulerCapabilities_t: Reference in which ``pCapabilities`` is written.\n\n.. seealso:: `nvmlDeviceGetVgpuSchedulerCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_529device_get_vgpu_scheduler_capabilities = {"device_get_vgpu_scheduler_capabilities", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_529device_get_vgpu_scheduler_capabilities, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_528device_get_vgpu_scheduler_capabilities};
/* CPython entry point for device_get_vgpu_scheduler_capabilities.
 * Unpacks exactly one positional-or-keyword argument (`device`), converts
 * it to intptr_t, and dispatches to the __pyx_pf_ stub. All `values[]`
 * references are released on both the success and error paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_529device_get_vgpu_scheduler_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_capabilities (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` normalizes the value to 0/1 via !!(x)
     * before comparing, so a negative (error) length was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24252, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24252, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_scheduler_capabilities", 0) < (0)) __PYX_ERR(0, 24252, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_scheduler_capabilities", 1, 1, 1, i); __PYX_ERR(0, 24252, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24252, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (Py_ssize_t-sized on supported
     * platforms); -1 with an exception set signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24252, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_scheduler_capabilities", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24252, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_528device_get_vgpu_scheduler_capabilities(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch stub for device_get_vgpu_scheduler_capabilities.
 * Forwards directly to the cpdef C implementation (__pyx_skip_dispatch=1),
 * returning its result or NULL on error via the standard Cython traceback
 * path. Cython-generated code; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_528device_get_vgpu_scheduler_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_scheduler_capabilities", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_capabilities(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24252, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_scheduler_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24271
 * 
 * 
 * cpdef device_set_vgpu_scheduler_state(intptr_t device, intptr_t p_scheduler_state):             # <<<<<<<<<<<<<<
 *     """Sets the vGPU scheduler state.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_531device_set_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C implementation of device_set_vgpu_scheduler_state.
 * Reinterprets the caller-supplied integer `p_scheduler_state` as a pointer
 * to an nvmlVgpuSchedulerSetState_t (the caller owns that memory and is
 * responsible for its validity), calls nvmlDeviceSetVgpuSchedulerState with
 * the GIL released, and raises via check_status() on a non-success status.
 * Returns None on success. Cython-generated code; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_scheduler_state(intptr_t __pyx_v_device, intptr_t __pyx_v_p_scheduler_state, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_vgpu_scheduler_state", 0);

  /* "cuda/bindings/_nvml.pyx":24280
 *     .. seealso:: `nvmlDeviceSetVgpuSchedulerState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVgpuSchedulerState(<Device>device, <nvmlVgpuSchedulerSetState_t*>p_scheduler_state)
 *     check_status(__status__)
*/
  /* NVML call runs with the GIL released; thread state is restored on both
   * the normal and the error path by the finally block below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24281
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetVgpuSchedulerState(<Device>device, <nvmlVgpuSchedulerSetState_t*>p_scheduler_state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuSchedulerState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlVgpuSchedulerSetState_t *)__pyx_v_p_scheduler_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24281, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24280
 *     .. seealso:: `nvmlDeviceSetVgpuSchedulerState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetVgpuSchedulerState(<Device>device, <nvmlVgpuSchedulerSetState_t*>p_scheduler_state)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24282
 *     with nogil:
 *         __status__ = nvmlDeviceSetVgpuSchedulerState(<Device>device, <nvmlVgpuSchedulerSetState_t*>p_scheduler_state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status() raises the mapped Python exception for non-zero status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24282, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24271
 * 
 * 
 * cpdef device_set_vgpu_scheduler_state(intptr_t device, intptr_t p_scheduler_state):             # <<<<<<<<<<<<<<
 *     """Sets the vGPU scheduler state.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_531device_set_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_530device_set_vgpu_scheduler_state, "device_set_vgpu_scheduler_state(intptr_t device, intptr_t p_scheduler_state)\n\nSets the vGPU scheduler state.\n\nArgs:\n    device (intptr_t): The identifier of the target ``device``.\n    p_scheduler_state (intptr_t): vGPU ``p_scheduler_state`` to set.\n\n.. seealso:: `nvmlDeviceSetVgpuSchedulerState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_531device_set_vgpu_scheduler_state = {"device_set_vgpu_scheduler_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_531device_set_vgpu_scheduler_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_530device_set_vgpu_scheduler_state};
/* CPython entry point for device_set_vgpu_scheduler_state.
 * Unpacks exactly two positional-or-keyword arguments (`device`,
 * `p_scheduler_state`), converts both to intptr_t, and dispatches to the
 * __pyx_pf_ stub. `values[]` references are released on all paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_531device_set_vgpu_scheduler_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_p_scheduler_state;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_vgpu_scheduler_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_p_scheduler_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(); the original
     * `unlikely(__pyx_kwds_len) < 0` normalizes the value to 0/1 via !!(x)
     * before comparing, so a negative (error) length was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24271, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24271, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24271, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_vgpu_scheduler_state", 0) < (0)) __PYX_ERR(0, 24271, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_vgpu_scheduler_state", 1, 2, 2, i); __PYX_ERR(0, 24271, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24271, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24271, __pyx_L3_error)
    }
    /* Convert both Python ints to intptr_t; -1 plus a live exception
     * signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24271, __pyx_L3_error)
    __pyx_v_p_scheduler_state = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_p_scheduler_state == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24271, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_vgpu_scheduler_state", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24271, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_530device_set_vgpu_scheduler_state(__pyx_self, __pyx_v_device, __pyx_v_p_scheduler_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch stub for device_set_vgpu_scheduler_state.
 * Forwards directly to the cpdef C implementation (__pyx_skip_dispatch=1),
 * returning its result (None on success) or NULL on error via the standard
 * Cython traceback path. Cython-generated code; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_530device_set_vgpu_scheduler_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_p_scheduler_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_vgpu_scheduler_state", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_scheduler_state(__pyx_v_device, __pyx_v_p_scheduler_state, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24271, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_vgpu_scheduler_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24285
 * 
 * 
 * cpdef set_vgpu_version(intptr_t vgpu_version):             # <<<<<<<<<<<<<<
 *     """Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_533set_vgpu_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* cpdef C implementation of set_vgpu_version.
 * Reinterprets the caller-supplied integer `vgpu_version` as a pointer to an
 * nvmlVgpuVersion_t (caller-owned memory; the caller must keep it valid for
 * the duration of the call), invokes nvmlSetVgpuVersion with the GIL
 * released, and raises via check_status() on a non-success status. Returns
 * None on success. Cython-generated code; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_set_vgpu_version(intptr_t __pyx_v_vgpu_version, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("set_vgpu_version", 0);

  /* "cuda/bindings/_nvml.pyx":24293
 *     .. seealso:: `nvmlSetVgpuVersion`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSetVgpuVersion(<nvmlVgpuVersion_t*>vgpu_version)
 *     check_status(__status__)
*/
  /* NVML call runs with the GIL released; thread state is restored on both
   * the normal and the error path by the finally block below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24294
 *     """
 *     with nogil:
 *         __status__ = nvmlSetVgpuVersion(<nvmlVgpuVersion_t*>vgpu_version)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSetVgpuVersion(((nvmlVgpuVersion_t *)__pyx_v_vgpu_version)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24294, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24293
 *     .. seealso:: `nvmlSetVgpuVersion`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSetVgpuVersion(<nvmlVgpuVersion_t*>vgpu_version)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24295
 *     with nogil:
 *         __status__ = nvmlSetVgpuVersion(<nvmlVgpuVersion_t*>vgpu_version)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status() raises the mapped Python exception for non-zero status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24295, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24285
 * 
 * 
 * cpdef set_vgpu_version(intptr_t vgpu_version):             # <<<<<<<<<<<<<<
 *     """Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.set_vgpu_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_533set_vgpu_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_532set_vgpu_version, "set_vgpu_version(intptr_t vgpu_version)\n\nOverride the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator.\n\nArgs:\n    vgpu_version (intptr_t): Pointer to a caller-supplied range of supported vGPU versions.\n\n.. seealso:: `nvmlSetVgpuVersion`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_533set_vgpu_version = {"set_vgpu_version", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_533set_vgpu_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_532set_vgpu_version};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_533set_vgpu_version(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_vgpu_version;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("set_vgpu_version (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_version,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24285, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24285, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "set_vgpu_version", 0) < (0)) __PYX_ERR(0, 24285, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("set_vgpu_version", 1, 1, 1, i); __PYX_ERR(0, 24285, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24285, __pyx_L3_error)
    }
    __pyx_v_vgpu_version = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_vgpu_version == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24285, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("set_vgpu_version", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24285, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.set_vgpu_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_532set_vgpu_version(__pyx_self, __pyx_v_vgpu_version);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch stub for set_vgpu_version.
 * Forwards directly to the cpdef C implementation (__pyx_skip_dispatch=1),
 * returning its result (None on success) or NULL on error via the standard
 * Cython traceback path. Cython-generated code; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_532set_vgpu_version(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_vgpu_version) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("set_vgpu_version", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_set_vgpu_version(__pyx_v_vgpu_version, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24285, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.set_vgpu_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24298
 * 
 * 
 * cpdef tuple device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves current utilization for vGPUs on a physical GPU (device).
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_535device_get_vgpu_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef function `device_get_vgpu_utilization`
 * (cuda/bindings/_nvml.pyx:24298).  Releases the GIL, calls the cy_nvml
 * trampoline for nvmlDeviceGetVgpuUtilization(), re-acquires the GIL,
 * raises via check_status() on a bad NVML status, and returns the 3-tuple
 * (int(sample_val_type), vgpu_instance_samples_count, utilization_samples).
 * NOTE(review): generated by Cython 3.2.2 — edit the .pyx, not this file.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_utilization(intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__ValueType __pyx_v_sample_val_type;
  unsigned int __pyx_v_vgpu_instance_samples_count;
  nvmlVgpuInstanceUtilizationSample_t __pyx_v_utilization_samples;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":24317
 *     cdef unsigned int vgpu_instance_samples_count
 *     cdef nvmlVgpuInstanceUtilizationSample_t utilization_samples
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuUtilization(<Device>device, last_seen_time_stamp, &sample_val_type, &vgpu_instance_samples_count, &utilization_samples)
 *     check_status(__status__)
 */
  /* GIL is released around the blocking NVML call ("with nogil:" in the .pyx). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24318
 *     cdef nvmlVgpuInstanceUtilizationSample_t utilization_samples
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuUtilization(<Device>device, last_seen_time_stamp, &sample_val_type, &vgpu_instance_samples_count, &utilization_samples)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (<int>sample_val_type, vgpu_instance_samples_count, utilization_samples)
 */
        /* The sentinel status means the NVML shared library failed to load;
           an exception may already be set (checked with the GIL re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_last_seen_time_stamp, (&__pyx_v_sample_val_type), (&__pyx_v_vgpu_instance_samples_count), (&__pyx_v_utilization_samples)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24318, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24317
 *     cdef unsigned int vgpu_instance_samples_count
 *     cdef nvmlVgpuInstanceUtilizationSample_t utilization_samples
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuUtilization(<Device>device, last_seen_time_stamp, &sample_val_type, &vgpu_instance_samples_count, &utilization_samples)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24319
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuUtilization(<Device>device, last_seen_time_stamp, &sample_val_type, &vgpu_instance_samples_count, &utilization_samples)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (<int>sample_val_type, vgpu_instance_samples_count, utilization_samples)
 * 
 */
  /* check_status() returns 1 after raising for a non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24319, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24320
 *         __status__ = nvmlDeviceGetVgpuUtilization(<Device>device, last_seen_time_stamp, &sample_val_type, &vgpu_instance_samples_count, &utilization_samples)
 *     check_status(__status__)
 *     return (<int>sample_val_type, vgpu_instance_samples_count, utilization_samples)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Build the (value_type, count, samples) result tuple; each element's
     reference is transferred into the tuple via SET_ITEM. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_int(((int)__pyx_v_sample_val_type)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_vgpu_instance_samples_count); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __pyx_convert__to_py_nvmlVgpuInstanceUtilizationSample_t(__pyx_v_utilization_samples); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 24320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24320, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 24320, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5) != (0)) __PYX_ERR(0, 24320, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_r = ((PyObject*)__pyx_t_6);
  __pyx_t_6 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24298
 * 
 * 
 * cpdef tuple device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves current utilization for vGPUs on a physical GPU (device).
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_535device_get_vgpu_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_534device_get_vgpu_utilization, "device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp) -> tuple\n\nRetrieves current utilization for vGPUs on a physical GPU (device).\n\nArgs:\n    device (intptr_t): The identifier for the target device.\n    last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp.\n\nReturns:\n    A 3-tuple containing:\n\n    - int: Pointer to caller-supplied buffer to hold the type of returned sample values.\n    - unsigned int: Pointer to caller-supplied array size, and returns number of vGPU instances.\n    - nvmlVgpuInstanceUtilizationSample_t: Pointer to caller-supplied buffer in which vGPU utilization samples are returned.\n\n.. seealso:: `nvmlDeviceGetVgpuUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_535device_get_vgpu_utilization = {"device_get_vgpu_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_535device_get_vgpu_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_534device_get_vgpu_utilization};
/*
 * METH_FASTCALL|METH_KEYWORDS entry point for `device_get_vgpu_utilization`:
 * unpacks the two required arguments (device, last_seen_time_stamp) from
 * positional and/or keyword arguments, converts them to intptr_t and
 * unsigned long long, and forwards to the pf_ implementation.  Returns NULL
 * with an exception set on bad arguments.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_535device_get_vgpu_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fix: the `< 0` belongs inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` compares __builtin_expect's 0/1 result
     * with 0 and can never be true on GCC/Clang, so a negative (error)
     * result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24298, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24298, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_utilization", 0) < (0)) __PYX_ERR(0, 24298, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_utilization", 1, 2, 2, i); __PYX_ERR(0, 24298, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24298, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24298, __pyx_L3_error)
    }
    /* Convert to C values; -1 plus a pending exception signals failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24298, __pyx_L3_error)
    __pyx_v_last_seen_time_stamp = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v_last_seen_time_stamp == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24298, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_utilization", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24298, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_534device_get_vgpu_utilization(__pyx_self, __pyx_v_device, __pyx_v_last_seen_time_stamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level implementation half of `device_get_vgpu_utilization`:
 * forwards the already-unpacked C arguments to the cpdef C implementation
 * with __pyx_skip_dispatch=1 and returns the resulting tuple, or NULL with
 * a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_534device_get_vgpu_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_utilization(__pyx_v_device, __pyx_v_last_seen_time_stamp, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24323
 * 
 * 
 * cpdef tuple device_get_vgpu_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves current utilization for processes running on vGPUs on a physical GPU (device).
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_537device_get_vgpu_process_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef function `device_get_vgpu_process_utilization`
 * (cuda/bindings/_nvml.pyx:24323).  Releases the GIL, calls the cy_nvml
 * trampoline for nvmlDeviceGetVgpuProcessUtilization(), re-acquires the
 * GIL, raises via check_status() on a bad NVML status, and returns the
 * 2-tuple (vgpu_process_samples_count, utilization_samples).
 * NOTE(review): generated by Cython 3.2.2 — edit the .pyx, not this file.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_process_utilization(intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_vgpu_process_samples_count;
  nvmlVgpuProcessUtilizationSample_t __pyx_v_utilization_samples;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_process_utilization", 0);

  /* "cuda/bindings/_nvml.pyx":24340
 *     cdef unsigned int vgpu_process_samples_count
 *     cdef nvmlVgpuProcessUtilizationSample_t utilization_samples
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuProcessUtilization(<Device>device, last_seen_time_stamp, &vgpu_process_samples_count, &utilization_samples)
 *     check_status(__status__)
 */
  /* GIL is released around the blocking NVML call ("with nogil:" in the .pyx). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24341
 *     cdef nvmlVgpuProcessUtilizationSample_t utilization_samples
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuProcessUtilization(<Device>device, last_seen_time_stamp, &vgpu_process_samples_count, &utilization_samples)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (vgpu_process_samples_count, utilization_samples)
 */
        /* The sentinel status means the NVML shared library failed to load;
           an exception may already be set (checked with the GIL re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessUtilization(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_last_seen_time_stamp, (&__pyx_v_vgpu_process_samples_count), (&__pyx_v_utilization_samples)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24341, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24340
 *     cdef unsigned int vgpu_process_samples_count
 *     cdef nvmlVgpuProcessUtilizationSample_t utilization_samples
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuProcessUtilization(<Device>device, last_seen_time_stamp, &vgpu_process_samples_count, &utilization_samples)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24342
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuProcessUtilization(<Device>device, last_seen_time_stamp, &vgpu_process_samples_count, &utilization_samples)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (vgpu_process_samples_count, utilization_samples)
 * 
 */
  /* check_status() returns 1 after raising for a non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24342, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24343
 *         __status__ = nvmlDeviceGetVgpuProcessUtilization(<Device>device, last_seen_time_stamp, &vgpu_process_samples_count, &utilization_samples)
 *     check_status(__status__)
 *     return (vgpu_process_samples_count, utilization_samples)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Build the (count, samples) result tuple; each element's reference is
     transferred into the tuple via SET_ITEM. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_vgpu_process_samples_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __pyx_convert__to_py_nvmlVgpuProcessUtilizationSample_t(__pyx_v_utilization_samples); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24343, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 24343, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24323
 * 
 * 
 * cpdef tuple device_get_vgpu_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves current utilization for processes running on vGPUs on a physical GPU (device).
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_process_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_537device_get_vgpu_process_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_536device_get_vgpu_process_utilization, "device_get_vgpu_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp) -> tuple\n\nRetrieves current utilization for processes running on vGPUs on a physical GPU (device).\n\nArgs:\n    device (intptr_t): The identifier for the target device.\n    last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Pointer to caller-supplied array size, and returns number of processes running on vGPU instances.\n    - nvmlVgpuProcessUtilizationSample_t: Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned.\n\n.. seealso:: `nvmlDeviceGetVgpuProcessUtilization`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_537device_get_vgpu_process_utilization = {"device_get_vgpu_process_utilization", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_537device_get_vgpu_process_utilization, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_536device_get_vgpu_process_utilization};
/*
 * METH_FASTCALL|METH_KEYWORDS entry point for
 * `device_get_vgpu_process_utilization`: unpacks the two required arguments
 * (device, last_seen_time_stamp) from positional and/or keyword arguments,
 * converts them to intptr_t and unsigned long long, and forwards to the
 * pf_ implementation.  Returns NULL with an exception set on bad arguments.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_537device_get_vgpu_process_utilization(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_process_utilization (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fix: the `< 0` belongs inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` compares __builtin_expect's 0/1 result
     * with 0 and can never be true on GCC/Clang, so a negative (error)
     * result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24323, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24323, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24323, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_process_utilization", 0) < (0)) __PYX_ERR(0, 24323, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_process_utilization", 1, 2, 2, i); __PYX_ERR(0, 24323, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24323, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24323, __pyx_L3_error)
    }
    /* Convert to C values; -1 plus a pending exception signals failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24323, __pyx_L3_error)
    __pyx_v_last_seen_time_stamp = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v_last_seen_time_stamp == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24323, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_process_utilization", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24323, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_process_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_536device_get_vgpu_process_utilization(__pyx_self, __pyx_v_device, __pyx_v_last_seen_time_stamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-level implementation half of `device_get_vgpu_process_utilization`:
 * forwards the already-unpacked C arguments to the cpdef C implementation
 * with __pyx_skip_dispatch=1 and returns the resulting tuple, or NULL with
 * a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_536device_get_vgpu_process_utilization(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_process_utilization", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_process_utilization(__pyx_v_device, __pyx_v_last_seen_time_stamp, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24323, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_process_utilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24346
 * 
 * 
 * cpdef int vgpu_instance_get_accounting_mode(unsigned int vgpu_instance) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries the state of per process accounting mode on vGPU.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_539vgpu_instance_get_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level body of the cpdef function `vgpu_instance_get_accounting_mode`
 * (cuda/bindings/_nvml.pyx:24346, declared `except? -1`).  Releases the
 * GIL, calls the cy_nvml trampoline for nvmlVgpuInstanceGetAccountingMode(),
 * re-acquires the GIL, raises via check_status() on a bad NVML status, and
 * returns the mode cast to int.  -1 is the (ambiguous, "except?") error
 * sentinel: callers must also check PyErr_Occurred().
 * NOTE(review): generated by Cython 3.2.2 — edit the .pyx, not this file.
 */
static int __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_mode(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__EnableState __pyx_v_mode;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24358
 *     """
 *     cdef _EnableState mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingMode(<nvmlVgpuInstance_t>vgpu_instance, &mode)
 *     check_status(__status__)
 */
  /* GIL is released around the blocking NVML call ("with nogil:" in the .pyx). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24359
 *     cdef _EnableState mode
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingMode(<nvmlVgpuInstance_t>vgpu_instance, &mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>mode
 */
        /* The sentinel status means the NVML shared library failed to load;
           an exception may already be set (checked with the GIL re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingMode(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), (&__pyx_v_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24359, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24358
 *     """
 *     cdef _EnableState mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingMode(<nvmlVgpuInstance_t>vgpu_instance, &mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24360
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingMode(<nvmlVgpuInstance_t>vgpu_instance, &mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>mode
 * 
 */
  /* check_status() returns 1 after raising for a non-success NVML status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24360, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24361
 *         __status__ = nvmlVgpuInstanceGetAccountingMode(<nvmlVgpuInstance_t>vgpu_instance, &mode)
 *     check_status(__status__)
 *     return <int>mode             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_mode);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24346
 * 
 * 
 * cpdef int vgpu_instance_get_accounting_mode(unsigned int vgpu_instance) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries the state of per process accounting mode on vGPU.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_539vgpu_instance_get_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_538vgpu_instance_get_accounting_mode, "vgpu_instance_get_accounting_mode(unsigned int vgpu_instance) -> int\n\nQueries the state of per process accounting mode on vGPU.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n\nReturns:\n    int: Reference in which to return the current accounting mode.\n\n.. seealso:: `nvmlVgpuInstanceGetAccountingMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_539vgpu_instance_get_accounting_mode = {"vgpu_instance_get_accounting_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_539vgpu_instance_get_accounting_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_538vgpu_instance_get_accounting_mode};
/*
 * METH_FASTCALL|METH_KEYWORDS entry point for
 * `vgpu_instance_get_accounting_mode`: unpacks the single required argument
 * (vgpu_instance) from positional and/or keyword arguments, converts it to
 * unsigned int, and forwards to the pf_ implementation.  Returns NULL with
 * an exception set on bad arguments.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_539vgpu_instance_get_accounting_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fix: the `< 0` belongs inside unlikely().  The previous form
     * `unlikely(__pyx_kwds_len) < 0` compares __builtin_expect's 0/1 result
     * with 0 and can never be true on GCC/Clang, so a negative (error)
     * result from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24346, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24346, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_accounting_mode", 0) < (0)) __PYX_ERR(0, 24346, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_accounting_mode", 1, 1, 1, i); __PYX_ERR(0, 24346, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24346, __pyx_L3_error)
    }
    /* Convert to a C value; (unsigned int)-1 plus a pending exception signals failure. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24346, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_accounting_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24346, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_538vgpu_instance_get_accounting_mode(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible trampoline for `vgpu_instance_get_accounting_mode`:
 * forwards to the C-level cpdef implementation (skip_dispatch=1) and
 * boxes the resulting C int as a Python integer.  Returns NULL with a
 * Python exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_538vgpu_instance_get_accounting_mode(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_mode_c;                /* raw mode value from the C implementation */
  PyObject *__pyx_mode_obj = NULL; /* boxed Python int handed back to the caller */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 doubles as the error sentinel; PyErr_Occurred() disambiguates a
   * genuine -1 result from a raised exception. */
  __pyx_mode_c = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_mode(__pyx_v_vgpu_instance, 1);
  if (unlikely(__pyx_mode_c == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24346, __pyx_L1_error)
  __pyx_mode_obj = __Pyx_PyLong_From_int(__pyx_mode_c);
  if (unlikely(!__pyx_mode_obj)) __PYX_ERR(0, 24346, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mode_obj);
  __pyx_r = __pyx_mode_obj;
  __pyx_mode_obj = 0;
  goto __pyx_L0;

  /* error path: release any owned temporary, record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_mode_obj);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24364
 * 
 * 
 * cpdef object vgpu_instance_get_accounting_pids(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes returned can be in running or terminated state.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_541vgpu_instance_get_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `vgpu_instance_get_accounting_pids`
 * (cuda/bindings/_nvml.pyx:24364).  Two-phase NVML query: the first call
 * passes a NULL PID buffer so NVML only reports the count; the buffer is
 * then allocated as a Cython `view.array` of unsigned int ("I" format,
 * C-contiguous) and the call repeated to fill it.  The GIL is released
 * around both NVML calls.  Returns the filled array (or a zero-length
 * slice when no accounting processes exist); NULL return means a Python
 * exception was raised. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_pids(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_pids = 0;
  unsigned int *__pyx_v_pids_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_pids", 0);

  /* "cuda/bindings/_nvml.pyx":24372
 *     .. seealso:: `nvmlVgpuInstanceGetAccountingPids`
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)
 */
  /* count[0] starts at 0; the stack temporary is copied in as Cython's
   * translation of the array-literal initializer. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":24373
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24374
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if count[0] == 0:
 */
        /* First call: NULL pids buffer -> only the process count is written
         * into count[0].  The loading-error sentinel triggers an exception
         * check (which re-acquires the GIL internally). */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingPids(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), ((unsigned int *)__pyx_v_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24374, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24373
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error exit path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24375
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 */
  /* check_status_size raises on any status that is not acceptable for a
   * size-query call; a return of 1 signals a raised Python exception. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24375, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24376
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":24377
 *     check_status_size(__status__)
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 */
    /* Empty result: view.array cannot be zero-length, so build a 1-element
     * array and return the [:0] slice of it.  The kwargs (shape, itemsize,
     * format, mode) are assembled through Cython's vectorcall builder. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24377, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24377, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 24377, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 24377, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 24377, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 24377, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24377, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    /* Slice [:0] of the 1-element array yields the empty return value. */
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24377, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":24376
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":24378
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:
 */
  /* Non-empty result: allocate a count[0]-element unsigned-int array as the
   * output buffer for the second NVML call. */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 24378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 24378, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 24378, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 24378, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 24378, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 24378, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 24378, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24378, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_pids = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":24379
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, pids_ptr)
 */
  /* Raw pointer into the array's buffer, captured while holding the GIL so
   * it can be used inside the following nogil section. */
  __pyx_v_pids_ptr = ((unsigned int *)__pyx_v_pids->data);

  /* "cuda/bindings/_nvml.pyx":24380
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24381
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, pids_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pids
 */
        /* Second call: same query, now with the real output buffer. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingPids(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), ((unsigned int *)__pyx_v_count), __pyx_v_pids_ptr); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24381, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24380
 *     cdef view.array pids = view.array(shape=(count[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     cdef unsigned int *pids_ptr = <unsigned int *>(pids.data)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24382
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pids
 * 
 */
  /* Full (strict) status check for the data-fetching call. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24382, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24383
 *         __status__ = nvmlVgpuInstanceGetAccountingPids(<nvmlVgpuInstance_t>vgpu_instance, <unsigned int*>count, pids_ptr)
 *     check_status(__status__)
 *     return pids             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pids);
  __pyx_r = ((PyObject *)__pyx_v_pids);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24364
 * 
 * 
 * cpdef object vgpu_instance_get_accounting_pids(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes returned can be in running or terminated state.
 * 
 */

  /* function exit code */
  /* Error path: drop every temporary that may still be owned, then record
   * a synthetic traceback frame for the Python-level function. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pids);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_541vgpu_instance_get_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_540vgpu_instance_get_accounting_pids, "vgpu_instance_get_accounting_pids(unsigned int vgpu_instance)\n\nQueries list of processes running on vGPU that can be queried for accounting stats. The list of processes returned can be in running or terminated state.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceGetAccountingPids`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_541vgpu_instance_get_accounting_pids = {"vgpu_instance_get_accounting_pids", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_541vgpu_instance_get_accounting_pids, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_540vgpu_instance_get_accounting_pids};
/* Argument-parsing wrapper (METH_FASTCALL|METH_KEYWORDS): unpacks the single
 * required `vgpu_instance` argument from positional/keyword form, converts it
 * to unsigned int, and forwards to the __pyx_pf_ implementation.  Returns
 * NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_541vgpu_instance_get_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned refs to unpacked arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_pids (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: previously written as `unlikely(__pyx_kwds_len) < 0`, which under
     * GCC/Clang expands to `__builtin_expect(!!(len), 0) < 0` — always false,
     * so the negative-length error was never detected.  The comparison now
     * sits inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24364, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24364, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_accounting_pids", 0) < (0)) __PYX_ERR(0, 24364, __pyx_L3_error)
      /* Verify every required argument ended up populated. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_accounting_pids", 1, 1, 1, i); __PYX_ERR(0, 24364, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24364, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24364, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_accounting_pids", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24364, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release collected refs and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_540vgpu_instance_get_accounting_pids(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible trampoline for `vgpu_instance_get_accounting_pids`:
 * delegates to the C-level cpdef implementation (skip_dispatch=1) and
 * passes its object result straight through.  Returns NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_540vgpu_instance_get_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_result = NULL; /* owned result from the C implementation */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_pids", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result means the implementation already raised. */
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_pids(__pyx_v_vgpu_instance, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 24364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  __pyx_r = __pyx_result;
  __pyx_result = 0;
  goto __pyx_L0;

  /* error path: release any owned temporary, record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24386
 * 
 * 
 * cpdef object vgpu_instance_get_accounting_stats(unsigned int vgpu_instance, unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Queries process's accounting stats.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_543vgpu_instance_get_accounting_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `vgpu_instance_get_accounting_stats`
 * (cuda/bindings/_nvml.pyx:24386).  Allocates a Python-level
 * AccountingStats wrapper, obtains the raw nvmlAccountingStats_t pointer it
 * wraps via its `_get_ptr` vtable slot, fills it through
 * nvmlVgpuInstanceGetAccountingStats with the GIL released, and returns the
 * wrapper object.  NULL return signals a raised Python exception. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_stats(unsigned int __pyx_v_vgpu_instance, unsigned int __pyx_v_pid, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *__pyx_v_stats_py = 0;
  nvmlAccountingStats_t *__pyx_v_stats;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_stats", 0);

  /* "cuda/bindings/_nvml.pyx":24398
 *     .. seealso:: `nvmlVgpuInstanceGetAccountingStats`
 *     """
 *     cdef AccountingStats stats_py = AccountingStats()             # <<<<<<<<<<<<<<
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the AccountingStats extension type with no arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24398, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_stats_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24399
 *     """
 *     cdef AccountingStats stats_py = AccountingStats()
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingStats(<nvmlVgpuInstance_t>vgpu_instance, pid, stats)
 */
  /* _get_ptr (via the extension type's vtable) yields the address of the
   * wrapped C struct as an intptr_t; cast it back to the struct pointer. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_AccountingStats *)__pyx_v_stats_py->__pyx_vtab)->_get_ptr(__pyx_v_stats_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24399, __pyx_L1_error)
  __pyx_v_stats = ((nvmlAccountingStats_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24400
 *     cdef AccountingStats stats_py = AccountingStats()
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingStats(<nvmlVgpuInstance_t>vgpu_instance, pid, stats)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24401
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingStats(<nvmlVgpuInstance_t>vgpu_instance, pid, stats)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return stats_py
 */
        /* The loading-error sentinel triggers an exception check (which
         * re-acquires the GIL internally). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingStats(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_pid, __pyx_v_stats); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24401, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24400
 *     cdef AccountingStats stats_py = AccountingStats()
 *     cdef nvmlAccountingStats_t *stats = <nvmlAccountingStats_t *><intptr_t>(stats_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetAccountingStats(<nvmlVgpuInstance_t>vgpu_instance, pid, stats)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error exit path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24402
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetAccountingStats(<nvmlVgpuInstance_t>vgpu_instance, pid, stats)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return stats_py
 * 
 */
  /* check_status raises on any non-success NVML status; a return of 1
   * signals a raised Python exception. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24402, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24403
 *         __status__ = nvmlVgpuInstanceGetAccountingStats(<nvmlVgpuInstance_t>vgpu_instance, pid, stats)
 *     check_status(__status__)
 *     return stats_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_stats_py);
  __pyx_r = ((PyObject *)__pyx_v_stats_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24386
 * 
 * 
 * cpdef object vgpu_instance_get_accounting_stats(unsigned int vgpu_instance, unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Queries process's accounting stats.
 * 
 */

  /* function exit code */
  /* Error path: drop any temporaries still owned, record traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_stats_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_543vgpu_instance_get_accounting_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_542vgpu_instance_get_accounting_stats, "vgpu_instance_get_accounting_stats(unsigned int vgpu_instance, unsigned int pid)\n\nQueries process's accounting stats.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n    pid (unsigned int): Process Id of the target process to query stats for.\n\nReturns:\n    nvmlAccountingStats_t: Reference in which to return the process's accounting stats.\n\n.. seealso:: `nvmlVgpuInstanceGetAccountingStats`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_543vgpu_instance_get_accounting_stats = {"vgpu_instance_get_accounting_stats", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_543vgpu_instance_get_accounting_stats, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_542vgpu_instance_get_accounting_stats};
/* Argument-parsing wrapper (METH_FASTCALL|METH_KEYWORDS): unpacks the two
 * required arguments `vgpu_instance` and `pid` from positional/keyword form,
 * converts both to unsigned int, and forwards to the __pyx_pf_
 * implementation.  Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_543vgpu_instance_get_accounting_stats(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  unsigned int __pyx_v_pid;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};  /* owned refs to unpacked arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_stats (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,&__pyx_mstate_global->__pyx_n_u_pid,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: previously written as `unlikely(__pyx_kwds_len) < 0`, which under
     * GCC/Clang expands to `__builtin_expect(!!(len), 0) < 0` — always false,
     * so the negative-length error was never detected.  The comparison now
     * sits inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24386, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional args first, then merge keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24386, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24386, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_accounting_stats", 0) < (0)) __PYX_ERR(0, 24386, __pyx_L3_error)
      /* Verify every required argument ended up populated. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_accounting_stats", 1, 2, 2, i); __PYX_ERR(0, 24386, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24386, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24386, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24386, __pyx_L3_error)
    __pyx_v_pid = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_pid == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24386, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_accounting_stats", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24386, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release collected refs and report. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_542vgpu_instance_get_accounting_stats(__pyx_self, __pyx_v_vgpu_instance, __pyx_v_pid);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body behind the Python wrapper of `vgpu_instance_get_accounting_stats`.
 * Simply forwards both already-converted unsigned-int arguments to the
 * cpdef C-level function with skip_dispatch=1 (direct call, no Python-level
 * dispatch) and propagates its result (new reference, or NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_542vgpu_instance_get_accounting_stats(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance, unsigned int __pyx_v_pid) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_accounting_stats", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; returns a new reference or NULL with an exception set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_stats(__pyx_v_vgpu_instance, __pyx_v_pid, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24386, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_accounting_stats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24406
 * 
 * 
 * cpdef vgpu_instance_clear_accounting_pids(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Clears accounting information of the vGPU instance that have already terminated.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_545vgpu_instance_clear_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `vgpu_instance_clear_accounting_pids`.
 * Releases the GIL, calls the (lazily loaded) NVML entry point
 * nvmlVgpuInstanceClearAccountingPids, re-acquires the GIL, then raises via
 * check_status() if the NVML status is an error.  Returns None on success,
 * NULL with an exception set on failure.
 * NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a sentinel
 * for "driver symbol failed to load"; the pending Python error is checked only
 * in that case — presumably set by the cy_nvml loader.  Confirm against cy_nvml. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_clear_accounting_pids(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_clear_accounting_pids", 0);

  /* "cuda/bindings/_nvml.pyx":24414
 *     .. seealso:: `nvmlVgpuInstanceClearAccountingPids`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceClearAccountingPids(<nvmlVgpuInstance_t>vgpu_instance)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24415
 *     """
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceClearAccountingPids(<nvmlVgpuInstance_t>vgpu_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceClearAccountingPids(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24415, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24414
 *     .. seealso:: `nvmlVgpuInstanceClearAccountingPids`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceClearAccountingPids(<nvmlVgpuInstance_t>vgpu_instance)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24416
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceClearAccountingPids(<nvmlVgpuInstance_t>vgpu_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status() returns 1 when it raised an NVML error exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24416, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24406
 * 
 * 
 * cpdef vgpu_instance_clear_accounting_pids(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Clears accounting information of the vGPU instance that have already terminated.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_clear_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_545vgpu_instance_clear_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_544vgpu_instance_clear_accounting_pids, "vgpu_instance_clear_accounting_pids(unsigned int vgpu_instance)\n\nClears accounting information of the vGPU instance that have already terminated.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n\n.. seealso:: `nvmlVgpuInstanceClearAccountingPids`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_545vgpu_instance_clear_accounting_pids = {"vgpu_instance_clear_accounting_pids", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_545vgpu_instance_clear_accounting_pids, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_544vgpu_instance_clear_accounting_pids};
/* Python wrapper for `vgpu_instance_clear_accounting_pids`.
 * Parses exactly one positional-or-keyword argument (`vgpu_instance`),
 * converts it to unsigned int, and delegates to the impl function.
 * Every non-NULL slot of values[] holds a new reference and is released
 * on both the success and the error exit paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_545vgpu_instance_clear_accounting_pids(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_clear_accounting_pids (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside the unlikely() hint.  With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only 0 or 1,
     * so the original `unlikely(__pyx_kwds_len) < 0` was always false and a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24406, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stage positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24406, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_clear_accounting_pids", 0) < (0)) __PYX_ERR(0, 24406, __pyx_L3_error)
      /* Any required slot still unfilled after keyword merging is a missing argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_clear_accounting_pids", 1, 1, 1, i); __PYX_ERR(0, 24406, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional fast path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24406, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24406, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_clear_accounting_pids", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24406, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_clear_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_544vgpu_instance_clear_accounting_pids(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body behind the Python wrapper of `vgpu_instance_clear_accounting_pids`.
 * Forwards the converted argument to the cpdef C-level function with
 * skip_dispatch=1 and propagates its result (new reference, or NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_544vgpu_instance_clear_accounting_pids(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_clear_accounting_pids", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; returns a new reference or NULL with an exception set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_clear_accounting_pids(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_clear_accounting_pids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24419
 * 
 * 
 * cpdef object vgpu_instance_get_license_info_v2(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the license information of the vGPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_547vgpu_instance_get_license_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `vgpu_instance_get_license_info_v2`.
 * Allocates a Python-owned VgpuLicenseInfo wrapper object, obtains the raw
 * struct pointer it owns via _get_ptr() (returned as an intptr_t), fills that
 * struct with the GIL released via nvmlVgpuInstanceGetLicenseInfo_v2, raises
 * through check_status() on a non-success NVML status, and returns the wrapper
 * object (new reference).  Returns NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_info_v2(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *__pyx_v_license_info_py = 0;
  nvmlVgpuLicenseInfo_t *__pyx_v_license_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_license_info_v2", 0);

  /* "cuda/bindings/_nvml.pyx":24430
 *     .. seealso:: `nvmlVgpuInstanceGetLicenseInfo_v2`
 *     """
 *     cdef VgpuLicenseInfo license_info_py = VgpuLicenseInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuLicenseInfo_t *license_info = <nvmlVgpuLicenseInfo_t *><intptr_t>(license_info_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the VgpuLicenseInfo extension type via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24430, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_license_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24431
 *     """
 *     cdef VgpuLicenseInfo license_info_py = VgpuLicenseInfo()
 *     cdef nvmlVgpuLicenseInfo_t *license_info = <nvmlVgpuLicenseInfo_t *><intptr_t>(license_info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetLicenseInfo_v2(<nvmlVgpuInstance_t>vgpu_instance, license_info)
 */
  /* The wrapper exposes its internal struct as an integer address; the struct's
   * lifetime is tied to license_info_py, which stays alive for this whole call. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)__pyx_v_license_info_py->__pyx_vtab)->_get_ptr(__pyx_v_license_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24431, __pyx_L1_error)
  __pyx_v_license_info = ((nvmlVgpuLicenseInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24432
 *     cdef VgpuLicenseInfo license_info_py = VgpuLicenseInfo()
 *     cdef nvmlVgpuLicenseInfo_t *license_info = <nvmlVgpuLicenseInfo_t *><intptr_t>(license_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetLicenseInfo_v2(<nvmlVgpuInstance_t>vgpu_instance, license_info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24433
 *     cdef nvmlVgpuLicenseInfo_t *license_info = <nvmlVgpuLicenseInfo_t *><intptr_t>(license_info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetLicenseInfo_v2(<nvmlVgpuInstance_t>vgpu_instance, license_info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return license_info_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseInfo_v2(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_license_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24433, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24432
 *     cdef VgpuLicenseInfo license_info_py = VgpuLicenseInfo()
 *     cdef nvmlVgpuLicenseInfo_t *license_info = <nvmlVgpuLicenseInfo_t *><intptr_t>(license_info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetLicenseInfo_v2(<nvmlVgpuInstance_t>vgpu_instance, license_info)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24434
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetLicenseInfo_v2(<nvmlVgpuInstance_t>vgpu_instance, license_info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return license_info_py
 * 
 */
  /* check_status() returns 1 when it raised an NVML error exception. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24434, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24435
 *         __status__ = nvmlVgpuInstanceGetLicenseInfo_v2(<nvmlVgpuInstance_t>vgpu_instance, license_info)
 *     check_status(__status__)
 *     return license_info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_license_info_py);
  __pyx_r = ((PyObject *)__pyx_v_license_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24419
 * 
 * 
 * cpdef object vgpu_instance_get_license_info_v2(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the license information of the vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_license_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_license_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_547vgpu_instance_get_license_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_546vgpu_instance_get_license_info_v2, "vgpu_instance_get_license_info_v2(unsigned int vgpu_instance)\n\nQuery the license information of the vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): Identifier of the target vGPU instance.\n\nReturns:\n    nvmlVgpuLicenseInfo_t: Pointer to vGPU license information structure.\n\n.. seealso:: `nvmlVgpuInstanceGetLicenseInfo_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_547vgpu_instance_get_license_info_v2 = {"vgpu_instance_get_license_info_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_547vgpu_instance_get_license_info_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_546vgpu_instance_get_license_info_v2};
/* Python wrapper for `vgpu_instance_get_license_info_v2`.
 * Parses exactly one positional-or-keyword argument (`vgpu_instance`),
 * converts it to unsigned int, and delegates to the impl function.
 * Every non-NULL slot of values[] holds a new reference and is released
 * on both the success and the error exit paths. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_547vgpu_instance_get_license_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_license_info_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside the unlikely() hint.  With GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only 0 or 1,
     * so the original `unlikely(__pyx_kwds_len) < 0` was always false and a
     * negative (error) return from __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24419, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: stage positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24419, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_license_info_v2", 0) < (0)) __PYX_ERR(0, 24419, __pyx_L3_error)
      /* Any required slot still unfilled after keyword merging is a missing argument. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_license_info_v2", 1, 1, 1, i); __PYX_ERR(0, 24419, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional fast path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24419, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24419, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_license_info_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24419, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_license_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_546vgpu_instance_get_license_info_v2(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body behind the Python wrapper of `vgpu_instance_get_license_info_v2`.
 * Forwards the converted argument to the cpdef C-level function with
 * skip_dispatch=1 and propagates its result (new reference, or NULL on error). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_546vgpu_instance_get_license_info_v2(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_license_info_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; returns a new reference or NULL with an exception set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_info_v2(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24419, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_license_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24438
 * 
 * 
 * cpdef unsigned int get_excluded_device_count() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of excluded GPU devices in the system.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_549get_excluded_device_count(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C-level body of the cpdef `get_excluded_device_count` (declared `except? 0`).
 * Calls nvmlGetExcludedDeviceCount with the GIL released and returns the count.
 * Because the error sentinel 0 is also a legal count, callers must combine a
 * zero return with a PyErr_Occurred() check to distinguish error from "none". */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_count(CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_device_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24447
 *     """
 *     cdef unsigned int device_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetExcludedDeviceCount(&device_count)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24448
 *     cdef unsigned int device_count
 *     with nogil:
 *         __status__ = nvmlGetExcludedDeviceCount(&device_count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return device_count
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceCount((&__pyx_v_device_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24448, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24447
 *     """
 *     cdef unsigned int device_count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetExcludedDeviceCount(&device_count)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24449
 *     with nogil:
 *         __status__ = nvmlGetExcludedDeviceCount(&device_count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return device_count
 * 
 */
  /* check_status() returns 1 when it raised an NVML error exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24449, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24450
 *         __status__ = nvmlGetExcludedDeviceCount(&device_count)
 *     check_status(__status__)
 *     return device_count             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_device_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24438
 * 
 * 
 * cpdef unsigned int get_excluded_device_count() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of excluded GPU devices in the system.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.get_excluded_device_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_549get_excluded_device_count(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_548get_excluded_device_count, "get_excluded_device_count() -> unsigned int\n\nRetrieves the number of excluded GPU devices in the system.\n\nReturns:\n    unsigned int: Reference in which to return the number of excluded devices.\n\n.. seealso:: `nvmlGetExcludedDeviceCount`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_549get_excluded_device_count = {"get_excluded_device_count", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_549get_excluded_device_count, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_548get_excluded_device_count};
/* METH_NOARGS Python wrapper for `get_excluded_device_count`: no argument
 * parsing needed; forwards straight to the impl function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_549get_excluded_device_count(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_excluded_device_count (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this function;
   * this only compiles because __Pyx_KwValues_VARARGS is presumably a macro that
   * ignores its arguments (e.g. expands to NULL) — confirm against the Cython
   * utility-code definition before touching this line. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_548get_excluded_device_count(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl body behind the Python wrapper of `get_excluded_device_count`.
 * Calls the cpdef C function (skip_dispatch=1); because it is declared
 * `except? 0`, a 0 return is only an error when PyErr_Occurred() — hence the
 * combined check below.  Converts the count to a Python int on success. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_548get_excluded_device_count(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_excluded_device_count", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_count(1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24438, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_excluded_device_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24453
 * 
 * 
 * cpdef object get_excluded_device_info_by_index(unsigned int ind_ex):             # <<<<<<<<<<<<<<
 *     """Acquire the device information for an excluded GPU device, based on its ind_ex.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_551get_excluded_device_info_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `get_excluded_device_info_by_index`.
 * Allocates a Python-owned ExcludedDeviceInfo wrapper object, obtains the raw
 * struct pointer it owns via _get_ptr() (returned as an intptr_t), fills that
 * struct with the GIL released via nvmlGetExcludedDeviceInfoByIndex, raises
 * through check_status() on a non-success NVML status, and returns the wrapper
 * object (new reference).  Returns NULL with an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_info_by_index(unsigned int __pyx_v_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *__pyx_v_info_py = 0;
  nvmlExcludedDeviceInfo_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_excluded_device_info_by_index", 0);

  /* "cuda/bindings/_nvml.pyx":24464
 *     .. seealso:: `nvmlGetExcludedDeviceInfoByIndex`
 *     """
 *     cdef ExcludedDeviceInfo info_py = ExcludedDeviceInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlExcludedDeviceInfo_t *info = <nvmlExcludedDeviceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
 */
  /* Instantiate the ExcludedDeviceInfo extension type via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24464, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24465
 *     """
 *     cdef ExcludedDeviceInfo info_py = ExcludedDeviceInfo()
 *     cdef nvmlExcludedDeviceInfo_t *info = <nvmlExcludedDeviceInfo_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGetExcludedDeviceInfoByIndex(ind_ex, info)
 */
  /* The wrapper exposes its internal struct as an integer address; the struct's
   * lifetime is tied to info_py, which stays alive for this whole call. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24465, __pyx_L1_error)
  __pyx_v_info = ((nvmlExcludedDeviceInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24466
 *     cdef ExcludedDeviceInfo info_py = ExcludedDeviceInfo()
 *     cdef nvmlExcludedDeviceInfo_t *info = <nvmlExcludedDeviceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetExcludedDeviceInfoByIndex(ind_ex, info)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24467
 *     cdef nvmlExcludedDeviceInfo_t *info = <nvmlExcludedDeviceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlGetExcludedDeviceInfoByIndex(ind_ex, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceInfoByIndex(__pyx_v_ind_ex, __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24467, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24466
 *     cdef ExcludedDeviceInfo info_py = ExcludedDeviceInfo()
 *     cdef nvmlExcludedDeviceInfo_t *info = <nvmlExcludedDeviceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetExcludedDeviceInfoByIndex(ind_ex, info)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24468
 *     with nogil:
 *         __status__ = nvmlGetExcludedDeviceInfoByIndex(ind_ex, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
 */
  /* check_status() returns 1 when it raised an NVML error exception. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24468, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24469
 *         __status__ = nvmlGetExcludedDeviceInfoByIndex(ind_ex, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24453
 * 
 * 
 * cpdef object get_excluded_device_info_by_index(unsigned int ind_ex):             # <<<<<<<<<<<<<<
 *     """Acquire the device information for an excluded GPU device, based on its ind_ex.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_excluded_device_info_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration of the Python-visible entry point for
 * get_excluded_device_info_by_index; the two signatures select between the
 * METH_FASTCALL (arg vector) and tuple-based calling conventions. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_551get_excluded_device_info_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry exposing the wrapper as
 * "get_excluded_device_info_by_index" on the module. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_550get_excluded_device_info_by_index, "get_excluded_device_info_by_index(unsigned int ind_ex)\n\nAcquire the device information for an excluded GPU device, based on its ind_ex.\n\nArgs:\n    ind_ex (unsigned int): The ind_ex of the target GPU, >= 0 and < ``deviceCount``.\n\nReturns:\n    nvmlExcludedDeviceInfo_t: Reference in which to return the device information.\n\n.. seealso:: `nvmlGetExcludedDeviceInfoByIndex`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_551get_excluded_device_info_by_index = {"get_excluded_device_info_by_index", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_551get_excluded_device_info_by_index, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_550get_excluded_device_info_by_index};
/* Python-visible wrapper for get_excluded_device_info_by_index.
 * Parses exactly one positional-or-keyword argument ("ind_ex"), converts it
 * to unsigned int, and delegates to the __pyx_pf_ dispatcher below.
 * Returns NULL with a Python exception set on any parsing failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_551get_excluded_device_info_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_excluded_device_info_by_index (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(). With the GCC/Clang form
     * unlikely(x) == __builtin_expect(!!(x), 0) the previous code
     * "unlikely(__pyx_kwds_len) < 0" normalized the count to 0/1 before the
     * "< 0" test, so a negative (error) keyword count was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24453, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24453, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "get_excluded_device_info_by_index", 0) < (0)) __PYX_ERR(0, 24453, __pyx_L3_error)
      /* verify every required argument was supplied either positionally or by keyword */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("get_excluded_device_info_by_index", 1, 1, 1, i); __PYX_ERR(0, 24453, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24453, __pyx_L3_error)
    }
    __pyx_v_ind_ex = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_ind_ex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24453, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("get_excluded_device_info_by_index", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24453, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop any argument references acquired so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.get_excluded_device_info_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_550get_excluded_device_info_by_index(__pyx_self, __pyx_v_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatcher for get_excluded_device_info_by_index: invokes the cpdef C
 * implementation (skip_dispatch=1 bypasses a Python-level override lookup)
 * and forwards its PyObject* result. Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_550get_excluded_device_info_by_index(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_ind_ex) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_excluded_device_info_by_index", 0);
  __Pyx_XDECREF(__pyx_r);
  /* call the C implementation; a NULL result signals a Python exception */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_info_by_index(__pyx_v_ind_ex, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_excluded_device_info_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24472
 * 
 * 
 * cpdef int device_set_mig_mode(intptr_t device, unsigned int mode) except? -1:             # <<<<<<<<<<<<<<
 *     """Set MIG mode for the device.
 * 
*/

/* Forward declaration of the Python wrapper for device_set_mig_mode. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_553device_set_mig_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_set_mig_mode (pyx line 24472).
 * Releases the GIL, calls nvmlDeviceSetMigMode(device, mode, &activation_status),
 * re-acquires the GIL, raises via check_status() on a non-success status, and
 * returns the activation status as int. Declared "except? -1" in the pyx
 * source: on error it returns -1 with a Python exception set. */
static int __pyx_f_4cuda_8bindings_5_nvml_device_set_mig_mode(intptr_t __pyx_v_device, unsigned int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml__Return __pyx_v_activation_status;
  nvmlReturn_t __pyx_v___status__;
  int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24485
 *     """
 *     cdef _Return activation_status
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetMigMode(<Device>device, mode, &activation_status)
 *     check_status(__status__)
 */
  {
      /* release the GIL for the duration of the NVML call */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24486
 *     cdef _Return activation_status
 *     with nogil:
 *         __status__ = nvmlDeviceSetMigMode(<Device>device, mode, &activation_status)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <int>activation_status
 */
        /* the sentinel status signals a library-loading failure raised with the GIL held */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMigMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_mode, (&__pyx_v_activation_status)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24486, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24485
 *     """
 *     cdef _Return activation_status
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetMigMode(<Device>device, mode, &activation_status)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* error exit: re-acquire the GIL before propagating */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24487
 *     with nogil:
 *         __status__ = nvmlDeviceSetMigMode(<Device>device, mode, &activation_status)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <int>activation_status
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24487, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24488
 *         __status__ = nvmlDeviceSetMigMode(<Device>device, mode, &activation_status)
 *     check_status(__status__)
 *     return <int>activation_status             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((int)__pyx_v_activation_status);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24472
 * 
 * 
 * cpdef int device_set_mig_mode(intptr_t device, unsigned int mode) except? -1:             # <<<<<<<<<<<<<<
 *     """Set MIG mode for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_mig_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry exposing the wrapper
 * as "device_set_mig_mode" on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_553device_set_mig_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_552device_set_mig_mode, "device_set_mig_mode(intptr_t device, unsigned int mode) -> int\n\nSet MIG mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    mode (unsigned int): The mode to be set, ``NVML_DEVICE_MIG_DISABLE`` or ``NVML_DEVICE_MIG_ENABLE``.\n\nReturns:\n    int: The activationStatus status.\n\n.. seealso:: `nvmlDeviceSetMigMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_553device_set_mig_mode = {"device_set_mig_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_553device_set_mig_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_552device_set_mig_mode};
/* Python-visible wrapper for device_set_mig_mode.
 * Parses two positional-or-keyword arguments ("device", "mode"), converts
 * them to intptr_t / unsigned int, and delegates to the __pyx_pf_ dispatcher.
 * NOTE(review): device is converted with PyLong_AsSsize_t, which assumes
 * intptr_t and Py_ssize_t have the same width — true on supported platforms.
 * Returns NULL with a Python exception set on any parsing failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_553device_set_mig_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_mig_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(). With the GCC/Clang form
     * unlikely(x) == __builtin_expect(!!(x), 0) the previous code
     * "unlikely(__pyx_kwds_len) < 0" normalized the count to 0/1 before the
     * "< 0" test, so a negative (error) keyword count was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24472, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24472, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24472, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_mig_mode", 0) < (0)) __PYX_ERR(0, 24472, __pyx_L3_error)
      /* verify every required argument was supplied either positionally or by keyword */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_mig_mode", 1, 2, 2, i); __PYX_ERR(0, 24472, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24472, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24472, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24472, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_mode == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24472, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_mig_mode", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24472, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop any argument references acquired so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_mig_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_552device_set_mig_mode(__pyx_self, __pyx_v_device, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatcher for device_set_mig_mode: invokes the cpdef C implementation
 * (skip_dispatch=1 bypasses a Python-level override lookup) and boxes its
 * int result into a Python int. Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_552device_set_mig_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_mig_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* -1 is an ambiguous error sentinel ("except? -1"), so PyErr_Occurred disambiguates */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_mig_mode(__pyx_v_device, __pyx_v_mode, 1); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24472, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_mig_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24491
 * 
 * 
 * cpdef tuple device_get_mig_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get MIG mode for the device.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_mig_mode. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_555device_get_mig_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_get_mig_mode (pyx line 24491).
 * Releases the GIL, calls nvmlDeviceGetMigMode(device, &current_mode,
 * &pending_mode), re-acquires the GIL, raises via check_status() on a
 * non-success status, and returns the Python tuple
 * (current_mode, pending_mode). Returns 0 (NULL) with an exception set on
 * error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_mig_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_current_mode;
  unsigned int __pyx_v_pending_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mig_mode", 0);

  /* "cuda/bindings/_nvml.pyx":24507
 *     cdef unsigned int current_mode
 *     cdef unsigned int pending_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMigMode(<Device>device, &current_mode, &pending_mode)
 *     check_status(__status__)
 */
  {
      /* release the GIL for the duration of the NVML call */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24508
 *     cdef unsigned int pending_mode
 *     with nogil:
 *         __status__ = nvmlDeviceGetMigMode(<Device>device, &current_mode, &pending_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (current_mode, pending_mode)
 */
        /* the sentinel status signals a library-loading failure raised with the GIL held */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_current_mode), (&__pyx_v_pending_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24508, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24507
 *     cdef unsigned int current_mode
 *     cdef unsigned int pending_mode
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMigMode(<Device>device, &current_mode, &pending_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* error exit: re-acquire the GIL before propagating */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24509
 *     with nogil:
 *         __status__ = nvmlDeviceGetMigMode(<Device>device, &current_mode, &pending_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (current_mode, pending_mode)
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24509, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24510
 *         __status__ = nvmlDeviceGetMigMode(<Device>device, &current_mode, &pending_mode)
 *     check_status(__status__)
 *     return (current_mode, pending_mode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* box both out-parameters and pack them into a 2-tuple */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_current_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_pending_mode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24510, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 24510, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24491
 * 
 * 
 * cpdef tuple device_get_mig_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get MIG mode for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mig_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry exposing the wrapper
 * as "device_get_mig_mode" on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_555device_get_mig_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_554device_get_mig_mode, "device_get_mig_mode(intptr_t device) -> tuple\n\nGet MIG mode for the device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    A 2-tuple containing:\n\n    - unsigned int: Returns the current mode, ``NVML_DEVICE_MIG_DISABLE`` or ``NVML_DEVICE_MIG_ENABLE``.\n    - unsigned int: Returns the pending mode, ``NVML_DEVICE_MIG_DISABLE`` or ``NVML_DEVICE_MIG_ENABLE``.\n\n.. seealso:: `nvmlDeviceGetMigMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_555device_get_mig_mode = {"device_get_mig_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_555device_get_mig_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_554device_get_mig_mode};
/* Python-visible wrapper for device_get_mig_mode.
 * Parses one positional-or-keyword argument ("device"), converts it to
 * intptr_t via PyLong_AsSsize_t (assumes intptr_t == Py_ssize_t width —
 * true on supported platforms), and delegates to the __pyx_pf_ dispatcher.
 * Returns NULL with a Python exception set on any parsing failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_555device_get_mig_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_mig_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(). With the GCC/Clang form
     * unlikely(x) == __builtin_expect(!!(x), 0) the previous code
     * "unlikely(__pyx_kwds_len) < 0" normalized the count to 0/1 before the
     * "< 0" test, so a negative (error) keyword count was never detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24491, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24491, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_mig_mode", 0) < (0)) __PYX_ERR(0, 24491, __pyx_L3_error)
      /* verify every required argument was supplied either positionally or by keyword */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_mig_mode", 1, 1, 1, i); __PYX_ERR(0, 24491, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24491, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24491, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_mig_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24491, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* error path: drop any argument references acquired so far */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mig_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_554device_get_mig_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatcher for device_get_mig_mode: invokes the cpdef C implementation
 * (skip_dispatch=1 bypasses a Python-level override lookup) and forwards
 * its tuple result. Returns NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_554device_get_mig_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_mig_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  /* call the C implementation; a NULL result signals a Python exception */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_mig_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mig_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24513
 * 
 * 
 * cpdef object device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around ``nvmlDeviceGetGpuInstanceProfileInfo`` that accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/

/* Forward declaration of the Python wrapper for
 * device_get_gpu_instance_profile_info_v. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_557device_get_gpu_instance_profile_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_get_gpu_instance_profile_info_v
 * (pyx line 24513). Allocates a GpuInstanceProfileInfo_v2 Python object,
 * obtains its backing nvmlGpuInstanceProfileInfo_v2_t* via _get_ptr(),
 * stamps the struct's version field as sizeof(v3 struct) | (3 << 24),
 * calls nvmlDeviceGetGpuInstanceProfileInfoV with the GIL released, raises
 * via check_status() on failure, and returns the filled Python object.
 * Returns 0 (NULL) with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_v(intptr_t __pyx_v_device, unsigned int __pyx_v_profile, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_info_py = 0;
  nvmlGpuInstanceProfileInfo_v2_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_profile_info_v", 0);

  /* "cuda/bindings/_nvml.pyx":24525
 *     .. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoV`
 *     """
 *     cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 */
  /* instantiate the output wrapper object via a zero-argument vectorcall */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24525, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24526
 *     """
 *     cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2()
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:
 */
  /* _get_ptr() returns the wrapper's native struct address as an intptr_t */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24526, __pyx_L1_error)
  __pyx_v_info = ((nvmlGpuInstanceProfileInfo_v2_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24527
 *     cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2()
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(<Device>device, profile, info)
 */
  /* version field = struct size | (version number << 24); 0x3000000 == 3 << 24 */
  __pyx_v_info->version = ((sizeof(nvmlGpuInstanceProfileInfo_v3_t)) | 0x3000000);

  /* "cuda/bindings/_nvml.pyx":24528
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(<Device>device, profile, info)
 *     check_status(__status__)
 */
  {
      /* release the GIL for the duration of the NVML call */
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24529
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(<Device>device, profile, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
 */
        /* the sentinel status signals a library-loading failure raised with the GIL held */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoV(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile, __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24529, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24528
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(<Device>device, profile, info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* error exit: re-acquire the GIL before propagating */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24530
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(<Device>device, profile, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
 */
  /* check_status raises the mapped Python exception for non-success codes */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24530, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24531
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(<Device>device, profile, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24513
 * 
 * 
 * cpdef object device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around ``nvmlDeviceGetGpuInstanceProfileInfo`` that accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_profile_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and method-table entry exposing the wrapper
 * as "device_get_gpu_instance_profile_info_v" on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_557device_get_gpu_instance_profile_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_556device_get_gpu_instance_profile_info_v, "device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile)\n\nVersioned wrapper around ``nvmlDeviceGetGpuInstanceProfileInfo`` that accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile (unsigned int): One of the NVML_GPU_INSTANCE_PROFILE_*.\n\nReturns:\n    nvmlGpuInstanceProfileInfo_v2_t: Returns detailed profile information.\n\n.. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_557device_get_gpu_instance_profile_info_v = {"device_get_gpu_instance_profile_info_v", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_557device_get_gpu_instance_profile_info_v, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_556device_get_gpu_instance_profile_info_v};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_557device_get_gpu_instance_profile_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instance_profile_info_v (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24513, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24513, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24513, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instance_profile_info_v", 0) < (0)) __PYX_ERR(0, 24513, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_profile_info_v", 1, 2, 2, i); __PYX_ERR(0, 24513, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24513, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24513, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24513, __pyx_L3_error)
    __pyx_v_profile = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24513, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_profile_info_v", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24513, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_profile_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_556device_get_gpu_instance_profile_info_v(__pyx_self, __pyx_v_device, __pyx_v_profile);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatcher: the Python wrapper lands here and we forward directly to
 * the C-level implementation with skip_dispatch=1 (no MRO lookup needed for a
 * module-level cpdef).  A NULL result means a Python exception is already set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_556device_get_gpu_instance_profile_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_profile_info_v", 0);
  /* Forward to the C entry point; it returns a new reference on success. */
  __pyx_r = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_v(__pyx_v_device, __pyx_v_profile, 1);
  if (unlikely(!__pyx_r)) __PYX_ERR(0, 24513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_r);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_profile_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24534
 * 
 * 
 * cpdef object device_get_gpu_instance_possible_placements_v2(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get GPU instance placements.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_559device_get_gpu_instance_possible_placements_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_get_gpu_instance_possible_placements_v2.
 *
 * Two-phase NVML size-query pattern:
 *   1) call nvmlDeviceGetGpuInstancePossiblePlacements_v2 with a NULL buffer
 *      to obtain the placement count,
 *   2) construct a GpuInstancePlacement wrapper sized to 'count' and call
 *      again to fill its buffer.
 * Both driver calls run with the GIL released (PyEval_SaveThread /
 * PyEval_RestoreThread).  Returns a new reference to the (possibly empty)
 * GpuInstancePlacement object, or NULL with a Python exception set.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_possible_placements_v2(intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v_placements = 0;
  nvmlGpuInstancePlacement_t *__pyx_v_placements_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_possible_placements_v2", 0);

  /* "cuda/bindings/_nvml.pyx":24543
 *     .. seealso:: `nvmlDeviceGetGpuInstancePossiblePlacements_v2`
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, NULL, <unsigned int*>count)
 */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* Phase 1: size query — placements buffer is NULL, so the driver only
   * writes the required element count into 'count'. */
  /* "cuda/bindings/_nvml.pyx":24544
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24545
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, NULL, <unsigned int*>count)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef GpuInstancePlacement placements = GpuInstancePlacement(count[0])
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstancePossiblePlacements_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, NULL, ((unsigned int *)__pyx_v_count)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24545, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24544
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)
 */
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24546
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef GpuInstancePlacement placements = GpuInstancePlacement(count[0])
 *     cdef nvmlGpuInstancePlacement_t *placements_ptr = <nvmlGpuInstancePlacement_t *><intptr_t>(placements._get_ptr())
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24546, __pyx_L1_error)

  /* Allocate the Python-side wrapper sized to hold count[0] placement
   * records (GpuInstancePlacement(count[0]) via vectorcall). */
  /* "cuda/bindings/_nvml.pyx":24547
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)
 *     cdef GpuInstancePlacement placements = GpuInstancePlacement(count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstancePlacement_t *placements_ptr = <nvmlGpuInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:
 */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 24547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24547, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_placements = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* NOTE(review): _get_ptr() hands back the buffer address as an intptr_t;
   * presumably the buffer is owned by 'placements' so its lifetime covers the
   * second driver call below — confirm against the GpuInstancePlacement
   * class definition. */
  /* "cuda/bindings/_nvml.pyx":24548
 *     check_status_size(__status__)
 *     cdef GpuInstancePlacement placements = GpuInstancePlacement(count[0])
 *     cdef nvmlGpuInstancePlacement_t *placements_ptr = <nvmlGpuInstancePlacement_t *><intptr_t>(placements._get_ptr())             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return placements
 */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v_placements->__pyx_vtab)->_get_ptr(__pyx_v_placements); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24548, __pyx_L1_error)
  __pyx_v_placements_ptr = ((nvmlGpuInstancePlacement_t *)((intptr_t)__pyx_t_8));

  /* Nothing to fetch: return the empty wrapper without a second driver call. */
  /* "cuda/bindings/_nvml.pyx":24549
 *     cdef GpuInstancePlacement placements = GpuInstancePlacement(count[0])
 *     cdef nvmlGpuInstancePlacement_t *placements_ptr = <nvmlGpuInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return placements
 *     with nogil:
 */
  __pyx_t_9 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":24550
 *     cdef nvmlGpuInstancePlacement_t *placements_ptr = <nvmlGpuInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:
 *         return placements             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, placements_ptr, <unsigned int*>count)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_placements);
    __pyx_r = ((PyObject *)__pyx_v_placements);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":24549
 *     cdef GpuInstancePlacement placements = GpuInstancePlacement(count[0])
 *     cdef nvmlGpuInstancePlacement_t *placements_ptr = <nvmlGpuInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return placements
 *     with nogil:
 */
  }

  /* Phase 2: fill the buffer with the actual placement records, again with
   * the GIL released. */
  /* "cuda/bindings/_nvml.pyx":24551
 *     if count[0] == 0:
 *         return placements
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24552
 *         return placements
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, placements_ptr, <unsigned int*>count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return placements
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstancePossiblePlacements_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, __pyx_v_placements_ptr, ((unsigned int *)__pyx_v_count)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24552, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24551
 *     if count[0] == 0:
 *         return placements
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24553
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return placements
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24553, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24554
 *         __status__ = nvmlDeviceGetGpuInstancePossiblePlacements_v2(<Device>device, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)
 *     return placements             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_placements);
  __pyx_r = ((PyObject *)__pyx_v_placements);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24534
 * 
 * 
 * cpdef object device_get_gpu_instance_possible_placements_v2(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get GPU instance placements.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_possible_placements_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_placements);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython METH_FASTCALL|METH_KEYWORDS entry point for
 * device_get_gpu_instance_possible_placements_v2(device, profile_id).
 * Parses exactly two positional-or-keyword arguments ("device",
 * "profile_id") and forwards to the __pyx_pf_ dispatcher.  Returns NULL
 * with a Python exception on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_559device_get_gpu_instance_possible_placements_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_558device_get_gpu_instance_possible_placements_v2, "device_get_gpu_instance_possible_placements_v2(intptr_t device, unsigned int profile_id)\n\nGet GPU instance placements.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile_id (unsigned int): The GPU instance profile ID. See ``nvmlDeviceGetGpuInstanceProfileInfo``.\n\n.. seealso:: `nvmlDeviceGetGpuInstancePossiblePlacements_v2`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_559device_get_gpu_instance_possible_placements_v2 = {"device_get_gpu_instance_possible_placements_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_559device_get_gpu_instance_possible_placements_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_558device_get_gpu_instance_possible_placements_v2};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_559device_get_gpu_instance_possible_placements_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instance_possible_placements_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was 'unlikely(__pyx_kwds_len) < 0'.  unlikely(x) yields !!(x)
     * (0 or 1), which is never negative, so the error check on
     * __Pyx_NumKwargs_FASTCALL was dead; move the comparison inside. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24534, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24534, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24534, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instance_possible_placements_v2", 0) < (0)) __PYX_ERR(0, 24534, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_possible_placements_v2", 1, 2, 2, i); __PYX_ERR(0, 24534, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24534, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24534, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24534, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24534, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_possible_placements_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24534, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_possible_placements_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_558device_get_gpu_instance_possible_placements_v2(__pyx_self, __pyx_v_device, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatcher: forwards the parsed arguments straight to the C
 * implementation with skip_dispatch=1.  NULL means an exception is set.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_558device_get_gpu_instance_possible_placements_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_possible_placements_v2", 0);
  /* Forward to the C entry point; it returns a new reference on success. */
  __pyx_r = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_possible_placements_v2(__pyx_v_device, __pyx_v_profile_id, 1);
  if (unlikely(!__pyx_r)) __PYX_ERR(0, 24534, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_r);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_possible_placements_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24557
 * 
 * 
 * cpdef unsigned int device_get_gpu_instance_remaining_capacity(intptr_t device, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instance profile capacity.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_561device_get_gpu_instance_remaining_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_get_gpu_instance_remaining_capacity.
 *
 * Calls nvmlDeviceGetGpuInstanceRemainingCapacity with the GIL released and
 * returns the remaining instance count for the given profile ID.  Declared
 * 'except? 0' in the .pyx: a return value of 0 signals an error only when a
 * Python exception is also pending (0 is a legitimate capacity).
 */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_remaining_capacity(intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Release the GIL around the driver call; reacquired in the finally
   * section below on both the normal and the error path. */
  /* "cuda/bindings/_nvml.pyx":24570
 *     """
 *     cdef unsigned int count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceRemainingCapacity(<Device>device, profile_id, &count)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24571
 *     cdef unsigned int count
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceRemainingCapacity(<Device>device, profile_id, &count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return count
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceRemainingCapacity(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, (&__pyx_v_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24571, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24570
 *     """
 *     cdef unsigned int count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceRemainingCapacity(<Device>device, profile_id, &count)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* check_status raises the mapped Python exception for a non-success
   * NVML status; it returns 1 on error. */
  /* "cuda/bindings/_nvml.pyx":24572
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceRemainingCapacity(<Device>device, profile_id, &count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return count
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24572, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24573
 *         __status__ = nvmlDeviceGetGpuInstanceRemainingCapacity(<Device>device, profile_id, &count)
 *     check_status(__status__)
 *     return count             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24557
 * 
 * 
 * cpdef unsigned int device_get_gpu_instance_remaining_capacity(intptr_t device, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instance profile capacity.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_remaining_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython METH_FASTCALL|METH_KEYWORDS entry point for
 * device_get_gpu_instance_remaining_capacity(device, profile_id).
 * Parses exactly two positional-or-keyword arguments ("device",
 * "profile_id") and forwards to the __pyx_pf_ dispatcher.  Returns NULL
 * with a Python exception on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_561device_get_gpu_instance_remaining_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_560device_get_gpu_instance_remaining_capacity, "device_get_gpu_instance_remaining_capacity(intptr_t device, unsigned int profile_id) -> unsigned int\n\nGet GPU instance profile capacity.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile_id (unsigned int): The GPU instance profile ID. See ``nvmlDeviceGetGpuInstanceProfileInfo``.\n\nReturns:\n    unsigned int: Returns remaining instance count for the profile ID.\n\n.. seealso:: `nvmlDeviceGetGpuInstanceRemainingCapacity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_561device_get_gpu_instance_remaining_capacity = {"device_get_gpu_instance_remaining_capacity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_561device_get_gpu_instance_remaining_capacity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_560device_get_gpu_instance_remaining_capacity};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_561device_get_gpu_instance_remaining_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instance_remaining_capacity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was 'unlikely(__pyx_kwds_len) < 0'.  unlikely(x) yields !!(x)
     * (0 or 1), which is never negative, so the error check on
     * __Pyx_NumKwargs_FASTCALL was dead; move the comparison inside. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24557, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24557, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24557, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instance_remaining_capacity", 0) < (0)) __PYX_ERR(0, 24557, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_remaining_capacity", 1, 2, 2, i); __PYX_ERR(0, 24557, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24557, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24557, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24557, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24557, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_remaining_capacity", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24557, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_remaining_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_560device_get_gpu_instance_remaining_capacity(__pyx_self, __pyx_v_device, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `device_get_gpu_instance_remaining_capacity`:
 * forwards to the C-level cpdef entry point and boxes its `unsigned int`
 * result as a Python int.
 * NOTE(review): Cython-3.2.2-generated; regenerate from
 * cuda/bindings/_nvml.pyx rather than hand-editing. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_560device_get_gpu_instance_remaining_capacity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_remaining_capacity", 0);
  __Pyx_XDECREF(__pyx_r);
  /* A 0 return is the cpdef's "maybe error" sentinel; PyErr_Occurred()
   * disambiguates a genuine 0 result from a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_remaining_capacity(__pyx_v_device, __pyx_v_profile_id, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24557, __pyx_L1_error)
  /* Box the C result as a Python int (new reference). */
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_remaining_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24576
 * 
 * 
 * cpdef intptr_t device_create_gpu_instance(intptr_t device, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Create GPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_563device_create_gpu_instance(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_create_gpu_instance` (declared `except? 0`):
 * releases the GIL, calls nvmlDeviceCreateGpuInstance through the cy_nvml
 * shim, re-acquires the GIL, raises on a bad status via check_status(), and
 * returns the GPU-instance handle as an intptr_t.  A return of 0 together
 * with a pending exception signals failure to callers.
 * NOTE(review): Cython-generated; the save/restore-thread and goto-label
 * structure must not be reordered. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance(intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_GpuInstance __pyx_v_gpu_instance;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24589
 *     """
 *     cdef GpuInstance gpu_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceCreateGpuInstance(<Device>device, profile_id, &gpu_instance)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* Release the GIL for the (potentially blocking) NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24590
 *     cdef GpuInstance gpu_instance
 *     with nogil:
 *         __status__ = nvmlDeviceCreateGpuInstance(<Device>device, profile_id, &gpu_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>gpu_instance
*/
        /* The internal-loading-error sentinel means the shim failed to load
         * the symbol and already set a Python error (checked with the GIL). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstance(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, (&__pyx_v_gpu_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24590, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24589
 *     """
 *     cdef GpuInstance gpu_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceCreateGpuInstance(<Device>device, profile_id, &gpu_instance)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path also re-acquires the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24591
 *     with nogil:
 *         __status__ = nvmlDeviceCreateGpuInstance(<Device>device, profile_id, &gpu_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>gpu_instance
 * 
*/
  /* check_status() raises a Python exception for non-success NVML codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24591, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24592
 *         __status__ = nvmlDeviceCreateGpuInstance(<Device>device, profile_id, &gpu_instance)
 *     check_status(__status__)
 *     return <intptr_t>gpu_instance             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((intptr_t)__pyx_v_gpu_instance);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24576
 * 
 * 
 * cpdef intptr_t device_create_gpu_instance(intptr_t device, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Create GPU instance.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_create_gpu_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_563device_create_gpu_instance(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_562device_create_gpu_instance, "device_create_gpu_instance(intptr_t device, unsigned int profile_id) -> intptr_t\n\nCreate GPU instance.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile_id (unsigned int): The GPU instance profile ID. See ``nvmlDeviceGetGpuInstanceProfileInfo``.\n\nReturns:\n    intptr_t: Returns the GPU instance handle.\n\n.. seealso:: `nvmlDeviceCreateGpuInstance`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_563device_create_gpu_instance = {"device_create_gpu_instance", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_563device_create_gpu_instance, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_562device_create_gpu_instance};
/* METH_FASTCALL|METH_KEYWORDS wrapper for `device_create_gpu_instance`:
 * unpacks the two required arguments (positionally or by keyword), converts
 * them to C types, and dispatches to the pf implementation.
 * FIX: the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; `unlikely(x)` expands to
 * `__builtin_expect(!!(x), 0)`, whose value is 0 or 1, so `< 0` was always
 * false and an error return from __Pyx_NumKwargs_FASTCALL() was silently
 * ignored.  The comparison now sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_563device_create_gpu_instance(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_create_gpu_instance (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24576, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect any positional arguments, then fill the rest by keyword. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24576, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_create_gpu_instance", 0) < (0)) __PYX_ERR(0, 24576, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_create_gpu_instance", 1, 2, 2, i); __PYX_ERR(0, 24576, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24576, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24576, __pyx_L3_error)
    }
    /* Convert the boxed arguments to their C representations. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24576, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24576, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_create_gpu_instance", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24576, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_create_gpu_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_562device_create_gpu_instance(__pyx_self, __pyx_v_device, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `device_create_gpu_instance`: invokes the
 * C-level cpdef entry point and returns the resulting GPU-instance handle
 * boxed as a Python int.  An intptr_t result of 0 plus a pending exception
 * marks failure (the cpdef is declared `except? 0`). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_562device_create_gpu_instance(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_handle;
  PyObject *__pyx_boxed = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_create_gpu_instance", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; 0 is only an error when an exception is set. */
  __pyx_handle = __pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance(__pyx_v_device, __pyx_v_profile_id, 1);
  if (unlikely(__pyx_handle == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24576, __pyx_Lfail)
  /* Box the handle as a Python int (new reference). */
  __pyx_boxed = PyLong_FromSsize_t(__pyx_handle);
  if (unlikely(!__pyx_boxed)) __PYX_ERR(0, 24576, __pyx_Lfail)
  __Pyx_GOTREF(__pyx_boxed);
  __pyx_r = __pyx_boxed;
  __pyx_boxed = 0;
  goto __pyx_Ldone;

  /* error path: drop any temporary and record the traceback */
  __pyx_Lfail:;
  __Pyx_XDECREF(__pyx_boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_create_gpu_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_Ldone:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24595
 * 
 * 
 * cpdef intptr_t device_create_gpu_instance_with_placement(intptr_t device, unsigned int profile_id, intptr_t placement) except? 0:             # <<<<<<<<<<<<<<
 *     """Create GPU instance with the specified placement.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_565device_create_gpu_instance_with_placement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_create_gpu_instance_with_placement`
 * (declared `except? 0`): releases the GIL, calls
 * nvmlDeviceCreateGpuInstanceWithPlacement with `placement` reinterpreted as
 * a const nvmlGpuInstancePlacement_t pointer, re-acquires the GIL, raises on
 * a bad status via check_status(), and returns the handle as intptr_t.
 * NOTE(review): Cython-generated; keep the save/restore-thread and label
 * structure intact. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance_with_placement(intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, intptr_t __pyx_v_placement, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_GpuInstance __pyx_v_gpu_instance;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24609
 *     """
 *     cdef GpuInstance gpu_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceCreateGpuInstanceWithPlacement(<Device>device, profile_id, <const nvmlGpuInstancePlacement_t*>placement, &gpu_instance)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* Release the GIL around the NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24610
 *     cdef GpuInstance gpu_instance
 *     with nogil:
 *         __status__ = nvmlDeviceCreateGpuInstanceWithPlacement(<Device>device, profile_id, <const nvmlGpuInstancePlacement_t*>placement, &gpu_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>gpu_instance
*/
        /* Sentinel status means the shim could not load the symbol and set a
         * Python error (checked with the GIL temporarily re-taken). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstanceWithPlacement(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, ((nvmlGpuInstancePlacement_t const *)__pyx_v_placement), (&__pyx_v_gpu_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24610, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24609
 *     """
 *     cdef GpuInstance gpu_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceCreateGpuInstanceWithPlacement(<Device>device, profile_id, <const nvmlGpuInstancePlacement_t*>placement, &gpu_instance)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path re-acquires the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24611
 *     with nogil:
 *         __status__ = nvmlDeviceCreateGpuInstanceWithPlacement(<Device>device, profile_id, <const nvmlGpuInstancePlacement_t*>placement, &gpu_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>gpu_instance
 * 
*/
  /* Raise a Python exception for non-success NVML status codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24611, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24612
 *         __status__ = nvmlDeviceCreateGpuInstanceWithPlacement(<Device>device, profile_id, <const nvmlGpuInstancePlacement_t*>placement, &gpu_instance)
 *     check_status(__status__)
 *     return <intptr_t>gpu_instance             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((intptr_t)__pyx_v_gpu_instance);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24595
 * 
 * 
 * cpdef intptr_t device_create_gpu_instance_with_placement(intptr_t device, unsigned int profile_id, intptr_t placement) except? 0:             # <<<<<<<<<<<<<<
 *     """Create GPU instance with the specified placement.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_create_gpu_instance_with_placement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_565device_create_gpu_instance_with_placement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_564device_create_gpu_instance_with_placement, "device_create_gpu_instance_with_placement(intptr_t device, unsigned int profile_id, intptr_t placement) -> intptr_t\n\nCreate GPU instance with the specified placement.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile_id (unsigned int): The GPU instance profile ID. See ``nvmlDeviceGetGpuInstanceProfileInfo``.\n    placement (intptr_t): The requested placement. See ``nvmlDeviceGetGpuInstancePossiblePlacements_v2``.\n\nReturns:\n    intptr_t: Returns the GPU instance handle.\n\n.. seealso:: `nvmlDeviceCreateGpuInstanceWithPlacement`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_565device_create_gpu_instance_with_placement = {"device_create_gpu_instance_with_placement", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_565device_create_gpu_instance_with_placement, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_564device_create_gpu_instance_with_placement};
/* METH_FASTCALL|METH_KEYWORDS wrapper for
 * `device_create_gpu_instance_with_placement`: unpacks the three required
 * arguments (positionally or by keyword), converts them to C types, and
 * dispatches to the pf implementation.
 * FIX: the keyword-count check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; `unlikely(x)` yields !!(x) (0 or 1),
 * so the `< 0` comparison was always false and a negative (error) return
 * from __Pyx_NumKwargs_FASTCALL() was silently ignored.  The comparison now
 * sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_565device_create_gpu_instance_with_placement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile_id;
  intptr_t __pyx_v_placement;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_create_gpu_instance_with_placement (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile_id,&__pyx_mstate_global->__pyx_n_u_placement,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24595, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect any positional arguments, then fill the rest by keyword. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24595, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24595, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24595, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_create_gpu_instance_with_placement", 0) < (0)) __PYX_ERR(0, 24595, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_create_gpu_instance_with_placement", 1, 3, 3, i); __PYX_ERR(0, 24595, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24595, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24595, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24595, __pyx_L3_error)
    }
    /* Convert the boxed arguments to their C representations. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24595, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24595, __pyx_L3_error)
    __pyx_v_placement = PyLong_AsSsize_t(values[2]); if (unlikely((__pyx_v_placement == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24595, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_create_gpu_instance_with_placement", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24595, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_create_gpu_instance_with_placement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_564device_create_gpu_instance_with_placement(__pyx_self, __pyx_v_device, __pyx_v_profile_id, __pyx_v_placement);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `device_create_gpu_instance_with_placement`:
 * forwards to the C-level cpdef entry point and boxes the returned handle
 * (intptr_t) as a Python int.  0 plus a pending exception marks failure
 * (cpdef declared `except? 0`).
 * NOTE(review): Cython-generated; do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_564device_create_gpu_instance_with_placement(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, intptr_t __pyx_v_placement) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_create_gpu_instance_with_placement", 0);
  __Pyx_XDECREF(__pyx_r);
  /* A 0 return only signals an error when a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance_with_placement(__pyx_v_device, __pyx_v_profile_id, __pyx_v_placement, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24595, __pyx_L1_error)
  /* Box the handle as a Python int (new reference). */
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24595, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_create_gpu_instance_with_placement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24615
 * 
 * 
 * cpdef gpu_instance_destroy(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Destroy GPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_567gpu_instance_destroy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `gpu_instance_destroy`: releases the GIL, calls
 * nvmlGpuInstanceDestroy on the handle, re-acquires the GIL, raises via
 * check_status() on a bad status, and returns None on success / NULL on
 * error.
 * NOTE(review): Cython-generated; keep the save/restore-thread and label
 * structure intact. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_destroy(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_destroy", 0);

  /* "cuda/bindings/_nvml.pyx":24623
 *     .. seealso:: `nvmlGpuInstanceDestroy`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceDestroy(<GpuInstance>gpu_instance)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      /* Release the GIL around the NVML driver call. */
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24624
 *     """
 *     with nogil:
 *         __status__ = nvmlGpuInstanceDestroy(<GpuInstance>gpu_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* Sentinel status means the shim could not load the symbol and set a
         * Python error (checked with the GIL temporarily re-taken). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceDestroy(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24624, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24623
 *     .. seealso:: `nvmlGpuInstanceDestroy`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceDestroy(<GpuInstance>gpu_instance)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error path re-acquires the GIL before propagating. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24625
 *     with nogil:
 *         __status__ = nvmlGpuInstanceDestroy(<GpuInstance>gpu_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Raise a Python exception for non-success NVML status codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24625, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24615
 * 
 * 
 * cpdef gpu_instance_destroy(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Destroy GPU instance.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_destroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_567gpu_instance_destroy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_566gpu_instance_destroy, "gpu_instance_destroy(intptr_t gpu_instance)\n\nDestroy GPU instance.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n\n.. seealso:: `nvmlGpuInstanceDestroy`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_567gpu_instance_destroy = {"gpu_instance_destroy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_567gpu_instance_destroy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_566gpu_instance_destroy};
/* METH_FASTCALL|METH_KEYWORDS wrapper for `gpu_instance_destroy`: unpacks
 * the single required argument (positionally or by keyword), converts it to
 * intptr_t, and dispatches to the pf implementation.
 * FIX: the keyword-count check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; `unlikely(x)` yields !!(x) (0 or 1),
 * so the `< 0` comparison was always false and a negative (error) return
 * from __Pyx_NumKwargs_FASTCALL() was silently ignored.  The comparison now
 * sits inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_567gpu_instance_destroy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_destroy (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count signals an error from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24615, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect the positional argument if given, else fill it by keyword. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24615, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_destroy", 0) < (0)) __PYX_ERR(0, 24615, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_destroy", 1, 1, 1, i); __PYX_ERR(0, 24615, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24615, __pyx_L3_error)
    }
    /* Convert the boxed handle to its C representation. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24615, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_destroy", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24615, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_destroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_566gpu_instance_destroy(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call implementation for `gpu_instance_destroy`: forwards to the
 * C-level cpdef entry point, which returns None on success or NULL with an
 * exception set on failure.
 * NOTE(review): Cython-generated; do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_566gpu_instance_destroy(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_destroy", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL return means the C implementation raised; propagate it. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_destroy(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_destroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24628
 * 
 * 
 * cpdef intptr_t device_get_gpu_instance_by_id(intptr_t device, unsigned int id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instances for given instance ID.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_569device_get_gpu_instance_by_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `device_get_gpu_instance_by_id` (pyx line 24628):
 * releases the GIL, calls nvmlDeviceGetGpuInstanceById, restores the GIL,
 * raises via check_status on a bad NVML return code, and returns the GPU
 * instance handle as intptr_t.  Error convention: `except? 0`, i.e. 0 is
 * returned on error with a Python exception set. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_by_id(intptr_t __pyx_v_device, unsigned int __pyx_v_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_GpuInstance __pyx_v_gpu_instance;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24641
 *     """
 *     cdef GpuInstance gpu_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceById(<Device>device, id, &gpu_instance)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24642
 *     cdef GpuInstance gpu_instance
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceById(<Device>device, id, &gpu_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>gpu_instance
*/
        /* The sentinel return value signals the binding failed to load the
         * symbol; in that case an exception may already be set (checked with
         * the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceById(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_id, (&__pyx_v_gpu_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24642, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24641
 *     """
 *     cdef GpuInstance gpu_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceById(<Device>device, id, &gpu_instance)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil region: re-acquire the GIL before jumping
         * to the common error exit. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24643
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceById(<Device>device, id, &gpu_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>gpu_instance
 * 
*/
  /* check_status returns 1 when it raised for a non-success nvmlReturn_t. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24643, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24644
 *         __status__ = nvmlDeviceGetGpuInstanceById(<Device>device, id, &gpu_instance)
 *     check_status(__status__)
 *     return <intptr_t>gpu_instance             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((intptr_t)__pyx_v_gpu_instance);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24628
 * 
 * 
 * cpdef intptr_t device_get_gpu_instance_by_id(intptr_t device, unsigned int id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instances for given instance ID.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_by_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry binding the fastcall
 * wrapper below as the module-level `device_get_gpu_instance_by_id`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_569device_get_gpu_instance_by_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_568device_get_gpu_instance_by_id, "device_get_gpu_instance_by_id(intptr_t device, unsigned int id) -> intptr_t\n\nGet GPU instances for given instance ID.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    id (unsigned int): The GPU instance ID.\n\nReturns:\n    intptr_t: Returns GPU instance.\n\n.. seealso:: `nvmlDeviceGetGpuInstanceById`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_569device_get_gpu_instance_by_id = {"device_get_gpu_instance_by_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_569device_get_gpu_instance_by_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_568device_get_gpu_instance_by_id};
/* METH_FASTCALL wrapper for `device_get_gpu_instance_by_id(device, id)`:
 * unpacks positional/keyword arguments, converts them to intptr_t /
 * unsigned int, and delegates to the `__pyx_pf_..._568` shim.
 *
 * Fix: the keyword-count error check read
 *   `if (unlikely(__pyx_kwds_len) < 0)`
 * which compares the 0/1 result of `unlikely()` (`__builtin_expect(!!(x),0)`)
 * against 0 and is therefore always false, silently dropping a negative
 * (error) result from __Pyx_NumKwargs_FASTCALL.  The `< 0` now sits inside
 * the `unlikely()` so the error path is reachable as intended. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_569device_get_gpu_instance_by_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instance_by_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* A negative count means __Pyx_NumKwargs_FASTCALL failed with an
     * exception set; propagate it instead of ignoring it. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24628, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24628, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24628, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instance_by_id", 0) < (0)) __PYX_ERR(0, 24628, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_by_id", 1, 2, 2, i); __PYX_ERR(0, 24628, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24628, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24628, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24628, __pyx_L3_error)
    __pyx_v_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24628, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_by_id", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24628, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_by_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_568device_get_gpu_instance_by_id(__pyx_self, __pyx_v_device, __pyx_v_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible shim for cpdef `device_get_gpu_instance_by_id`: calls the
 * C implementation (skip_dispatch=1), detects its `except? 0` error
 * convention (0 return + exception set), and boxes the resulting handle
 * as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_568device_get_gpu_instance_by_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_by_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is an ambiguous sentinel (`except? 0`), so also consult PyErr_Occurred. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_by_id(__pyx_v_device, __pyx_v_id, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24628, __pyx_L1_error)
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_by_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24647
 * 
 * 
 * cpdef object gpu_instance_get_info(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Get GPU instance information.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_571gpu_instance_get_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `gpu_instance_get_info` (pyx line 24647):
 * allocates a Python-side GpuInstanceInfo wrapper, obtains the raw
 * nvmlGpuInstanceInfo_t* it owns via `_get_ptr()`, fills it by calling
 * nvmlGpuInstanceGetInfo with the GIL released, raises via check_status
 * on failure, and returns the wrapper object (new reference). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_info(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *__pyx_v_info_py = 0;
  nvmlGpuInstanceInfo_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_info", 0);

  /* "cuda/bindings/_nvml.pyx":24658
 *     .. seealso:: `nvmlGpuInstanceGetInfo`
 *     """
 *     cdef GpuInstanceInfo info_py = GpuInstanceInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceInfo_t *info = <nvmlGpuInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
*/
  /* Vectorcall `GpuInstanceInfo()` with zero arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24658, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24659
 *     """
 *     cdef GpuInstanceInfo info_py = GpuInstanceInfo()
 *     cdef nvmlGpuInstanceInfo_t *info = <nvmlGpuInstanceInfo_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetInfo(<GpuInstance>gpu_instance, info)
*/
  /* `info` borrows storage owned by `info_py`; it stays valid while
   * `info_py` is alive. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceInfo *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24659, __pyx_L1_error)
  __pyx_v_info = ((nvmlGpuInstanceInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24660
 *     cdef GpuInstanceInfo info_py = GpuInstanceInfo()
 *     cdef nvmlGpuInstanceInfo_t *info = <nvmlGpuInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetInfo(<GpuInstance>gpu_instance, info)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24661
 *     cdef nvmlGpuInstanceInfo_t *info = <nvmlGpuInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetInfo(<GpuInstance>gpu_instance, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetInfo(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24661, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24660
 *     cdef GpuInstanceInfo info_py = GpuInstanceInfo()
 *     cdef nvmlGpuInstanceInfo_t *info = <nvmlGpuInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetInfo(<GpuInstance>gpu_instance, info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24662
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetInfo(<GpuInstance>gpu_instance, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24662, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24663
 *         __status__ = nvmlGpuInstanceGetInfo(<GpuInstance>gpu_instance, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24647
 * 
 * 
 * cpdef object gpu_instance_get_info(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Get GPU instance information.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry binding the fastcall
 * wrapper below as the module-level `gpu_instance_get_info`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_571gpu_instance_get_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_570gpu_instance_get_info, "gpu_instance_get_info(intptr_t gpu_instance)\n\nGet GPU instance information.\n\nArgs:\n    gpu_instance (intptr_t): The GPU instance handle.\n\nReturns:\n    nvmlGpuInstanceInfo_t: Return GPU instance information.\n\n.. seealso:: `nvmlGpuInstanceGetInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_571gpu_instance_get_info = {"gpu_instance_get_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_571gpu_instance_get_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_570gpu_instance_get_info};
/* METH_FASTCALL wrapper for `gpu_instance_get_info(gpu_instance)`:
 * unpacks the single argument, converts it to intptr_t, and delegates to
 * the `__pyx_pf_..._570` shim.
 *
 * Fix: `if (unlikely(__pyx_kwds_len) < 0)` compared the 0/1 result of
 * `unlikely()` against 0 (always false), making the keyword-count error
 * check dead code.  The comparison now lives inside `unlikely()`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_571gpu_instance_get_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative means kwargs enumeration failed with an exception set. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24647, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24647, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_info", 0) < (0)) __PYX_ERR(0, 24647, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_info", 1, 1, 1, i); __PYX_ERR(0, 24647, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24647, __pyx_L3_error)
    }
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24647, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24647, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_570gpu_instance_get_info(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible shim for cpdef `gpu_instance_get_info`: forwards to the
 * C implementation (skip_dispatch=1) and returns its new PyObject*
 * reference, or NULL with the traceback annotated on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_570gpu_instance_get_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_info(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24666
 * 
 * 
 * cpdef object gpu_instance_get_compute_instance_profile_info_v(intptr_t gpu_instance, unsigned int profile, unsigned int eng_profile):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around ``nvmlGpuInstanceGetComputeInstanceProfileInfo`` that accepts a versioned ``nvmlComputeInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_573gpu_instance_get_compute_instance_profile_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef `gpu_instance_get_compute_instance_profile_info_v`
 * (pyx line 24666): allocates a ComputeInstanceProfileInfo_v2 wrapper, stamps
 * its `version` field (struct size | version 2 in the top byte, per the NVML
 * versioned-struct convention), fills it via
 * nvmlGpuInstanceGetComputeInstanceProfileInfoV with the GIL released, raises
 * via check_status on failure, and returns the wrapper (new reference). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_profile_info_v(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile, unsigned int __pyx_v_eng_profile, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *__pyx_v_info_py = 0;
  nvmlComputeInstanceProfileInfo_v2_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_profile_info_v", 0);

  /* "cuda/bindings/_nvml.pyx":24679
 *     .. seealso:: `nvmlGpuInstanceGetComputeInstanceProfileInfoV`
 *     """
 *     cdef ComputeInstanceProfileInfo_v2 info_py = ComputeInstanceProfileInfo_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceProfileInfo_v2_t *info = <nvmlComputeInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlComputeInstanceProfileInfo_v2_t) | (2 << 24)
*/
  /* Vectorcall `ComputeInstanceProfileInfo_v2()` with zero arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24679, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24680
 *     """
 *     cdef ComputeInstanceProfileInfo_v2 info_py = ComputeInstanceProfileInfo_v2()
 *     cdef nvmlComputeInstanceProfileInfo_v2_t *info = <nvmlComputeInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     info.version = sizeof(nvmlComputeInstanceProfileInfo_v2_t) | (2 << 24)
 *     with nogil:
*/
  /* `info` borrows storage owned by `info_py`. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24680, __pyx_L1_error)
  __pyx_v_info = ((nvmlComputeInstanceProfileInfo_v2_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24681
 *     cdef ComputeInstanceProfileInfo_v2 info_py = ComputeInstanceProfileInfo_v2()
 *     cdef nvmlComputeInstanceProfileInfo_v2_t *info = <nvmlComputeInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlComputeInstanceProfileInfo_v2_t) | (2 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceProfileInfoV(<GpuInstance>gpu_instance, profile, eng_profile, info)
*/
  /* 0x2000000 == (2 << 24): version 2 tag folded with the struct size. */
  __pyx_v_info->version = ((sizeof(nvmlComputeInstanceProfileInfo_v2_t)) | 0x2000000);

  /* "cuda/bindings/_nvml.pyx":24682
 *     cdef nvmlComputeInstanceProfileInfo_v2_t *info = <nvmlComputeInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlComputeInstanceProfileInfo_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstanceProfileInfoV(<GpuInstance>gpu_instance, profile, eng_profile, info)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24683
 *     info.version = sizeof(nvmlComputeInstanceProfileInfo_v2_t) | (2 << 24)
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceProfileInfoV(<GpuInstance>gpu_instance, profile, eng_profile, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceProfileInfoV(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile, __pyx_v_eng_profile, __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24683, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24682
 *     cdef nvmlComputeInstanceProfileInfo_v2_t *info = <nvmlComputeInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlComputeInstanceProfileInfo_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstanceProfileInfoV(<GpuInstance>gpu_instance, profile, eng_profile, info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24684
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceProfileInfoV(<GpuInstance>gpu_instance, profile, eng_profile, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24684, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24685
 *         __status__ = nvmlGpuInstanceGetComputeInstanceProfileInfoV(<GpuInstance>gpu_instance, profile, eng_profile, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24666
 * 
 * 
 * cpdef object gpu_instance_get_compute_instance_profile_info_v(intptr_t gpu_instance, unsigned int profile, unsigned int eng_profile):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around ``nvmlGpuInstanceGetComputeInstanceProfileInfo`` that accepts a versioned ``nvmlComputeInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_profile_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, docstring, and PyMethodDef entry binding the fastcall
 * wrapper below as `gpu_instance_get_compute_instance_profile_info_v`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_573gpu_instance_get_compute_instance_profile_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_572gpu_instance_get_compute_instance_profile_info_v, "gpu_instance_get_compute_instance_profile_info_v(intptr_t gpu_instance, unsigned int profile, unsigned int eng_profile)\n\nVersioned wrapper around ``nvmlGpuInstanceGetComputeInstanceProfileInfo`` that accepts a versioned ``nvmlComputeInstanceProfileInfo_v2_t`` or later output structure.\n\nArgs:\n    gpu_instance (intptr_t): The identifier of the target GPU instance.\n    profile (unsigned int): One of the NVML_COMPUTE_INSTANCE_PROFILE_*.\n    eng_profile (unsigned int): One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*.\n\nReturns:\n    nvmlComputeInstanceProfileInfo_v2_t: Returns detailed profile information.\n\n.. seealso:: `nvmlGpuInstanceGetComputeInstanceProfileInfoV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_573gpu_instance_get_compute_instance_profile_info_v = {"gpu_instance_get_compute_instance_profile_info_v", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_573gpu_instance_get_compute_instance_profile_info_v, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_572gpu_instance_get_compute_instance_profile_info_v};
/* METH_FASTCALL wrapper for
 * `gpu_instance_get_compute_instance_profile_info_v(gpu_instance, profile,
 * eng_profile)`: unpacks three arguments and delegates to the
 * `__pyx_pf_..._572` shim.
 *
 * Fix: `if (unlikely(__pyx_kwds_len) < 0)` compared the 0/1 result of
 * `unlikely()` against 0 (always false), making the keyword-count error
 * check dead code.  The comparison now lives inside `unlikely()`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_573gpu_instance_get_compute_instance_profile_info_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_profile;
  unsigned int __pyx_v_eng_profile;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_profile_info_v (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_profile,&__pyx_mstate_global->__pyx_n_u_eng_profile,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Negative means kwargs enumeration failed with an exception set. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24666, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24666, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24666, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24666, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_compute_instance_profile_info_v", 0) < (0)) __PYX_ERR(0, 24666, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_profile_info_v", 1, 3, 3, i); __PYX_ERR(0, 24666, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24666, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24666, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24666, __pyx_L3_error)
    }
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24666, __pyx_L3_error)
    __pyx_v_profile = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24666, __pyx_L3_error)
    __pyx_v_eng_profile = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_eng_profile == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24666, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_profile_info_v", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24666, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_profile_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_572gpu_instance_get_compute_instance_profile_info_v(__pyx_self, __pyx_v_gpu_instance, __pyx_v_profile, __pyx_v_eng_profile);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level entry for the cpdef function
 * gpu_instance_get_compute_instance_profile_info_v: forwards the already
 * converted C arguments to the C implementation (the trailing 1 is
 * __pyx_skip_dispatch, bypassing Python-level dispatch) and returns the
 * resulting Python object, or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_572gpu_instance_get_compute_instance_profile_info_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile, unsigned int __pyx_v_eng_profile) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_profile_info_v", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception from the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_profile_info_v(__pyx_v_gpu_instance, __pyx_v_profile, __pyx_v_eng_profile, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_profile_info_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24688
 * 
 * 
 * cpdef unsigned int gpu_instance_get_compute_instance_remaining_capacity(intptr_t gpu_instance, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance profile capacity.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_575gpu_instance_get_compute_instance_remaining_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * gpu_instance_get_compute_instance_remaining_capacity.
 * Error convention is "except? 0": a return of 0 with a pending Python
 * exception signals failure. The GIL is released around the NVML call and
 * the returned status is validated via check_status(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_remaining_capacity(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24701
 *     """
 *     cdef unsigned int count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstanceRemainingCapacity(<GpuInstance>gpu_instance, profile_id, &count)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24702
 *     cdef unsigned int count
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceRemainingCapacity(<GpuInstance>gpu_instance, profile_id, &count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return count
*/
        /* A status equal to the internal-loading-error sentinel combined with a
         * pending exception (checked with the GIL re-acquired) indicates the
         * NVML symbol could not be loaded by the dispatcher. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceRemainingCapacity(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, (&__pyx_v_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24702, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24701
 *     """
 *     cdef unsigned int count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstanceRemainingCapacity(<GpuInstance>gpu_instance, profile_id, &count)
 *     check_status(__status__)
*/
      /* Both exits of the nogil region re-acquire the GIL before continuing. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24703
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceRemainingCapacity(<GpuInstance>gpu_instance, profile_id, &count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return count
 * 
*/
  /* check_status raises a Python exception (returning 1) on a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24703, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24704
 *         __status__ = nvmlGpuInstanceGetComputeInstanceRemainingCapacity(<GpuInstance>gpu_instance, profile_id, &count)
 *     check_status(__status__)
 *     return count             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24688
 * 
 * 
 * cpdef unsigned int gpu_instance_get_compute_instance_remaining_capacity(intptr_t gpu_instance, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance profile capacity.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_remaining_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-visible docstring, and method-table entry
 * (METH_FASTCALL|METH_KEYWORDS calling convention) for
 * gpu_instance_get_compute_instance_remaining_capacity. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_575gpu_instance_get_compute_instance_remaining_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_574gpu_instance_get_compute_instance_remaining_capacity, "gpu_instance_get_compute_instance_remaining_capacity(intptr_t gpu_instance, unsigned int profile_id) -> unsigned int\n\nGet compute instance profile capacity.\n\nArgs:\n    gpu_instance (intptr_t): The identifier of the target GPU instance.\n    profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``.\n\nReturns:\n    unsigned int: Returns remaining instance count for the profile ID.\n\n.. seealso:: `nvmlGpuInstanceGetComputeInstanceRemainingCapacity`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_575gpu_instance_get_compute_instance_remaining_capacity = {"gpu_instance_get_compute_instance_remaining_capacity", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_575gpu_instance_get_compute_instance_remaining_capacity, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_574gpu_instance_get_compute_instance_remaining_capacity};
/* Python wrapper for gpu_instance_get_compute_instance_remaining_capacity.
 * Unpacks the (gpu_instance, profile_id) positional/keyword arguments,
 * converts them to C types, and delegates to the def-level implementation.
 * Argument references held in values[] are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_575gpu_instance_get_compute_instance_remaining_capacity(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_remaining_capacity (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was "unlikely(__pyx_kwds_len) < 0", which is always false because
     * unlikely() expands to __builtin_expect(!!(x), 0) and therefore yields
     * only 0 or 1 — a negative (error) result from __Pyx_NumKwargs_FASTCALL
     * was silently ignored. The comparison must be inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24688, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional args, then let
       * __Pyx_ParseKeywords fill the remaining slots and verify all
       * required arguments are present. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24688, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_compute_instance_remaining_capacity", 0) < (0)) __PYX_ERR(0, 24688, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_remaining_capacity", 1, 2, 2, i); __PYX_ERR(0, 24688, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly 2 args, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24688, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24688, __pyx_L3_error)
    }
    /* Convert Python objects to C argument types; -1 results require an
     * explicit PyErr_Occurred() check since -1 is also a legal value. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24688, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24688, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_remaining_capacity", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24688, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_remaining_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_574gpu_instance_get_compute_instance_remaining_capacity(__pyx_self, __pyx_v_gpu_instance, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level entry for gpu_instance_get_compute_instance_remaining_capacity:
 * calls the C implementation (skip_dispatch=1) and boxes the unsigned int
 * result as a Python int. Because the C function uses the "except? 0"
 * convention, a 0 return only signals an error when a Python exception is
 * also pending. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_574gpu_instance_get_compute_instance_remaining_capacity(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_remaining_capacity", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_remaining_capacity(__pyx_v_gpu_instance, __pyx_v_profile_id, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24688, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_remaining_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24707
 * 
 * 
 * cpdef object gpu_instance_get_compute_instance_possible_placements(intptr_t gpu_instance, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get compute instance placements.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_577gpu_instance_get_compute_instance_possible_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * gpu_instance_get_compute_instance_possible_placements.
 * Two-pass NVML query: the first call (NULL buffer) obtains the required
 * placement count, a ComputeInstancePlacement wrapper of that size is then
 * constructed, and a second call fills the wrapper's underlying buffer.
 * Both NVML calls run with the GIL released. Returns the wrapper object,
 * or NULL with a pending exception on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_possible_placements(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v_placements = 0;
  nvmlComputeInstancePlacement_t *__pyx_v_placements_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  size_t __pyx_t_7;
  intptr_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_possible_placements", 0);

  /* "cuda/bindings/_nvml.pyx":24716
 *     .. seealso:: `nvmlGpuInstanceGetComputeInstancePossiblePlacements`
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, NULL, <unsigned int*>count)
*/
  /* count is a 1-element array so its address can be passed to NVML. */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":24717
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24718
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, NULL, <unsigned int*>count)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0])
*/
        /* First pass: NULL buffer — only query the required element count. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstancePossiblePlacements(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, NULL, ((unsigned int *)__pyx_v_count)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24718, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24717
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24719
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0])
 *     cdef nvmlComputeInstancePlacement_t *placements_ptr = <nvmlComputeInstancePlacement_t *><intptr_t>(placements._get_ptr())
*/
  /* check_status_size: like check_status, but tolerant of the size-query
   * status returned by the first (NULL-buffer) pass — presumably
   * NVML_ERROR_INSUFFICIENT_SIZE; confirm against its definition. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24719, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24720
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, NULL, <unsigned int*>count)
 *     check_status_size(__status__)
 *     cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstancePlacement_t *placements_ptr = <nvmlComputeInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:
*/
  /* Construct the Python-level result wrapper sized to hold count[0] entries. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 24720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24720, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __pyx_v_placements = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":24721
 *     check_status_size(__status__)
 *     cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0])
 *     cdef nvmlComputeInstancePlacement_t *placements_ptr = <nvmlComputeInstancePlacement_t *><intptr_t>(placements._get_ptr())             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return placements
*/
  /* _get_ptr() exposes the wrapper's internal buffer as an integer address. */
  __pyx_t_8 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v_placements->__pyx_vtab)->_get_ptr(__pyx_v_placements); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24721, __pyx_L1_error)
  __pyx_v_placements_ptr = ((nvmlComputeInstancePlacement_t *)((intptr_t)__pyx_t_8));

  /* "cuda/bindings/_nvml.pyx":24722
 *     cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0])
 *     cdef nvmlComputeInstancePlacement_t *placements_ptr = <nvmlComputeInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return placements
 *     with nogil:
*/
  /* Nothing to fetch: return the empty wrapper without a second NVML call. */
  __pyx_t_9 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_9) {

    /* "cuda/bindings/_nvml.pyx":24723
 *     cdef nvmlComputeInstancePlacement_t *placements_ptr = <nvmlComputeInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:
 *         return placements             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, placements_ptr, <unsigned int*>count)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_placements);
    __pyx_r = ((PyObject *)__pyx_v_placements);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":24722
 *     cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0])
 *     cdef nvmlComputeInstancePlacement_t *placements_ptr = <nvmlComputeInstancePlacement_t *><intptr_t>(placements._get_ptr())
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return placements
 *     with nogil:
*/
  }

  /* "cuda/bindings/_nvml.pyx":24724
 *     if count[0] == 0:
 *         return placements
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24725
 *         return placements
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, placements_ptr, <unsigned int*>count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return placements
*/
        /* Second pass: fill the allocated buffer with the placements. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstancePossiblePlacements(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, __pyx_v_placements_ptr, ((unsigned int *)__pyx_v_count)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24725, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":24724
 *     if count[0] == 0:
 *         return placements
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24726
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return placements
 * 
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 24726, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24727
 *         __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(<GpuInstance>gpu_instance, profile_id, placements_ptr, <unsigned int*>count)
 *     check_status(__status__)
 *     return placements             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_placements);
  __pyx_r = ((PyObject *)__pyx_v_placements);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24707
 * 
 * 
 * cpdef object gpu_instance_get_compute_instance_possible_placements(intptr_t gpu_instance, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get compute instance placements.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_possible_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_placements);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-visible docstring, and method-table entry
 * (METH_FASTCALL|METH_KEYWORDS calling convention) for
 * gpu_instance_get_compute_instance_possible_placements. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_577gpu_instance_get_compute_instance_possible_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_576gpu_instance_get_compute_instance_possible_placements, "gpu_instance_get_compute_instance_possible_placements(intptr_t gpu_instance, unsigned int profile_id)\n\nGet compute instance placements.\n\nArgs:\n    gpu_instance (intptr_t): The identifier of the target GPU instance.\n    profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``.\n\n.. seealso:: `nvmlGpuInstanceGetComputeInstancePossiblePlacements`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_577gpu_instance_get_compute_instance_possible_placements = {"gpu_instance_get_compute_instance_possible_placements", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_577gpu_instance_get_compute_instance_possible_placements, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_576gpu_instance_get_compute_instance_possible_placements};
/* Python wrapper for gpu_instance_get_compute_instance_possible_placements.
 * Unpacks the (gpu_instance, profile_id) positional/keyword arguments,
 * converts them to C types, and delegates to the def-level implementation.
 * Argument references held in values[] are released on every exit path. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_577gpu_instance_get_compute_instance_possible_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_possible_placements (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was "unlikely(__pyx_kwds_len) < 0", which is always false because
     * unlikely() expands to __builtin_expect(!!(x), 0) and therefore yields
     * only 0 or 1 — a negative (error) result from __Pyx_NumKwargs_FASTCALL
     * was silently ignored. The comparison must be inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24707, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect the positional args, then let
       * __Pyx_ParseKeywords fill the remaining slots and verify all
       * required arguments are present. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24707, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24707, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_compute_instance_possible_placements", 0) < (0)) __PYX_ERR(0, 24707, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_possible_placements", 1, 2, 2, i); __PYX_ERR(0, 24707, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly 2 args, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24707, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24707, __pyx_L3_error)
    }
    /* Convert Python objects to C argument types; -1 results require an
     * explicit PyErr_Occurred() check since -1 is also a legal value. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24707, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24707, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_possible_placements", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24707, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_possible_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_576gpu_instance_get_compute_instance_possible_placements(__pyx_self, __pyx_v_gpu_instance, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* def-level entry for gpu_instance_get_compute_instance_possible_placements:
 * forwards the converted C arguments to the C implementation
 * (skip_dispatch=1) and returns the resulting Python object, or NULL with a
 * traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_576gpu_instance_get_compute_instance_possible_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_possible_placements", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a pending Python exception from the C implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_possible_placements(__pyx_v_gpu_instance, __pyx_v_profile_id, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24707, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_possible_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24730
 * 
 * 
 * cpdef intptr_t gpu_instance_create_compute_instance(intptr_t gpu_instance, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Create compute instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_579gpu_instance_create_compute_instance(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef gpu_instance_create_compute_instance()
 * ("except? 0" semantics: returns 0 with a Python exception set on error).
 * Releases the GIL, calls nvmlGpuInstanceCreateComputeInstance, re-acquires
 * the GIL, converts a bad status to a Python exception via check_status(),
 * and returns the new compute-instance handle cast to intptr_t. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_ComputeInstance __pyx_v_compute_instance;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24743
 *     """
 *     cdef ComputeInstance compute_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceCreateComputeInstance(<GpuInstance>gpu_instance, profile_id, &compute_instance)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24744
 *     cdef ComputeInstance compute_instance
 *     with nogil:
 *         __status__ = nvmlGpuInstanceCreateComputeInstance(<GpuInstance>gpu_instance, profile_id, &compute_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>compute_instance
 */
        /* The loading-error sentinel plus a live Python exception is treated as
         * a failure to resolve/call the NVML symbol (presumably set by the
         * cy_nvml loader — confirm in cy_nvml) and is propagated immediately. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstance(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, (&__pyx_v_compute_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24744, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24743
 *     """
 *     cdef ComputeInstance compute_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceCreateComputeInstance(<GpuInstance>gpu_instance, profile_id, &compute_instance)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil block: re-acquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24745
 *     with nogil:
 *         __status__ = nvmlGpuInstanceCreateComputeInstance(<GpuInstance>gpu_instance, profile_id, &compute_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>compute_instance
 * 
 */
  /* check_status() returns 1 after raising a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24745, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24746
 *         __status__ = nvmlGpuInstanceCreateComputeInstance(<GpuInstance>gpu_instance, profile_id, &compute_instance)
 *     check_status(__status__)
 *     return <intptr_t>compute_instance             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_compute_instance);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24730
 * 
 * 
 * cpdef intptr_t gpu_instance_create_compute_instance(intptr_t gpu_instance, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Create compute instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_create_compute_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * gpu_instance_create_compute_instance(gpu_instance, profile_id).
 * Unpacks exactly two positional-or-keyword arguments, converts them to
 * intptr_t / unsigned int, and delegates to the __pyx_pf_..._578 impl.
 * Fix vs. generated code: the keyword-count error check used
 * `unlikely(__pyx_kwds_len) < 0`, which expands to
 * `__builtin_expect(!!(len), 0) < 0` — always false — so a negative
 * (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored.
 * Corrected to `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_579gpu_instance_create_compute_instance(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_578gpu_instance_create_compute_instance, "gpu_instance_create_compute_instance(intptr_t gpu_instance, unsigned int profile_id) -> intptr_t\n\nCreate compute instance.\n\nArgs:\n    gpu_instance (intptr_t): The identifier of the target GPU instance.\n    profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``.\n\nReturns:\n    intptr_t: Returns the compute instance handle.\n\n.. seealso:: `nvmlGpuInstanceCreateComputeInstance`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_579gpu_instance_create_compute_instance = {"gpu_instance_create_compute_instance", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_579gpu_instance_create_compute_instance, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_578gpu_instance_create_compute_instance};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_579gpu_instance_create_compute_instance(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_create_compute_instance (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesization was `unlikely(__pyx_kwds_len) < 0` (never true). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24730, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: pick up any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24730, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24730, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_create_compute_instance", 0) < (0)) __PYX_ERR(0, 24730, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_create_compute_instance", 1, 2, 2, i); __PYX_ERR(0, 24730, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly two arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24730, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24730, __pyx_L3_error)
    }
    /* PyLong_AsSsize_t is emitted for intptr_t (assumes Py_ssize_t and intptr_t
     * share a representation on supported platforms — TODO confirm). */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24730, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24730, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_create_compute_instance", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24730, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_create_compute_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_578gpu_instance_create_compute_instance(__pyx_self, __pyx_v_gpu_instance, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible delegate for cpdef gpu_instance_create_compute_instance().
 * Calls the C-level implementation (skip_dispatch=1); 0 with a pending
 * exception signals failure ("except? 0"), otherwise the intptr_t handle
 * is boxed into a Python int and returned. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_578gpu_instance_create_compute_instance(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_create_compute_instance", 0);
  __Pyx_XDECREF(__pyx_r);
  /* "except? 0": 0 is only an error if an exception is actually pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance(__pyx_v_gpu_instance, __pyx_v_profile_id, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24730, __pyx_L1_error)
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_create_compute_instance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24749
 * 
 * 
 * cpdef intptr_t gpu_instance_create_compute_instance_with_placement(intptr_t gpu_instance, unsigned int profile_id, intptr_t placement) except? 0:             # <<<<<<<<<<<<<<
 *     """Create compute instance with the specified placement.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_581gpu_instance_create_compute_instance_with_placement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef
 * gpu_instance_create_compute_instance_with_placement() ("except? 0").
 * Same structure as gpu_instance_create_compute_instance(): drop the GIL,
 * call nvmlGpuInstanceCreateComputeInstanceWithPlacement (the `placement`
 * intptr_t is reinterpreted as a const nvmlComputeInstancePlacement_t*),
 * re-acquire the GIL, raise via check_status() on a bad status, and return
 * the new handle as intptr_t. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance_with_placement(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, intptr_t __pyx_v_placement, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_ComputeInstance __pyx_v_compute_instance;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24763
 *     """
 *     cdef ComputeInstance compute_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(<GpuInstance>gpu_instance, profile_id, <const nvmlComputeInstancePlacement_t*>placement, &compute_instance)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24764
 *     cdef ComputeInstance compute_instance
 *     with nogil:
 *         __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(<GpuInstance>gpu_instance, profile_id, <const nvmlComputeInstancePlacement_t*>placement, &compute_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>compute_instance
 */
        /* Loading-error sentinel + pending exception => symbol resolution
         * failed (presumably in the cy_nvml loader — confirm); propagate. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstanceWithPlacement(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, ((nvmlComputeInstancePlacement_t const *)__pyx_v_placement), (&__pyx_v_compute_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24764, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24763
 *     """
 *     cdef ComputeInstance compute_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(<GpuInstance>gpu_instance, profile_id, <const nvmlComputeInstancePlacement_t*>placement, &compute_instance)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil block: re-acquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24765
 *     with nogil:
 *         __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(<GpuInstance>gpu_instance, profile_id, <const nvmlComputeInstancePlacement_t*>placement, &compute_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>compute_instance
 * 
 */
  /* check_status() returns 1 after raising for a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24765, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24766
 *         __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(<GpuInstance>gpu_instance, profile_id, <const nvmlComputeInstancePlacement_t*>placement, &compute_instance)
 *     check_status(__status__)
 *     return <intptr_t>compute_instance             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_compute_instance);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24749
 * 
 * 
 * cpdef intptr_t gpu_instance_create_compute_instance_with_placement(intptr_t gpu_instance, unsigned int profile_id, intptr_t placement) except? 0:             # <<<<<<<<<<<<<<
 *     """Create compute instance with the specified placement.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_create_compute_instance_with_placement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * gpu_instance_create_compute_instance_with_placement(gpu_instance,
 * profile_id, placement). Unpacks exactly three positional-or-keyword
 * arguments and delegates to the __pyx_pf_..._580 implementation.
 * Fix vs. generated code: `unlikely(__pyx_kwds_len) < 0` compares the 0/1
 * result of __builtin_expect against 0 and can never fire; corrected to
 * `unlikely(__pyx_kwds_len < 0)` so a failing __Pyx_NumKwargs_FASTCALL
 * is reported. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_581gpu_instance_create_compute_instance_with_placement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_580gpu_instance_create_compute_instance_with_placement, "gpu_instance_create_compute_instance_with_placement(intptr_t gpu_instance, unsigned int profile_id, intptr_t placement) -> intptr_t\n\nCreate compute instance with the specified placement.\n\nArgs:\n    gpu_instance (intptr_t): The identifier of the target GPU instance.\n    profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``.\n    placement (intptr_t): The requested placement. See ``nvmlGpuInstanceGetComputeInstancePossiblePlacements``.\n\nReturns:\n    intptr_t: Returns the compute instance handle.\n\n.. seealso:: `nvmlGpuInstanceCreateComputeInstanceWithPlacement`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_581gpu_instance_create_compute_instance_with_placement = {"gpu_instance_create_compute_instance_with_placement", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_581gpu_instance_create_compute_instance_with_placement, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_580gpu_instance_create_compute_instance_with_placement};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_581gpu_instance_create_compute_instance_with_placement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_profile_id;
  intptr_t __pyx_v_placement;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_create_compute_instance_with_placement (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_profile_id,&__pyx_mstate_global->__pyx_n_u_placement,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesization was `unlikely(__pyx_kwds_len) < 0` (never true). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24749, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: pick up any positional args, then merge keywords. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24749, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_create_compute_instance_with_placement", 0) < (0)) __PYX_ERR(0, 24749, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_create_compute_instance_with_placement", 1, 3, 3, i); __PYX_ERR(0, 24749, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly three arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24749, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24749, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24749, __pyx_L3_error)
    }
    /* PyLong_AsSsize_t is emitted for intptr_t (assumes Py_ssize_t and intptr_t
     * share a representation on supported platforms — TODO confirm). */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24749, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24749, __pyx_L3_error)
    __pyx_v_placement = PyLong_AsSsize_t(values[2]); if (unlikely((__pyx_v_placement == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24749, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_create_compute_instance_with_placement", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24749, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_create_compute_instance_with_placement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_580gpu_instance_create_compute_instance_with_placement(__pyx_self, __pyx_v_gpu_instance, __pyx_v_profile_id, __pyx_v_placement);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible delegate for cpdef
 * gpu_instance_create_compute_instance_with_placement().
 * Calls the C-level implementation (skip_dispatch=1); 0 with a pending
 * exception signals failure ("except? 0"), otherwise the intptr_t handle
 * is boxed into a Python int and returned. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_580gpu_instance_create_compute_instance_with_placement(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, intptr_t __pyx_v_placement) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  intptr_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_create_compute_instance_with_placement", 0);
  __Pyx_XDECREF(__pyx_r);
  /* "except? 0": 0 is only an error if an exception is actually pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance_with_placement(__pyx_v_gpu_instance, __pyx_v_profile_id, __pyx_v_placement, 1); if (unlikely(__pyx_t_1 == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24749, __pyx_L1_error)
  __pyx_t_2 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_create_compute_instance_with_placement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24769
 * 
 * 
 * cpdef compute_instance_destroy(intptr_t compute_instance):             # <<<<<<<<<<<<<<
 *     """Destroy compute instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_583compute_instance_destroy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef compute_instance_destroy().
 * Releases the GIL, calls nvmlComputeInstanceDestroy on the given handle,
 * re-acquires the GIL, raises via check_status() on a bad status, and
 * returns None on success (NULL with an exception set on failure). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_compute_instance_destroy(intptr_t __pyx_v_compute_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("compute_instance_destroy", 0);

  /* "cuda/bindings/_nvml.pyx":24777
 *     .. seealso:: `nvmlComputeInstanceDestroy`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlComputeInstanceDestroy(<ComputeInstance>compute_instance)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24778
 *     """
 *     with nogil:
 *         __status__ = nvmlComputeInstanceDestroy(<ComputeInstance>compute_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* Loading-error sentinel + pending exception => symbol resolution
         * failed (presumably in the cy_nvml loader — confirm); propagate. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceDestroy(((__pyx_t_4cuda_8bindings_5_nvml_ComputeInstance)__pyx_v_compute_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24778, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24777
 *     .. seealso:: `nvmlComputeInstanceDestroy`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlComputeInstanceDestroy(<ComputeInstance>compute_instance)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* Error inside the nogil block: re-acquire the GIL before unwinding. */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24779
 *     with nogil:
 *         __status__ = nvmlComputeInstanceDestroy(<ComputeInstance>compute_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status() returns 1 after raising for a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24779, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24769
 * 
 * 
 * cpdef compute_instance_destroy(intptr_t compute_instance):             # <<<<<<<<<<<<<<
 *     """Destroy compute instance.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.compute_instance_destroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * compute_instance_destroy(compute_instance). Unpacks exactly one
 * positional-or-keyword argument, converts it to intptr_t, and delegates
 * to the __pyx_pf_..._582 implementation.
 * Fix vs. generated code: `unlikely(__pyx_kwds_len) < 0` compares the 0/1
 * result of __builtin_expect against 0 and can never fire; corrected to
 * `unlikely(__pyx_kwds_len < 0)` so a failing __Pyx_NumKwargs_FASTCALL
 * is reported. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_583compute_instance_destroy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_582compute_instance_destroy, "compute_instance_destroy(intptr_t compute_instance)\n\nDestroy compute instance.\n\nArgs:\n    compute_instance (intptr_t): The compute instance handle.\n\n.. seealso:: `nvmlComputeInstanceDestroy`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_583compute_instance_destroy = {"compute_instance_destroy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_583compute_instance_destroy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_582compute_instance_destroy};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_583compute_instance_destroy(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_compute_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("compute_instance_destroy (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_compute_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fixed: parenthesization was `unlikely(__pyx_kwds_len) < 0` (never true). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24769, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: pick up the positional arg if given, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24769, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "compute_instance_destroy", 0) < (0)) __PYX_ERR(0, 24769, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("compute_instance_destroy", 1, 1, 1, i); __PYX_ERR(0, 24769, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly one argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24769, __pyx_L3_error)
    }
    /* PyLong_AsSsize_t is emitted for intptr_t (assumes Py_ssize_t and intptr_t
     * share a representation on supported platforms — TODO confirm). */
    __pyx_v_compute_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_compute_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24769, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("compute_instance_destroy", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24769, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.compute_instance_destroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_582compute_instance_destroy(__pyx_self, __pyx_v_compute_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible delegate for cpdef compute_instance_destroy().
 * Forwards to the C-level implementation (skip_dispatch=1) and returns
 * its new PyObject* reference (None on success), or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_582compute_instance_destroy(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_compute_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("compute_instance_destroy", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Call the C implementation; a NULL result means an exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_compute_instance_destroy(__pyx_v_compute_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24769, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.compute_instance_destroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24782
 * 
 * 
 * cpdef intptr_t gpu_instance_get_compute_instance_by_id(intptr_t gpu_instance, unsigned int id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance for given instance ID.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_585gpu_instance_get_compute_instance_by_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `gpu_instance_get_compute_instance_by_id`
 * (_nvml.pyx:24782). Releases the GIL, calls
 * nvmlGpuInstanceGetComputeInstanceById, re-acquires the GIL, raises via
 * check_status on a bad status, and returns the handle as intptr_t.
 * Error protocol is Cython "except? 0": returning 0 signals an error only
 * when a Python exception is also pending. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_by_id(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_ComputeInstance __pyx_v_compute_instance;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  /* Error-position slots written by __PYX_ERR and read by AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24795
 *     """
 *     cdef ComputeInstance compute_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstanceById(<GpuInstance>gpu_instance, id, &compute_instance)
 *     check_status(__status__)
*/
  /* GIL is released across the NVML call; both the normal and error paths
   * below restore the thread state before continuing. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24796
 *     cdef ComputeInstance compute_instance
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceById(<GpuInstance>gpu_instance, id, &compute_instance)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>compute_instance
*/
        /* The loader shim signals failure with a sentinel status; the error
         * branch only fires if a Python exception is actually pending
         * (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceById(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_id, (&__pyx_v_compute_instance)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24796, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24795
 *     """
 *     cdef ComputeInstance compute_instance
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstanceById(<GpuInstance>gpu_instance, id, &compute_instance)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24797
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstanceById(<GpuInstance>gpu_instance, id, &compute_instance)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>compute_instance
 * 
*/
  /* check_status raises the mapped Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24797, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24798
 *         __status__ = nvmlGpuInstanceGetComputeInstanceById(<GpuInstance>gpu_instance, id, &compute_instance)
 *     check_status(__status__)
 *     return <intptr_t>compute_instance             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = ((intptr_t)__pyx_v_compute_instance);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24782
 * 
 * 
 * cpdef intptr_t gpu_instance_get_compute_instance_by_id(intptr_t gpu_instance, unsigned int id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance for given instance ID.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_by_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Re-declaration of the wrapper defined immediately below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_585gpu_instance_get_compute_instance_by_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* __doc__ string exposed on the Python-level function. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_584gpu_instance_get_compute_instance_by_id, "gpu_instance_get_compute_instance_by_id(intptr_t gpu_instance, unsigned int id) -> intptr_t\n\nGet compute instance for given instance ID.\n\nArgs:\n    gpu_instance (intptr_t): The identifier of the target GPU instance.\n    id (unsigned int): The compute instance ID.\n\nReturns:\n    intptr_t: Returns compute instance.\n\n.. seealso:: `nvmlGpuInstanceGetComputeInstanceById`");
/* Method-table entry; registered during module init with the fastcall
 * (+ keywords) calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_585gpu_instance_get_compute_instance_by_id = {"gpu_instance_get_compute_instance_by_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_585gpu_instance_get_compute_instance_by_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_584gpu_instance_get_compute_instance_by_id};
/* Python wrapper for gpu_instance_get_compute_instance_by_id: unpacks
 * (gpu_instance: intptr_t, id: unsigned int) from positional/keyword
 * arguments and delegates to the _pf_ body.
 *
 * FIX (review): the keyword-count error check previously read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0), whose
 * value is always 0 or 1 and never negative, so a -1 error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored with an exception left
 * pending. The comparison is moved inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_585gpu_instance_get_compute_instance_by_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_by_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: if (unlikely(__pyx_kwds_len) < 0) — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24782, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24782, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24782, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_compute_instance_by_id", 0) < (0)) __PYX_ERR(0, 24782, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_by_id", 1, 2, 2, i); __PYX_ERR(0, 24782, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24782, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24782, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported ABIs). */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24782, __pyx_L3_error)
    __pyx_v_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24782, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instance_by_id", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24782, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_by_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_584gpu_instance_get_compute_instance_by_id(__pyx_self, __pyx_v_gpu_instance, __pyx_v_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_584gpu_instance_get_compute_instance_by_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_id) {
  /* Python-level body: call the cpdef C implementation directly
   * (skip_dispatch=1) and box the returned handle as a Python int. */
  PyObject *retval = NULL;
  intptr_t handle;
  PyObject *boxed = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;            /* written by __PYX_ERR for tracebacks */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instance_by_id", 0);
  __Pyx_XDECREF(retval);
  handle = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_by_id(__pyx_v_gpu_instance, __pyx_v_id, 1);
  /* "except? 0": zero is an error only when an exception is pending. */
  if (unlikely(handle == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24782, exit_error)
  boxed = PyLong_FromSsize_t(handle);
  if (unlikely(!boxed)) __PYX_ERR(0, 24782, exit_error)
  __Pyx_GOTREF(boxed);
  retval = boxed;
  boxed = 0;
  goto exit_ok;

  /* function exit code */
  exit_error:;
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instance_by_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
  exit_ok:;
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":24801
 * 
 * 
 * cpdef object compute_instance_get_info_v2(intptr_t compute_instance):             # <<<<<<<<<<<<<<
 *     """Get compute instance information.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_587compute_instance_get_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `compute_instance_get_info_v2` (_nvml.pyx:24801).
 * Allocates a ComputeInstanceInfo Python wrapper object, lets NVML fill its
 * internal nvmlComputeInstanceInfo_t buffer with the GIL released, raises via
 * check_status on failure, and returns the wrapper (NULL on error). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_compute_instance_get_info_v2(intptr_t __pyx_v_compute_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *__pyx_v_info_py = 0;
  nvmlComputeInstanceInfo_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  /* Error-position slots written by __PYX_ERR and read by AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("compute_instance_get_info_v2", 0);

  /* "cuda/bindings/_nvml.pyx":24812
 *     .. seealso:: `nvmlComputeInstanceGetInfo_v2`
 *     """
 *     cdef ComputeInstanceInfo info_py = ComputeInstanceInfo()             # <<<<<<<<<<<<<<
 *     cdef nvmlComputeInstanceInfo_t *info = <nvmlComputeInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
*/
  /* Instantiate ComputeInstanceInfo() via a zero-argument fastcall of the
   * extension type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24812, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24813
 *     """
 *     cdef ComputeInstanceInfo info_py = ComputeInstanceInfo()
 *     cdef nvmlComputeInstanceInfo_t *info = <nvmlComputeInstanceInfo_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlComputeInstanceGetInfo_v2(<ComputeInstance>compute_instance, info)
*/
  /* _get_ptr() returns the address of the wrapper's internal struct as an
   * intptr_t; the buffer stays valid because info_py is kept alive below. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24813, __pyx_L1_error)
  __pyx_v_info = ((nvmlComputeInstanceInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24814
 *     cdef ComputeInstanceInfo info_py = ComputeInstanceInfo()
 *     cdef nvmlComputeInstanceInfo_t *info = <nvmlComputeInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlComputeInstanceGetInfo_v2(<ComputeInstance>compute_instance, info)
 *     check_status(__status__)
*/
  /* GIL released across the NVML call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24815
 *     cdef nvmlComputeInstanceInfo_t *info = <nvmlComputeInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlComputeInstanceGetInfo_v2(<ComputeInstance>compute_instance, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceGetInfo_v2(((__pyx_t_4cuda_8bindings_5_nvml_ComputeInstance)__pyx_v_compute_instance), __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24815, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24814
 *     cdef ComputeInstanceInfo info_py = ComputeInstanceInfo()
 *     cdef nvmlComputeInstanceInfo_t *info = <nvmlComputeInstanceInfo_t *><intptr_t>(info_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlComputeInstanceGetInfo_v2(<ComputeInstance>compute_instance, info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24816
 *     with nogil:
 *         __status__ = nvmlComputeInstanceGetInfo_v2(<ComputeInstance>compute_instance, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24816, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24817
 *         __status__ = nvmlComputeInstanceGetInfo_v2(<ComputeInstance>compute_instance, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Transfer a new reference to the caller; the local reference to info_py
   * is dropped in the shared exit path below. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24801
 * 
 * 
 * cpdef object compute_instance_get_info_v2(intptr_t compute_instance):             # <<<<<<<<<<<<<<
 *     """Get compute instance information.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.compute_instance_get_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Re-declaration of the wrapper defined immediately below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_587compute_instance_get_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* __doc__ string exposed on the Python-level function. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_586compute_instance_get_info_v2, "compute_instance_get_info_v2(intptr_t compute_instance)\n\nGet compute instance information.\n\nArgs:\n    compute_instance (intptr_t): The compute instance handle.\n\nReturns:\n    nvmlComputeInstanceInfo_t: Return compute instance information.\n\n.. seealso:: `nvmlComputeInstanceGetInfo_v2`");
/* Method-table entry; registered during module init with the fastcall
 * (+ keywords) calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_587compute_instance_get_info_v2 = {"compute_instance_get_info_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_587compute_instance_get_info_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_586compute_instance_get_info_v2};
/* Python wrapper for compute_instance_get_info_v2: unpacks the single
 * (compute_instance: intptr_t) argument and delegates to the _pf_ body.
 *
 * FIX (review): the keyword-count error check previously read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0), whose
 * value is always 0 or 1 and never negative, so a -1 error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored with an exception left
 * pending. The comparison is moved inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_587compute_instance_get_info_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_compute_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("compute_instance_get_info_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_compute_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: if (unlikely(__pyx_kwds_len) < 0) — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24801, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24801, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "compute_instance_get_info_v2", 0) < (0)) __PYX_ERR(0, 24801, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("compute_instance_get_info_v2", 1, 1, 1, i); __PYX_ERR(0, 24801, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24801, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported ABIs). */
    __pyx_v_compute_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_compute_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24801, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("compute_instance_get_info_v2", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24801, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.compute_instance_get_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_586compute_instance_get_info_v2(__pyx_self, __pyx_v_compute_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_586compute_instance_get_info_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_compute_instance) {
  /* Python-level body of compute_instance_get_info_v2: delegate straight to
   * the C implementation (skip_dispatch=1) and return the resulting object. */
  PyObject *retval = NULL;
  PyObject *call_result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;            /* written by __PYX_ERR for tracebacks */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("compute_instance_get_info_v2", 0);
  __Pyx_XDECREF(retval);
  call_result = __pyx_f_4cuda_8bindings_5_nvml_compute_instance_get_info_v2(__pyx_v_compute_instance, 1);
  if (unlikely(!call_result)) __PYX_ERR(0, 24801, exit_error)
  __Pyx_GOTREF(call_result);
  retval = call_result;
  call_result = 0;
  goto exit_ok;

  /* function exit code */
  exit_error:;
  __Pyx_XDECREF(call_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.compute_instance_get_info_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
  exit_ok:;
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":24820
 * 
 * 
 * cpdef unsigned int device_is_mig_device_handle(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Test if the given handle refers to a MIG device.
 * 
*/

/* Forward declaration of the Python wrapper defined further below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_589device_is_mig_device_handle(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef `device_is_mig_device_handle` (_nvml.pyx:24820).
 * Releases the GIL, calls nvmlDeviceIsMigDeviceHandle, re-acquires the GIL,
 * raises via check_status on a bad status, and returns the MIG flag.
 * Error protocol is Cython "except? 0": returning 0 signals an error only
 * when a Python exception is also pending. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_is_mig_device_handle(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_is_mig_device;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  /* Error-position slots written by __PYX_ERR and read by AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24832
 *     """
 *     cdef unsigned int is_mig_device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceIsMigDeviceHandle(<Device>device, &is_mig_device)
 *     check_status(__status__)
*/
  /* GIL released across the NVML call; restored on both exit paths below. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24833
 *     cdef unsigned int is_mig_device
 *     with nogil:
 *         __status__ = nvmlDeviceIsMigDeviceHandle(<Device>device, &is_mig_device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return is_mig_device
*/
        /* The loader shim signals failure with a sentinel status; the error
         * branch only fires if a Python exception is actually pending. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceIsMigDeviceHandle(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_is_mig_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24833, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24832
 *     """
 *     cdef unsigned int is_mig_device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceIsMigDeviceHandle(<Device>device, &is_mig_device)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24834
 *     with nogil:
 *         __status__ = nvmlDeviceIsMigDeviceHandle(<Device>device, &is_mig_device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return is_mig_device
 * 
*/
  /* check_status raises the mapped Python exception for non-success codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24834, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24835
 *         __status__ = nvmlDeviceIsMigDeviceHandle(<Device>device, &is_mig_device)
 *     check_status(__status__)
 *     return is_mig_device             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_is_mig_device;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24820
 * 
 * 
 * cpdef unsigned int device_is_mig_device_handle(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Test if the given handle refers to a MIG device.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_is_mig_device_handle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Re-declaration of the wrapper defined immediately below. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_589device_is_mig_device_handle(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* __doc__ string exposed on the Python-level function. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_588device_is_mig_device_handle, "device_is_mig_device_handle(intptr_t device) -> unsigned int\n\nTest if the given handle refers to a MIG device.\n\nArgs:\n    device (intptr_t): NVML handle to test.\n\nReturns:\n    unsigned int: True when handle refers to a MIG device.\n\n.. seealso:: `nvmlDeviceIsMigDeviceHandle`");
/* Method-table entry; registered during module init with the fastcall
 * (+ keywords) calling convention. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_589device_is_mig_device_handle = {"device_is_mig_device_handle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_589device_is_mig_device_handle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_588device_is_mig_device_handle};
/* Python wrapper for device_is_mig_device_handle: unpacks the single
 * (device: intptr_t) argument and delegates to the _pf_ body.
 *
 * FIX (review): the keyword-count error check previously read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With GCC/Clang, unlikely(x) expands to __builtin_expect(!!(x), 0), whose
 * value is always 0 or 1 and never negative, so a -1 error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored with an exception left
 * pending. The comparison is moved inside unlikely(). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_589device_is_mig_device_handle(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_is_mig_device_handle (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* was: if (unlikely(__pyx_kwds_len) < 0) — dead check; see header note */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24820, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24820, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_is_mig_device_handle", 0) < (0)) __PYX_ERR(0, 24820, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_is_mig_device_handle", 1, 1, 1, i); __PYX_ERR(0, 24820, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24820, __pyx_L3_error)
    }
    /* intptr_t is converted via Py_ssize_t (same width on supported ABIs). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24820, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_is_mig_device_handle", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24820, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_is_mig_device_handle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_588device_is_mig_device_handle(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_588device_is_mig_device_handle(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Python-level body: call the cpdef C implementation directly
   * (skip_dispatch=1) and box the returned flag as a Python int. */
  PyObject *retval = NULL;
  unsigned int mig_flag;
  PyObject *boxed = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;            /* written by __PYX_ERR for tracebacks */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_is_mig_device_handle", 0);
  __Pyx_XDECREF(retval);
  mig_flag = __pyx_f_4cuda_8bindings_5_nvml_device_is_mig_device_handle(__pyx_v_device, 1);
  /* "except? 0": zero is an error only when an exception is pending. */
  if (unlikely(mig_flag == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24820, exit_error)
  boxed = __Pyx_PyLong_From_unsigned_int(mig_flag);
  if (unlikely(!boxed)) __PYX_ERR(0, 24820, exit_error)
  __Pyx_GOTREF(boxed);
  retval = boxed;
  boxed = 0;
  goto exit_ok;

  /* function exit code */
  exit_error:;
  __Pyx_XDECREF(boxed);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_is_mig_device_handle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  retval = NULL;
  exit_ok:;
  __Pyx_XGIVEREF(retval);
  __Pyx_RefNannyFinishContext();
  return retval;
}

/* "cuda/bindings/_nvml.pyx":24838
 * 
 * 
 * cpdef unsigned int device_get_gpu_instance_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instance ID for the given MIG device handle.
 * 
*/

/* Forward declaration of the Python wrapper for device_get_gpu_instance_id
 * (its definition follows the C implementation below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_591device_get_gpu_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_id(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24850
 *     """
 *     cdef unsigned int id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceId(<Device>device, &id)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24851
 *     cdef unsigned int id
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceId(<Device>device, &id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return id
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceId(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24851, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24850
 *     """
 *     cdef unsigned int id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceId(<Device>device, &id)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24852
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceId(<Device>device, &id)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return id
 * 
*/
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24852, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24853
 *         __status__ = nvmlDeviceGetGpuInstanceId(<Device>device, &id)
 *     check_status(__status__)
 *     return id             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_r = __pyx_v_id;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24838
 * 
 * 
 * cpdef unsigned int device_get_gpu_instance_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instance ID for the given MIG device handle.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_591device_get_gpu_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_590device_get_gpu_instance_id, "device_get_gpu_instance_id(intptr_t device) -> unsigned int\n\nGet GPU instance ID for the given MIG device handle.\n\nArgs:\n    device (intptr_t): Target MIG device handle.\n\nReturns:\n    unsigned int: GPU instance ID.\n\n.. seealso:: `nvmlDeviceGetGpuInstanceId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_591device_get_gpu_instance_id = {"device_get_gpu_instance_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_591device_get_gpu_instance_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_590device_get_gpu_instance_id};
/* Python entry point for `device_get_gpu_instance_id`: unpacks the single
 * `device` argument (positional or keyword) and forwards it to the
 * implementation shim.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_591device_get_gpu_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instance_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x) < 0`
     * normalizes x to 0/1 via __builtin_expect(!!(x), 0), so the -1 error
     * return of __Pyx_NumKwargs_FASTCALL could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24838, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24838, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instance_id", 0) < (0)) __PYX_ERR(0, 24838, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_id", 1, 1, 1, i); __PYX_ERR(0, 24838, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24838, __pyx_L3_error)
    }
    /* NOTE(review): handle is parsed via PyLong_AsSsize_t — assumes
     * Py_ssize_t and intptr_t have the same representation; confirm for
     * any platform where they differ. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24838, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24838, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_590device_get_gpu_instance_id(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level shim for cpdef `device_get_gpu_instance_id`: calls the
 * C-level implementation with __pyx_skip_dispatch=1 and boxes the
 * `unsigned int` result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_590device_get_gpu_instance_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_id(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24838, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24838, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24856
 * 
 * 
 * cpdef unsigned int device_get_compute_instance_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance ID for the given MIG device handle.
 * 
*/

/* Forward declaration of the Python wrapper (defined after this C function). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_593device_get_compute_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_compute_instance_id(intptr_t device) except? 0`:
 * releases the GIL, calls nvmlDeviceGetComputeInstanceId for the given MIG
 * device handle, raises on a non-success NVML status (via check_status) and
 * returns the compute instance ID.  On error, returns 0 with an exception set. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_instance_id(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_id;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24868
 *     """
 *     cdef unsigned int id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeInstanceId(<Device>device, &id)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call (`with nogil`). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24869
 *     cdef unsigned int id
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeInstanceId(<Device>device, &id)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return id
 */
        /* The sentinel status signals a symbol-loading failure; the pending
         * exception is then checked with the GIL temporarily re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeInstanceId(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_id)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24869, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24868
 *     """
 *     cdef unsigned int id
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetComputeInstanceId(<Device>device, &id)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24870
 *     with nogil:
 *         __status__ = nvmlDeviceGetComputeInstanceId(<Device>device, &id)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return id
 * 
 */
  /* check_status returns 1 (with an exception set) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24870, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24871
 *         __status__ = nvmlDeviceGetComputeInstanceId(<Device>device, &id)
 *     check_status(__status__)
 *     return id             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_id;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24856
 * 
 * 
 * cpdef unsigned int device_get_compute_instance_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance ID for the given MIG device handle.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_593device_get_compute_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_592device_get_compute_instance_id, "device_get_compute_instance_id(intptr_t device) -> unsigned int\n\nGet compute instance ID for the given MIG device handle.\n\nArgs:\n    device (intptr_t): Target MIG device handle.\n\nReturns:\n    unsigned int: Compute instance ID.\n\n.. seealso:: `nvmlDeviceGetComputeInstanceId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_593device_get_compute_instance_id = {"device_get_compute_instance_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_593device_get_compute_instance_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_592device_get_compute_instance_id};
/* Python entry point for `device_get_compute_instance_id`: unpacks the single
 * `device` argument (positional or keyword) and forwards it to the
 * implementation shim.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_593device_get_compute_instance_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_compute_instance_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x) < 0`
     * normalizes x to 0/1 via __builtin_expect(!!(x), 0), so the -1 error
     * return of __Pyx_NumKwargs_FASTCALL could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24856, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24856, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_compute_instance_id", 0) < (0)) __PYX_ERR(0, 24856, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_compute_instance_id", 1, 1, 1, i); __PYX_ERR(0, 24856, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24856, __pyx_L3_error)
    }
    /* NOTE(review): handle is parsed via PyLong_AsSsize_t — assumes
     * Py_ssize_t and intptr_t have the same representation; confirm for
     * any platform where they differ. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24856, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_compute_instance_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24856, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_592device_get_compute_instance_id(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level shim for cpdef `device_get_compute_instance_id`: calls the
 * C-level implementation with __pyx_skip_dispatch=1 and boxes the
 * `unsigned int` result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_592device_get_compute_instance_id(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_compute_instance_id", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_compute_instance_id(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24856, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_compute_instance_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24874
 * 
 * 
 * cpdef unsigned int device_get_max_mig_device_count(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the maximum number of MIG devices that can exist under a given parent NVML device.
 * 
*/

/* Forward declaration of the Python wrapper (defined after this C function). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_595device_get_max_mig_device_count(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef `device_get_max_mig_device_count(intptr_t device) except? 0`:
 * releases the GIL, calls nvmlDeviceGetMaxMigDeviceCount for the given parent
 * device handle, raises on a non-success NVML status (via check_status) and
 * returns the maximum MIG device count.  On error, returns 0 with an
 * exception set. */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_device_get_max_mig_device_count(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24886
 *     """
 *     cdef unsigned int count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxMigDeviceCount(<Device>device, &count)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call (`with nogil`). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24887
 *     cdef unsigned int count
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxMigDeviceCount(<Device>device, &count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return count
 */
        /* The sentinel status signals a symbol-loading failure; the pending
         * exception is then checked with the GIL temporarily re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxMigDeviceCount(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_count)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24887, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24886
 *     """
 *     cdef unsigned int count
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMaxMigDeviceCount(<Device>device, &count)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24888
 *     with nogil:
 *         __status__ = nvmlDeviceGetMaxMigDeviceCount(<Device>device, &count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return count
 * 
 */
  /* check_status returns 1 (with an exception set) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24888, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24889
 *         __status__ = nvmlDeviceGetMaxMigDeviceCount(<Device>device, &count)
 *     check_status(__status__)
 *     return count             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_count;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24874
 * 
 * 
 * cpdef unsigned int device_get_max_mig_device_count(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the maximum number of MIG devices that can exist under a given parent NVML device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_mig_device_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_595device_get_max_mig_device_count(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_594device_get_max_mig_device_count, "device_get_max_mig_device_count(intptr_t device) -> unsigned int\n\nGet the maximum number of MIG devices that can exist under a given parent NVML device.\n\nArgs:\n    device (intptr_t): Target device handle.\n\nReturns:\n    unsigned int: Count of MIG devices.\n\n.. seealso:: `nvmlDeviceGetMaxMigDeviceCount`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_595device_get_max_mig_device_count = {"device_get_max_mig_device_count", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_595device_get_max_mig_device_count, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_594device_get_max_mig_device_count};
/* Python entry point for `device_get_max_mig_device_count`: unpacks the single
 * `device` argument (positional or keyword) and forwards it to the
 * implementation shim.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_595device_get_max_mig_device_count(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_max_mig_device_count (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must be inside unlikely(); `unlikely(x) < 0`
     * normalizes x to 0/1 via __builtin_expect(!!(x), 0), so the -1 error
     * return of __Pyx_NumKwargs_FASTCALL could never be detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24874, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24874, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_max_mig_device_count", 0) < (0)) __PYX_ERR(0, 24874, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_max_mig_device_count", 1, 1, 1, i); __PYX_ERR(0, 24874, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24874, __pyx_L3_error)
    }
    /* NOTE(review): handle is parsed via PyLong_AsSsize_t — assumes
     * Py_ssize_t and intptr_t have the same representation; confirm for
     * any platform where they differ. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24874, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_max_mig_device_count", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24874, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_mig_device_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_594device_get_max_mig_device_count(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level shim for cpdef `device_get_max_mig_device_count`: calls the
 * C-level implementation with __pyx_skip_dispatch=1 and boxes the
 * `unsigned int` result into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_594device_get_max_mig_device_count(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_max_mig_device_count", 0);
  __Pyx_XDECREF(__pyx_r);
  /* `except? 0` convention: 0 is an error only if an exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_max_mig_device_count(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24874, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24874, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_max_mig_device_count", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24892
 * 
 * 
 * cpdef intptr_t device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Get MIG device handle for the given ind_ex under its parent NVML device.
 * 
*/

/* Forward declaration of the Python wrapper (defined after this C function). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_597device_get_mig_device_handle_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of cpdef
 * `device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0`:
 * releases the GIL, calls nvmlDeviceGetMigDeviceHandleByIndex on the parent
 * device handle, raises on a non-success NVML status (via check_status) and
 * returns the MIG device handle cast to intptr_t.  On error, returns 0 with
 * an exception set. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_mig_device_handle_by_index(intptr_t __pyx_v_device, unsigned int __pyx_v_ind_ex, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_mig_device;
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24905
 *     """
 *     cdef Device mig_device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMigDeviceHandleByIndex(<Device>device, ind_ex, &mig_device)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call (`with nogil`). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24906
 *     cdef Device mig_device
 *     with nogil:
 *         __status__ = nvmlDeviceGetMigDeviceHandleByIndex(<Device>device, ind_ex, &mig_device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>mig_device
 */
        /* The sentinel status signals a symbol-loading failure; the pending
         * exception is then checked with the GIL temporarily re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigDeviceHandleByIndex(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ind_ex, (&__pyx_v_mig_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24906, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24905
 *     """
 *     cdef Device mig_device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetMigDeviceHandleByIndex(<Device>device, ind_ex, &mig_device)
 *     check_status(__status__)
 */
      /* Re-acquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24907
 *     with nogil:
 *         __status__ = nvmlDeviceGetMigDeviceHandleByIndex(<Device>device, ind_ex, &mig_device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>mig_device
 * 
 */
  /* check_status returns 1 (with an exception set) for error statuses. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24907, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24908
 *         __status__ = nvmlDeviceGetMigDeviceHandleByIndex(<Device>device, ind_ex, &mig_device)
 *     check_status(__status__)
 *     return <intptr_t>mig_device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = ((intptr_t)__pyx_v_mig_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24892
 * 
 * 
 * cpdef intptr_t device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Get MIG device handle for the given ind_ex under its parent NVML device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mig_device_handle_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_597device_get_mig_device_handle_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_596device_get_mig_device_handle_by_index, "device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) -> intptr_t\n\nGet MIG device handle for the given ind_ex under its parent NVML device.\n\nArgs:\n    device (intptr_t): Reference to the parent GPU device handle.\n    ind_ex (unsigned int): Index of the MIG device.\n\nReturns:\n    intptr_t: Reference to the MIG device handle.\n\n.. seealso:: `nvmlDeviceGetMigDeviceHandleByIndex`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_597device_get_mig_device_handle_by_index = {"device_get_mig_device_handle_by_index", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_597device_get_mig_device_handle_by_index, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_596device_get_mig_device_handle_by_index};
/* Python-callable wrapper for device_get_mig_device_handle_by_index.
 * Unpacks (device, ind_ex) from either the fastcall vector or a classic
 * args tuple plus keywords, converts both to C scalars, and forwards to
 * the implementation shim __pyx_pf_..._596... .
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_597device_get_mig_device_handle_by_index(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_ind_ex;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_mig_device_handle_by_index (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_ind_ex,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); "unlikely(x) < 0" is
     * always false (unlikely() yields 0/1), which silently dropped the
     * error check on a failing __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24892, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional args first, then merge keyword args over them. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24892, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24892, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_mig_device_handle_by_index", 0) < (0)) __PYX_ERR(0, 24892, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_mig_device_handle_by_index", 1, 2, 2, i); __PYX_ERR(0, 24892, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24892, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24892, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24892, __pyx_L3_error)
    __pyx_v_ind_ex = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_ind_ex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24892, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_mig_device_handle_by_index", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24892, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any argument references gathered so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mig_device_handle_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_596device_get_mig_device_handle_by_index(__pyx_self, __pyx_v_device, __pyx_v_ind_ex);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_596device_get_mig_device_handle_by_index(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_ind_ex) {
  /* Boxing shim: run the C cpdef implementation directly (skip_dispatch=1)
   * and wrap the returned MIG handle in a Python int. */
  PyObject *result = NULL;
  intptr_t handle;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_mig_device_handle_by_index", 0);
  handle = __pyx_f_4cuda_8bindings_5_nvml_device_get_mig_device_handle_by_index(__pyx_v_device, __pyx_v_ind_ex, 1);
  /* "except? 0" contract: 0 only means failure when an exception is set. */
  if (unlikely(handle == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24892, __pyx_fail)
  result = PyLong_FromSsize_t(handle);
  if (unlikely(!result)) __PYX_ERR(0, 24892, __pyx_fail)
  __Pyx_GOTREF(result);
  goto __pyx_done;
  __pyx_fail:;
  __Pyx_XDECREF(result);
  result = NULL;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_mig_device_handle_by_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_done:;
  __Pyx_XGIVEREF(result);
  __Pyx_RefNannyFinishContext();
  return result;
}

/* "cuda/bindings/_nvml.pyx":24911
 * 
 * 
 * cpdef intptr_t device_get_device_handle_from_mig_device_handle(intptr_t mig_device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get parent device handle from a MIG device handle.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_599device_get_device_handle_from_mig_device_handle(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef function
 * cuda.bindings._nvml.device_get_device_handle_from_mig_device_handle
 * (declared "except? 0": returning 0 with a pending Python exception
 * signals failure).  Releases the GIL, calls
 * nvmlDeviceGetDeviceHandleFromMigDeviceHandle to resolve the parent GPU
 * device handle for `mig_device`, re-acquires the GIL, and raises via
 * check_status() if NVML reported an error. */
static intptr_t __pyx_f_4cuda_8bindings_5_nvml_device_get_device_handle_from_mig_device_handle(intptr_t __pyx_v_mig_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  __pyx_t_4cuda_8bindings_5_nvml_Device __pyx_v_device;  /* out-param: parent device handle */
  nvmlReturn_t __pyx_v___status__;
  intptr_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24923
 *     """
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(<Device>mig_device, &device)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();  /* release the GIL around the NVML call */
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24924
 *     cdef Device device
 *     with nogil:
 *         __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(<Device>mig_device, &device)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return <intptr_t>device
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR presumably marks a failed
         * lazy load of the NVML entry point — TODO confirm; it is only
         * treated as a Python error when an exception is actually pending
         * (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDeviceHandleFromMigDeviceHandle(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_mig_device), (&__pyx_v_device)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24924, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24923
 *     """
 *     cdef Device device
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(<Device>mig_device, &device)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* re-acquire the GIL */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* re-acquire the GIL before raising */
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24925
 *     with nogil:
 *         __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(<Device>mig_device, &device)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return <intptr_t>device
 * 
 */
  /* check_status() raises the mapped Python exception for any non-success
   * NVML status; it returns 1 on error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24925, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24926
 *         __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(<Device>mig_device, &device)
 *     check_status(__status__)
 *     return <intptr_t>device             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Hand the opaque NVML handle back to callers as a plain integer. */
  __pyx_r = ((intptr_t)__pyx_v_device);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24911
 * 
 * 
 * cpdef intptr_t device_get_device_handle_from_mig_device_handle(intptr_t mig_device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get parent device handle from a MIG device handle.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_device_handle_from_mig_device_handle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-visible docstring, and method-table entry for
 * device_get_device_handle_from_mig_device_handle (fastcall calling
 * convention with keyword-argument support). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_599device_get_device_handle_from_mig_device_handle(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_598device_get_device_handle_from_mig_device_handle, "device_get_device_handle_from_mig_device_handle(intptr_t mig_device) -> intptr_t\n\nGet parent device handle from a MIG device handle.\n\nArgs:\n    mig_device (intptr_t): MIG device handle.\n\nReturns:\n    intptr_t: Device handle.\n\n.. seealso:: `nvmlDeviceGetDeviceHandleFromMigDeviceHandle`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_599device_get_device_handle_from_mig_device_handle = {"device_get_device_handle_from_mig_device_handle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_599device_get_device_handle_from_mig_device_handle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_598device_get_device_handle_from_mig_device_handle};
/* Python-callable wrapper for device_get_device_handle_from_mig_device_handle.
 * Unpacks the single (mig_device) argument from the fastcall vector or a
 * classic args tuple plus keywords, converts it to intptr_t, and forwards
 * to the implementation shim __pyx_pf_..._598... .
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_599device_get_device_handle_from_mig_device_handle(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_mig_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_device_handle_from_mig_device_handle (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_mig_device,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); "unlikely(x) < 0" is
     * always false (unlikely() yields 0/1), which silently dropped the
     * error check on a failing __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24911, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional args first, then merge keyword args over them. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24911, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_device_handle_from_mig_device_handle", 0) < (0)) __PYX_ERR(0, 24911, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_device_handle_from_mig_device_handle", 1, 1, 1, i); __PYX_ERR(0, 24911, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24911, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_mig_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_mig_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24911, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_device_handle_from_mig_device_handle", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24911, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any argument references gathered so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_device_handle_from_mig_device_handle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_598device_get_device_handle_from_mig_device_handle(__pyx_self, __pyx_v_mig_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_598device_get_device_handle_from_mig_device_handle(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_mig_device) {
  /* Boxing shim: run the C cpdef implementation directly (skip_dispatch=1)
   * and wrap the returned parent-device handle in a Python int. */
  PyObject *result = NULL;
  intptr_t handle;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_device_handle_from_mig_device_handle", 0);
  handle = __pyx_f_4cuda_8bindings_5_nvml_device_get_device_handle_from_mig_device_handle(__pyx_v_mig_device, 1);
  /* "except? 0" contract: 0 only means failure when an exception is set. */
  if (unlikely(handle == ((intptr_t)0) && PyErr_Occurred())) __PYX_ERR(0, 24911, __pyx_fail)
  result = PyLong_FromSsize_t(handle);
  if (unlikely(!result)) __PYX_ERR(0, 24911, __pyx_fail)
  __Pyx_GOTREF(result);
  goto __pyx_done;
  __pyx_fail:;
  __Pyx_XDECREF(result);
  result = NULL;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_device_handle_from_mig_device_handle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_done:;
  __Pyx_XGIVEREF(result);
  __Pyx_RefNannyFinishContext();
  return result;
}

/* "cuda/bindings/_nvml.pyx":24929
 * 
 * 
 * cpdef gpm_sample_get(intptr_t device, intptr_t gpm_sample):             # <<<<<<<<<<<<<<
 *     """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer. After two samples are gathered, you can call nvmlGpmMetricGet on those samples to retrive metrics.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_601gpm_sample_get(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef function cuda.bindings._nvml.gpm_sample_get.
 * Releases the GIL, calls nvmlGpmSampleGet(device, gpm_sample),
 * re-acquires the GIL, and raises via check_status() if NVML reported an
 * error.  Returns None on success, NULL (0) with an exception on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_sample_get(intptr_t __pyx_v_device, intptr_t __pyx_v_gpm_sample, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_sample_get", 0);

  /* "cuda/bindings/_nvml.pyx":24938
 *     .. seealso:: `nvmlGpmSampleGet`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmSampleGet(<Device>device, <GpmSample>gpm_sample)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();  /* release the GIL around the NVML call */
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24939
 *     """
 *     with nogil:
 *         __status__ = nvmlGpmSampleGet(<Device>device, <GpmSample>gpm_sample)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR presumably marks a failed
         * lazy load of the NVML entry point — TODO confirm; it is only
         * treated as a Python error when an exception is actually pending
         * (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSampleGet(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml_GpmSample)__pyx_v_gpm_sample)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24939, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24938
 *     .. seealso:: `nvmlGpmSampleGet`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmSampleGet(<Device>device, <GpmSample>gpm_sample)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* re-acquire the GIL */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* re-acquire the GIL before raising */
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24940
 *     with nogil:
 *         __status__ = nvmlGpmSampleGet(<Device>device, <GpmSample>gpm_sample)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status() raises the mapped Python exception for any non-success
   * NVML status; it returns 1 on error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24940, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24929
 * 
 * 
 * cpdef gpm_sample_get(intptr_t device, intptr_t gpm_sample):             # <<<<<<<<<<<<<<
 *     """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer. After two samples are gathered, you can call nvmlGpmMetricGet on those samples to retrive metrics.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_sample_get", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-visible docstring, and method-table entry for
 * gpm_sample_get (fastcall calling convention with keyword-argument
 * support). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_601gpm_sample_get(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_600gpm_sample_get, "gpm_sample_get(intptr_t device, intptr_t gpm_sample)\n\nRead a sample of GPM metrics into the provided ``gpm_sample`` buffer. After two samples are gathered, you can call nvmlGpmMetricGet on those samples to retrive metrics.\n\nArgs:\n    device (intptr_t): Device to get samples for.\n    gpm_sample (intptr_t): Buffer to read samples into.\n\n.. seealso:: `nvmlGpmSampleGet`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_601gpm_sample_get = {"gpm_sample_get", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_601gpm_sample_get, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_600gpm_sample_get};
/* Python-callable wrapper for gpm_sample_get.
 * Unpacks (device, gpm_sample) from the fastcall vector or a classic args
 * tuple plus keywords, converts both to intptr_t, and forwards to the
 * implementation shim __pyx_pf_..._600... .
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_601gpm_sample_get(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_gpm_sample;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpm_sample_get (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_gpm_sample,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); "unlikely(x) < 0" is
     * always false (unlikely() yields 0/1), which silently dropped the
     * error check on a failing __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24929, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional args first, then merge keyword args over them. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24929, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24929, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpm_sample_get", 0) < (0)) __PYX_ERR(0, 24929, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpm_sample_get", 1, 2, 2, i); __PYX_ERR(0, 24929, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24929, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24929, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24929, __pyx_L3_error)
    __pyx_v_gpm_sample = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_gpm_sample == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24929, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpm_sample_get", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24929, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any argument references gathered so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_sample_get", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_600gpm_sample_get(__pyx_self, __pyx_v_device, __pyx_v_gpm_sample);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_600gpm_sample_get(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_gpm_sample) {
  /* Dispatch shim: forward to the C cpdef implementation (skip_dispatch=1),
   * which returns None on success or NULL with an exception set. */
  PyObject *result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpm_sample_get", 0);
  result = __pyx_f_4cuda_8bindings_5_nvml_gpm_sample_get(__pyx_v_device, __pyx_v_gpm_sample, 1);
  if (unlikely(!result)) __PYX_ERR(0, 24929, __pyx_fail)
  __Pyx_GOTREF(result);
  goto __pyx_done;
  __pyx_fail:;
  __Pyx_XDECREF(result);
  result = NULL;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_sample_get", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_done:;
  __Pyx_XGIVEREF(result);
  __Pyx_RefNannyFinishContext();
  return result;
}

/* "cuda/bindings/_nvml.pyx":24943
 * 
 * 
 * cpdef gpm_mig_sample_get(intptr_t device, unsigned int gpu_instance_id, intptr_t gpm_sample):             # <<<<<<<<<<<<<<
 *     """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer for a MIG GPU Instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_603gpm_mig_sample_get(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level body of the cpdef function cuda.bindings._nvml.gpm_mig_sample_get.
 * Releases the GIL, calls nvmlGpmMigSampleGet(device, gpu_instance_id,
 * gpm_sample), re-acquires the GIL, and raises via check_status() if NVML
 * reported an error.  Returns None on success, NULL (0) with an exception
 * on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_mig_sample_get(intptr_t __pyx_v_device, unsigned int __pyx_v_gpu_instance_id, intptr_t __pyx_v_gpm_sample, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_mig_sample_get", 0);

  /* "cuda/bindings/_nvml.pyx":24953
 *     .. seealso:: `nvmlGpmMigSampleGet`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmMigSampleGet(<Device>device, gpu_instance_id, <GpmSample>gpm_sample)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();  /* release the GIL around the NVML call */
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24954
 *     """
 *     with nogil:
 *         __status__ = nvmlGpmMigSampleGet(<Device>device, gpu_instance_id, <GpmSample>gpm_sample)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR presumably marks a failed
         * lazy load of the NVML entry point — TODO confirm; it is only
         * treated as a Python error when an exception is actually pending
         * (checked with the GIL temporarily re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmMigSampleGet(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_gpu_instance_id, ((__pyx_t_4cuda_8bindings_5_nvml_GpmSample)__pyx_v_gpm_sample)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24954, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24953
 *     .. seealso:: `nvmlGpmMigSampleGet`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmMigSampleGet(<Device>device, gpu_instance_id, <GpmSample>gpm_sample)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* re-acquire the GIL */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);  /* re-acquire the GIL before raising */
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24955
 *     with nogil:
 *         __status__ = nvmlGpmMigSampleGet(<Device>device, gpu_instance_id, <GpmSample>gpm_sample)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status() raises the mapped Python exception for any non-success
   * NVML status; it returns 1 on error. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24955, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24943
 * 
 * 
 * cpdef gpm_mig_sample_get(intptr_t device, unsigned int gpu_instance_id, intptr_t gpm_sample):             # <<<<<<<<<<<<<<
 *     """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer for a MIG GPU Instance.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_mig_sample_get", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Forward declaration, Python-visible docstring, and method-table entry for
 * gpm_mig_sample_get (fastcall calling convention with keyword-argument
 * support). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_603gpm_mig_sample_get(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_602gpm_mig_sample_get, "gpm_mig_sample_get(intptr_t device, unsigned int gpu_instance_id, intptr_t gpm_sample)\n\nRead a sample of GPM metrics into the provided ``gpm_sample`` buffer for a MIG GPU Instance.\n\nArgs:\n    device (intptr_t): Device to get samples for.\n    gpu_instance_id (unsigned int): MIG GPU Instance ID.\n    gpm_sample (intptr_t): Buffer to read samples into.\n\n.. seealso:: `nvmlGpmMigSampleGet`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_603gpm_mig_sample_get = {"gpm_mig_sample_get", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_603gpm_mig_sample_get, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_602gpm_mig_sample_get};
/* Python-callable wrapper for gpm_mig_sample_get.
 * Unpacks (device, gpu_instance_id, gpm_sample) from the fastcall vector
 * or a classic args tuple plus keywords, converts them to C scalars, and
 * forwards to the implementation shim __pyx_pf_..._602... .
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_603gpm_mig_sample_get(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_gpu_instance_id;
  intptr_t __pyx_v_gpm_sample;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpm_mig_sample_get (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_gpu_instance_id,&__pyx_mstate_global->__pyx_n_u_gpm_sample,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely(); "unlikely(x) < 0" is
     * always false (unlikely() yields 0/1), which silently dropped the
     * error check on a failing __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24943, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Collect positional args first, then merge keyword args over them. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24943, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24943, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24943, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpm_mig_sample_get", 0) < (0)) __PYX_ERR(0, 24943, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpm_mig_sample_get", 1, 3, 3, i); __PYX_ERR(0, 24943, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24943, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24943, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24943, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24943, __pyx_L3_error)
    __pyx_v_gpu_instance_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_gpu_instance_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24943, __pyx_L3_error)
    __pyx_v_gpm_sample = PyLong_AsSsize_t(values[2]); if (unlikely((__pyx_v_gpm_sample == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24943, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpm_mig_sample_get", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24943, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Unpacking failed: release any argument references gathered so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_mig_sample_get", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_602gpm_mig_sample_get(__pyx_self, __pyx_v_device, __pyx_v_gpu_instance_id, __pyx_v_gpm_sample);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for gpm_mig_sample_get(device, gpu_instance_id, gpm_sample):
 * thin shim that forwards the already-converted C arguments to the cpdef C
 * implementation (skip_dispatch=1, so no Python-level override lookup) and
 * returns the resulting object, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_602gpm_mig_sample_get(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_gpu_instance_id, intptr_t __pyx_v_gpm_sample) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  /* __pyx_lineno/__pyx_filename/__pyx_clineno are filled in by __PYX_ERR so
   * the traceback below can report the failing .pyx location. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_mig_sample_get", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the cpdef C implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpm_mig_sample_get(__pyx_v_device, __pyx_v_gpu_instance_id, __pyx_v_gpm_sample, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_mig_sample_get", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24958
 * 
 * 
 * cpdef object gpm_query_device_support(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Indicate whether the supplied device supports GPM.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_605gpm_query_device_support(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C (cpdef) implementation of gpm_query_device_support(device).
 * Allocates a GpmSupport Python wrapper object, obtains the address of its
 * embedded nvmlGpmSupport_t via _get_ptr(), sets version=1, then calls
 * nvmlGpmQueryDeviceSupport with the GIL released.  A non-success NVML
 * status raises via check_status(); on success the populated wrapper object
 * is returned (new reference).  Returns NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_query_device_support(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *__pyx_v_gpm_support_py = 0;
  nvmlGpmSupport_t *__pyx_v_gpm_support;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_query_device_support", 0);

  /* "cuda/bindings/_nvml.pyx":24969
 *     .. seealso:: `nvmlGpmQueryDeviceSupport`
 *     """
 *     cdef GpmSupport gpm_support_py = GpmSupport()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpmSupport_t *gpm_support = <nvmlGpmSupport_t *><intptr_t>(gpm_support_py._get_ptr())
 *     gpm_support.version = 1
 */
  /* Vectorcall GpmSupport() with zero arguments to build the result holder. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24969, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_gpm_support_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":24970
 *     """
 *     cdef GpmSupport gpm_support_py = GpmSupport()
 *     cdef nvmlGpmSupport_t *gpm_support = <nvmlGpmSupport_t *><intptr_t>(gpm_support_py._get_ptr())             # <<<<<<<<<<<<<<
 *     gpm_support.version = 1
 *     with nogil:
 */
  /* The raw struct pointer is owned by gpm_support_py, which stays alive for
   * the duration of this function (decref'd only at exit). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpmSupport *)__pyx_v_gpm_support_py->__pyx_vtab)->_get_ptr(__pyx_v_gpm_support_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24970, __pyx_L1_error)
  __pyx_v_gpm_support = ((nvmlGpmSupport_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":24971
 *     cdef GpmSupport gpm_support_py = GpmSupport()
 *     cdef nvmlGpmSupport_t *gpm_support = <nvmlGpmSupport_t *><intptr_t>(gpm_support_py._get_ptr())
 *     gpm_support.version = 1             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpmQueryDeviceSupport(<Device>device, gpm_support)
 */
  __pyx_v_gpm_support->version = 1;

  /* "cuda/bindings/_nvml.pyx":24972
 *     cdef nvmlGpmSupport_t *gpm_support = <nvmlGpmSupport_t *><intptr_t>(gpm_support_py._get_ptr())
 *     gpm_support.version = 1
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmQueryDeviceSupport(<Device>device, gpm_support)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24973
 *     gpm_support.version = 1
 *     with nogil:
 *         __status__ = nvmlGpmQueryDeviceSupport(<Device>device, gpm_support)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return gpm_support_py
 */
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR indicates the
         * binding itself failed to load the symbol; an exception may already
         * be pending, checked with the GIL re-acquired. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryDeviceSupport(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_gpm_support); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24973, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":24972
 *     cdef nvmlGpmSupport_t *gpm_support = <nvmlGpmSupport_t *><intptr_t>(gpm_support_py._get_ptr())
 *     gpm_support.version = 1
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmQueryDeviceSupport(<Device>device, gpm_support)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24974
 *     with nogil:
 *         __status__ = nvmlGpmQueryDeviceSupport(<Device>device, gpm_support)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return gpm_support_py
 * 
 */
  /* check_status returns 1 (with an exception set) for a non-success status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 24974, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24975
 *         __status__ = nvmlGpmQueryDeviceSupport(<Device>device, gpm_support)
 *     check_status(__status__)
 *     return gpm_support_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_gpm_support_py);
  __pyx_r = ((PyObject *)__pyx_v_gpm_support_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24958
 * 
 * 
 * cpdef object gpm_query_device_support(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Indicate whether the supplied device supports GPM.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_query_device_support", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_gpm_support_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_605gpm_query_device_support(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_604gpm_query_device_support, "gpm_query_device_support(intptr_t device)\n\nIndicate whether the supplied device supports GPM.\n\nArgs:\n    device (intptr_t): NVML device to query for.\n\nReturns:\n    nvmlGpmSupport_t: Structure to indicate GPM support ``nvmlGpmSupport_t``. Indicates GPM support per system for the supplied device.\n\n.. seealso:: `nvmlGpmQueryDeviceSupport`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_605gpm_query_device_support = {"gpm_query_device_support", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_605gpm_query_device_support, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_604gpm_query_device_support};
/* METH_FASTCALL|METH_KEYWORDS wrapper for gpm_query_device_support(device):
 * unpacks exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and delegates to the ..._604... body function.  Returns the
 * result object, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_605gpm_query_device_support(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpm_query_device_support (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`; unlikely() collapses its
     * operand to 0/1 via !!, so that comparison was always false and an
     * error return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24958, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24958, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpm_query_device_support", 0) < (0)) __PYX_ERR(0, 24958, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpm_query_device_support", 1, 1, 1, i); __PYX_ERR(0, 24958, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24958, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24958, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpm_query_device_support", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24958, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_query_device_support", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_604gpm_query_device_support(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for gpm_query_device_support(device): forwards the
 * converted C argument to the cpdef C implementation (skip_dispatch=1) and
 * returns its result, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_604gpm_query_device_support(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  /* Filled in by __PYX_ERR for traceback reporting. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_query_device_support", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpm_query_device_support(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_query_device_support", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24978
 * 
 * 
 * cpdef unsigned int gpm_query_if_streaming_enabled(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPM stream state.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_607gpm_query_if_streaming_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C (cpdef) implementation of gpm_query_if_streaming_enabled(device).
 * Calls nvmlGpmQueryIfStreamingEnabled with the GIL released and returns the
 * reported stream state.  Declared `except? 0` in the .pyx: on error it
 * returns 0 with a Python exception set, so callers must disambiguate a real
 * 0 result via PyErr_Occurred(). */
static unsigned int __pyx_f_4cuda_8bindings_5_nvml_gpm_query_if_streaming_enabled(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_state;
  nvmlReturn_t __pyx_v___status__;
  unsigned int __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "cuda/bindings/_nvml.pyx":24990
 *     """
 *     cdef unsigned int state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmQueryIfStreamingEnabled(<Device>device, &state)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":24991
 *     cdef unsigned int state
 *     with nogil:
 *         __status__ = nvmlGpmQueryIfStreamingEnabled(<Device>device, &state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return state
 */
        /* Sentinel status means the binding failed to load the NVML symbol;
         * a pending exception is checked with the GIL temporarily taken. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryIfStreamingEnabled(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24991, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":24990
 *     """
 *     cdef unsigned int state
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmQueryIfStreamingEnabled(<Device>device, &state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":24992
 *     with nogil:
 *         __status__ = nvmlGpmQueryIfStreamingEnabled(<Device>device, &state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return state
 * 
 */
  /* check_status returns 1 (with an exception set) for a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 24992, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24993
 *         __status__ = nvmlGpmQueryIfStreamingEnabled(<Device>device, &state)
 *     check_status(__status__)
 *     return state             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_state;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":24978
 * 
 * 
 * cpdef unsigned int gpm_query_if_streaming_enabled(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPM stream state.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_query_if_streaming_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_607gpm_query_if_streaming_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_606gpm_query_if_streaming_enabled, "gpm_query_if_streaming_enabled(intptr_t device) -> unsigned int\n\nGet GPM stream state.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    unsigned int: Returns GPM stream state NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED.\n\n.. seealso:: `nvmlGpmQueryIfStreamingEnabled`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_607gpm_query_if_streaming_enabled = {"gpm_query_if_streaming_enabled", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_607gpm_query_if_streaming_enabled, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_606gpm_query_if_streaming_enabled};
/* METH_FASTCALL|METH_KEYWORDS wrapper for gpm_query_if_streaming_enabled(device):
 * unpacks exactly one positional-or-keyword argument ("device"), converts it
 * to intptr_t, and delegates to the ..._606... body function.  Returns the
 * result object, or NULL with an exception set on failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_607gpm_query_if_streaming_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpm_query_if_streaming_enabled (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`; unlikely() collapses its
     * operand to 0/1 via !!, so that comparison was always false and an
     * error return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24978, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24978, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpm_query_if_streaming_enabled", 0) < (0)) __PYX_ERR(0, 24978, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpm_query_if_streaming_enabled", 1, 1, 1, i); __PYX_ERR(0, 24978, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24978, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24978, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpm_query_if_streaming_enabled", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24978, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_query_if_streaming_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_606gpm_query_if_streaming_enabled(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for gpm_query_if_streaming_enabled(device): calls the
 * `except? 0` cpdef C implementation (a 0 return plus a pending exception
 * signals failure), boxes the unsigned int result into a Python int, and
 * returns it, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_606gpm_query_if_streaming_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_query_if_streaming_enabled", 0);
  __Pyx_XDECREF(__pyx_r);
  /* 0 is a legal result, so the error path also requires PyErr_Occurred(). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpm_query_if_streaming_enabled(__pyx_v_device, 1); if (unlikely(__pyx_t_1 == ((unsigned int)0) && PyErr_Occurred())) __PYX_ERR(0, 24978, __pyx_L1_error)
  __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_query_if_streaming_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":24996
 * 
 * 
 * cpdef gpm_set_streaming_enabled(intptr_t device, unsigned int state):             # <<<<<<<<<<<<<<
 *     """Set GPM stream state.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_609gpm_set_streaming_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C (cpdef) implementation of gpm_set_streaming_enabled(device, state).
 * Calls nvmlGpmSetStreamingEnabled with the GIL released, raises on a
 * non-success NVML status, and returns None on success (NULL with an
 * exception set on failure). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpm_set_streaming_enabled(intptr_t __pyx_v_device, unsigned int __pyx_v_state, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_set_streaming_enabled", 0);

  /* "cuda/bindings/_nvml.pyx":25005
 *     .. seealso:: `nvmlGpmSetStreamingEnabled`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmSetStreamingEnabled(<Device>device, state)
 *     check_status(__status__)
 */
  /* Release the GIL around the blocking NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25006
 *     """
 *     with nogil:
 *         __status__ = nvmlGpmSetStreamingEnabled(<Device>device, state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* Sentinel status means the binding failed to load the NVML symbol;
         * a pending exception is checked with the GIL temporarily taken. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSetStreamingEnabled(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_state); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25006, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25005
 *     .. seealso:: `nvmlGpmSetStreamingEnabled`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpmSetStreamingEnabled(<Device>device, state)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25007
 *     with nogil:
 *         __status__ = nvmlGpmSetStreamingEnabled(<Device>device, state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 (with an exception set) for a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25007, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":24996
 * 
 * 
 * cpdef gpm_set_streaming_enabled(intptr_t device, unsigned int state):             # <<<<<<<<<<<<<<
 *     """Set GPM stream state.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_set_streaming_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_609gpm_set_streaming_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_608gpm_set_streaming_enabled, "gpm_set_streaming_enabled(intptr_t device, unsigned int state)\n\nSet GPM stream state.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    state (unsigned int): GPM stream state, NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED.\n\n.. seealso:: `nvmlGpmSetStreamingEnabled`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_609gpm_set_streaming_enabled = {"gpm_set_streaming_enabled", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_609gpm_set_streaming_enabled, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_608gpm_set_streaming_enabled};
/* METH_FASTCALL|METH_KEYWORDS wrapper for gpm_set_streaming_enabled(device, state):
 * unpacks exactly two positional-or-keyword arguments ("device", "state"),
 * converts them to intptr_t / unsigned int, and delegates to the ..._608...
 * body function.  Returns the result object, or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_609gpm_set_streaming_enabled(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_state;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpm_set_streaming_enabled (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`; unlikely() collapses its
     * operand to 0/1 via !!, so that comparison was always false and an
     * error return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 24996, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24996, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24996, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpm_set_streaming_enabled", 0) < (0)) __PYX_ERR(0, 24996, __pyx_L3_error)
      /* Verify every required argument was supplied either way. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpm_set_streaming_enabled", 1, 2, 2, i); __PYX_ERR(0, 24996, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24996, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24996, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 24996, __pyx_L3_error)
    __pyx_v_state = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_state == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24996, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpm_set_streaming_enabled", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24996, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_set_streaming_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_608gpm_set_streaming_enabled(__pyx_self, __pyx_v_device, __pyx_v_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for gpm_set_streaming_enabled(device, state): forwards
 * the converted C arguments to the cpdef C implementation (skip_dispatch=1)
 * and returns its result (None on success), or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_608gpm_set_streaming_enabled(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  /* Filled in by __PYX_ERR for traceback reporting. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpm_set_streaming_enabled", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpm_set_streaming_enabled(__pyx_v_device, __pyx_v_state, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpm_set_streaming_enabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25010
 * 
 * 
 * cpdef object device_get_capabilities(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get device capabilities.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_611device_get_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_get_capabilities(intptr_t device).
 * Creates a DeviceCapabilities_v1 Python wrapper object, obtains the address
 * of its embedded nvmlDeviceCapabilities_t via _get_ptr(), stamps the struct's
 * version field with sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)
 * (presumably NVML's size|version<<24 versioned-struct encoding — confirm
 * against nvml.h), calls nvmlDeviceGetCapabilities with the GIL released,
 * raises via check_status() on a non-success return, and returns the filled
 * wrapper object. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_capabilities(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *__pyx_v_caps_py = 0;
  nvmlDeviceCapabilities_t *__pyx_v_caps;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_capabilities", 0);

  /* "cuda/bindings/_nvml.pyx":25021
 *     .. seealso:: `nvmlDeviceGetCapabilities`
 *     """
 *     cdef DeviceCapabilities_v1 caps_py = DeviceCapabilities_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceCapabilities_t *caps = <nvmlDeviceCapabilities_t *><intptr_t>(caps_py._get_ptr())
 *     caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)
*/
  /* Instantiate the DeviceCapabilities_v1 extension type (zero-arg vectorcall). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25021, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_caps_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25022
 *     """
 *     cdef DeviceCapabilities_v1 caps_py = DeviceCapabilities_v1()
 *     cdef nvmlDeviceCapabilities_t *caps = <nvmlDeviceCapabilities_t *><intptr_t>(caps_py._get_ptr())             # <<<<<<<<<<<<<<
 *     caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* _get_ptr() (vtable call) yields the address of the wrapper's native struct. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)__pyx_v_caps_py->__pyx_vtab)->_get_ptr(__pyx_v_caps_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25022, __pyx_L1_error)
  __pyx_v_caps = ((nvmlDeviceCapabilities_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25023
 *     cdef DeviceCapabilities_v1 caps_py = DeviceCapabilities_v1()
 *     cdef nvmlDeviceCapabilities_t *caps = <nvmlDeviceCapabilities_t *><intptr_t>(caps_py._get_ptr())
 *     caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCapabilities(<Device>device, caps)
*/
  __pyx_v_caps->version = ((sizeof(nvmlDeviceCapabilities_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":25024
 *     cdef nvmlDeviceCapabilities_t *caps = <nvmlDeviceCapabilities_t *><intptr_t>(caps_py._get_ptr())
 *     caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCapabilities(<Device>device, caps)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25025
 *     caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetCapabilities(<Device>device, caps)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return caps_py
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCapabilities(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_caps); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25025, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25024
 *     cdef nvmlDeviceCapabilities_t *caps = <nvmlDeviceCapabilities_t *><intptr_t>(caps_py._get_ptr())
 *     caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCapabilities(<Device>device, caps)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25026
 *     with nogil:
 *         __status__ = nvmlDeviceGetCapabilities(<Device>device, caps)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return caps_py
 * 
*/
  /* check_status returns 1 with a Python exception set on a failing status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25026, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25027
 *         __status__ = nvmlDeviceGetCapabilities(<Device>device, caps)
 *     check_status(__status__)
 *     return caps_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_caps_py);
  __pyx_r = ((PyObject *)__pyx_v_caps_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25010
 * 
 * 
 * cpdef object device_get_capabilities(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get device capabilities.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_caps_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: forward declaration, Python-visible docstring, and
 * PyMethodDef table entry for device_get_capabilities. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_611device_get_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_610device_get_capabilities, "device_get_capabilities(intptr_t device)\n\nGet device capabilities.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlDeviceCapabilities_v1_t: Returns GPU's capabilities.\n\n.. seealso:: `nvmlDeviceGetCapabilities`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_611device_get_capabilities = {"device_get_capabilities", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_611device_get_capabilities, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_610device_get_capabilities};
/* METH_FASTCALL Python wrapper for device_get_capabilities: unpacks the
 * single positional/keyword argument `device` (converted via
 * PyLong_AsSsize_t), then forwards to the cpdef fallback thunk.  On any
 * unpacking error the held argument references are released and NULL is
 * returned with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_611device_get_capabilities(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_capabilities (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which normalizes its operand to 0/1, so
     * the old comparison was always false and a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  Compare the length
     * itself inside the hint instead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25010, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25010, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_capabilities", 0) < (0)) __PYX_ERR(0, 25010, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_capabilities", 1, 1, 1, i); __PYX_ERR(0, 25010, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25010, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25010, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_capabilities", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25010, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_610device_get_capabilities(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef fallback thunk for device_get_capabilities: forwards to the C
 * implementation with __pyx_skip_dispatch=1 (no Python-level override lookup)
 * and propagates any exception it raises. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_610device_get_capabilities(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_capabilities", 0);
  __Pyx_XDECREF(__pyx_r);
  /* A NULL result means a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_capabilities(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25010, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_capabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25030
 * 
 * 
 * cpdef device_workload_power_profile_clear_requested_profiles(intptr_t device, intptr_t requested_profiles):             # <<<<<<<<<<<<<<
 *     """Clear Requested Performance Profiles.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_613device_workload_power_profile_clear_requested_profiles(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_workload_power_profile_clear_requested_profiles.
 * Reinterprets the caller-supplied integer `requested_profiles` as a pointer to
 * nvmlWorkloadPowerProfileRequestedProfiles_t (the caller owns that memory and
 * is responsible for its validity), calls the NVML function with the GIL
 * released, raises via check_status() on failure, and returns None. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_workload_power_profile_clear_requested_profiles(intptr_t __pyx_v_device, intptr_t __pyx_v_requested_profiles, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_workload_power_profile_clear_requested_profiles", 0);

  /* "cuda/bindings/_nvml.pyx":25039
 *     .. seealso:: `nvmlDeviceWorkloadPowerProfileClearRequestedProfiles`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(<Device>device, <nvmlWorkloadPowerProfileRequestedProfiles_t*>requested_profiles)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25040
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(<Device>device, <nvmlWorkloadPowerProfileRequestedProfiles_t*>requested_profiles)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlWorkloadPowerProfileRequestedProfiles_t *)__pyx_v_requested_profiles)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25040, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25039
 *     .. seealso:: `nvmlDeviceWorkloadPowerProfileClearRequestedProfiles`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(<Device>device, <nvmlWorkloadPowerProfileRequestedProfiles_t*>requested_profiles)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25041
 *     with nogil:
 *         __status__ = nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(<Device>device, <nvmlWorkloadPowerProfileRequestedProfiles_t*>requested_profiles)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 with a Python exception set on a failing status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25041, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25030
 * 
 * 
 * cpdef device_workload_power_profile_clear_requested_profiles(intptr_t device, intptr_t requested_profiles):             # <<<<<<<<<<<<<<
 *     """Clear Requested Performance Profiles.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_workload_power_profile_clear_requested_profiles", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: forward declaration, Python-visible docstring, and
 * PyMethodDef table entry for device_workload_power_profile_clear_requested_profiles. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_613device_workload_power_profile_clear_requested_profiles(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_612device_workload_power_profile_clear_requested_profiles, "device_workload_power_profile_clear_requested_profiles(intptr_t device, intptr_t requested_profiles)\n\nClear Requested Performance Profiles.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    requested_profiles (intptr_t): Reference to struct ``nvmlWorkloadPowerProfileRequestedProfiles_v1_t``.\n\n.. seealso:: `nvmlDeviceWorkloadPowerProfileClearRequestedProfiles`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_613device_workload_power_profile_clear_requested_profiles = {"device_workload_power_profile_clear_requested_profiles", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_613device_workload_power_profile_clear_requested_profiles, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_612device_workload_power_profile_clear_requested_profiles};
/* METH_FASTCALL Python wrapper for
 * device_workload_power_profile_clear_requested_profiles: unpacks the two
 * positional/keyword arguments (`device`, `requested_profiles`, both
 * converted via PyLong_AsSsize_t) and forwards to the cpdef fallback thunk.
 * On any unpacking error the held argument references are released and NULL
 * is returned with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_613device_workload_power_profile_clear_requested_profiles(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_requested_profiles;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_workload_power_profile_clear_requested_profiles (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_requested_profiles,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which normalizes its operand to 0/1, so
     * the old comparison was always false and a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  Compare the length
     * itself inside the hint instead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25030, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25030, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25030, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_workload_power_profile_clear_requested_profiles", 0) < (0)) __PYX_ERR(0, 25030, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_workload_power_profile_clear_requested_profiles", 1, 2, 2, i); __PYX_ERR(0, 25030, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25030, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25030, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25030, __pyx_L3_error)
    __pyx_v_requested_profiles = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_requested_profiles == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25030, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_workload_power_profile_clear_requested_profiles", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25030, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_workload_power_profile_clear_requested_profiles", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_612device_workload_power_profile_clear_requested_profiles(__pyx_self, __pyx_v_device, __pyx_v_requested_profiles);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef fallback thunk for device_workload_power_profile_clear_requested_profiles:
 * forwards to the C implementation with __pyx_skip_dispatch=1 (no Python-level
 * override lookup) and propagates any exception it raises. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_612device_workload_power_profile_clear_requested_profiles(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_requested_profiles) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_workload_power_profile_clear_requested_profiles", 0);
  __Pyx_XDECREF(__pyx_r);
  /* A NULL result means a Python exception is already set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_workload_power_profile_clear_requested_profiles(__pyx_v_device, __pyx_v_requested_profiles, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_workload_power_profile_clear_requested_profiles", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25044
 * 
 * 
 * cpdef device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile):             # <<<<<<<<<<<<<<
 *     """Activiate a specific preset profile for datacenter power smoothing. The API only sets the active preset profile based on the input profileId, and ignores the other parameters of the structure. Requires root/admin permissions.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_615device_power_smoothing_activate_preset_profile(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of cpdef device_power_smoothing_activate_preset_profile.
 * Reinterprets the caller-supplied integer `profile` as a pointer to
 * nvmlPowerSmoothingProfile_t (the caller owns that memory and is responsible
 * for its validity), calls the NVML function with the GIL released, raises
 * via check_status() on failure, and returns None. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_activate_preset_profile(intptr_t __pyx_v_device, intptr_t __pyx_v_profile, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_power_smoothing_activate_preset_profile", 0);

  /* "cuda/bindings/_nvml.pyx":25053
 *     .. seealso:: `nvmlDevicePowerSmoothingActivatePresetProfile`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDevicePowerSmoothingActivatePresetProfile(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)
 *     check_status(__status__)
*/
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25054
 *     """
 *     with nogil:
 *         __status__ = nvmlDevicePowerSmoothingActivatePresetProfile(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingActivatePresetProfile(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlPowerSmoothingProfile_t *)__pyx_v_profile)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25054, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25053
 *     .. seealso:: `nvmlDevicePowerSmoothingActivatePresetProfile`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDevicePowerSmoothingActivatePresetProfile(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)
 *     check_status(__status__)
*/
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25055
 *     with nogil:
 *         __status__ = nvmlDevicePowerSmoothingActivatePresetProfile(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status returns 1 with a Python exception set on a failing status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25055, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25044
 * 
 * 
 * cpdef device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile):             # <<<<<<<<<<<<<<
 *     """Activiate a specific preset profile for datacenter power smoothing. The API only sets the active preset profile based on the input profileId, and ignores the other parameters of the structure. Requires root/admin permissions.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_activate_preset_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_615device_power_smoothing_activate_preset_profile(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_614device_power_smoothing_activate_preset_profile, "device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile)\n\nActiviate a specific preset profile for datacenter power smoothing. The API only sets the active preset profile based on the input profileId, and ignores the other parameters of the structure. Requires root/admin permissions.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile (intptr_t): Reference to ``nvmlPowerSmoothingProfile_v1_t``. Note that only ``profile->profileId`` is used and the rest of the structure is ignored.\n\n.. seealso:: `nvmlDevicePowerSmoothingActivatePresetProfile`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_615device_power_smoothing_activate_preset_profile = {"device_power_smoothing_activate_preset_profile", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_615device_power_smoothing_activate_preset_profile, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_614device_power_smoothing_activate_preset_profile};
/* METH_FASTCALL Python wrapper for
 * device_power_smoothing_activate_preset_profile: unpacks the two
 * positional/keyword arguments (`device`, `profile`, both converted via
 * PyLong_AsSsize_t) and forwards to the cpdef fallback thunk.  On any
 * unpacking error the held argument references are released and NULL is
 * returned with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_615device_power_smoothing_activate_preset_profile(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_profile;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_power_smoothing_activate_preset_profile (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which normalizes its operand to 0/1, so
     * the old comparison was always false and a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  Compare the length
     * itself inside the hint instead. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25044, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25044, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25044, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_power_smoothing_activate_preset_profile", 0) < (0)) __PYX_ERR(0, 25044, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_power_smoothing_activate_preset_profile", 1, 2, 2, i); __PYX_ERR(0, 25044, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25044, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25044, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25044, __pyx_L3_error)
    __pyx_v_profile = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_profile == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25044, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_power_smoothing_activate_preset_profile", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25044, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_activate_preset_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_614device_power_smoothing_activate_preset_profile(__pyx_self, __pyx_v_device, __pyx_v_profile);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_power_smoothing_activate_preset_profile`
 * (cuda/bindings/_nvml.pyx:25044). Cython-generated shim: the wrapper has
 * already converted both arguments to intptr_t; this forwards them to the
 * shared cpdef C implementation and returns the resulting object.
 * Returns a new reference, or NULL with a Python exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_614device_power_smoothing_activate_preset_profile(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_profile) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  /* Error-location slots consumed by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_power_smoothing_activate_preset_profile", 0);
  __Pyx_XDECREF(__pyx_r); /* __pyx_r is NULL here; XDECREF is a no-op on NULL */
  /* Dispatch to the cpdef C implementation; skip_dispatch=1 (already at C level). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_activate_preset_profile(__pyx_v_device, __pyx_v_profile, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1; /* transfer ownership of the result to the caller */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  /* Append this frame to the traceback of the pending exception. */
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_activate_preset_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25058
 * 
 * 
 * cpdef device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile):             # <<<<<<<<<<<<<<
 *     """Update the value of a specific profile parameter contained within ``nvmlPowerSmoothingProfile_v1_t``. Requires root/admin permissions.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_617device_power_smoothing_update_preset_profile_param(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of the cpdef function
 * `device_power_smoothing_update_preset_profile_param`
 * (cuda/bindings/_nvml.pyx:25058).
 * Releases the GIL, calls the NVML entry point with `device` reinterpreted as
 * a device handle and `profile` as a nvmlPowerSmoothingProfile_t pointer, then
 * raises via check_status() on a non-success NVML return code.
 * Returns Py_None (new reference) on success, NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_update_preset_profile_param(intptr_t __pyx_v_device, intptr_t __pyx_v_profile, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_power_smoothing_update_preset_profile_param", 0);

  /* "cuda/bindings/_nvml.pyx":25067
 *     .. seealso:: `nvmlDevicePowerSmoothingUpdatePresetProfileParam`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDevicePowerSmoothingUpdatePresetProfileParam(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)
 *     check_status(__status__)
*/
  /* GIL released for the duration of the NVML call (Cython `with nogil:`). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25068
 *     """
 *     with nogil:
 *         __status__ = nvmlDevicePowerSmoothingUpdatePresetProfileParam(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals a lazy-loading failure inside the
         * binding shim; in that case an exception is already set (checked with
         * the GIL temporarily reacquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingUpdatePresetProfileParam(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlPowerSmoothingProfile_t *)__pyx_v_profile)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25068, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25067
 *     .. seealso:: `nvmlDevicePowerSmoothingUpdatePresetProfileParam`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDevicePowerSmoothingUpdatePresetProfileParam(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save); /* reacquire the GIL on both exit paths */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25069
 *     with nogil:
 *         __status__ = nvmlDevicePowerSmoothingUpdatePresetProfileParam(<Device>device, <nvmlPowerSmoothingProfile_t*>profile)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises an NVML error exception (returns 1) on failure codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25069, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25058
 * 
 * 
 * cpdef device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile):             # <<<<<<<<<<<<<<
 *     """Update the value of a specific profile parameter contained within ``nvmlPowerSmoothingProfile_v1_t``. Requires root/admin permissions.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_update_preset_profile_param", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_617device_power_smoothing_update_preset_profile_param(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_616device_power_smoothing_update_preset_profile_param, "device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile)\n\nUpdate the value of a specific profile parameter contained within ``nvmlPowerSmoothingProfile_v1_t``. Requires root/admin permissions.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile (intptr_t): Reference to ``nvmlPowerSmoothingProfile_v1_t`` struct.\n\n.. seealso:: `nvmlDevicePowerSmoothingUpdatePresetProfileParam`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_617device_power_smoothing_update_preset_profile_param = {"device_power_smoothing_update_preset_profile_param", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_617device_power_smoothing_update_preset_profile_param, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_616device_power_smoothing_update_preset_profile_param};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * `device_power_smoothing_update_preset_profile_param`.
 * Parses two required arguments (`device`, `profile`), converts both to
 * intptr_t, and forwards to the __pyx_pf_ body. Returns NULL with an
 * exception set on bad arguments or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_617device_power_smoothing_update_preset_profile_param(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_profile;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_power_smoothing_update_preset_profile_param (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() expands to
     * __builtin_expect(!!(x), 0), which yields 0 or 1, so the comparison was
     * always false and the negative-count error check was dead code. The
     * comparison must sit inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25058, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Capture positional args first, then merge keyword args into `values`. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25058, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25058, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_power_smoothing_update_preset_profile_param", 0) < (0)) __PYX_ERR(0, 25058, __pyx_L3_error)
      /* Both arguments are required: any slot still empty is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_power_smoothing_update_preset_profile_param", 1, 2, 2, i); __PYX_ERR(0, 25058, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25058, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25058, __pyx_L3_error)
    }
    /* Convert Python ints to intptr_t; -1 with an exception set means failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25058, __pyx_L3_error)
    __pyx_v_profile = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_profile == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25058, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_power_smoothing_update_preset_profile_param", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25058, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_update_preset_profile_param", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_616device_power_smoothing_update_preset_profile_param(__pyx_self, __pyx_v_device, __pyx_v_profile);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_power_smoothing_update_preset_profile_param`
 * (cuda/bindings/_nvml.pyx:25058). Cython-generated shim: arguments are
 * already converted to intptr_t by the wrapper; forwards to the shared cpdef
 * C implementation. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_616device_power_smoothing_update_preset_profile_param(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_profile) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  /* Error-location slots consumed by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_power_smoothing_update_preset_profile_param", 0);
  __Pyx_XDECREF(__pyx_r); /* __pyx_r is NULL here; XDECREF is a no-op on NULL */
  /* Dispatch to the cpdef C implementation; skip_dispatch=1 (already at C level). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_update_preset_profile_param(__pyx_v_device, __pyx_v_profile, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1; /* transfer ownership of the result to the caller */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_update_preset_profile_param", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25072
 * 
 * 
 * cpdef device_power_smoothing_set_state(intptr_t device, intptr_t state):             # <<<<<<<<<<<<<<
 *     """Enable or disable the Power Smoothing Feature. Requires root/admin permissions.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_619device_power_smoothing_set_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of the cpdef function `device_power_smoothing_set_state`
 * (cuda/bindings/_nvml.pyx:25072).
 * Releases the GIL, calls the NVML entry point with `device` reinterpreted as
 * a device handle and `state` as a nvmlPowerSmoothingState_t pointer, then
 * raises via check_status() on a non-success NVML return code.
 * Returns Py_None (new reference) on success, NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_set_state(intptr_t __pyx_v_device, intptr_t __pyx_v_state, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_power_smoothing_set_state", 0);

  /* "cuda/bindings/_nvml.pyx":25081
 *     .. seealso:: `nvmlDevicePowerSmoothingSetState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDevicePowerSmoothingSetState(<Device>device, <nvmlPowerSmoothingState_t*>state)
 *     check_status(__status__)
*/
  /* GIL released for the duration of the NVML call (Cython `with nogil:`). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25082
 *     """
 *     with nogil:
 *         __status__ = nvmlDevicePowerSmoothingSetState(<Device>device, <nvmlPowerSmoothingState_t*>state)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* The sentinel return value signals a lazy-loading failure inside the
         * binding shim; in that case an exception is already set (checked with
         * the GIL temporarily reacquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingSetState(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlPowerSmoothingState_t *)__pyx_v_state)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25082, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25081
 *     .. seealso:: `nvmlDevicePowerSmoothingSetState`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDevicePowerSmoothingSetState(<Device>device, <nvmlPowerSmoothingState_t*>state)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save); /* reacquire the GIL on both exit paths */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25083
 *     with nogil:
 *         __status__ = nvmlDevicePowerSmoothingSetState(<Device>device, <nvmlPowerSmoothingState_t*>state)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* check_status raises an NVML error exception (returns 1) on failure codes. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25083, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25072
 * 
 * 
 * cpdef device_power_smoothing_set_state(intptr_t device, intptr_t state):             # <<<<<<<<<<<<<<
 *     """Enable or disable the Power Smoothing Feature. Requires root/admin permissions.
 * 
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_619device_power_smoothing_set_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_618device_power_smoothing_set_state, "device_power_smoothing_set_state(intptr_t device, intptr_t state)\n\nEnable or disable the Power Smoothing Feature. Requires root/admin permissions.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    state (intptr_t): Reference to ``nvmlPowerSmoothingState_v1_t``.\n\n.. seealso:: `nvmlDevicePowerSmoothingSetState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_619device_power_smoothing_set_state = {"device_power_smoothing_set_state", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_619device_power_smoothing_set_state, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_618device_power_smoothing_set_state};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for
 * `device_power_smoothing_set_state`.
 * Parses two required arguments (`device`, `state`), converts both to
 * intptr_t, and forwards to the __pyx_pf_ body. Returns NULL with an
 * exception set on bad arguments or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_619device_power_smoothing_set_state(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_state;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_power_smoothing_set_state (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() expands to
     * __builtin_expect(!!(x), 0), which yields 0 or 1, so the comparison was
     * always false and the negative-count error check was dead code. The
     * comparison must sit inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25072, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Capture positional args first, then merge keyword args into `values`. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25072, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25072, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_power_smoothing_set_state", 0) < (0)) __PYX_ERR(0, 25072, __pyx_L3_error)
      /* Both arguments are required: any slot still empty is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_power_smoothing_set_state", 1, 2, 2, i); __PYX_ERR(0, 25072, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25072, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25072, __pyx_L3_error)
    }
    /* Convert Python ints to intptr_t; -1 with an exception set means failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25072, __pyx_L3_error)
    __pyx_v_state = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_state == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25072, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_power_smoothing_set_state", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25072, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_618device_power_smoothing_set_state(__pyx_self, __pyx_v_device, __pyx_v_state);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `device_power_smoothing_set_state`
 * (cuda/bindings/_nvml.pyx:25072). Cython-generated shim: arguments are
 * already converted to intptr_t by the wrapper; forwards to the shared cpdef
 * C implementation. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_618device_power_smoothing_set_state(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  /* Error-location slots consumed by the __PYX_ERR macro below. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_power_smoothing_set_state", 0);
  __Pyx_XDECREF(__pyx_r); /* __pyx_r is NULL here; XDECREF is a no-op on NULL */
  /* Dispatch to the cpdef C implementation; skip_dispatch=1 (already at C level). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_set_state(__pyx_v_device, __pyx_v_state, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1; /* transfer ownership of the result to the caller */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_power_smoothing_set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25086
 * 
 * 
 * cpdef object device_get_addressing_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the addressing mode for a given GPU. Addressing modes can be one of:.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_621device_get_addressing_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of the cpdef function `device_get_addressing_mode`
 * (cuda/bindings/_nvml.pyx:25086).
 * Allocates a DeviceAddressingMode_v1 Python wrapper object, stamps the
 * versioned-struct header (size | version<<24) into the underlying
 * nvmlDeviceAddressingMode_v1_t, calls nvmlDeviceGetAddressingMode with the
 * GIL released, checks the status, and returns the populated wrapper.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_addressing_mode(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *__pyx_v_mode_py = 0;
  nvmlDeviceAddressingMode_t *__pyx_v_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_addressing_mode", 0);

  /* "cuda/bindings/_nvml.pyx":25097
 *     .. seealso:: `nvmlDeviceGetAddressingMode`
 *     """
 *     cdef DeviceAddressingMode_v1 mode_py = DeviceAddressingMode_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlDeviceAddressingMode_t *mode = <nvmlDeviceAddressingMode_t *><intptr_t>(mode_py._get_ptr())
 *     mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24)
*/
  /* Instantiate the DeviceAddressingMode_v1 wrapper via a zero-argument
   * vectorcall on the extension type. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25097, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_mode_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25098
 *     """
 *     cdef DeviceAddressingMode_v1 mode_py = DeviceAddressingMode_v1()
 *     cdef nvmlDeviceAddressingMode_t *mode = <nvmlDeviceAddressingMode_t *><intptr_t>(mode_py._get_ptr())             # <<<<<<<<<<<<<<
 *     mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24)
 *     with nogil:
*/
  /* Borrow the raw struct pointer owned by the wrapper (vtable call). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)__pyx_v_mode_py->__pyx_vtab)->_get_ptr(__pyx_v_mode_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25098, __pyx_L1_error)
  __pyx_v_mode = ((nvmlDeviceAddressingMode_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25099
 *     cdef DeviceAddressingMode_v1 mode_py = DeviceAddressingMode_v1()
 *     cdef nvmlDeviceAddressingMode_t *mode = <nvmlDeviceAddressingMode_t *><intptr_t>(mode_py._get_ptr())
 *     mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetAddressingMode(<Device>device, mode)
*/
  /* NVML versioned-API header: struct size in the low bits, version 1 in
   * bits 24..31 (0x1000000 == 1 << 24). */
  __pyx_v_mode->version = ((sizeof(nvmlDeviceAddressingMode_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":25100
 *     cdef nvmlDeviceAddressingMode_t *mode = <nvmlDeviceAddressingMode_t *><intptr_t>(mode_py._get_ptr())
 *     mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAddressingMode(<Device>device, mode)
 *     check_status(__status__)
*/
  /* GIL released for the duration of the NVML call (Cython `with nogil:`). */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25101
 *     mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetAddressingMode(<Device>device, mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return mode_py
*/
        /* The sentinel return value signals a lazy-loading failure inside the
         * binding shim; in that case an exception is already set (checked with
         * the GIL temporarily reacquired). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAddressingMode(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_mode); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25101, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25100
 *     cdef nvmlDeviceAddressingMode_t *mode = <nvmlDeviceAddressingMode_t *><intptr_t>(mode_py._get_ptr())
 *     mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetAddressingMode(<Device>device, mode)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save); /* reacquire the GIL on both exit paths */
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25102
 *     with nogil:
 *         __status__ = nvmlDeviceGetAddressingMode(<Device>device, mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return mode_py
 * 
*/
  /* check_status raises an NVML error exception (returns 1) on failure codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25102, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25103
 *         __status__ = nvmlDeviceGetAddressingMode(<Device>device, mode)
 *     check_status(__status__)
 *     return mode_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Return the wrapper object, now populated in place by NVML. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_mode_py);
  __pyx_r = ((PyObject *)__pyx_v_mode_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25086
 * 
 * 
 * cpdef object device_get_addressing_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the addressing mode for a given GPU. Addressing modes can be one of:.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_addressing_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_mode_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_621device_get_addressing_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_620device_get_addressing_mode, "device_get_addressing_mode(intptr_t device)\n\nGet the addressing mode for a given GPU. Addressing modes can be one of:.\n\nArgs:\n    device (intptr_t): The device handle.\n\nReturns:\n    nvmlDeviceAddressingMode_v1_t: Pointer to addressing mode of the device.\n\n.. seealso:: `nvmlDeviceGetAddressingMode`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_621device_get_addressing_mode = {"device_get_addressing_mode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_621device_get_addressing_mode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_620device_get_addressing_mode};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for `device_get_addressing_mode`.
 * Parses one required argument (`device`), converts it to intptr_t, and
 * forwards to the __pyx_pf_ body. Returns NULL with an exception set on bad
 * arguments or conversion failure. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_621device_get_addressing_mode(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_addressing_mode (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() expands to
     * __builtin_expect(!!(x), 0), which yields 0 or 1, so the comparison was
     * always false and the negative-count error check was dead code. The
     * comparison must sit inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25086, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Capture the positional arg first, then merge keyword args into `values`. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25086, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_addressing_mode", 0) < (0)) __PYX_ERR(0, 25086, __pyx_L3_error)
      /* The argument is required: an empty slot is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_addressing_mode", 1, 1, 1, i); __PYX_ERR(0, 25086, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25086, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with an exception set means failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25086, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_addressing_mode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25086, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_addressing_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_620device_get_addressing_mode(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-dispatch shim for device_get_addressing_mode.
 *
 * Called by the FASTCALL wrapper after argument unpacking; delegates to
 * the cpdef C implementation with __pyx_skip_dispatch=1 (we are already
 * past Python-level dispatch) and forwards its result unchanged.
 * Returns a new reference on success, NULL with a Python exception set
 * on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_620device_get_addressing_mode(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_addressing_mode", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_addressing_mode(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_addressing_mode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25106
 * 
 * 
 * cpdef object device_get_repair_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the repair status for TPC/Channel repair.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_623device_get_repair_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * cpdef C-level implementation of device_get_repair_status.
 *
 * Constructs a RepairStatus_v1 wrapper object, obtains the address of
 * its embedded nvmlRepairStatus_t via the vtable _get_ptr() call, stamps
 * the struct's version field, invokes nvmlDeviceGetRepairStatus with the
 * GIL released, raises through check_status() on a non-success code, and
 * returns the wrapper object (new reference).  Returns NULL with a
 * Python exception set on any error path.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_repair_status(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *__pyx_v_repair_status_py = 0;
  nvmlRepairStatus_t *__pyx_v_repair_status;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_repair_status", 0);

  /* "cuda/bindings/_nvml.pyx":25117
 *     .. seealso:: `nvmlDeviceGetRepairStatus`
 *     """
 *     cdef RepairStatus_v1 repair_status_py = RepairStatus_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlRepairStatus_t *repair_status = <nvmlRepairStatus_t *><intptr_t>(repair_status_py._get_ptr())
 *     repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24)
 */
  /* Zero-argument vectorcall of the RepairStatus_v1 type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25117, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_repair_status_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25118
 *     """
 *     cdef RepairStatus_v1 repair_status_py = RepairStatus_v1()
 *     cdef nvmlRepairStatus_t *repair_status = <nvmlRepairStatus_t *><intptr_t>(repair_status_py._get_ptr())             # <<<<<<<<<<<<<<
 *     repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24)
 *     with nogil:
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RepairStatus_v1 *)__pyx_v_repair_status_py->__pyx_vtab)->_get_ptr(__pyx_v_repair_status_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25118, __pyx_L1_error)
  __pyx_v_repair_status = ((nvmlRepairStatus_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25119
 *     cdef RepairStatus_v1 repair_status_py = RepairStatus_v1()
 *     cdef nvmlRepairStatus_t *repair_status = <nvmlRepairStatus_t *><intptr_t>(repair_status_py._get_ptr())
 *     repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetRepairStatus(<Device>device, repair_status)
 */
  /* 0x1000000 == (1 << 24): struct version number 1 encoded in the high
   * byte, OR'd with the struct size (see the quoted .pyx line above). */
  __pyx_v_repair_status->version = ((sizeof(nvmlRepairStatus_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":25120
 *     cdef nvmlRepairStatus_t *repair_status = <nvmlRepairStatus_t *><intptr_t>(repair_status_py._get_ptr())
 *     repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRepairStatus(<Device>device, repair_status)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25121
 *     repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetRepairStatus(<Device>device, repair_status)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return repair_status_py
 */
        /* NOTE(review): _NVMLRETURN_T_INTERNAL_LOADING_ERROR appears to be a
         * sentinel for a failed NVML symbol load; when it is returned, the
         * generated code re-acquires the GIL to check for a pending Python
         * exception — confirm against cy_nvml. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRepairStatus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_repair_status); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25121, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25120
 *     cdef nvmlRepairStatus_t *repair_status = <nvmlRepairStatus_t *><intptr_t>(repair_status_py._get_ptr())
 *     repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRepairStatus(<Device>device, repair_status)
 *     check_status(__status__)
 */
      /* Both exits of the nogil region restore the thread state before
       * continuing (normal path) or propagating the error (L4 path). */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25122
 *     with nogil:
 *         __status__ = nvmlDeviceGetRepairStatus(<Device>device, repair_status)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return repair_status_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25122, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25123
 *         __status__ = nvmlDeviceGetRepairStatus(<Device>device, repair_status)
 *     check_status(__status__)
 *     return repair_status_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_repair_status_py);
  __pyx_r = ((PyObject *)__pyx_v_repair_status_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25106
 * 
 * 
 * cpdef object device_get_repair_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the repair status for TPC/Channel repair.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_repair_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_repair_status_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_623device_get_repair_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring (mirrors the .pyx signature/docstring) and the PyMethodDef
 * entry registering the FASTCALL wrapper as the Python-visible
 * device_get_repair_status. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_622device_get_repair_status, "device_get_repair_status(intptr_t device)\n\nGet the repair status for TPC/Channel repair.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlRepairStatus_v1_t: Reference to ``nvmlRepairStatus_t``.\n\n.. seealso:: `nvmlDeviceGetRepairStatus`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_623device_get_repair_status = {"device_get_repair_status", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_623device_get_repair_status, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_622device_get_repair_status};
/*
 * METH_FASTCALL|METH_KEYWORDS wrapper for device_get_repair_status.
 *
 * Unpacks exactly one argument, "device", accepted positionally or by
 * keyword, converts it to intptr_t, and dispatches to the __pyx_pf_
 * implementation.  All references collected in values[] are released on
 * every exit path.  Returns NULL with an exception set on bad arguments
 * or conversion failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_623device_get_repair_status(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_repair_status (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` hints the value rather
     * than the comparison; semantics are unchanged (unlikely() yields its
     * argument) — generated-code quirk, left as-is. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25106, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25106, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_repair_status", 0) < (0)) __PYX_ERR(0, 25106, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_repair_status", 1, 1, 1, i); __PYX_ERR(0, 25106, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25106, __pyx_L3_error)
    }
    /* Converts via Py_ssize_t; assumes sizeof(Py_ssize_t) == sizeof(intptr_t)
     * on supported platforms — TODO confirm for any exotic targets. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25106, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_repair_status", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25106, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_repair_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_622device_get_repair_status(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-dispatch shim for device_get_repair_status.
 *
 * Delegates to the cpdef C implementation with __pyx_skip_dispatch=1 and
 * forwards its result.  Returns a new reference on success, NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_622device_get_repair_status(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_repair_status", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_repair_status(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_repair_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25126
 * 
 * 
 * cpdef object device_get_power_mizer_mode_v1(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves current power mizer mode on this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_625device_get_power_mizer_mode_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * cpdef C-level implementation of device_get_power_mizer_mode_v1.
 *
 * Constructs a DevicePowerMizerModes_v1 wrapper object, obtains the
 * address of its embedded nvmlDevicePowerMizerModes_v1_t via the vtable
 * _get_ptr() call, invokes nvmlDeviceGetPowerMizerMode_v1 with the GIL
 * released, raises through check_status() on a non-success code, and
 * returns the wrapper object (new reference).  Unlike the repair-status
 * variant above, no version field is stamped here (none is set in the
 * .pyx source).  Returns NULL with an exception set on error.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_power_mizer_mode_v1(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *__pyx_v_power_mizer_mode_py = 0;
  nvmlDevicePowerMizerModes_v1_t *__pyx_v_power_mizer_mode;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_mizer_mode_v1", 0);

  /* "cuda/bindings/_nvml.pyx":25137
 *     .. seealso:: `nvmlDeviceGetPowerMizerMode_v1`
 *     """
 *     cdef DevicePowerMizerModes_v1 power_mizer_mode_py = DevicePowerMizerModes_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = <nvmlDevicePowerMizerModes_v1_t *><intptr_t>(power_mizer_mode_py._get_ptr())
 *     with nogil:
 */
  /* Zero-argument vectorcall of the DevicePowerMizerModes_v1 type object. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25137, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_power_mizer_mode_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25138
 *     """
 *     cdef DevicePowerMizerModes_v1 power_mizer_mode_py = DevicePowerMizerModes_v1()
 *     cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = <nvmlDevicePowerMizerModes_v1_t *><intptr_t>(power_mizer_mode_py._get_ptr())             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerMizerMode_v1(<Device>device, power_mizer_mode)
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)__pyx_v_power_mizer_mode_py->__pyx_vtab)->_get_ptr(__pyx_v_power_mizer_mode_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25138, __pyx_L1_error)
  __pyx_v_power_mizer_mode = ((nvmlDevicePowerMizerModes_v1_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25139
 *     cdef DevicePowerMizerModes_v1 power_mizer_mode_py = DevicePowerMizerModes_v1()
 *     cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = <nvmlDevicePowerMizerModes_v1_t *><intptr_t>(power_mizer_mode_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerMizerMode_v1(<Device>device, power_mizer_mode)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25140
 *     cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = <nvmlDevicePowerMizerModes_v1_t *><intptr_t>(power_mizer_mode_py._get_ptr())
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerMizerMode_v1(<Device>device, power_mizer_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return power_mizer_mode_py
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerMizerMode_v1(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_power_mizer_mode); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25140, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25139
 *     cdef DevicePowerMizerModes_v1 power_mizer_mode_py = DevicePowerMizerModes_v1()
 *     cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = <nvmlDevicePowerMizerModes_v1_t *><intptr_t>(power_mizer_mode_py._get_ptr())
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPowerMizerMode_v1(<Device>device, power_mizer_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25141
 *     with nogil:
 *         __status__ = nvmlDeviceGetPowerMizerMode_v1(<Device>device, power_mizer_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return power_mizer_mode_py
 * 
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25141, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25142
 *         __status__ = nvmlDeviceGetPowerMizerMode_v1(<Device>device, power_mizer_mode)
 *     check_status(__status__)
 *     return power_mizer_mode_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_power_mizer_mode_py);
  __pyx_r = ((PyObject *)__pyx_v_power_mizer_mode_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25126
 * 
 * 
 * cpdef object device_get_power_mizer_mode_v1(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves current power mizer mode on this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_mizer_mode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_power_mizer_mode_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_625device_get_power_mizer_mode_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring (mirrors the .pyx signature/docstring) and the PyMethodDef
 * entry registering the FASTCALL wrapper as the Python-visible
 * device_get_power_mizer_mode_v1. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_624device_get_power_mizer_mode_v1, "device_get_power_mizer_mode_v1(intptr_t device)\n\nRetrieves current power mizer mode on this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlDevicePowerMizerModes_v1_t: Reference in which to return the power mizer mode.\n\n.. seealso:: `nvmlDeviceGetPowerMizerMode_v1`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_625device_get_power_mizer_mode_v1 = {"device_get_power_mizer_mode_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_625device_get_power_mizer_mode_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_624device_get_power_mizer_mode_v1};
/*
 * METH_FASTCALL|METH_KEYWORDS wrapper for device_get_power_mizer_mode_v1.
 *
 * Unpacks exactly one argument, "device", accepted positionally or by
 * keyword, converts it to intptr_t via PyLong_AsSsize_t, and dispatches
 * to the __pyx_pf_ implementation.  References collected in values[] are
 * released on every exit path.  Returns NULL with an exception set on
 * bad arguments or conversion failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_625device_get_power_mizer_mode_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_power_mizer_mode_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25126, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25126, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_power_mizer_mode_v1", 0) < (0)) __PYX_ERR(0, 25126, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_power_mizer_mode_v1", 1, 1, 1, i); __PYX_ERR(0, 25126, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25126, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25126, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_power_mizer_mode_v1", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25126, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_mizer_mode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_624device_get_power_mizer_mode_v1(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Python-dispatch shim for device_get_power_mizer_mode_v1.
 *
 * Delegates to the cpdef C implementation with __pyx_skip_dispatch=1 and
 * forwards its result.  Returns a new reference on success, NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_624device_get_power_mizer_mode_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_power_mizer_mode_v1", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_power_mizer_mode_v1(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_power_mizer_mode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25145
 * 
 * 
 * cpdef device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode):             # <<<<<<<<<<<<<<
 *     """Sets the new power mizer mode.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_627device_set_power_mizer_mode_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * cpdef C-level implementation of device_set_power_mizer_mode_v1.
 *
 * Reinterprets the integer `power_mizer_mode` argument as the address of
 * a caller-provided nvmlDevicePowerMizerModes_v1_t (see the pointer cast
 * below — the caller is responsible for passing a valid struct address),
 * invokes nvmlDeviceSetPowerMizerMode_v1 with the GIL released, raises
 * through check_status() on a non-success code, and returns None.
 * Returns NULL with a Python exception set on error.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_power_mizer_mode_v1(intptr_t __pyx_v_device, intptr_t __pyx_v_power_mizer_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_power_mizer_mode_v1", 0);

  /* "cuda/bindings/_nvml.pyx":25154
 *     .. seealso:: `nvmlDeviceSetPowerMizerMode_v1`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPowerMizerMode_v1(<Device>device, <nvmlDevicePowerMizerModes_v1_t*>power_mizer_mode)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25155
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceSetPowerMizerMode_v1(<Device>device, <nvmlDevicePowerMizerModes_v1_t*>power_mizer_mode)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerMizerMode_v1(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlDevicePowerMizerModes_v1_t *)__pyx_v_power_mizer_mode)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25155, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25154
 *     .. seealso:: `nvmlDeviceSetPowerMizerMode_v1`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetPowerMizerMode_v1(<Device>device, <nvmlDevicePowerMizerModes_v1_t*>power_mizer_mode)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25156
 *     with nogil:
 *         __status__ = nvmlDeviceSetPowerMizerMode_v1(<Device>device, <nvmlDevicePowerMizerModes_v1_t*>power_mizer_mode)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25156, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25145
 * 
 * 
 * cpdef device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode):             # <<<<<<<<<<<<<<
 *     """Sets the new power mizer mode.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_mizer_mode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_627device_set_power_mizer_mode_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring (mirrors the .pyx signature/docstring) and the PyMethodDef
 * entry registering the FASTCALL wrapper as the Python-visible
 * device_set_power_mizer_mode_v1. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_626device_set_power_mizer_mode_v1, "device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode)\n\nSets the new power mizer mode.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    power_mizer_mode (intptr_t): Reference in which to set the power mizer mode.\n\n.. seealso:: `nvmlDeviceSetPowerMizerMode_v1`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_627device_set_power_mizer_mode_v1 = {"device_set_power_mizer_mode_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_627device_set_power_mizer_mode_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_626device_set_power_mizer_mode_v1};
/*
 * METH_FASTCALL|METH_KEYWORDS wrapper for device_set_power_mizer_mode_v1.
 *
 * Unpacks exactly two arguments, "device" and "power_mizer_mode", each
 * accepted positionally or by keyword, converts both to intptr_t via
 * PyLong_AsSsize_t, and dispatches to the __pyx_pf_ implementation.
 * References collected in values[] are released on every exit path.
 * Returns NULL with an exception set on bad arguments or conversion
 * failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_627device_set_power_mizer_mode_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_power_mizer_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_power_mizer_mode_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_power_mizer_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25145, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Positional args are collected back-to-front so each case falls
       * through to the next lower count. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25145, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25145, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_power_mizer_mode_v1", 0) < (0)) __PYX_ERR(0, 25145, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_power_mizer_mode_v1", 1, 2, 2, i); __PYX_ERR(0, 25145, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25145, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25145, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25145, __pyx_L3_error)
    __pyx_v_power_mizer_mode = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_power_mizer_mode == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25145, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_power_mizer_mode_v1", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25145, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_mizer_mode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_626device_set_power_mizer_mode_v1(__pyx_self, __pyx_v_device, __pyx_v_power_mizer_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatcher for `device_set_power_mizer_mode_v1`: forwards both
 * integer arguments to the C-level (cpdef) implementation with skip_dispatch=1
 * and propagates its owned result reference, or NULL if an exception was set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_626device_set_power_mizer_mode_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_power_mizer_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_result = NULL;  /* owned reference produced by the C impl */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_power_mizer_mode_v1", 0);
  /* Call the cpdef C entry point; NULL means a Python exception is pending. */
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_device_set_power_mizer_mode_v1(__pyx_v_device, __pyx_v_power_mizer_mode, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 25145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  /* Hand the reference over to the return slot. */
  __pyx_r = __pyx_result;
  __pyx_result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record the traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_power_mizer_mode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25159
 * 
 * 
 * cpdef object device_get_pdi(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the Per Device Identifier (PDI) associated with this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_629device_get_pdi(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_pdi`.
 * Steps (all visible below): (1) instantiate a Python `Pdi_v1` wrapper,
 * (2) obtain the address of its embedded C struct via `_get_ptr()`,
 * (3) stamp the NVML versioned-struct header (sizeof | version<<24),
 * (4) call nvmlDeviceGetPdi with the GIL released, (5) raise through
 * `check_status` if the status is bad, and (6) return the wrapper object
 * as a new reference.  Returns NULL with a Python exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_pdi(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *__pyx_v_pdi_py = 0;
  nvmlPdi_t *__pyx_v_pdi;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pdi", 0);

  /* "cuda/bindings/_nvml.pyx":25170
 *     .. seealso:: `nvmlDeviceGetPdi`
 *     """
 *     cdef Pdi_v1 pdi_py = Pdi_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlPdi_t *pdi = <nvmlPdi_t *><intptr_t>(pdi_py._get_ptr())
 *     pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24)
 */
  /* Zero-argument vectorcall of the Pdi_v1 type to allocate the wrapper. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25170, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pdi_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25171
 *     """
 *     cdef Pdi_v1 pdi_py = Pdi_v1()
 *     cdef nvmlPdi_t *pdi = <nvmlPdi_t *><intptr_t>(pdi_py._get_ptr())             # <<<<<<<<<<<<<<
 *     pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24)
 *     with nogil:
 */
  /* _get_ptr() yields the address (as intptr_t) of the C struct owned by
   * the Python wrapper; reinterpret it as nvmlPdi_t*.  The pointer stays
   * valid because pdi_py is kept alive until the function returns. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Pdi_v1 *)__pyx_v_pdi_py->__pyx_vtab)->_get_ptr(__pyx_v_pdi_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25171, __pyx_L1_error)
  __pyx_v_pdi = ((nvmlPdi_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25172
 *     cdef Pdi_v1 pdi_py = Pdi_v1()
 *     cdef nvmlPdi_t *pdi = <nvmlPdi_t *><intptr_t>(pdi_py._get_ptr())
 *     pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetPdi(<Device>device, pdi)
 */
  /* NVML versioned-API header: low bits = struct size, byte 3 = version (1). */
  __pyx_v_pdi->version = ((sizeof(nvmlPdi_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":25173
 *     cdef nvmlPdi_t *pdi = <nvmlPdi_t *><intptr_t>(pdi_py._get_ptr())
 *     pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPdi(<Device>device, pdi)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25174
 *     pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetPdi(<Device>device, pdi)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return pdi_py
 */
        /* The sentinel status means the lazy loader failed to resolve the
         * symbol; in that case an exception is checked with the GIL held. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPdi(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_pdi); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25174, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25173
 *     cdef nvmlPdi_t *pdi = <nvmlPdi_t *><intptr_t>(pdi_py._get_ptr())
 *     pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetPdi(<Device>device, pdi)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error inside the nogil region: reacquire the GIL before unwinding */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25175
 *     with nogil:
 *         __status__ = nvmlDeviceGetPdi(<Device>device, pdi)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pdi_py
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25175, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25176
 *         __status__ = nvmlDeviceGetPdi(<Device>device, pdi)
 *     check_status(__status__)
 *     return pdi_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: return the populated wrapper as a new reference. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pdi_py);
  __pyx_r = ((PyObject *)__pyx_v_pdi_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25159
 * 
 * 
 * cpdef object device_get_pdi(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the Per Device Identifier (PDI) associated with this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pdi", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owning reference to the wrapper (caller holds its own). */
  __Pyx_XDECREF((PyObject *)__pyx_v_pdi_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_629device_get_pdi(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_628device_get_pdi, "device_get_pdi(intptr_t device)\n\nRetrieves the Per Device Identifier (PDI) associated with this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlPdi_v1_t: Reference to the caller-provided structure to return the GPU PDI.\n\n.. seealso:: `nvmlDeviceGetPdi`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_629device_get_pdi = {"device_get_pdi", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_629device_get_pdi, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_628device_get_pdi};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for `device_get_pdi`.
 * Parses exactly one argument (`device`, positional or keyword), converts
 * it to intptr_t, and dispatches to the __pyx_pf_ implementation.
 * Returns NULL with a Python exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_629device_get_pdi(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned refs to parsed arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_pdi (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which is always false because
     * unlikely(x) expands to __builtin_expect(!!(x), 0) (value 0 or 1) on
     * GCC/Clang, so a negative (error) kwargs count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25159, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25159, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_pdi", 0) < (0)) __PYX_ERR(0, 25159, __pyx_L3_error)
      /* Any slot still empty after merging means a required argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_pdi", 1, 1, 1, i); __PYX_ERR(0, 25159, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25159, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with an exception set is an error. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25159, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_pdi", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25159, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pdi", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_628device_get_pdi(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatcher for `device_get_pdi`: forwards the device handle to
 * the C-level (cpdef) implementation with skip_dispatch=1 and propagates its
 * owned result reference, or NULL if an exception was set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_628device_get_pdi(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_result = NULL;  /* owned reference produced by the C impl */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_pdi", 0);
  /* Call the cpdef C entry point; NULL means a Python exception is pending. */
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_pdi(__pyx_v_device, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 25159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  /* Hand the reference over to the return slot. */
  __pyx_r = __pyx_result;
  __pyx_result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record the traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_pdi", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25179
 * 
 * 
 * cpdef object device_get_nvlink_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Query NVLINK information associated with this device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_631device_get_nvlink_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_get_nvlink_info`.
 * Steps (all visible below): (1) instantiate a Python `NvLinkInfo_v2`
 * wrapper, (2) obtain the address of its embedded C struct via `_get_ptr()`,
 * (3) stamp the NVML versioned-struct header (sizeof | version<<24, here
 * version 2), (4) call nvmlDeviceGetNvLinkInfo with the GIL released,
 * (5) raise through `check_status` if the status is bad, and (6) return the
 * wrapper object as a new reference.  Returns NULL with a Python exception
 * set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *__pyx_v_info_py = 0;
  nvmlNvLinkInfo_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_info", 0);

  /* "cuda/bindings/_nvml.pyx":25190
 *     .. seealso:: `nvmlDeviceGetNvLinkInfo`
 *     """
 *     cdef NvLinkInfo_v2 info_py = NvLinkInfo_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlNvLinkInfo_t *info = <nvmlNvLinkInfo_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24)
 */
  /* Zero-argument vectorcall of the NvLinkInfo_v2 type to allocate the wrapper. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25190, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25191
 *     """
 *     cdef NvLinkInfo_v2 info_py = NvLinkInfo_v2()
 *     cdef nvmlNvLinkInfo_t *info = <nvmlNvLinkInfo_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24)
 *     with nogil:
 */
  /* _get_ptr() yields the address (as intptr_t) of the C struct owned by
   * the Python wrapper; reinterpret it as nvmlNvLinkInfo_t*.  The pointer
   * stays valid because info_py is kept alive until the function returns. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25191, __pyx_L1_error)
  __pyx_v_info = ((nvmlNvLinkInfo_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25192
 *     cdef NvLinkInfo_v2 info_py = NvLinkInfo_v2()
 *     cdef nvmlNvLinkInfo_t *info = <nvmlNvLinkInfo_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkInfo(<Device>device, info)
 */
  /* NVML versioned-API header: low bits = struct size, byte 3 = version (2). */
  __pyx_v_info->version = ((sizeof(nvmlNvLinkInfo_v2_t)) | 0x2000000);

  /* "cuda/bindings/_nvml.pyx":25193
 *     cdef nvmlNvLinkInfo_t *info = <nvmlNvLinkInfo_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkInfo(<Device>device, info)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25194
 *     info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkInfo(<Device>device, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
 */
        /* The sentinel status means the lazy loader failed to resolve the
         * symbol; in that case an exception is checked with the GIL held. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25194, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25193
 *     cdef nvmlNvLinkInfo_t *info = <nvmlNvLinkInfo_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetNvLinkInfo(<Device>device, info)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error inside the nogil region: reacquire the GIL before unwinding */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25195
 *     with nogil:
 *         __status__ = nvmlDeviceGetNvLinkInfo(<Device>device, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25195, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25196
 *         __status__ = nvmlDeviceGetNvLinkInfo(<Device>device, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Success: return the populated wrapper as a new reference. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25179
 * 
 * 
 * cpdef object device_get_nvlink_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Query NVLINK information associated with this device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owning reference to the wrapper (caller holds its own). */
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_631device_get_nvlink_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_630device_get_nvlink_info, "device_get_nvlink_info(intptr_t device)\n\nQuery NVLINK information associated with this device.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n\nReturns:\n    nvmlNvLinkInfo_v2_t: Reference to ``nvmlNvLinkInfo_t``.\n\n.. seealso:: `nvmlDeviceGetNvLinkInfo`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_631device_get_nvlink_info = {"device_get_nvlink_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_631device_get_nvlink_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_630device_get_nvlink_info};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for `device_get_nvlink_info`.
 * Parses exactly one argument (`device`, positional or keyword), converts
 * it to intptr_t, and dispatches to the __pyx_pf_ implementation.
 * Returns NULL with a Python exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_631device_get_nvlink_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* owned refs to parsed arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_nvlink_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which is always false because
     * unlikely(x) expands to __builtin_expect(!!(x), 0) (value 0 or 1) on
     * GCC/Clang, so a negative (error) kwargs count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25179, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25179, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_nvlink_info", 0) < (0)) __PYX_ERR(0, 25179, __pyx_L3_error)
      /* Any slot still empty after merging means a required argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_nvlink_info", 1, 1, 1, i); __PYX_ERR(0, 25179, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25179, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t; -1 with an exception set is an error. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25179, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_nvlink_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25179, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_630device_get_nvlink_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatcher for `device_get_nvlink_info`: forwards the device
 * handle to the C-level (cpdef) implementation with skip_dispatch=1 and
 * propagates its owned result reference, or NULL if an exception was set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_630device_get_nvlink_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_result = NULL;  /* owned reference produced by the C impl */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_nvlink_info", 0);
  /* Call the cpdef C entry point; NULL means a Python exception is pending. */
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_info(__pyx_v_device, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 25179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  /* Hand the reference over to the return slot. */
  __pyx_r = __pyx_result;
  __pyx_result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record the traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_nvlink_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25199
 * 
 * 
 * cpdef device_read_write_prm_v1(intptr_t device, intptr_t buffer):             # <<<<<<<<<<<<<<
 *     """Read or write a GPU PRM register. The input is assumed to be in TLV format in network byte order.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_633device_read_write_prm_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef `device_read_write_prm_v1`.
 * Reinterprets `buffer` (a caller-supplied raw address — no validation is
 * performed here) as nvmlPRMTLV_v1_t*, calls nvmlDeviceReadWritePRM_v1 with
 * the GIL released, raises through `check_status` on a bad status, and
 * returns None on success or NULL with a Python exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_read_write_prm_v1(intptr_t __pyx_v_device, intptr_t __pyx_v_buffer, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_read_write_prm_v1", 0);

  /* "cuda/bindings/_nvml.pyx":25208
 *     .. seealso:: `nvmlDeviceReadWritePRM_v1`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceReadWritePRM_v1(<Device>device, <nvmlPRMTLV_v1_t*>buffer)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25209
 *     """
 *     with nogil:
 *         __status__ = nvmlDeviceReadWritePRM_v1(<Device>device, <nvmlPRMTLV_v1_t*>buffer)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* The sentinel status means the lazy loader failed to resolve the
         * symbol; in that case an exception is checked with the GIL held. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceReadWritePRM_v1(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlPRMTLV_v1_t *)__pyx_v_buffer)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25209, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25208
 *     .. seealso:: `nvmlDeviceReadWritePRM_v1`
 *     """
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceReadWritePRM_v1(<Device>device, <nvmlPRMTLV_v1_t*>buffer)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        /* error inside the nogil region: reacquire the GIL before unwinding */
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25210
 *     with nogil:
 *         __status__ = nvmlDeviceReadWritePRM_v1(<Device>device, <nvmlPRMTLV_v1_t*>buffer)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25210, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25199
 * 
 * 
 * cpdef device_read_write_prm_v1(intptr_t device, intptr_t buffer):             # <<<<<<<<<<<<<<
 *     """Read or write a GPU PRM register. The input is assumed to be in TLV format in network byte order.
 * 
 */

  /* function exit code */
  /* cpdef with no `return` statement: success yields None. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings._nvml.device_read_write_prm_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_633device_read_write_prm_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_632device_read_write_prm_v1, "device_read_write_prm_v1(intptr_t device, intptr_t buffer)\n\nRead or write a GPU PRM register. The input is assumed to be in TLV format in network byte order.\n\nArgs:\n    device (intptr_t): Identifer of target GPU device.\n    buffer (intptr_t): Structure holding the input data in TLV format as well as the PRM register contents in TLV format (in the case of a successful read operation). Note: the input data and any returned data shall be in network byte order.\n\n.. seealso:: `nvmlDeviceReadWritePRM_v1`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_633device_read_write_prm_v1 = {"device_read_write_prm_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_633device_read_write_prm_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_632device_read_write_prm_v1};
/* Python wrapper (METH_FASTCALL|METH_KEYWORDS) for `device_read_write_prm_v1`.
 * Parses exactly two arguments (`device`, `buffer`; positional or keyword),
 * converts both to intptr_t, and dispatches to the __pyx_pf_ implementation.
 * Returns NULL with a Python exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_633device_read_write_prm_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  intptr_t __pyx_v_buffer;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};  /* owned refs to parsed arguments */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_read_write_prm_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_buffer,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`, which is always false because
     * unlikely(x) expands to __builtin_expect(!!(x), 0) (value 0 or 1) on
     * GCC/Clang, so a negative (error) kwargs count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25199, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25199, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25199, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_read_write_prm_v1", 0) < (0)) __PYX_ERR(0, 25199, __pyx_L3_error)
      /* Any slot still empty after merging means a required argument is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_read_write_prm_v1", 1, 2, 2, i); __PYX_ERR(0, 25199, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25199, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25199, __pyx_L3_error)
    }
    /* Convert both Python ints to intptr_t; -1 with an exception set is an error. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25199, __pyx_L3_error)
    __pyx_v_buffer = PyLong_AsSsize_t(values[1]); if (unlikely((__pyx_v_buffer == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25199, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_read_write_prm_v1", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25199, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_read_write_prm_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_632device_read_write_prm_v1(__pyx_self, __pyx_v_device, __pyx_v_buffer);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_632device_read_write_prm_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, intptr_t __pyx_v_buffer) {
  /* Python-level dispatcher for the cpdef function device_read_write_prm_v1:
   * forwards to the C implementation with skip_dispatch=1 and returns its
   * result (a new reference), or NULL with a traceback entry on failure. */
  PyObject *__pyx_r = NULL;
  PyObject *__pyx_result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_read_write_prm_v1", 0);
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_device_read_write_prm_v1(__pyx_v_device, __pyx_v_buffer, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 25199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  __pyx_r = __pyx_result;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;

  /* error exit: record traceback and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_read_write_prm_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
}

/* "cuda/bindings/_nvml.pyx":25213
 * 
 * 
 * cpdef object device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """GPU instance profile query function that accepts profile ID, instead of profile name. It accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_635device_get_gpu_instance_profile_info_by_id_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_by_id_v(intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  /* C implementation of the cpdef function
   * device_get_gpu_instance_profile_info_by_id_v:
   *   1. allocate a GpuInstanceProfileInfo_v2 Python wrapper object,
   *   2. stamp the version field of the wrapped C struct,
   *   3. call nvmlDeviceGetGpuInstanceProfileInfoByIdV with the GIL released,
   *   4. raise via check_status() if NVML reported failure,
   *   5. return the populated wrapper (new reference), or NULL on error. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *__pyx_v_info_py = 0;
  nvmlGpuInstanceProfileInfo_v2_t *__pyx_v_info;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_profile_info_by_id_v", 0);

  /* "cuda/bindings/_nvml.pyx":25225
 *     .. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoByIdV`
 *     """
 *     cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
*/
  /* Instantiate the GpuInstanceProfileInfo_v2 extension type via a
   * zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25225, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_info_py = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25226
 *     """
 *     cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2()
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())             # <<<<<<<<<<<<<<
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:
*/
  /* Borrow the raw struct pointer owned by the wrapper object; the wrapper
   * stays alive (__pyx_v_info_py reference) for the pointer's whole use. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)__pyx_v_info_py->__pyx_vtab)->_get_ptr(__pyx_v_info_py); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25226, __pyx_L1_error)
  __pyx_v_info = ((nvmlGpuInstanceProfileInfo_v2_t *)((intptr_t)__pyx_t_4));

  /* "cuda/bindings/_nvml.pyx":25227
 *     cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2()
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(<Device>device, profile_id, info)
*/
  /* NOTE(review): the Python wrapper is the *_v2 type, but the version
   * field requests the v3 layout (sizeof(v3) | 3 << 24).  Confirm the v2
   * wrapper allocates a buffer large enough for the v3 struct NVML will
   * fill in. */
  __pyx_v_info->version = ((sizeof(nvmlGpuInstanceProfileInfo_v3_t)) | 0x3000000);

  /* "cuda/bindings/_nvml.pyx":25228
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(<Device>device, profile_id, info)
 *     check_status(__status__)
*/
  /* GIL released around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25229
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(<Device>device, profile_id, info)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return info_py
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals that the
         * lazy-loading shim failed and set a Python exception (checked with
         * the GIL re-acquired). */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoByIdV(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, __pyx_v_info); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25229, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25228
 *     cdef nvmlGpuInstanceProfileInfo_v2_t *info = <nvmlGpuInstanceProfileInfo_v2_t *><intptr_t>(info_py._get_ptr())
 *     info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(<Device>device, profile_id, info)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25230
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(<Device>device, profile_id, info)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return info_py
 * 
*/
  /* check_status returns 1 when it has set a Python exception for a
   * non-success NVML status. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25230, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25231
 *         __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(<Device>device, profile_id, info)
 *     check_status(__status__)
 *     return info_py             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_info_py);
  __pyx_r = ((PyObject *)__pyx_v_info_py);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25213
 * 
 * 
 * cpdef object device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """GPU instance profile query function that accepts profile ID, instead of profile name. It accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_profile_info_by_id_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_info_py);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_635device_get_gpu_instance_profile_info_by_id_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_634device_get_gpu_instance_profile_info_by_id_v, "device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id)\n\nGPU instance profile query function that accepts profile ID, instead of profile name. It accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.\n\nArgs:\n    device (intptr_t): The identifier of the target device.\n    profile_id (unsigned int): One of the profile IDs.\n\nReturns:\n    nvmlGpuInstanceProfileInfo_v2_t: Returns detailed profile information.\n\n.. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoByIdV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_635device_get_gpu_instance_profile_info_by_id_v = {"device_get_gpu_instance_profile_info_by_id_v", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_635device_get_gpu_instance_profile_info_by_id_v, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_634device_get_gpu_instance_profile_info_by_id_v};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_635device_get_gpu_instance_profile_info_by_id_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* Python wrapper for device_get_gpu_instance_profile_info_by_id_v:
   * unpacks (device, profile_id) from the fastcall/tuple calling
   * convention, converts them to C types, and forwards to the
   * implementation.  Returns a new reference, or NULL on error. */
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instance_profile_info_by_id_v (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() expands to
     * __builtin_expect(!!(x), 0), whose 0/1 result is never negative, so
     * the error branch was dead and a failed keyword-count query was
     * silently ignored.  The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25213, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords and
       * verify all required arguments were supplied. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25213, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25213, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instance_profile_info_by_id_v", 0) < (0)) __PYX_ERR(0, 25213, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_profile_info_by_id_v", 1, 2, 2, i); __PYX_ERR(0, 25213, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25213, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25213, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25213, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25213, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instance_profile_info_by_id_v", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25213, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_profile_info_by_id_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_634device_get_gpu_instance_profile_info_by_id_v(__pyx_self, __pyx_v_device, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_634device_get_gpu_instance_profile_info_by_id_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id) {
  /* Python-level dispatcher for the cpdef function
   * device_get_gpu_instance_profile_info_by_id_v: forwards to the C
   * implementation with skip_dispatch=1 and returns its result (a new
   * reference), or NULL with a traceback entry on failure. */
  PyObject *__pyx_r = NULL;
  PyObject *__pyx_result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instance_profile_info_by_id_v", 0);
  __pyx_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_by_id_v(__pyx_v_device, __pyx_v_profile_id, 1);
  if (unlikely(!__pyx_result)) __PYX_ERR(0, 25213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_result);
  __pyx_r = __pyx_result;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;

  /* error exit: record traceback and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instance_profile_info_by_id_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
}

/* "cuda/bindings/_nvml.pyx":25234
 * 
 * 
 * cpdef object system_get_topology_gpu_set(unsigned int cpuNumber):             # <<<<<<<<<<<<<<
 *     """Retrieve the set of GPUs that have a CPU affinity with the given CPU number
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_637system_get_topology_gpu_set(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_topology_gpu_set(unsigned int __pyx_v_cpuNumber, CYTHON_UNUSED int __pyx_skip_dispatch) {
  /* C implementation of the cpdef function system_get_topology_gpu_set.
   * Two-phase NVML query:
   *   1. call nvmlSystemGetTopologyGpuSet with a NULL array to learn the
   *      device count (GIL released),
   *   2. if the count is zero, return an empty slice of a 1-element
   *      cython.view array; otherwise allocate a count-sized view array
   *      and call again to fill it with device handles.
   * Returns the array (new reference), or NULL on error. */
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_deviceArray = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_topology_gpu_set", 0);

  /* "cuda/bindings/_nvml.pyx":25243
 *         array: An array of device handles for GPUs found with affinity to  cpuNumber
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)
*/
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25244
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
*/
  /* Phase 1: size query with NULL output array, GIL released. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25245
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if count[0] == 0:
*/
        /* The sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals the
         * lazy-loading shim failed and set a Python exception. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetTopologyGpuSet(__pyx_v_cpuNumber, ((unsigned int *)__pyx_v_count), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25245, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25244
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25246
 *     with nogil:
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
*/
  /* check_status_size returns 1 when it has set a Python exception
   * (size-query variant tolerant of the insufficient-size status). */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25246, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25247
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
*/
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25248
 *     check_status_size(__status__)
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
*/
    /* No GPUs with affinity: return an empty [:0] slice of a 1-element
     * array (view.array cannot be constructed with shape (0,)).
     * NOTE(review): itemsize is sizeof(intptr_t) while format is "i"
     * (4-byte int) — confirm downstream consumers rely on itemsize, not
     * on the struct format string. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25248, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25248, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25248, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25248, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25248, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25248, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25247
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, NULL)
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":25249
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, <nvmlDevice_t *>deviceArray.data)
*/
  /* Phase 2 setup: allocate a count-sized view.array to receive the
   * nvmlDevice_t handles. */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 25249, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25249, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25249, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25249, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25249, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25249, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25249, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_deviceArray = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25250
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)
*/
  /* Phase 2: fetch the device handles into the array, GIL released. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25251
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, <nvmlDevice_t *>deviceArray.data)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return deviceArray
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetTopologyGpuSet(__pyx_v_cpuNumber, ((unsigned int *)__pyx_v_count), ((nvmlDevice_t *)__pyx_v_deviceArray->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25251, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25250
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25252
 *     with nogil:
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return deviceArray
 * 
*/
  /* Strict status check this time: any non-success status raises. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25252, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25253
 *         __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, <unsigned int*>count, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)
 *     return deviceArray             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_deviceArray);
  __pyx_r = ((PyObject *)__pyx_v_deviceArray);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25234
 * 
 * 
 * cpdef object system_get_topology_gpu_set(unsigned int cpuNumber):             # <<<<<<<<<<<<<<
 *     """Retrieve the set of GPUs that have a CPU affinity with the given CPU number
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_topology_gpu_set", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_deviceArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_637system_get_topology_gpu_set(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_636system_get_topology_gpu_set, "system_get_topology_gpu_set(unsigned int cpuNumber)\n\nRetrieve the set of GPUs that have a CPU affinity with the given CPU number\n\nArgs:\n    cpuNumber (unsigned int): The CPU number\n\nReturns:\n    array: An array of device handles for GPUs found with affinity to  cpuNumber");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_637system_get_topology_gpu_set = {"system_get_topology_gpu_set", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_637system_get_topology_gpu_set, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_636system_get_topology_gpu_set};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_637system_get_topology_gpu_set(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* Python wrapper for system_get_topology_gpu_set: unpacks the single
   * cpuNumber argument from the fastcall/tuple calling convention,
   * converts it to unsigned int, and forwards to the implementation.
   * Returns a new reference, or NULL on error. */
  unsigned int __pyx_v_cpuNumber;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_topology_gpu_set (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_cpuNumber,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: was `unlikely(__pyx_kwds_len) < 0` — unlikely() expands to
     * __builtin_expect(!!(x), 0), whose 0/1 result is never negative, so
     * the error branch was dead and a failed keyword-count query was
     * silently ignored.  The comparison belongs inside unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25234, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positionals first, then merge keywords and
       * verify the required argument was supplied. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25234, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "system_get_topology_gpu_set", 0) < (0)) __PYX_ERR(0, 25234, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("system_get_topology_gpu_set", 1, 1, 1, i); __PYX_ERR(0, 25234, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25234, __pyx_L3_error)
    }
    __pyx_v_cpuNumber = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_cpuNumber == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25234, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("system_get_topology_gpu_set", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25234, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_topology_gpu_set", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_636system_get_topology_gpu_set(__pyx_self, __pyx_v_cpuNumber);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-level dispatcher for system_get_topology_gpu_set.
 * Forwards directly to the cpdef C implementation with skip_dispatch=1
 * (no Python-override lookup is performed at this level).
 * Returns a new reference on success, NULL with an exception set on failure.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_636system_get_topology_gpu_set(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_cpuNumber) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_topology_gpu_set", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a Python exception raised inside the implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_topology_gpu_set(__pyx_v_cpuNumber, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  /* Add a traceback frame naming the Python-visible function before propagating. */
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_topology_gpu_set", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25256
 * 
 * 
 * cpdef str system_get_driver_branch():             # <<<<<<<<<<<<<<
 *     """Retrieves the driver branch of the NVIDIA driver installed on the system.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_639system_get_driver_branch(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* C implementation of `cpdef str system_get_driver_branch()`.
 * Initializes the NVML versioned struct (size ORed with version 1 in the
 * top byte, per the .pyx comment below), calls nvmlSystemGetDriverBranch
 * with the GIL released, raises via check_status on a non-success status,
 * and converts info.branch to a Python str.
 * NOTE(review): PyUnicode_FromString assumes info.branch is NUL-terminated
 * by NVML -- confirm against the NVML API contract.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_system_get_driver_branch(CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlSystemDriverBranchInfo_t __pyx_v_info;
  unsigned int __pyx_v_length;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_driver_branch", 0);

  /* "cuda/bindings/_nvml.pyx":25266
 *     # Needs to be updated if the version of the nvmlSystemDriverBranchInfo_t
 *     # struct changes in the future.
 *     info.version = sizeof(nvmlSystemDriverBranchInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     cdef unsigned int length = 80
 *     with nogil:
 */
  /* 0x1000000 == (1 << 24): struct version 1 encoded in the top byte. */
  __pyx_v_info.version = ((sizeof(nvmlSystemDriverBranchInfo_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":25267
 *     # struct changes in the future.
 *     info.version = sizeof(nvmlSystemDriverBranchInfo_v1_t) | (1 << 24)
 *     cdef unsigned int length = 80             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlSystemGetDriverBranch(&info, length)
 */
  __pyx_v_length = 80;

  /* "cuda/bindings/_nvml.pyx":25268
 *     info.version = sizeof(nvmlSystemDriverBranchInfo_v1_t) | (1 << 24)
 *     cdef unsigned int length = 80
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetDriverBranch(&info, length)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25269
 *     cdef unsigned int length = 80
 *     with nogil:
 *         __status__ = nvmlSystemGetDriverBranch(&info, length)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(info.branch)
 */
        /* The sentinel status marks an internal library-loading failure that
         * already set a Python exception (checked once the GIL is re-acquired). */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverBranch((&__pyx_v_info), __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25269, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25268
 *     info.version = sizeof(nvmlSystemDriverBranchInfo_v1_t) | (1 << 24)
 *     cdef unsigned int length = 80
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlSystemGetDriverBranch(&info, length)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: re-acquire the GIL before unwinding. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25270
 *     with nogil:
 *         __status__ = nvmlSystemGetDriverBranch(&info, length)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(info.branch)
 * 
 */
  /* check_status returns 1 (with an exception set) for a non-success status. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25270, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25271
 *         __status__ = nvmlSystemGetDriverBranch(&info, length)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(info.branch)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_info.branch); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25271, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25256
 * 
 * 
 * cpdef str system_get_driver_branch():             # <<<<<<<<<<<<<<
 *     """Retrieves the driver branch of the NVIDIA driver installed on the system.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_driver_branch", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_639system_get_driver_branch(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_638system_get_driver_branch, "system_get_driver_branch() -> str\n\nRetrieves the driver branch of the NVIDIA driver installed on the system.\n\nReturns:\n    str: driver branch.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_639system_get_driver_branch = {"system_get_driver_branch", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_639system_get_driver_branch, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_638system_get_driver_branch};
/* METH_NOARGS Python wrapper for system_get_driver_branch: no argument
 * parsing needed, delegates straight to the pf-level dispatcher.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_639system_get_driver_branch(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("system_get_driver_branch (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_638system_get_driver_branch(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-level dispatcher for system_get_driver_branch.
 * Calls the cpdef C implementation with skip_dispatch=1 and propagates
 * errors as NULL with a traceback entry appended.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_638system_get_driver_branch(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("system_get_driver_branch", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a Python exception raised inside the implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_system_get_driver_branch(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25256, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.system_get_driver_branch", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25274
 * 
 * 
 * cpdef object unit_get_devices(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the set of GPU devices that are attached to the specified unit.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_641unit_get_devices(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of `cpdef object unit_get_devices(intptr_t unit)`.
 * Standard NVML two-call pattern: first call nvmlUnitGetDevices with a NULL
 * array to query the device count (status checked by check_status_size,
 * which presumably tolerates the "insufficient size" status -- confirm in
 * its definition), then allocate a Cython view.array of deviceCount handles
 * and call again to fill it. An empty (length-0 slice of a 1-element) array
 * is returned when the unit has no attached devices.
 * NOTE(review): the array is created with itemsize=sizeof(intptr_t)
 * (8 bytes on LP64) but format "i" (4-byte int); verify in the .pyx
 * generator that this mismatch is intended.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_unit_get_devices(intptr_t __pyx_v_unit, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_deviceCount[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_deviceArray = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_devices", 0);

  /* "cuda/bindings/_nvml.pyx":25283
 *         array: An array of device handles for GPUs attached to the unit.
 *     """
 *     cdef unsigned int[1] deviceCount = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)
 */
  /* Initialize the one-element count array to zero (Cython array-literal copy). */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_deviceCount[0]), __pyx_t_1, sizeof(__pyx_v_deviceCount[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25284
 *     """
 *     cdef unsigned int[1] deviceCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)
 *     check_status_size(__status__)
 */
  /* First NVML call (GIL released): NULL array => query the device count only. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25285
 *     cdef unsigned int[1] deviceCount = [0]
 *     with nogil:
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if deviceCount[0] == 0:
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetDevices(((nvmlUnit_t *)__pyx_v_unit), ((unsigned int *)__pyx_v_deviceCount), NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25285, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25284
 *     """
 *     cdef unsigned int[1] deviceCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Error inside the nogil region: re-acquire the GIL before unwinding. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25286
 *     with nogil:
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if deviceCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 */
  /* Size-query variant of the status check; returns 1 with an exception set on failure. */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25286, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25287
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)
 *     check_status_size(__status__)
 *     if deviceCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_deviceCount[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25288
 *     check_status_size(__status__)
 *     if deviceCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 */
    /* No devices: build a 1-element view.array via kwargs vectorcall and slice
     * it to length 0 so the caller always receives an array object. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25288, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25288, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25288, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25288, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25288, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25288, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25288, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    /* [:0] slice -> empty array result. */
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25288, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25287
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, NULL)
 *     check_status_size(__status__)
 *     if deviceCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":25289
 *     if deviceCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, <nvmlDevice_t *>deviceArray.data)
 */
  /* Allocate the result buffer sized by the count reported by the first call. */
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_deviceCount[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 25289, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25289, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25289, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25289, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25289, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25289, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25289, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_deviceArray = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25290
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)
 */
  /* Second NVML call (GIL released): fill the freshly allocated handle array. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25291
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, <nvmlDevice_t *>deviceArray.data)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return deviceArray
 */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetDevices(((nvmlUnit_t *)__pyx_v_unit), ((unsigned int *)__pyx_v_deviceCount), ((nvmlDevice_t *)__pyx_v_deviceArray->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25291, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25290
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25292
 *     with nogil:
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return deviceArray
 * 
 */
  /* Strict status check for the fill call (unlike check_status_size above). */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25292, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25293
 *         __status__ = nvmlUnitGetDevices(<nvmlUnit_t *>unit, <unsigned int*>deviceCount, <nvmlDevice_t *>deviceArray.data)
 *     check_status(__status__)
 *     return deviceArray             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_deviceArray);
  __pyx_r = ((PyObject *)__pyx_v_deviceArray);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25274
 * 
 * 
 * cpdef object unit_get_devices(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the set of GPU devices that are attached to the specified unit.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_devices", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local strong reference; __pyx_r already owns its own reference. */
  __Pyx_XDECREF((PyObject *)__pyx_v_deviceArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_641unit_get_devices(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_640unit_get_devices, "unit_get_devices(intptr_t unit)\n\nRetrieves the set of GPU devices that are attached to the specified unit.\n\nArgs:\n    unit (Unit): The identifier of the target unit.\n\nReturns:\n    array: An array of device handles for GPUs attached to the unit.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_641unit_get_devices = {"unit_get_devices", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_641unit_get_devices, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_640unit_get_devices};
/* FASTCALL/METH_KEYWORDS Python wrapper for unit_get_devices(unit).
 * Parses exactly one positional-or-keyword argument ("unit"), converts it
 * to intptr_t via PyLong_AsSsize_t, then calls the pf-level dispatcher.
 * values[] entries are owned references and are released on every exit path.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_641unit_get_devices(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_unit;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("unit_get_devices (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_unit,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25274, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword path: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25274, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "unit_get_devices", 0) < (0)) __PYX_ERR(0, 25274, __pyx_L3_error)
      /* Verify every required argument slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("unit_get_devices", 1, 1, 1, i); __PYX_ERR(0, 25274, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25274, __pyx_L3_error)
    }
    __pyx_v_unit = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_unit == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25274, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("unit_get_devices", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25274, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release held argument references before returning. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_devices", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_640unit_get_devices(__pyx_self, __pyx_v_unit);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated Python-level dispatcher for unit_get_devices.
 * Forwards the converted intptr_t unit handle to the cpdef C implementation
 * with skip_dispatch=1 and propagates errors as NULL with a traceback entry.
 * Machine-generated by Cython; do not edit by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_640unit_get_devices(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_unit) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("unit_get_devices", 0);
  __Pyx_XDECREF(__pyx_r);
  /* NULL result signals a Python exception raised inside the implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_unit_get_devices(__pyx_v_unit, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.unit_get_devices", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25296
 * 
 * 
 * cpdef object device_get_topology_nearest_gpus(intptr_t device, unsigned int level):             # <<<<<<<<<<<<<<
 *     """Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_643device_get_topology_nearest_gpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_topology_nearest_gpus.
 * Two-call NVML size-query pattern: the first call (array pointer NULL)
 * writes the required element count into count[0]; a view.array of device
 * handles is then allocated and filled by the second call.  An empty slice
 * of a one-element array is returned when count[0] == 0. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_topology_nearest_gpus(intptr_t __pyx_v_device, unsigned int __pyx_v_level, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_deviceArray = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_topology_nearest_gpus", 0);

  /* "cuda/bindings/_nvml.pyx":25306
 *         array: An array of device handles for GPUs found at level
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(
 */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25307
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(
 *             <Device>device,
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25308
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(             # <<<<<<<<<<<<<<
 *             <Device>device,
 *             <nvmlGpuTopologyLevel_t>level,
 */
        /* size query: NULL array pointer, NVML reports the count in count[0] */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyNearestGpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlGpuTopologyLevel_t)__pyx_v_level), __pyx_v_count, NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25308, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25307
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(
 *             <Device>device,
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25314
 *             NULL
 *         )
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25314, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25315
 *         )
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25316
 *     check_status_size(__status__)
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 */
    /* no nearby GPUs: return an empty slice of a dummy one-element array */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25316, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25316, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25316, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25316, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25316, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25316, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25316, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25316, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25315
 *         )
 *     check_status_size(__status__)
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":25317
 *     if count[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(
 */
  __pyx_t_5 = NULL;
  /* BUG FIX: the generated code previously resolved a module-global named
   * "deviceCount" here (undefined -> NameError at runtime whenever
   * count[0] > 0).  The element count produced by the first NVML call is
   * the local count[0]; use it directly to size the result array. */
  __pyx_t_6 = __Pyx_PyLong_FromSize_t(((size_t)(__pyx_v_count[0]))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 25317, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25317, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25317, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25317, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25317, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25317, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25317, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_deviceArray = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25318
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(
 *             <Device>device,
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25319
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(             # <<<<<<<<<<<<<<
 *             <Device>device,
 *             <nvmlGpuTopologyLevel_t>level,
 */
        /* second call: same query with the allocated buffer, fills the handles */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyNearestGpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlGpuTopologyLevel_t)__pyx_v_level), __pyx_v_count, ((nvmlDevice_t *)__pyx_v_deviceArray->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25319, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25318
 *         return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 *     cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTopologyNearestGpus(
 *             <Device>device,
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25325
 *             <nvmlDevice_t *>deviceArray.data
 *         )
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return deviceArray
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25325, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25326
 *         )
 *     check_status(__status__)
 *     return deviceArray             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_deviceArray);
  __pyx_r = ((PyObject *)__pyx_v_deviceArray);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25296
 * 
 * 
 * cpdef object device_get_topology_nearest_gpus(intptr_t device, unsigned int level):             # <<<<<<<<<<<<<<
 *     """Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_topology_nearest_gpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_deviceArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_643device_get_topology_nearest_gpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_642device_get_topology_nearest_gpus, "device_get_topology_nearest_gpus(intptr_t device, unsigned int level)\n\nRetrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level\n\nArgs:\n    device (Device): The identifier of the first device\n    level (GpuTopologyLevel): The level to search for other GPUs\n\nReturns:\n    array: An array of device handles for GPUs found at level");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_643device_get_topology_nearest_gpus = {"device_get_topology_nearest_gpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_643device_get_topology_nearest_gpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_642device_get_topology_nearest_gpus};
/* METH_FASTCALL wrapper: unpacks (device, level) from positional and/or
 * keyword arguments, converts them to C types and delegates to the
 * Python-level implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_643device_get_topology_nearest_gpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_level;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_topology_nearest_gpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_level,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was "unlikely(__pyx_kwds_len) < 0" — with the
     * __builtin_expect-based unlikely() that compares !!(x) (0 or 1)
     * against 0, so the error check could never fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25296, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25296, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25296, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_topology_nearest_gpus", 0) < (0)) __PYX_ERR(0, 25296, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_topology_nearest_gpus", 1, 2, 2, i); __PYX_ERR(0, 25296, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25296, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25296, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25296, __pyx_L3_error)
    __pyx_v_level = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_level == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25296, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_topology_nearest_gpus", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25296, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_topology_nearest_gpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_642device_get_topology_nearest_gpus(__pyx_self, __pyx_v_device, __pyx_v_level);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_642device_get_topology_nearest_gpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_level) {
  /* Python-visible stub for device_get_topology_nearest_gpus: forwards to
   * the C-level implementation (skip_dispatch=1) and returns its reference. */
  PyObject *__pyx_r = NULL;
  PyObject *result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_topology_nearest_gpus", 0);
  result = __pyx_f_4cuda_8bindings_5_nvml_device_get_topology_nearest_gpus(__pyx_v_device, __pyx_v_level, 1); if (unlikely(!result)) __PYX_ERR(0, 25296, __pyx_L1_error)
  __Pyx_GOTREF(result);
  /* hand ownership of the fresh reference to the caller */
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_topology_nearest_gpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25329
 * 
 * 
 * cpdef object device_get_temperature_v(intptr_t device, nvmlTemperatureSensors_t sensorType):             # <<<<<<<<<<<<<<
 *     """Retrieves the current temperature readings (in degrees C) for the given device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_645device_get_temperature_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_temperature_v.
 * Fills a versioned nvmlTemperature_v1_t request struct (version word +
 * sensor type), calls nvmlDeviceGetTemperatureV with the GIL released,
 * raises on a failing status, and returns the retrieved temperature field
 * as a Python int. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_v(intptr_t __pyx_v_device, nvmlTemperatureSensors_t __pyx_v_sensorType, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlTemperature_v1_t __pyx_v_temperature[1];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_temperature_v", 0);

  /* "cuda/bindings/_nvml.pyx":25341
 *     """
 *     cdef nvmlTemperature_v1_t[1] temperature
 *     temperature[0].version = sizeof(nvmlTemperature_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *     temperature[0].sensorType = <nvmlTemperatureSensors_t>sensorType
 * 
 */
  /* NVML versioned-struct convention: low bits carry the struct size,
   * 0x1000000 (1 << 24) encodes API version 1 in the high byte. */
  (__pyx_v_temperature[0]).version = ((sizeof(nvmlTemperature_v1_t)) | 0x1000000);

  /* "cuda/bindings/_nvml.pyx":25342
 *     cdef nvmlTemperature_v1_t[1] temperature
 *     temperature[0].version = sizeof(nvmlTemperature_v1_t) | (1 << 24)
 *     temperature[0].sensorType = <nvmlTemperatureSensors_t>sensorType             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  (__pyx_v_temperature[0]).sensorType = ((nvmlTemperatureSensors_t)__pyx_v_sensorType);

  /* "cuda/bindings/_nvml.pyx":25344
 *     temperature[0].sensorType = <nvmlTemperatureSensors_t>sensorType
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTemperatureV(<Device>device, temperature)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25345
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetTemperatureV(<Device>device, temperature)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return temperature.temperature
 */
        /* the sentinel status signals a library-loading failure raised with
         * the GIL held, hence the combined status + ErrOccurred check */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureV(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_temperature); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25345, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25344
 *     temperature[0].sensorType = <nvmlTemperatureSensors_t>sensorType
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetTemperatureV(<Device>device, temperature)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25346
 *     with nogil:
 *         __status__ = nvmlDeviceGetTemperatureV(<Device>device, temperature)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return temperature.temperature
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25346, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25347
 *         __status__ = nvmlDeviceGetTemperatureV(<Device>device, temperature)
 *     check_status(__status__)
 *     return temperature.temperature             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  /* NOTE(review): From_int implies the struct's temperature field is a
   * signed int here — confirm against the nvmlTemperature_v1_t declaration. */
  __pyx_t_3 = __Pyx_PyLong_From_int(__pyx_v_temperature->temperature); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25329
 * 
 * 
 * cpdef object device_get_temperature_v(intptr_t device, nvmlTemperatureSensors_t sensorType):             # <<<<<<<<<<<<<<
 *     """Retrieves the current temperature readings (in degrees C) for the given device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_temperature_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_645device_get_temperature_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_644device_get_temperature_v, "device_get_temperature_v(intptr_t device, nvmlTemperatureSensors_t sensorType)\n\nRetrieves the current temperature readings (in degrees C) for the given device.\n\nArgs:\n    device (intptr_t): Target device identifier.\n\nReturns:\n    nvmlTemperature_v1_t: Structure specifying the sensor type (input) and retrieved temperature value (output).\n\n.. seealso:: `nvmlDeviceGetTemperatureV`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_645device_get_temperature_v = {"device_get_temperature_v", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_645device_get_temperature_v, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_644device_get_temperature_v};
/* METH_FASTCALL wrapper: unpacks (device, sensorType) from positional
 * and/or keyword arguments, converts them to C types and delegates to the
 * Python-level implementation function. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_645device_get_temperature_v(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  nvmlTemperatureSensors_t __pyx_v_sensorType;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_temperature_v (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_sensorType,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was "unlikely(__pyx_kwds_len) < 0" — with the
     * __builtin_expect-based unlikely() that compares !!(x) (0 or 1)
     * against 0, so the error check could never fire. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25329, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25329, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25329, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_temperature_v", 0) < (0)) __PYX_ERR(0, 25329, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_temperature_v", 1, 2, 2, i); __PYX_ERR(0, 25329, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25329, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25329, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25329, __pyx_L3_error)
    __pyx_v_sensorType = ((nvmlTemperatureSensors_t)__Pyx_PyLong_As_nvmlTemperatureSensors_t(values[1])); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25329, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_temperature_v", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25329, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_temperature_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_644device_get_temperature_v(__pyx_self, __pyx_v_device, __pyx_v_sensorType);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_644device_get_temperature_v(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, nvmlTemperatureSensors_t __pyx_v_sensorType) {
  /* Python-visible stub for device_get_temperature_v: forwards to the
   * C-level implementation (skip_dispatch=1) and returns its reference. */
  PyObject *__pyx_r = NULL;
  PyObject *result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_temperature_v", 0);
  result = __pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_v(__pyx_v_device, __pyx_v_sensorType, 1); if (unlikely(!result)) __PYX_ERR(0, 25329, __pyx_L1_error)
  __Pyx_GOTREF(result);
  /* hand ownership of the fresh reference to the caller */
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: drop any partial result and record a traceback frame */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_temperature_v", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25350
 * 
 * 
 * cpdef object device_get_supported_performance_states(intptr_t device, unsigned int size):             # <<<<<<<<<<<<<<
 *     """Get all supported Performance States (P-States) for the device.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_647device_get_supported_performance_states(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_supported_performance_states`.
 * Allocates a C-contiguous `view.array` of `size` unsigned ints, fills it via
 * nvmlDeviceGetSupportedPerformanceStates() with the GIL released, checks the
 * NVML status (raising on error), and returns the array.  For size == 0 it
 * returns a [:0] slice of a 1-element array, since view.array cannot be
 * constructed with a zero-length shape.  The byte count passed to NVML is
 * size * sizeof(nvmlPstates_t), per the header's "size in bytes" contract
 * (see the embedded .pyx comments below). */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_performance_states(intptr_t __pyx_v_device, unsigned int __pyx_v_size, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_array_obj *__pyx_v_pstates = 0;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  nvmlReturn_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_performance_states", 0);

  /* "cuda/bindings/_nvml.pyx":25357
 *         size (unsigned int): The number of states to return.
 *     """
 *     if size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pstates = view.array(shape=(size,), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  __pyx_t_1 = (__pyx_v_size == 0);
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":25358
 *     """
 *     if size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array pstates = view.array(shape=(size,), itemsize=sizeof(unsigned int), format="I", mode="c")
 * 
*/
    /* Empty result: build a 1-element "I"-format array, then return its
     * empty [:0] slice via the cached slice constant. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = NULL;
    __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25358, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_3, NULL};
      __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25358, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25358, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25358, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_6, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25358, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_6, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25358, __pyx_L1_error)
      __pyx_t_2 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25358, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_2);
    }
    __pyx_t_6 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_2), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25358, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_6;
    __pyx_t_6 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25357
 *         size (unsigned int): The number of states to return.
 *     """
 *     if size == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pstates = view.array(shape=(size,), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":25359
 *     if size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array pstates = view.array(shape=(size,), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 * 
 *     # The header says "size is the size of the pstates array in bytes".
*/
  /* Allocate the output buffer: shape=(size,), itemsize=sizeof(unsigned int),
   * C-contiguous, format "I" (unsigned int). */
  __pyx_t_2 = NULL;
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25359, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25359, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25359, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25359, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25359, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_3, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25359, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_4, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25359, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25359, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25359, __pyx_L1_error)
    __pyx_t_6 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25359, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_6);
  }
  __pyx_v_pstates = ((struct __pyx_array_obj *)__pyx_t_6);
  __pyx_t_6 = 0;

  /* "cuda/bindings/_nvml.pyx":25363
 *     # The header says "size is the size of the pstates array in bytes".
 *     # The size of an enum in C is implementation-defined, so we multiply by `sizeof(nvmlPstates_t)` here.
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedPerformanceStates(
 *             <Device>device,
*/
  /* Release the GIL for the NVML call; __pyx_L5_error re-acquires it before
   * unwinding into the Python error path. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25364
 *     # The size of an enum in C is implementation-defined, so we multiply by `sizeof(nvmlPstates_t)` here.
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedPerformanceStates(             # <<<<<<<<<<<<<<
 *             <Device>device,
 *             <nvmlPstates_t *>pstates.data,
*/
        __pyx_t_8 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedPerformanceStates(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlPstates_t *)__pyx_v_pstates->data), (__pyx_v_size * (sizeof(nvmlPstates_t)))); if (unlikely(__pyx_t_8 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25364, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_8;
      }

      /* "cuda/bindings/_nvml.pyx":25363
 *     # The header says "size is the size of the pstates array in bytes".
 *     # The size of an enum in C is implementation-defined, so we multiply by `sizeof(nvmlPstates_t)` here.
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedPerformanceStates(
 *             <Device>device,
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25369
 *             size * sizeof(nvmlPstates_t)
 *         )
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return pstates
 * 
*/
  /* check_status returns 1 when it raised a Python exception for a bad status. */
  __pyx_t_9 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_9 == ((int)1))) __PYX_ERR(0, 25369, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25370
 *         )
 *     check_status(__status__)
 *     return pstates             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pstates);
  __pyx_r = ((PyObject *)__pyx_v_pstates);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25350
 * 
 * 
 * cpdef object device_get_supported_performance_states(intptr_t device, unsigned int size):             # <<<<<<<<<<<<<<
 *     """Get all supported Performance States (P-States) for the device.
 * 
*/

  /* function exit code */
  /* error path: release any live temporaries, record traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_performance_states", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pstates);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Python wrapper for cpdef `device_get_supported_performance_states`.
 * Parses the two positional/keyword arguments (`device`, `size`) from a
 * METH_FASTCALL call, converts them to `intptr_t` / `unsigned int`, and
 * dispatches to the pf_ implementation.
 *
 * Fix: the keyword-length validity test previously read
 * `unlikely(__pyx_kwds_len) < 0`, which applies the branch-prediction hint to
 * the length value itself and leaves the `< 0` comparison un-hinted. The hint
 * now wraps the whole condition. Behavior is unchanged (`__builtin_expect`
 * returns its first argument), only the compiler hint is corrected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_647device_get_supported_performance_states(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_646device_get_supported_performance_states, "device_get_supported_performance_states(intptr_t device, unsigned int size)\n\nGet all supported Performance States (P-States) for the device.\n\nArgs:\n    device (Device): The identifier of the target device.\n    size (unsigned int): The number of states to return.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_647device_get_supported_performance_states = {"device_get_supported_performance_states", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_647device_get_supported_performance_states, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_646device_get_supported_performance_states};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_647device_get_supported_performance_states(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_size;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_supported_performance_states (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_size,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fixed: hint the whole comparison, not just the operand */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25350, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25350, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25350, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_supported_performance_states", 0) < (0)) __PYX_ERR(0, 25350, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_supported_performance_states", 1, 2, 2, i); __PYX_ERR(0, 25350, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25350, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25350, __pyx_L3_error)
    }
    /* Convert to C types; -1 with an exception pending signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25350, __pyx_L3_error)
    __pyx_v_size = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_size == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25350, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_supported_performance_states", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25350, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_performance_states", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_646device_get_supported_performance_states(__pyx_self, __pyx_v_device, __pyx_v_size);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level entry for cpdef `device_get_supported_performance_states`:
 * forwards to the C implementation with skip_dispatch=1 and propagates the
 * resulting new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_646device_get_supported_performance_states(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_size) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_performance_states", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level function; NULL means an exception was raised. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_performance_states(__pyx_v_device, __pyx_v_size, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25350, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_performance_states", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25373
 * 
 * 
 * cpdef object device_get_running_process_detail_list(intptr_t device, unsigned int mode):             # <<<<<<<<<<<<<<
 *     """Get information about running processes on a device for input context
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_649device_get_running_process_detail_list(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of cpdef `device_get_running_process_detail_list`.
 * Two-pass NVML query pattern: first call with procArray=NULL to learn
 * numProcArrayEntries (GIL released), then allocate a ProcessDetail_v1 buffer
 * of that size, attach it to the list via `plist.proc_array`, and call again
 * to fill it. The version field encodes sizeof(struct) | (1 << 24) per NVML's
 * versioned-struct convention. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_running_process_detail_list(intptr_t __pyx_v_device, unsigned int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *__pyx_v_plist = 0;
  nvmlProcessDetailList_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v_procArray = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_running_process_detail_list", 0);

  /* "cuda/bindings/_nvml.pyx":25381
 *     """
 * 
 *     cdef ProcessDetailList_v1 plist = ProcessDetailList_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessDetailList_v1_t *ptr = <nvmlProcessDetailList_v1_t *>plist._get_ptr()
 * 
*/
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25381, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_plist = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25382
 * 
 *     cdef ProcessDetailList_v1 plist = ProcessDetailList_v1()
 *     cdef nvmlProcessDetailList_v1_t *ptr = <nvmlProcessDetailList_v1_t *>plist._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     # Get size of array
*/
  /* _get_ptr() returns the address of the wrapped C struct as an intptr_t. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)__pyx_v_plist->__pyx_vtab)->_get_ptr(__pyx_v_plist); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25382, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlProcessDetailList_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25385
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24)
 *         ptr.mode = mode
*/
  /* First pass (GIL released): procArray=NULL so NVML reports the required
   * entry count in numProcArrayEntries. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25386
 *     # Get size of array
 *     with nogil:
 *         ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.mode = mode
 *         ptr.numProcArrayEntries = 0
*/
        __pyx_v_ptr->version = ((sizeof(nvmlProcessDetailList_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25387
 *     with nogil:
 *         ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24)
 *         ptr.mode = mode             # <<<<<<<<<<<<<<
 *         ptr.numProcArrayEntries = 0
 *         ptr.procArray = NULL
*/
        __pyx_v_ptr->mode = __pyx_v_mode;

        /* "cuda/bindings/_nvml.pyx":25388
 *         ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24)
 *         ptr.mode = mode
 *         ptr.numProcArrayEntries = 0             # <<<<<<<<<<<<<<
 *         ptr.procArray = NULL
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)
*/
        __pyx_v_ptr->numProcArrayEntries = 0;

        /* "cuda/bindings/_nvml.pyx":25389
 *         ptr.mode = mode
 *         ptr.numProcArrayEntries = 0
 *         ptr.procArray = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)
 *     check_status_size(__status__)
*/
        __pyx_v_ptr->procArray = NULL;

        /* "cuda/bindings/_nvml.pyx":25390
 *         ptr.numProcArrayEntries = 0
 *         ptr.procArray = NULL
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRunningProcessDetailList(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25390, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25385
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24)
 *         ptr.mode = mode
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25391
 *         ptr.procArray = NULL
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.numProcArrayEntries == 0:
*/
  /* check_status_size tolerates the expected insufficient-size status of the
   * sizing pass; returns 1 only when it raised for a real error. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25391, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25393
 *     check_status_size(__status__)
 * 
 *     if ptr.numProcArrayEntries == 0:             # <<<<<<<<<<<<<<
 *         return plist
 * 
*/
  __pyx_t_7 = (__pyx_v_ptr->numProcArrayEntries == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25394
 * 
 *     if ptr.numProcArrayEntries == 0:
 *         return plist             # <<<<<<<<<<<<<<
 * 
 *     procArray = ProcessDetail_v1(ptr.numProcArrayEntries)
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_plist);
    __pyx_r = ((PyObject *)__pyx_v_plist);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25393
 *     check_status_size(__status__)
 * 
 *     if ptr.numProcArrayEntries == 0:             # <<<<<<<<<<<<<<
 *         return plist
 * 
*/
  }

  /* "cuda/bindings/_nvml.pyx":25396
 *         return plist
 * 
 *     procArray = ProcessDetail_v1(ptr.numProcArrayEntries)             # <<<<<<<<<<<<<<
 *     plist.proc_array = procArray
 * 
*/
  /* Allocate the detail array sized by the count NVML just reported. */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->numProcArrayEntries); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_8};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25396, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_procArray = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25397
 * 
 *     procArray = ProcessDetail_v1(ptr.numProcArrayEntries)
 *     plist.proc_array = procArray             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
*/
  /* Attaching via the Python attribute presumably also wires ptr->procArray
   * to the buffer inside the setter — behavior lives in the property's
   * implementation elsewhere in this module; confirm there. */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_plist), __pyx_mstate_global->__pyx_n_u_proc_array, ((PyObject *)__pyx_v_procArray)) < (0)) __PYX_ERR(0, 25397, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25399
 *     plist.proc_array = procArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)
 *     check_status(__status__)
*/
  /* Second pass (GIL released): fill the now-allocated array. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25400
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRunningProcessDetailList(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25400, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25399
 *     plist.proc_array = procArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25401
 *     with nogil:
 *         __status__ = nvmlDeviceGetRunningProcessDetailList(<Device>device, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25401, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25373
 * 
 * 
 * cpdef object device_get_running_process_detail_list(intptr_t device, unsigned int mode):             # <<<<<<<<<<<<<<
 *     """Get information about running processes on a device for input context
 * 
*/

  /* function exit code */
  /* NOTE(review): on this successful non-empty path the function returns None,
   * while the numProcArrayEntries == 0 early path returns `plist`. This
   * asymmetry looks like a missing `return plist` in the .pyx source — confirm
   * and fix upstream in cuda/bindings/_nvml.pyx rather than here. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_running_process_detail_list", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_plist);
  __Pyx_XDECREF((PyObject *)__pyx_v_procArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_649device_get_running_process_detail_list(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_648device_get_running_process_detail_list, "device_get_running_process_detail_list(intptr_t device, unsigned int mode)\n\nGet information about running processes on a device for input context\n\nArgs:\n    device (Device): The device handle or MIG handle\n    mode (unsigned int): The process mode (Compute/Graphics/MPSCompute)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_649device_get_running_process_detail_list = {"device_get_running_process_detail_list", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_649device_get_running_process_detail_list, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_648device_get_running_process_detail_list};
/* Python wrapper for cpdef `device_get_running_process_detail_list`.
 * Parses the two positional/keyword arguments (`device`, `mode`) from a
 * METH_FASTCALL call, converts them to `intptr_t` / `unsigned int`, and
 * dispatches to the pf_ implementation.
 *
 * Fix: the keyword-length validity test previously read
 * `unlikely(__pyx_kwds_len) < 0`, which applies the branch-prediction hint to
 * the length value itself and leaves the `< 0` comparison un-hinted. The hint
 * now wraps the whole condition. Behavior is unchanged (`__builtin_expect`
 * returns its first argument), only the compiler hint is corrected. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_649device_get_running_process_detail_list(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_running_process_detail_list (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fixed: hint the whole comparison, not just the operand */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25373, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then merge kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25373, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25373, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_running_process_detail_list", 0) < (0)) __PYX_ERR(0, 25373, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_running_process_detail_list", 1, 2, 2, i); __PYX_ERR(0, 25373, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25373, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25373, __pyx_L3_error)
    }
    /* Convert to C types; -1 with an exception pending signals conversion failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25373, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_mode == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25373, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_running_process_detail_list", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25373, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_running_process_detail_list", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_648device_get_running_process_detail_list(__pyx_self, __pyx_v_device, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible entry shim for device_get_running_process_detail_list():
 * forwards the already-unpacked C arguments to the cpdef C implementation
 * (skip_dispatch=1) and propagates either the new reference or the pending
 * Python exception back to the caller. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_648device_get_running_process_detail_list(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_call_result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_running_process_detail_list", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_call_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_running_process_detail_list(__pyx_v_device, __pyx_v_mode, 1);
  if (unlikely(!__pyx_call_result)) __PYX_ERR(0, 25373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_call_result);
  /* Hand the owned reference over to the return slot. */
  __pyx_r = __pyx_call_result;
  __pyx_call_result = NULL;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_call_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_running_process_detail_list", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25404
 * 
 * 
 * cpdef object device_get_samples(intptr_t device, int type, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Gets recent samples for the GPU.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_651device_get_samples(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of device_get_samples() (cpdef fast path).
 * Classic NVML two-pass size-query pattern:
 *   pass 1: nvmlDeviceGetSamples(..., NULL) fills sample_count only;
 *   pass 2: the call is repeated with a Sample buffer of that size.
 * Returns the empty `samples` object alone when sample_count == 0,
 * otherwise the 2-tuple (sample_val_type, samples).  Returns NULL with a
 * Python exception set on failure.  Both NVML calls run with the GIL
 * released. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_samples(intptr_t __pyx_v_device, int __pyx_v_type, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_sample_count[1];
  unsigned int __pyx_v_sample_val_type[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v_samples = 0;
  nvmlSample_t *__pyx_v_samples_ptr;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  unsigned int __pyx_t_2[1];
  nvmlReturn_t __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  intptr_t __pyx_t_9;
  int __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_samples", 0);

  /* "cuda/bindings/_nvml.pyx":25414
 *     .. seealso:: `nvmlDeviceGetSamples`
 *     """
 *     cdef unsigned int[1] sample_count = [0]             # <<<<<<<<<<<<<<
 *     cdef unsigned int[1] sample_val_type = [0]
 *     with nogil:
 */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_sample_count[0]), __pyx_t_1, sizeof(__pyx_v_sample_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25415
 *     """
 *     cdef unsigned int[1] sample_count = [0]
 *     cdef unsigned int[1] sample_val_type = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, NULL)
 */
  __pyx_t_2[0] = 0;
  memcpy(&(__pyx_v_sample_val_type[0]), __pyx_t_2, sizeof(__pyx_v_sample_val_type[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25416
 *     cdef unsigned int[1] sample_count = [0]
 *     cdef unsigned int[1] sample_val_type = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, NULL)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25417
 *     cdef unsigned int[1] sample_val_type = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     cdef Sample samples = Sample(sample_count[0])
 */
        /* Pass 1: NULL sample buffer -> only query how many samples exist.
         * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is a sentinel status used to
         * detect a Python-level failure raised inside the nogil call
         * (presumably lazy symbol loading in cy_nvml — confirm there). */
        __pyx_t_3 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSamples(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__SamplingType)__pyx_v_type), __pyx_v_last_seen_time_stamp, ((__pyx_t_4cuda_8bindings_5_nvml__ValueType *)__pyx_v_sample_val_type), ((unsigned int *)__pyx_v_sample_count), NULL); if (unlikely(__pyx_t_3 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25417, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_3;
      }

      /* "cuda/bindings/_nvml.pyx":25416
 *     cdef unsigned int[1] sample_count = [0]
 *     cdef unsigned int[1] sample_val_type = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25418
 *     with nogil:
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     cdef Sample samples = Sample(sample_count[0])
 *     cdef nvmlSample_t *samples_ptr = <nvmlSample_t *>samples._get_ptr()
 */
  /* check_status_size returns 1 (-> raise) on statuses the size-query pass
   * considers fatal; exact accepted statuses are defined in _nvml.pyx. */
  __pyx_t_4 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_4 == ((int)1))) __PYX_ERR(0, 25418, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25419
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, NULL)
 *     check_status_size(__status__)
 *     cdef Sample samples = Sample(sample_count[0])             # <<<<<<<<<<<<<<
 *     cdef nvmlSample_t *samples_ptr = <nvmlSample_t *>samples._get_ptr()
 *     if sample_count[0] == 0:
 */
  /* Allocate the Python-side Sample buffer sized by the pass-1 count. */
  __pyx_t_6 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_sample_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25419, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_7};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25419, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_5);
  }
  __pyx_v_samples = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_t_5);
  __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25420
 *     check_status_size(__status__)
 *     cdef Sample samples = Sample(sample_count[0])
 *     cdef nvmlSample_t *samples_ptr = <nvmlSample_t *>samples._get_ptr()             # <<<<<<<<<<<<<<
 *     if sample_count[0] == 0:
 *         return samples
 */
  __pyx_t_9 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Sample *)__pyx_v_samples->__pyx_vtab)->_get_ptr(__pyx_v_samples); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25420, __pyx_L1_error)
  __pyx_v_samples_ptr = ((nvmlSample_t *)__pyx_t_9);

  /* "cuda/bindings/_nvml.pyx":25421
 *     cdef Sample samples = Sample(sample_count[0])
 *     cdef nvmlSample_t *samples_ptr = <nvmlSample_t *>samples._get_ptr()
 *     if sample_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return samples
 *     with nogil:
 */
  /* Nothing to fetch: skip pass 2 and return the empty buffer object. */
  __pyx_t_10 = ((__pyx_v_sample_count[0]) == 0);
  if (__pyx_t_10) {

    /* "cuda/bindings/_nvml.pyx":25422
 *     cdef nvmlSample_t *samples_ptr = <nvmlSample_t *>samples._get_ptr()
 *     if sample_count[0] == 0:
 *         return samples             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, samples_ptr)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_samples);
    __pyx_r = ((PyObject *)__pyx_v_samples);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25421
 *     cdef Sample samples = Sample(sample_count[0])
 *     cdef nvmlSample_t *samples_ptr = <nvmlSample_t *>samples._get_ptr()
 *     if sample_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return samples
 *     with nogil:
 */
  }

  /* "cuda/bindings/_nvml.pyx":25423
 *     if sample_count[0] == 0:
 *         return samples
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, samples_ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25424
 *         return samples
 *     with nogil:
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, samples_ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (sample_val_type[0], samples)
 */
        /* Pass 2: same query, now writing into the allocated buffer.
         * NOTE(review): the device may report fewer samples than pass 1 did
         * if time passed between the calls; sample_count is updated in
         * place by NVML to the number actually written. */
        __pyx_t_3 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSamples(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__SamplingType)__pyx_v_type), __pyx_v_last_seen_time_stamp, ((__pyx_t_4cuda_8bindings_5_nvml__ValueType *)__pyx_v_sample_val_type), ((unsigned int *)__pyx_v_sample_count), __pyx_v_samples_ptr); if (unlikely(__pyx_t_3 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25424, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_3;
      }

      /* "cuda/bindings/_nvml.pyx":25423
 *     if sample_count[0] == 0:
 *         return samples
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, samples_ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25425
 *     with nogil:
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, samples_ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (sample_val_type[0], samples)
 * 
 */
  /* Pass 2 uses the strict checker: any non-success status raises. */
  __pyx_t_4 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_4 == ((int)1))) __PYX_ERR(0, 25425, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25426
 *         __status__ = nvmlDeviceGetSamples(<Device>device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, <unsigned int*>sample_count, samples_ptr)
 *     check_status(__status__)
 *     return (sample_val_type[0], samples)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Build the (value_type, samples) result tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __Pyx_PyLong_From_unsigned_int((__pyx_v_sample_val_type[0])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25426, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25426, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_5);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25426, __pyx_L1_error);
  __Pyx_INCREF((PyObject *)__pyx_v_samples);
  __Pyx_GIVEREF((PyObject *)__pyx_v_samples);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)__pyx_v_samples)) != (0)) __PYX_ERR(0, 25426, __pyx_L1_error);
  __pyx_t_5 = 0;
  __pyx_r = __pyx_t_7;
  __pyx_t_7 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25404
 * 
 * 
 * cpdef object device_get_samples(intptr_t device, int type, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Gets recent samples for the GPU.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_samples", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_samples);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_651device_get_samples(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_650device_get_samples, "device_get_samples(intptr_t device, int type, unsigned long long last_seen_time_stamp)\n\nGets recent samples for the GPU.\n\nArgs:\n    device (intptr_t): The identifier for the target device.\n    type (SamplingType): Type of sampling event.\n    last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp.\n\n.. seealso:: `nvmlDeviceGetSamples`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_651device_get_samples = {"device_get_samples", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_651device_get_samples, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_650device_get_samples};
/* METH_FASTCALL|METH_KEYWORDS wrapper for device_get_samples(device, type,
 * last_seen_time_stamp): unpacks/validates the positional and keyword
 * arguments, converts them to C types, and delegates to the _pf impl.
 * Returns NULL with an exception set on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_651device_get_samples(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_type;
  unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_samples (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_type,&__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUG FIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which is always 0 or 1, so `< 0` could
     * never be true and a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison must be
     * inside the unlikely(). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25404, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Slow path: keyword arguments present.  Collect positionals first,
       * then merge keywords by name. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25404, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25404, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25404, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_samples", 0) < (0)) __PYX_ERR(0, 25404, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_samples", 1, 3, 3, i); __PYX_ERR(0, 25404, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly 3 positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25404, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25404, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25404, __pyx_L3_error)
    }
    /* Convert Python objects to the declared C argument types. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25404, __pyx_L3_error)
    __pyx_v_type = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25404, __pyx_L3_error)
    __pyx_v_last_seen_time_stamp = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[2]); if (unlikely((__pyx_v_last_seen_time_stamp == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25404, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_samples", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 25404, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_samples", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_650device_get_samples(__pyx_self, __pyx_v_device, __pyx_v_type, __pyx_v_last_seen_time_stamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-visible entry shim for device_get_samples(): forwards the
 * already-converted C arguments to the cpdef C implementation
 * (skip_dispatch=1) and propagates either the new reference or the pending
 * Python exception back to the caller. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_650device_get_samples(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_type, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_call_result = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_samples", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_call_result = __pyx_f_4cuda_8bindings_5_nvml_device_get_samples(__pyx_v_device, __pyx_v_type, __pyx_v_last_seen_time_stamp, 1);
  if (unlikely(!__pyx_call_result)) __PYX_ERR(0, 25404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_call_result);
  /* Hand the owned reference over to the return slot. */
  __pyx_r = __pyx_call_result;
  __pyx_call_result = NULL;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_call_result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_samples", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25429
 * 
 * 
 * cpdef object device_get_retired_pages_v2(intptr_t device, int cause):             # <<<<<<<<<<<<<<
 *     """Returns the list of retired pages by source, including pages that are pending retirement
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_653device_get_retired_pages_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_v2(intptr_t __pyx_v_device, int __pyx_v_cause, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_page_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_addresses = 0;
  struct __pyx_array_obj *__pyx_v_timestamps = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_retired_pages_v2", 0);

  /* "cuda/bindings/_nvml.pyx":25439
 *         tuple: A tuple of two arrays (addresses, timestamps).
 *     """
 *     cdef unsigned int[1] page_count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)
*/
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_page_count[0]), __pyx_t_1, sizeof(__pyx_v_page_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25440
 *     """
 *     cdef unsigned int[1] page_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)
 *     check_status_size(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25441
 *     cdef unsigned int[1] page_count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if page_count[0] == 0:
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__PageRetirementCause)__pyx_v_cause), ((unsigned int *)__pyx_v_page_count), NULL, NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25441, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25440
 *     """
 *     cdef unsigned int[1] page_count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25442
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if page_count[0] == 0:
 *         return (
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25442, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25443
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)
 *     check_status_size(__status__)
 *     if page_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return (
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0],
*/
  __pyx_t_4 = ((__pyx_v_page_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25444
 *     check_status_size(__status__)
 *     if page_count[0] == 0:
 *         return (             # <<<<<<<<<<<<<<
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0],
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
*/
    __Pyx_XDECREF(__pyx_r);

    /* "cuda/bindings/_nvml.pyx":25445
 *     if page_count[0] == 0:
 *         return (
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0],             # <<<<<<<<<<<<<<
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *         )
*/
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned PY_LONG_LONG))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25445, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25445, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25445, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_Q, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25445, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25445, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25445, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;

    /* "cuda/bindings/_nvml.pyx":25446
 *         return (
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0],
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]             # <<<<<<<<<<<<<<
 *         )
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
*/
    __pyx_t_7 = NULL;
    __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(unsigned PY_LONG_LONG))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25446, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_7, NULL};
      __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25446, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25446, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25446, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_Q, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25446, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25446, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
      __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25446, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_10 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25446, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;

    /* "cuda/bindings/_nvml.pyx":25445
 *     if page_count[0] == 0:
 *         return (
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0],             # <<<<<<<<<<<<<<
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *         )
*/
    __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25445, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_9);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 25445, __pyx_L1_error);
    __Pyx_GIVEREF(__pyx_t_10);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_10) != (0)) __PYX_ERR(0, 25445, __pyx_L1_error);
    __pyx_t_9 = 0;
    __pyx_t_10 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25443
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, NULL, NULL)
 *     check_status_size(__status__)
 *     if page_count[0] == 0:             # <<<<<<<<<<<<<<
 *         return (
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0],
*/
  }

  /* "cuda/bindings/_nvml.pyx":25448
 *             view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0]
 *         )
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")             # <<<<<<<<<<<<<<
 *     cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     with nogil:
*/
  __pyx_t_10 = NULL;
  __pyx_t_9 = __Pyx_PyLong_From_unsigned_int((__pyx_v_page_count[0])); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 25448, __pyx_L1_error);
  __pyx_t_9 = 0;
  __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(unsigned PY_LONG_LONG))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_10, NULL};
    __pyx_t_7 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25448, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_7, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25448, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_9, __pyx_t_7, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25448, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_Q, __pyx_t_7, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25448, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_7, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25448, __pyx_L1_error)
    __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_7);
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25448, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_5);
  }
  __pyx_v_addresses = ((struct __pyx_array_obj *)__pyx_t_5);
  __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25449
 *         )
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, <unsigned long long *>addresses.data, <unsigned long long *>timestamps.data)
*/
  __pyx_t_7 = NULL;
  __pyx_t_9 = __Pyx_PyLong_From_unsigned_int((__pyx_v_page_count[0])); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_9);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 25449, __pyx_L1_error);
  __pyx_t_9 = 0;
  __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(unsigned PY_LONG_LONG))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_7, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25449, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25449, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_9, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25449, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_Q, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25449, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25449, __pyx_L1_error)
    __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25449, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_5);
  }
  __pyx_v_timestamps = ((struct __pyx_array_obj *)__pyx_t_5);
  __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25450
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, <unsigned long long *>addresses.data, <unsigned long long *>timestamps.data)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25451
 *     cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, <unsigned long long *>addresses.data, <unsigned long long *>timestamps.data)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (addresses, timestamps)
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages_v2(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((__pyx_t_4cuda_8bindings_5_nvml__PageRetirementCause)__pyx_v_cause), ((unsigned int *)__pyx_v_page_count), ((unsigned PY_LONG_LONG *)__pyx_v_addresses->data), ((unsigned PY_LONG_LONG *)__pyx_v_timestamps->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25451, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25450
 *     cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, <unsigned long long *>addresses.data, <unsigned long long *>timestamps.data)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25452
 *     with nogil:
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, <unsigned long long *>addresses.data, <unsigned long long *>timestamps.data)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (addresses, timestamps)
 * 
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25452, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25453
 *         __status__ = nvmlDeviceGetRetiredPages_v2(<Device>device, <_PageRetirementCause>cause, <unsigned int*>page_count, <unsigned long long *>addresses.data, <unsigned long long *>timestamps.data)
 *     check_status(__status__)
 *     return (addresses, timestamps)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_INCREF((PyObject *)__pyx_v_addresses);
  __Pyx_GIVEREF((PyObject *)__pyx_v_addresses);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_addresses)) != (0)) __PYX_ERR(0, 25453, __pyx_L1_error);
  __Pyx_INCREF((PyObject *)__pyx_v_timestamps);
  __Pyx_GIVEREF((PyObject *)__pyx_v_timestamps);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_timestamps)) != (0)) __PYX_ERR(0, 25453, __pyx_L1_error);
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25429
 * 
 * 
 * cpdef object device_get_retired_pages_v2(intptr_t device, int cause):             # <<<<<<<<<<<<<<
 *     """Returns the list of retired pages by source, including pages that are pending retirement
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_addresses);
  __Pyx_XDECREF((PyObject *)__pyx_v_timestamps);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper machinery for device_get_retired_pages_v2:
 * forward declaration of the METH_FASTCALL wrapper, its docstring,
 * and the PyMethodDef entry registered on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_653device_get_retired_pages_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_652device_get_retired_pages_v2, "device_get_retired_pages_v2(intptr_t device, int cause)\n\nReturns the list of retired pages by source, including pages that are pending retirement\n\nArgs:\n    device (Device): The identifier of the target device.\n    cause (PageRetirementCause): Filter page addresses by cause of retirement.\n\nReturns:\n    tuple: A tuple of two arrays (addresses, timestamps).");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_653device_get_retired_pages_v2 = {"device_get_retired_pages_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_653device_get_retired_pages_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_652device_get_retired_pages_v2};
/*
 * Python-level wrapper for device_get_retired_pages_v2(device, cause).
 *
 * Parses the two required arguments (positional or keyword, under either the
 * METH_FASTCALL or classic tuple/dict calling convention), converts them to
 * C types (intptr_t device, int cause), and dispatches to the generated
 * implementation __pyx_pf_..._652device_get_retired_pages_v2.
 * Returns NULL with a Python exception set on any parse/conversion failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_653device_get_retired_pages_v2(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  int __pyx_v_cause;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_retired_pages_v2 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_cause,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(). unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which yields 0 or 1, so the original
     * "unlikely(__pyx_kwds_len) < 0" was always false and the negative-length
     * error path could never trigger. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25429, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25429, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25429, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_retired_pages_v2", 0) < (0)) __PYX_ERR(0, 25429, __pyx_L3_error)
      /* Any slot still empty after keyword merging means a required argument
       * is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_retired_pages_v2", 1, 2, 2, i); __PYX_ERR(0, 25429, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25429, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25429, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25429, __pyx_L3_error)
    __pyx_v_cause = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_cause == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25429, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_retired_pages_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25429, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_652device_get_retired_pages_v2(__pyx_self, __pyx_v_device, __pyx_v_cause);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-call entry point for the cpdef function device_get_retired_pages_v2:
 * forwards the already-converted C arguments to the C-level implementation
 * (__pyx_f_..., with __pyx_skip_dispatch=1 to bypass the Python dispatch)
 * and returns its result, or NULL with a traceback entry on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_652device_get_retired_pages_v2(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, int __pyx_v_cause) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_retired_pages_v2", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_v2(__pyx_v_device, __pyx_v_cause, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25429, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_retired_pages_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25456
 * 
 * 
 * cpdef object device_get_processes_utilization_info(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves the recent utilization and process ID for all running processes
 * 
*/

/* Forward declaration of the Python wrapper for
 * device_get_processes_utilization_info (definition follows below). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_655device_get_processes_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of the cpdef function
 * device_get_processes_utilization_info(device, last_seen_time_stamp).
 *
 * Two-pass NVML query: the first nvmlDeviceGetProcessesUtilizationInfo call
 * is made with procUtilArray == NULL to learn processSamplesCount (its status
 * is validated with check_status_size, which presumably tolerates the
 * "insufficient size" return — confirm in check_status_size); the second call
 * fills the allocated ProcessUtilizationInfo_v1 array. Both calls run with
 * the GIL released. Returns a ProcessesUtilizationInfo_v1 object, or NULL
 * with an exception set on error.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_processes_utilization_info(intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *__pyx_v_procesesUtilInfo = 0;
  nvmlProcessesUtilizationInfo_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v_procUtilArray = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_processes_utilization_info", 0);

  /* "cuda/bindings/_nvml.pyx":25468
 *         ProcessesUtilizationInfo_v1: The processes utilization information structure.
 *     """
 *     cdef ProcessesUtilizationInfo_v1 procesesUtilInfo = ProcessesUtilizationInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlProcessesUtilizationInfo_t *ptr = <nvmlProcessesUtilizationInfo_v1_t *>procesesUtilInfo._get_ptr()
 * 
 */
  /* Construct the result object; its _get_ptr() below exposes the underlying
   * C struct that NVML fills in. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25468, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_procesesUtilInfo = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25469
 *     """
 *     cdef ProcessesUtilizationInfo_v1 procesesUtilInfo = ProcessesUtilizationInfo_v1()
 *     cdef nvmlProcessesUtilizationInfo_t *ptr = <nvmlProcessesUtilizationInfo_v1_t *>procesesUtilInfo._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     # Get size of array
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)__pyx_v_procesesUtilInfo->__pyx_vtab)->_get_ptr(__pyx_v_procesesUtilInfo); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25469, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlProcessesUtilizationInfo_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25472
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.processSamplesCount = 0
 */
  /* First pass (GIL released): sizing query with a NULL output array. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25473
 *     # Get size of array
 *     with nogil:
 *         ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.processSamplesCount = 0
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 */
        /* 0x1000000 == (1 << 24): struct version tag in the high byte, struct
         * size in the low bits, per the pyx source above. */
        __pyx_v_ptr->version = ((sizeof(nvmlProcessesUtilizationInfo_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25474
 *     with nogil:
 *         ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.processSamplesCount = 0             # <<<<<<<<<<<<<<
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 *         ptr.procUtilArray = NULL
 */
        __pyx_v_ptr->processSamplesCount = 0;

        /* "cuda/bindings/_nvml.pyx":25475
 *         ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.processSamplesCount = 0
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp             # <<<<<<<<<<<<<<
 *         ptr.procUtilArray = NULL
 *         __status__ = nvmlDeviceGetProcessesUtilizationInfo(
 */
        __pyx_v_ptr->lastSeenTimeStamp = __pyx_v_last_seen_time_stamp;

        /* "cuda/bindings/_nvml.pyx":25476
 *         ptr.processSamplesCount = 0
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 *         ptr.procUtilArray = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessesUtilizationInfo(
 *             <nvmlDevice_t>device, ptr
 */
        __pyx_v_ptr->procUtilArray = NULL;

        /* "cuda/bindings/_nvml.pyx":25477
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 *         ptr.procUtilArray = NULL
 *         __status__ = nvmlDeviceGetProcessesUtilizationInfo(             # <<<<<<<<<<<<<<
 *             <nvmlDevice_t>device, ptr
 *         )
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessesUtilizationInfo(((nvmlDevice_t)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25477, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25472
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.processSamplesCount = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25480
 *             <nvmlDevice_t>device, ptr
 *         )
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.processSamplesCount == 0:
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25480, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25482
 *     check_status_size(__status__)
 * 
 *     if ptr.processSamplesCount == 0:             # <<<<<<<<<<<<<<
 *         return procesesUtilInfo
 * 
 */
  /* No running processes reported: return the (empty) info object without a
   * second NVML call. */
  __pyx_t_7 = (__pyx_v_ptr->processSamplesCount == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25483
 * 
 *     if ptr.processSamplesCount == 0:
 *         return procesesUtilInfo             # <<<<<<<<<<<<<<
 * 
 *     cdef ProcessUtilizationInfo_v1 procUtilArray = ProcessUtilizationInfo_v1(ptr.processSamplesCount)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_procesesUtilInfo);
    __pyx_r = ((PyObject *)__pyx_v_procesesUtilInfo);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25482
 *     check_status_size(__status__)
 * 
 *     if ptr.processSamplesCount == 0:             # <<<<<<<<<<<<<<
 *         return procesesUtilInfo
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25485
 *         return procesesUtilInfo
 * 
 *     cdef ProcessUtilizationInfo_v1 procUtilArray = ProcessUtilizationInfo_v1(ptr.processSamplesCount)             # <<<<<<<<<<<<<<
 *     procesesUtilInfo.proc_util_array = procUtilArray
 * 
 */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->processSamplesCount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_8};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25485, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_procUtilArray = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25486
 * 
 *     cdef ProcessUtilizationInfo_v1 procUtilArray = ProcessUtilizationInfo_v1(ptr.processSamplesCount)
 *     procesesUtilInfo.proc_util_array = procUtilArray             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* NOTE(review): this attribute assignment presumably wires
   * ptr->procUtilArray to the new array's storage via the property's setter
   * (the setter is not visible here — confirm in ProcessesUtilizationInfo_v1). */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_procesesUtilInfo), __pyx_mstate_global->__pyx_n_u_proc_util_array, ((PyObject *)__pyx_v_procUtilArray)) < (0)) __PYX_ERR(0, 25486, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25488
 *     procesesUtilInfo.proc_util_array = procUtilArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessesUtilizationInfo(
 *             <nvmlDevice_t>device, ptr
 */
  /* Second pass (GIL released): fetch the actual per-process samples. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25489
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetProcessesUtilizationInfo(             # <<<<<<<<<<<<<<
 *             <nvmlDevice_t>device, ptr
 *         )
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessesUtilizationInfo(((nvmlDevice_t)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25489, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25488
 *     procesesUtilInfo.proc_util_array = procUtilArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetProcessesUtilizationInfo(
 *             <nvmlDevice_t>device, ptr
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25492
 *             <nvmlDevice_t>device, ptr
 *         )
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return procesesUtilInfo
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25492, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25494
 *     check_status(__status__)
 * 
 *     return procesesUtilInfo             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_procesesUtilInfo);
  __pyx_r = ((PyObject *)__pyx_v_procesesUtilInfo);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25456
 * 
 * 
 * cpdef object device_get_processes_utilization_info(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves the recent utilization and process ID for all running processes
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_processes_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_procesesUtilInfo);
  __Pyx_XDECREF((PyObject *)__pyx_v_procUtilArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper machinery for device_get_processes_utilization_info:
 * forward declaration of the METH_FASTCALL wrapper, its docstring,
 * and the PyMethodDef entry registered on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_655device_get_processes_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_654device_get_processes_utilization_info, "device_get_processes_utilization_info(intptr_t device, unsigned long long last_seen_time_stamp)\n\nRetrieves the recent utilization and process ID for all running processes\n\nArgs:\n    device (Device): The identifier of the target device.\n    last_seen_time_stamp (unsigned long long): Timestamp in microseconds. Set it to 0 to read utilization based\n      on all the samples maintained by the driver's internal sample buffer. Set to a timeStamp retrieved from\n      a previous query to read utilization since the previous query.\n\nReturns:\n    ProcessesUtilizationInfo_v1: The processes utilization information structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_655device_get_processes_utilization_info = {"device_get_processes_utilization_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_655device_get_processes_utilization_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_654device_get_processes_utilization_info};
/*
 * Python-level wrapper for
 * device_get_processes_utilization_info(device, last_seen_time_stamp).
 *
 * Parses the two required arguments (positional or keyword, under either the
 * METH_FASTCALL or classic tuple/dict calling convention), converts them to
 * C types (intptr_t device, unsigned long long last_seen_time_stamp), and
 * dispatches to the generated implementation
 * __pyx_pf_..._654device_get_processes_utilization_info.
 * Returns NULL with a Python exception set on any parse/conversion failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_655device_get_processes_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_processes_utilization_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely(). unlikely(x) expands
     * to __builtin_expect(!!(x), 0), which yields 0 or 1, so the original
     * "unlikely(__pyx_kwds_len) < 0" was always false and the negative-length
     * error path could never trigger. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25456, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25456, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25456, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_processes_utilization_info", 0) < (0)) __PYX_ERR(0, 25456, __pyx_L3_error)
      /* Any slot still empty after keyword merging means a required argument
       * is missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_processes_utilization_info", 1, 2, 2, i); __PYX_ERR(0, 25456, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25456, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25456, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25456, __pyx_L3_error)
    __pyx_v_last_seen_time_stamp = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v_last_seen_time_stamp == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25456, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_processes_utilization_info", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25456, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_processes_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_654device_get_processes_utilization_info(__pyx_self, __pyx_v_device, __pyx_v_last_seen_time_stamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl trampoline for the Python-level `device_get_processes_utilization_info`
 * def wrapper: forwards the already-converted C arguments to the cpdef entry
 * point with __pyx_skip_dispatch=1 (skip Python-level override dispatch) and
 * returns its new reference, or NULL with a traceback frame recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_654device_get_processes_utilization_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_last_seen_time_stamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_processes_utilization_info", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level cpdef implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_processes_utilization_info(__pyx_v_device, __pyx_v_last_seen_time_stamp, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_processes_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25497
 * 
 * 
 * cpdef device_set_hostname_v1(intptr_t device, str hostname):             # <<<<<<<<<<<<<<
 *     """Set the hostname for the device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_657device_set_hostname_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level cpdef implementation of device_set_hostname_v1: ASCII-encodes the
 * hostname str, validates its length, packs it into an nvmlHostname_v1_t and
 * calls nvmlDeviceSetHostname_v1 with the GIL released. Returns None on
 * success, NULL (with exception set) on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_set_hostname_v1(intptr_t __pyx_v_device, PyObject *__pyx_v_hostname, CYTHON_UNUSED int __pyx_skip_dispatch) {
  PyObject *__pyx_v_bytes = 0;
  nvmlHostname_v1_t __pyx_v_hostname_struct;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  char *__pyx_t_6;
  nvmlReturn_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_hostname_v1", 0);

  /* "cuda/bindings/_nvml.pyx":25504
 *         hostname (str): The new hostname to set.
 *     """
 *     cdef bytes = cpython.PyUnicode_AsASCIIString(hostname)             # <<<<<<<<<<<<<<
 *     if len(bytes) > 64:
 *         raise ValueError("hostname must 64 characters or less")
 */
  /* Raises UnicodeEncodeError for non-ASCII hostnames. */
  __pyx_t_1 = PyUnicode_AsASCIIString(__pyx_v_hostname); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_bytes = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25505
 *     """
 *     cdef bytes = cpython.PyUnicode_AsASCIIString(hostname)
 *     if len(bytes) > 64:             # <<<<<<<<<<<<<<
 *         raise ValueError("hostname must 64 characters or less")
 * 
 */
  __pyx_t_2 = PyObject_Length(__pyx_v_bytes); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25505, __pyx_L1_error)
  __pyx_t_3 = (__pyx_t_2 > 64);
  if (unlikely(__pyx_t_3)) {

    /* "cuda/bindings/_nvml.pyx":25506
 *     cdef bytes = cpython.PyUnicode_AsASCIIString(hostname)
 *     if len(bytes) > 64:
 *         raise ValueError("hostname must 64 characters or less")             # <<<<<<<<<<<<<<
 * 
 *     cdef nvmlHostname_v1_t hostname_struct
 */
    /* NOTE(review): the interned message text ("hostname must 64 characters
     * or less") is missing the word "be"; the constant is defined elsewhere
     * in this generated file, so the wording must be fixed in the .pyx. */
    __pyx_t_4 = NULL;
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_hostname_must_64_characters_or_l};
      __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)(((PyTypeObject*)PyExc_ValueError)), __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25506, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
    }
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 25506, __pyx_L1_error)

    /* "cuda/bindings/_nvml.pyx":25505
 *     """
 *     cdef bytes = cpython.PyUnicode_AsASCIIString(hostname)
 *     if len(bytes) > 64:             # <<<<<<<<<<<<<<
 *         raise ValueError("hostname must 64 characters or less")
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25509
 * 
 *     cdef nvmlHostname_v1_t hostname_struct
 *     memcpy(<void *>hostname_struct.value, <void *>cpython.PyBytes_AsString(bytes), len(bytes))             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  __pyx_t_6 = PyBytes_AsString(__pyx_v_bytes); if (unlikely(__pyx_t_6 == ((void *)NULL))) __PYX_ERR(0, 25509, __pyx_L1_error)
  __pyx_t_2 = PyObject_Length(__pyx_v_bytes); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25509, __pyx_L1_error)
  /* Fix: zero the whole struct before copying, so hostname_struct.value is
   * fully initialized and NUL-terminated when len(bytes) < sizeof(value).
   * Previously the memcpy copied only len(bytes) bytes into an uninitialized
   * stack struct, handing indeterminate trailing bytes to the NVML driver.
   * NOTE(review): if value is exactly char[64], a 64-character hostname still
   * has no terminator even after this fix; the .pyx length check should
   * arguably be `>= 64` — confirm against the nvmlHostname_v1_t definition. */
  memset((void *)(&__pyx_v_hostname_struct), 0, sizeof(__pyx_v_hostname_struct));
  (void)(memcpy(((void *)__pyx_v_hostname_struct.value), ((void *)__pyx_t_6), __pyx_t_2));

  /* "cuda/bindings/_nvml.pyx":25511
 *     memcpy(<void *>hostname_struct.value, <void *>cpython.PyBytes_AsString(bytes), len(bytes))
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetHostname_v1(<Device>device, &hostname_struct)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25512
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceSetHostname_v1(<Device>device, &hostname_struct)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* GIL is released here; the sentinel return value signals a library
         * loading failure whose exception was set with the GIL re-acquired. */
        __pyx_t_7 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetHostname_v1(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_hostname_struct)); if (unlikely(__pyx_t_7 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25512, __pyx_L5_error)
        __pyx_v___status__ = __pyx_t_7;
      }

      /* "cuda/bindings/_nvml.pyx":25511
 *     memcpy(<void *>hostname_struct.value, <void *>cpython.PyBytes_AsString(bytes), len(bytes))
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceSetHostname_v1(<Device>device, &hostname_struct)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L6;
        }
        __pyx_L5_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L6:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25513
 *     with nogil:
 *         __status__ = nvmlDeviceSetHostname_v1(<Device>device, &hostname_struct)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Converts a non-success nvmlReturn_t into a Python exception. */
  __pyx_t_8 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_8 == ((int)1))) __PYX_ERR(0, 25513, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25497
 * 
 * 
 * cpdef device_set_hostname_v1(intptr_t device, str hostname):             # <<<<<<<<<<<<<<
 *     """Set the hostname for the device.
 * 
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_hostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_bytes);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_657device_set_hostname_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_656device_set_hostname_v1, "device_set_hostname_v1(intptr_t device, str hostname)\n\nSet the hostname for the device.\n\nArgs:\n    device (Device): The identifier of the target device.\n    hostname (str): The new hostname to set.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_657device_set_hostname_v1 = {"device_set_hostname_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_657device_set_hostname_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_656device_set_hostname_v1};
/* Python-visible wrapper: unpacks the (device, hostname) positional/keyword
 * arguments, converts device to intptr_t, type-checks hostname as str, and
 * delegates to the impl trampoline. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_657device_set_hostname_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  PyObject *__pyx_v_hostname = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_set_hostname_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_hostname,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fix: the negative-length check must be inside unlikely(); the previous
     * form `unlikely(x) < 0` compared __builtin_expect's 0/1 result against 0
     * and could never fire, silently ignoring a keyword-count failure. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25497, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25497, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25497, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_set_hostname_v1", 0) < (0)) __PYX_ERR(0, 25497, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_set_hostname_v1", 1, 2, 2, i); __PYX_ERR(0, 25497, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25497, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25497, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25497, __pyx_L3_error)
    __pyx_v_hostname = ((PyObject*)values[1]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_set_hostname_v1", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25497, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_hostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_hostname), (&PyUnicode_Type), 1, "hostname", 1))) __PYX_ERR(0, 25497, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_656device_set_hostname_v1(__pyx_self, __pyx_v_device, __pyx_v_hostname);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl trampoline for the `device_set_hostname_v1` def wrapper: forwards the
 * converted C arguments to the cpdef entry point with __pyx_skip_dispatch=1
 * and returns its new reference, or NULL with a traceback frame recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_656device_set_hostname_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, PyObject *__pyx_v_hostname) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_set_hostname_v1", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level cpdef implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_set_hostname_v1(__pyx_v_device, __pyx_v_hostname, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_set_hostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25516
 * 
 * 
 * cpdef str device_get_hostname_v1(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the hostname for the device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_659device_get_hostname_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level cpdef implementation of device_get_hostname_v1: calls
 * nvmlDeviceGetHostname_v1 with the GIL released, raises on a non-success
 * status, and returns the hostname as a Python str.
 * NOTE(review): PyUnicode_FromString assumes the driver NUL-terminates
 * hostname.value — confirm against the NVML API contract. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_hostname_v1(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  nvmlHostname_v1_t __pyx_v_hostname;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_hostname_v1", 0);

  /* "cuda/bindings/_nvml.pyx":25526
 *     """
 *     cdef nvmlHostname_v1_t hostname
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHostname_v1(<Device>device, &hostname)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25527
 *     cdef nvmlHostname_v1_t hostname
 *     with nogil:
 *         __status__ = nvmlDeviceGetHostname_v1(<Device>device, &hostname)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(hostname.value)
 */
        /* GIL is released here; the sentinel return value signals a library
         * loading failure whose exception was set with the GIL re-acquired. */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostname_v1(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), (&__pyx_v_hostname)); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25527, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25526
 *     """
 *     cdef nvmlHostname_v1_t hostname
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetHostname_v1(<Device>device, &hostname)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25528
 *     with nogil:
 *         __status__ = nvmlDeviceGetHostname_v1(<Device>device, &hostname)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return cpython.PyUnicode_FromString(hostname.value)
 * 
 */
  /* Converts a non-success nvmlReturn_t into a Python exception. */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25528, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25529
 *         __status__ = nvmlDeviceGetHostname_v1(<Device>device, &hostname)
 *     check_status(__status__)
 *     return cpython.PyUnicode_FromString(hostname.value)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_hostname.value); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25529, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_r = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25516
 * 
 * 
 * cpdef str device_get_hostname_v1(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the hostname for the device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_hostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_659device_get_hostname_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_658device_get_hostname_v1, "device_get_hostname_v1(intptr_t device) -> str\n\nGet the hostname for the device.\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    str: Hostname of the device.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_659device_get_hostname_v1 = {"device_get_hostname_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_659device_get_hostname_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_658device_get_hostname_v1};
/* Python-visible wrapper: unpacks the single `device` positional/keyword
 * argument, converts it to intptr_t, and delegates to the impl trampoline. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_659device_get_hostname_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_hostname_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Fix: the negative-length check must be inside unlikely(); the previous
     * form `unlikely(x) < 0` compared __builtin_expect's 0/1 result against 0
     * and could never fire, silently ignoring a keyword-count failure. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25516, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25516, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_hostname_v1", 0) < (0)) __PYX_ERR(0, 25516, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_hostname_v1", 1, 1, 1, i); __PYX_ERR(0, 25516, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25516, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25516, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_hostname_v1", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25516, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_hostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_658device_get_hostname_v1(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Impl trampoline for the `device_get_hostname_v1` def wrapper: forwards the
 * converted C argument to the cpdef entry point with __pyx_skip_dispatch=1
 * and returns its new reference, or NULL with a traceback frame recorded. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_658device_get_hostname_v1(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_hostname_v1", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C-level cpdef implementation. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_hostname_v1(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_hostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25532
 * 
 * 
 * cdef FieldValue _cast_field_values(values):             # <<<<<<<<<<<<<<
 *     if isinstance(values, FieldValue):
 *         return values
 */

/* Coerces an arbitrary Python sequence into a FieldValue container:
 * - a FieldValue instance is returned as-is (new reference);
 * - otherwise a FieldValue of len(values) slots is allocated and filled,
 *   where each element may be a (field_id, scope_id) tuple or a bare field_id.
 * Returns a new reference, or NULL with an exception set on failure.
 * NOTE(review): len(values) (Py_ssize_t) is assigned to an unsigned int, so
 * sequences longer than UINT_MAX would silently truncate — presumably never
 * hit in practice, but worth confirming in the .pyx. */
static struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_f_4cuda_8bindings_5_nvml__cast_field_values(PyObject *__pyx_v_values) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_values_ = 0;
  unsigned int __pyx_v_valuesCount;
  PyObject *__pyx_v_i = NULL;
  PyObject *__pyx_v_v = NULL;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  PyObject *(*__pyx_t_7)(PyObject *);
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_cast_field_values", 0);

  /* "cuda/bindings/_nvml.pyx":25533
 * 
 * cdef FieldValue _cast_field_values(values):
 *     if isinstance(values, FieldValue):             # <<<<<<<<<<<<<<
 *         return values
 *     cdef FieldValue values_
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_values, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue); 
  if (__pyx_t_1) {

    /* "cuda/bindings/_nvml.pyx":25534
 * cdef FieldValue _cast_field_values(values):
 *     if isinstance(values, FieldValue):
 *         return values             # <<<<<<<<<<<<<<
 *     cdef FieldValue values_
 *     cdef unsigned int valuesCount = len(values)
 */
    /* Already a FieldValue: return it unchanged (with a fresh reference). */
    __Pyx_XDECREF((PyObject *)__pyx_r);
    if (!(likely(((__pyx_v_values) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_values, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue))))) __PYX_ERR(0, 25534, __pyx_L1_error)
    __Pyx_INCREF(__pyx_v_values);
    __pyx_r = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_values);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25533
 * 
 * cdef FieldValue _cast_field_values(values):
 *     if isinstance(values, FieldValue):             # <<<<<<<<<<<<<<
 *         return values
 *     cdef FieldValue values_
 */
  }

  /* "cuda/bindings/_nvml.pyx":25536
 *         return values
 *     cdef FieldValue values_
 *     cdef unsigned int valuesCount = len(values)             # <<<<<<<<<<<<<<
 *     values_ = FieldValue(valuesCount)
 *     for i, v in enumerate(values):
 */
  __pyx_t_2 = PyObject_Length(__pyx_v_values); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25536, __pyx_L1_error)
  __pyx_v_valuesCount = __pyx_t_2;

  /* "cuda/bindings/_nvml.pyx":25537
 *     cdef FieldValue values_
 *     cdef unsigned int valuesCount = len(values)
 *     values_ = FieldValue(valuesCount)             # <<<<<<<<<<<<<<
 *     for i, v in enumerate(values):
 *         if isinstance(v, tuple):
 */
  /* Allocate the destination container via a vectorcall of FieldValue(n). */
  __pyx_t_4 = NULL;
  __pyx_t_5 = __Pyx_PyLong_From_unsigned_int(__pyx_v_valuesCount); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_5};
    __pyx_t_3 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25537, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_3);
  }
  __pyx_v_values_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":25538
 *     cdef unsigned int valuesCount = len(values)
 *     values_ = FieldValue(valuesCount)
 *     for i, v in enumerate(values):             # <<<<<<<<<<<<<<
 *         if isinstance(v, tuple):
 *             values_[i].field_id = v[0]
 */
  /* enumerate() counter starts as the interned int 0 (__pyx_t_3). */
  __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0);
  __pyx_t_3 = __pyx_mstate_global->__pyx_int_0;
  /* Fast path: direct indexed access for exact list/tuple; otherwise fall
   * back to the generic iterator protocol (__pyx_t_7 holds tp_iternext). */
  if (likely(PyList_CheckExact(__pyx_v_values)) || PyTuple_CheckExact(__pyx_v_values)) {
    __pyx_t_5 = __pyx_v_values; __Pyx_INCREF(__pyx_t_5);
    __pyx_t_2 = 0;
    __pyx_t_7 = NULL;
  } else {
    __pyx_t_2 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_values); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25538, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_7 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25538, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_7)) {
      if (likely(PyList_CheckExact(__pyx_t_5))) {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_5);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25538, __pyx_L1_error)
          #endif
          if (__pyx_t_2 >= __pyx_temp) break;
        }
        __pyx_t_4 = __Pyx_PyList_GetItemRefFast(__pyx_t_5, __pyx_t_2, __Pyx_ReferenceSharing_OwnStrongReference);
        ++__pyx_t_2;
      } else {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_5);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25538, __pyx_L1_error)
          #endif
          if (__pyx_t_2 >= __pyx_temp) break;
        }
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_2));
        #else
        __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_5, __pyx_t_2);
        #endif
        ++__pyx_t_2;
      }
      if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25538, __pyx_L1_error)
    } else {
      /* Generic iterator: StopIteration ends the loop, anything else raises. */
      __pyx_t_4 = __pyx_t_7(__pyx_t_5);
      if (unlikely(!__pyx_t_4)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 25538, __pyx_L1_error)
          PyErr_Clear();
        }
        break;
      }
    }
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_4);
    __pyx_t_4 = 0;
    /* i = current counter; then counter += 1 for the next iteration. */
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3);
    __pyx_t_4 = __Pyx_PyLong_AddObjC(__pyx_t_3, __pyx_mstate_global->__pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25538, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_3);
    __pyx_t_3 = __pyx_t_4;
    __pyx_t_4 = 0;

    /* "cuda/bindings/_nvml.pyx":25539
 *     values_ = FieldValue(valuesCount)
 *     for i, v in enumerate(values):
 *         if isinstance(v, tuple):             # <<<<<<<<<<<<<<
 *             values_[i].field_id = v[0]
 *             values_[i].scope_id = v[1]
 */
    __pyx_t_1 = PyTuple_Check(__pyx_v_v); 
    if (__pyx_t_1) {

      /* "cuda/bindings/_nvml.pyx":25540
 *     for i, v in enumerate(values):
 *         if isinstance(v, tuple):
 *             values_[i].field_id = v[0]             # <<<<<<<<<<<<<<
 *             values_[i].scope_id = v[1]
 *         else:
 */
      __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_v, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25540, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_8 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_values_), __pyx_v_i); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25540, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      if (__Pyx_PyObject_SetAttrStr(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_field_id, __pyx_t_4) < (0)) __PYX_ERR(0, 25540, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;

      /* "cuda/bindings/_nvml.pyx":25541
 *         if isinstance(v, tuple):
 *             values_[i].field_id = v[0]
 *             values_[i].scope_id = v[1]             # <<<<<<<<<<<<<<
 *         else:
 *             values_[i].field_id = v
 */
      __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_v, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25541, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_values_), __pyx_v_i); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25541, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (__Pyx_PyObject_SetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_scope_id, __pyx_t_8) < (0)) __PYX_ERR(0, 25541, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

      /* "cuda/bindings/_nvml.pyx":25539
 *     values_ = FieldValue(valuesCount)
 *     for i, v in enumerate(values):
 *         if isinstance(v, tuple):             # <<<<<<<<<<<<<<
 *             values_[i].field_id = v[0]
 *             values_[i].scope_id = v[1]
 */
      goto __pyx_L6;
    }

    /* "cuda/bindings/_nvml.pyx":25543
 *             values_[i].scope_id = v[1]
 *         else:
 *             values_[i].field_id = v             # <<<<<<<<<<<<<<
 *     return values_
 * 
 */
    /*else*/ {
      __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_values_), __pyx_v_i); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25543, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (__Pyx_PyObject_SetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_field_id, __pyx_v_v) < (0)) __PYX_ERR(0, 25543, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    }
    __pyx_L6:;

    /* "cuda/bindings/_nvml.pyx":25538
 *     cdef unsigned int valuesCount = len(values)
 *     values_ = FieldValue(valuesCount)
 *     for i, v in enumerate(values):             # <<<<<<<<<<<<<<
 *         if isinstance(v, tuple):
 *             values_[i].field_id = v[0]
 */
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

  /* "cuda/bindings/_nvml.pyx":25544
 *         else:
 *             values_[i].field_id = v
 *     return values_             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF((PyObject *)__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_values_);
  __pyx_r = __pyx_v_values_;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25532
 * 
 * 
 * cdef FieldValue _cast_field_values(values):             # <<<<<<<<<<<<<<
 *     if isinstance(values, FieldValue):
 *         return values
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml._cast_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_values_);
  __Pyx_XDECREF(__pyx_v_i);
  __Pyx_XDECREF(__pyx_v_v);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25547
 * 
 * 
 * cpdef object device_get_field_values(intptr_t device, values):             # <<<<<<<<<<<<<<
 *     """Request values for a list of fields for a device. This API allows multiple fields to be queried at once. If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs will be populated from a single call rather than making a driver call for each fieldId.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_661device_get_field_values(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_get_field_values(device, values).
 *
 * Flow (mirrors _nvml.pyx:25547-25563):
 *   1. _cast_field_values(values) -> FieldValue wrapper (values_),
 *   2. take the raw nvmlFieldValue_t* out of the wrapper,
 *   3. call nvmlDeviceGetFieldValues with the GIL released,
 *   4. check_status raises on a non-success NVML return code,
 *   5. return FieldValue.from_ptr(<intptr_t>ptr, valuesCount).
 *
 * Returns a new reference on success, NULL with a Python exception set
 * on failure.  NOTE(review): generated by Cython — do not hand-edit;
 * change _nvml.pyx and regenerate instead. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_field_values(intptr_t __pyx_v_device, PyObject *__pyx_v_values, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_values_ = 0;
  nvmlFieldValue_t *__pyx_v_ptr;
  unsigned int __pyx_v_valuesCount;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  intptr_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  nvmlReturn_t __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  size_t __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_field_values", 0);

  /* "cuda/bindings/_nvml.pyx":25556
 *     .. seealso:: `nvmlDeviceGetFieldValues`
 *     """
 *     cdef FieldValue values_ = _cast_field_values(values)             # <<<<<<<<<<<<<<
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()
 *     cdef unsigned int valuesCount = len(values)
 */
  /* Normalize `values` into a FieldValue wrapper (returns a new reference). */
  __pyx_t_1 = ((PyObject *)__pyx_f_4cuda_8bindings_5_nvml__cast_field_values(__pyx_v_values)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_values_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25557
 *     """
 *     cdef FieldValue values_ = _cast_field_values(values)
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()             # <<<<<<<<<<<<<<
 *     cdef unsigned int valuesCount = len(values)
 *     with nogil:
 */
  /* Borrow the underlying buffer address via the FieldValue vtable. */
  __pyx_t_2 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_values_->__pyx_vtab)->_get_ptr(__pyx_v_values_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25557, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlFieldValue_t *)__pyx_t_2);

  /* "cuda/bindings/_nvml.pyx":25558
 *     cdef FieldValue values_ = _cast_field_values(values)
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()
 *     cdef unsigned int valuesCount = len(values)             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetFieldValues(<Device>device, valuesCount, ptr)
 */
  /* Count comes from len() of the ORIGINAL `values` argument (not values_). */
  __pyx_t_3 = PyObject_Length(__pyx_v_values); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25558, __pyx_L1_error)
  __pyx_v_valuesCount = __pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":25559
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()
 *     cdef unsigned int valuesCount = len(values)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFieldValues(<Device>device, valuesCount, ptr)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially blocking) NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25560
 *     cdef unsigned int valuesCount = len(values)
 *     with nogil:
 *         __status__ = nvmlDeviceGetFieldValues(<Device>device, valuesCount, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* _NVMLRETURN_T_INTERNAL_LOADING_ERROR is the sentinel the lazy-loading
         * shim returns when the NVML symbol could not be resolved; in that case
         * a Python exception is already pending and we bail out. */
        __pyx_t_4 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFieldValues(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_valuesCount, __pyx_v_ptr); if (unlikely(__pyx_t_4 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25560, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_4;
      }

      /* "cuda/bindings/_nvml.pyx":25559
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()
 *     cdef unsigned int valuesCount = len(values)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetFieldValues(<Device>device, valuesCount, ptr)
 *     check_status(__status__)
 */
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25561
 *     with nogil:
 *         __status__ = nvmlDeviceGetFieldValues(<Device>device, valuesCount, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return FieldValue.from_ptr(<intptr_t>ptr, valuesCount)
 */
  /* check_status raises (returns 1) for non-success NVML status codes. */
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_5 == ((int)1))) __PYX_ERR(0, 25561, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25563
 *     check_status(__status__)
 * 
 *     return FieldValue.from_ptr(<intptr_t>ptr, valuesCount)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Return value: FieldValue.from_ptr(<intptr_t>ptr, valuesCount) via a
   * fastcall method invocation on the FieldValue type object. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_6 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
  __Pyx_INCREF(__pyx_t_6);
  __pyx_t_7 = PyLong_FromSsize_t(((intptr_t)__pyx_v_ptr)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_valuesCount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = 0;
  {
    PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_t_7, __pyx_t_8};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_callargs+__pyx_t_9, (3-__pyx_t_9) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25563, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25547
 * 
 * 
 * cpdef object device_get_field_values(intptr_t device, values):             # <<<<<<<<<<<<<<
 *     """Request values for a list of fields for a device. This API allows multiple fields to be queried at once. If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs will be populated from a single call rather than making a driver call for each fieldId.
 * 
 */

  /* function exit code */
  /* Error path: drop any live temporaries, record a traceback frame,
   * and fall through to the shared cleanup of locals below. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_values_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_661device_get_field_values(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_660device_get_field_values, "device_get_field_values(intptr_t device, values)\n\nRequest values for a list of fields for a device. This API allows multiple fields to be queried at once. If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs will be populated from a single call rather than making a driver call for each fieldId.\n\nArgs:\n    device (intptr_t): The device handle of the GPU to request field values for.\n    values (FieldValue): Array of FieldValue specifying what to retrieve.\n\n.. seealso:: `nvmlDeviceGetFieldValues`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_661device_get_field_values = {"device_get_field_values", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_661device_get_field_values, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_660device_get_field_values};
/* Argument-unpacking wrapper invoked by the Python interpreter for
 * device_get_field_values(device, values).
 *
 * Accepts exactly two arguments, positionally or by keyword
 * ("device", "values"), converts `device` to intptr_t, then delegates
 * to the __pyx_pf_..._660 shim.  Both the error and normal exits drop
 * the strong references held in the local `values[2]` array. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_661device_get_field_values(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  PyObject *__pyx_v_values = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_field_values (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_values,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25547, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call: collect the positional args first (fallthrough
       * switch), then merge keywords and verify nothing is missing. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25547, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25547, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_field_values", 0) < (0)) __PYX_ERR(0, 25547, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_field_values", 1, 2, 2, i); __PYX_ERR(0, 25547, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly two args. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25547, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25547, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t have the same width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25547, __pyx_L3_error)
    __pyx_v_values = values[1];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_field_values", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25547, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_660device_get_field_values(__pyx_self, __pyx_v_device, __pyx_v_values);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch shim: forwards the unpacked arguments to the
 * C-level implementation with __pyx_skip_dispatch=1 (the Python entry
 * point must not re-dispatch back to itself).  Returns a new reference
 * or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_660device_get_field_values(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, PyObject *__pyx_v_values) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_field_values", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_field_values(__pyx_v_device, __pyx_v_values, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25566
 * 
 * 
 * cpdef object device_clear_field_values(intptr_t device, values):             # <<<<<<<<<<<<<<
 *     """Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_663device_clear_field_values(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * cuda.bindings._nvml.device_clear_field_values(device, values).
 *
 * Same shape as device_get_field_values: cast `values` to a FieldValue,
 * take its raw pointer, call nvmlDeviceClearFieldValues with the GIL
 * released, and raise via check_status on failure.  Unlike the getter
 * it returns None on success (no new FieldValue is constructed).
 * NOTE(review): generated by Cython — change _nvml.pyx, not this file. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_clear_field_values(intptr_t __pyx_v_device, PyObject *__pyx_v_values, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v_values_ = 0;
  nvmlFieldValue_t *__pyx_v_ptr;
  unsigned int __pyx_v_valuesCount;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  intptr_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  nvmlReturn_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_field_values", 0);

  /* "cuda/bindings/_nvml.pyx":25574
 *             prior to this call
 *     """
 *     cdef FieldValue values_ = _cast_field_values(values)             # <<<<<<<<<<<<<<
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()
 *     cdef unsigned int valuesCount = len(values)
 */
  /* Normalize `values` into a FieldValue wrapper (new reference). */
  __pyx_t_1 = ((PyObject *)__pyx_f_4cuda_8bindings_5_nvml__cast_field_values(__pyx_v_values)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_values_ = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25575
 *     """
 *     cdef FieldValue values_ = _cast_field_values(values)
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()             # <<<<<<<<<<<<<<
 *     cdef unsigned int valuesCount = len(values)
 * 
 */
  /* Borrow the underlying buffer address via the FieldValue vtable. */
  __pyx_t_2 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v_values_->__pyx_vtab)->_get_ptr(__pyx_v_values_); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25575, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlFieldValue_t *)__pyx_t_2);

  /* "cuda/bindings/_nvml.pyx":25576
 *     cdef FieldValue values_ = _cast_field_values(values)
 *     cdef nvmlFieldValue_t *ptr = <nvmlFieldValue_t *>values_._get_ptr()
 *     cdef unsigned int valuesCount = len(values)             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* Count comes from len() of the ORIGINAL `values` argument. */
  __pyx_t_3 = PyObject_Length(__pyx_v_values); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25576, __pyx_L1_error)
  __pyx_v_valuesCount = __pyx_t_3;

  /* "cuda/bindings/_nvml.pyx":25578
 *     cdef unsigned int valuesCount = len(values)
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearFieldValues(<Device>device, valuesCount, ptr)
 *     check_status(__status__)
 */
  /* Release the GIL around the NVML driver call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25579
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceClearFieldValues(<Device>device, valuesCount, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* Loading-error sentinel means the NVML symbol could not be
         * resolved and a Python exception is already set. */
        __pyx_t_4 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearFieldValues(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_valuesCount, __pyx_v_ptr); if (unlikely(__pyx_t_4 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25579, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_4;
      }

      /* "cuda/bindings/_nvml.pyx":25578
 *     cdef unsigned int valuesCount = len(values)
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceClearFieldValues(<Device>device, valuesCount, ptr)
 *     check_status(__status__)
 */
      /* Reacquire the GIL on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25580
 *     with nogil:
 *         __status__ = nvmlDeviceClearFieldValues(<Device>device, valuesCount, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* check_status raises (returns 1) for non-success NVML status codes. */
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_5 == ((int)1))) __PYX_ERR(0, 25580, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25566
 * 
 * 
 * cpdef object device_clear_field_values(intptr_t device, values):             # <<<<<<<<<<<<<<
 *     """Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once.
 * 
 */

  /* function exit code */
  /* Success path returns None; error path records a traceback and
   * returns NULL.  Both paths drop the FieldValue wrapper reference. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_values_);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_663device_clear_field_values(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_662device_clear_field_values, "device_clear_field_values(intptr_t device, values)\n\nClear values for a list of fields for a device. This API allows multiple fields to be cleared at once.\n\nArgs:\n    device (Device): The device handle of the GPU to request field values for\n    values (FieldValue): FieldValue instance to hold field values. Each value's fieldId must be populated\n        prior to this call");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_663device_clear_field_values = {"device_clear_field_values", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_663device_clear_field_values, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_662device_clear_field_values};
/* Argument-unpacking wrapper for device_clear_field_values(device, values).
 * Identical structure to the device_get_field_values wrapper: exactly two
 * arguments, accepted positionally or by keyword ("device", "values");
 * `device` is converted to intptr_t, then control passes to the
 * __pyx_pf_..._662 shim.  All exits release the strong references held
 * in the local `values[2]` array. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_663device_clear_field_values(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  PyObject *__pyx_v_values = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_clear_field_values (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_values,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25566, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call: gather positionals via fallthrough, then merge
       * keywords and verify both slots are filled. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25566, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25566, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_clear_field_values", 0) < (0)) __PYX_ERR(0, 25566, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_clear_field_values", 1, 2, 2, i); __PYX_ERR(0, 25566, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly two args. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25566, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25566, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t converted via PyLong_AsSsize_t — assumes
     * intptr_t and Py_ssize_t share a width on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25566, __pyx_L3_error)
    __pyx_v_values = values[1];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_clear_field_values", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25566, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_662device_clear_field_values(__pyx_self, __pyx_v_device, __pyx_v_values);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level dispatch shim: forwards the unpacked arguments to the
 * C-level implementation with __pyx_skip_dispatch=1.  Returns None on
 * success (propagated from the impl) or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_662device_clear_field_values(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, PyObject *__pyx_v_values) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_clear_field_values", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_clear_field_values(__pyx_v_device, __pyx_v_values, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_clear_field_values", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25583
 * 
 * 
 * cpdef object device_get_supported_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the supported vGPU types on a physical GPU (device).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_665device_get_supported_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_vgpus(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_vgpuCount[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_vgpuTypeIds = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_vgpus", 0);

  /* "cuda/bindings/_nvml.pyx":25592
 *         array: An array of supported vGPU type IDs.
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)
*/
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_vgpuCount[0]), __pyx_t_1, sizeof(__pyx_v_vgpuCount[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25593
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25594
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedVgpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_vgpuCount, NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25594, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25593
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25595
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25595, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25596
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  __pyx_t_4 = ((__pyx_v_vgpuCount[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25597
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:
*/
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25597, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25597, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25597, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25597, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25597, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25597, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25597, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25597, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25596
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":25598
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
*/
  __pyx_t_5 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_deviceCount); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_7, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 25598, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25598, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25598, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25598, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25598, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25598, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25598, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_vgpuTypeIds = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25599
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25600
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return vgpuTypeIds
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedVgpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_vgpuCount, ((nvmlVgpuTypeId_t *)__pyx_v_vgpuTypeIds->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25600, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25599
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25601
 *     with nogil:
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return vgpuTypeIds
 * 
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25601, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25602
 *         __status__ = nvmlDeviceGetSupportedVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)
 *     return vgpuTypeIds             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_vgpuTypeIds);
  __pyx_r = ((PyObject *)__pyx_v_vgpuTypeIds);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25583
 * 
 * 
 * cpdef object device_get_supported_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the supported vGPU types on a physical GPU (device).
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuTypeIds);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_665device_get_supported_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_664device_get_supported_vgpus, "device_get_supported_vgpus(intptr_t device)\n\nRetrieve the supported vGPU types on a physical GPU (device).\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    array: An array of supported vGPU type IDs.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_665device_get_supported_vgpus = {"device_get_supported_vgpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_665device_get_supported_vgpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_664device_get_supported_vgpus};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_665device_get_supported_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* Python-level wrapper for device_get_supported_vgpus(device):
     unpacks the single positional/keyword argument `device`, converts it to
     intptr_t, and forwards to the pf-level dispatcher. */
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_supported_vgpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The original
       `if (unlikely(__pyx_kwds_len) < 0)` expands (with __builtin_expect)
       to `(!!(len)) < 0`, which is always false, so an error return (< 0)
       from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25583, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25583, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_supported_vgpus", 0) < (0)) __PYX_ERR(0, 25583, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_supported_vgpus", 1, 1, 1, i); __PYX_ERR(0, 25583, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25583, __pyx_L3_error)
    }
    /* NOTE(review): converts via PyLong_AsSsize_t; assumes Py_ssize_t and
       intptr_t have the same width (true on all supported platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25583, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_supported_vgpus", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25583, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_664device_get_supported_vgpus(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_664device_get_supported_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Thin Python-level dispatcher: forwards to the cpdef C implementation with
     skip_dispatch=1 (we are already in the Python wrapper, so no re-dispatch). */
  PyObject *__pyx_r = NULL;
  PyObject *result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_supported_vgpus", 0);

  result = __pyx_f_4cuda_8bindings_5_nvml_device_get_supported_vgpus(__pyx_v_device, 1);
  if (unlikely(!result)) __PYX_ERR(0, 25583, __pyx_L1_error)
  __Pyx_GOTREF(result);
  /* hand the owned reference to the caller */
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: record a traceback frame and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_supported_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25605
 * 
 * 
 * cpdef object device_get_creatable_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the currently creatable vGPU types on a physical GPU (device).
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_667device_get_creatable_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_creatable_vgpus(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  /* C implementation of cpdef device_get_creatable_vgpus(device).
     Two-phase NVML query: first call with a NULL buffer to learn the count,
     then allocate a typed `view.array` of that many unsigned ints and call
     again to fill it.  Returns an empty sliced array when the count is 0. */
  unsigned int __pyx_v_vgpuCount[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_vgpuTypeIds = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_creatable_vgpus", 0);

  /* "cuda/bindings/_nvml.pyx":25614
 *         array: An array of createable vGPU type IDs.
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)
*/
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_vgpuCount[0]), __pyx_t_1, sizeof(__pyx_v_vgpuCount[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25615
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25616
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:
*/
        /* first call: NULL buffer, query only the required count */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCreatableVgpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_vgpuCount, NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25616, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25615
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25617
 *     with nogil:
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25617, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25618
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  __pyx_t_4 = ((__pyx_v_vgpuCount[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25619
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:
*/
    /* zero creatable vGPUs: return a length-0 slice of a 1-element array
       so the caller still receives a typed, C-contiguous "I" array */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25619, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25619, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25619, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25619, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25619, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25619, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25619, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25619, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25618
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
  }

  /* "cuda/bindings/_nvml.pyx":25620
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
*/
  __pyx_t_5 = NULL;
  /* BUGFIX: the original (generated from the pyx line above) resolved a
     module-global named `deviceCount` at runtime and used deviceCount[0] as
     the array length.  No such global is defined here, so any non-zero
     vgpuCount raised NameError -- and even if it existed, the buffer length
     would not match the count NVML writes below (undersized-buffer risk).
     Size the result by vgpuCount[0] reported by the first driver call. */
  __pyx_t_6 = __Pyx_PyLong_From_long((long)(__pyx_v_vgpuCount[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 25620, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25620, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25620, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25620, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25620, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25620, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25620, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_vgpuTypeIds = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25621
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25622
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return vgpuTypeIds
*/
        /* second call: fill the freshly allocated buffer */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCreatableVgpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_vgpuCount, ((nvmlVgpuTypeId_t *)__pyx_v_vgpuTypeIds->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25622, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25621
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25623
 *     with nogil:
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return vgpuTypeIds
 * 
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25623, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25624
 *         __status__ = nvmlDeviceGetCreatableVgpus(<Device>device, vgpuCount, <nvmlVgpuTypeId_t *>vgpuTypeIds.data)
 *     check_status(__status__)
 *     return vgpuTypeIds             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_vgpuTypeIds);
  __pyx_r = ((PyObject *)__pyx_v_vgpuTypeIds);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25605
 * 
 * 
 * cpdef object device_get_creatable_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the currently creatable vGPU types on a physical GPU (device).
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_creatable_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuTypeIds);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_667device_get_creatable_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_666device_get_creatable_vgpus, "device_get_creatable_vgpus(intptr_t device)\n\nRetrieve the currently creatable vGPU types on a physical GPU (device).\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    array: An array of createable vGPU type IDs.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_667device_get_creatable_vgpus = {"device_get_creatable_vgpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_667device_get_creatable_vgpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_666device_get_creatable_vgpus};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_667device_get_creatable_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  /* Python-level wrapper for device_get_creatable_vgpus(device):
     unpacks the single positional/keyword argument `device`, converts it to
     intptr_t, and forwards to the pf-level dispatcher. */
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_creatable_vgpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The original
       `if (unlikely(__pyx_kwds_len) < 0)` expands (with __builtin_expect)
       to `(!!(len)) < 0`, which is always false, so an error return (< 0)
       from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25605, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25605, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_creatable_vgpus", 0) < (0)) __PYX_ERR(0, 25605, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_creatable_vgpus", 1, 1, 1, i); __PYX_ERR(0, 25605, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25605, __pyx_L3_error)
    }
    /* NOTE(review): converts via PyLong_AsSsize_t; assumes Py_ssize_t and
       intptr_t have the same width (true on all supported platforms). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25605, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_creatable_vgpus", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25605, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_creatable_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_666device_get_creatable_vgpus(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_666device_get_creatable_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  /* Thin Python-level dispatcher: forwards to the cpdef C implementation with
     skip_dispatch=1 (we are already in the Python wrapper, so no re-dispatch). */
  PyObject *__pyx_r = NULL;
  PyObject *result = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_creatable_vgpus", 0);

  result = __pyx_f_4cuda_8bindings_5_nvml_device_get_creatable_vgpus(__pyx_v_device, 1);
  if (unlikely(!result)) __PYX_ERR(0, 25605, __pyx_L1_error)
  __Pyx_GOTREF(result);
  /* hand the owned reference to the caller */
  __pyx_r = result;
  result = NULL;
  goto __pyx_L0;

  /* error path: record a traceback frame and return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(result);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_creatable_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25627
 * 
 * 
 * cpdef object device_get_active_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the active vGPU instances on a device.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_669device_get_active_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast path for device_get_active_vgpus(device):
 * 1. queries the active vGPU count (nvmlDeviceGetActiveVgpus with a NULL buffer),
 * 2. returns an empty typed memoryview when the count is zero,
 * 3. otherwise allocates a cython.view.array of `vgpuCount[0]` unsigned ints and
 *    fills it with a second nvmlDeviceGetActiveVgpus call.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_active_vgpus(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_vgpuCount[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_vgpuInstances = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_active_vgpus", 0);

  /* "cuda/bindings/_nvml.pyx":25636
 *         array: An array of active vGPU instance IDs.
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)
 */
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_vgpuCount[0]), __pyx_t_1, sizeof(__pyx_v_vgpuCount[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25637
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25638
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:
 */
        /* First call with a NULL buffer: NVML writes the required element count
         * into vgpuCount[0] (and typically reports NVML_ERROR_INSUFFICIENT_SIZE,
         * which check_status_size below treats as expected). */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetActiveVgpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_vgpuCount, NULL); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25638, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25637
 *     """
 *     cdef unsigned int[1] vgpuCount = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25639
 *     with nogil:
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25639, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25640
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  __pyx_t_4 = ((__pyx_v_vgpuCount[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25641
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:
 */
    /* No active vGPUs: return a length-0 slice of a 1-element array. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25641, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25641, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25641, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25641, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25641, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25641, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25641, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25641, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25640
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, NULL)
 *     check_status_size(__status__)
 *     if vgpuCount[0] == 0:             # <<<<<<<<<<<<<<
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
  }

  /* "cuda/bindings/_nvml.pyx":25642
 *     if vgpuCount[0] == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, <nvmlVgpuInstance_t *>vgpuInstances.data)
 */
  /* BUGFIX: the .pyx line above sizes the output array from `deviceCount[0]`, a
   * module-level global that is not defined by this function and is unrelated to
   * this query (the generated code looked it up with __Pyx_GetModuleGlobalName,
   * raising NameError at runtime unless some unrelated global happened to exist).
   * The array must hold vgpuCount[0] elements as reported by the first
   * nvmlDeviceGetActiveVgpus call, so build the shape tuple from the local count. */
  __pyx_t_5 = NULL;
  __pyx_t_6 = PyLong_FromUnsignedLong((unsigned long)(__pyx_v_vgpuCount[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 25642, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_t_6 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25642, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25642, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25642, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25642, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25642, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25642, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_vgpuInstances = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25643
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, <nvmlVgpuInstance_t *>vgpuInstances.data)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25644
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, <nvmlVgpuInstance_t *>vgpuInstances.data)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return vgpuInstances
 */
        /* Second call: fill the freshly allocated buffer with the instance IDs. */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetActiveVgpus(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_vgpuCount, ((nvmlVgpuInstance_t *)__pyx_v_vgpuInstances->data)); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25644, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25643
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0]
 *     cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, <nvmlVgpuInstance_t *>vgpuInstances.data)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25645
 *     with nogil:
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, <nvmlVgpuInstance_t *>vgpuInstances.data)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return vgpuInstances
 * 
 */
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25645, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25646
 *         __status__ = nvmlDeviceGetActiveVgpus(<Device>device, vgpuCount, <nvmlVgpuInstance_t *>vgpuInstances.data)
 *     check_status(__status__)
 *     return vgpuInstances             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_vgpuInstances);
  __pyx_r = ((PyObject *)__pyx_v_vgpuInstances);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25627
 * 
 * 
 * cpdef object device_get_active_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the active vGPU instances on a device.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_active_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuInstances);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_669device_get_active_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_668device_get_active_vgpus, "device_get_active_vgpus(intptr_t device)\n\nRetrieve the active vGPU instances on a device.\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    array: An array of active vGPU instance IDs.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_669device_get_active_vgpus = {"device_get_active_vgpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_669device_get_active_vgpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_668device_get_active_vgpus};
/* Argument-unpacking wrapper for device_get_active_vgpus: parses the single
 * positional-or-keyword `device` argument and dispatches to the _pf_ stub. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_669device_get_active_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_active_vgpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`; since unlikely() expands to
     * __builtin_expect(!!(x), 0), which yields 0 or 1, the comparison could
     * never be true and a negative (error) kwargs length went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25627, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25627, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_active_vgpus", 0) < (0)) __PYX_ERR(0, 25627, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_active_vgpus", 1, 1, 1, i); __PYX_ERR(0, 25627, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25627, __pyx_L3_error)
    }
    /* NOTE(review): intptr_t is converted via PyLong_AsSsize_t; this assumes
     * intptr_t and Py_ssize_t have the same width — true on supported platforms. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25627, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_active_vgpus", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25627, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_active_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_668device_get_active_vgpus(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation stub for device_get_active_vgpus.
 * Forwards to the cpdef C fast path (skip_dispatch=1) and propagates its
 * result or exception. Generated by Cython; the GOTREF/XGIVEREF refcount
 * bookkeeping is order-sensitive and must not be rearranged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_668device_get_active_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_active_vgpus", 0);
  __Pyx_XDECREF(__pyx_r);  /* no-op here (__pyx_r is NULL); standard generated pattern */
  /* Call the C entry point; returns a new reference, or NULL with an exception set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_active_vgpus(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;  /* transfer ownership of the reference to the return slot */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_active_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25649
 * 
 * 
 * cpdef str vgpu_instance_get_vm_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the VM ID associated with a vGPU instance.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_671vgpu_instance_get_vm_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C fast path for vgpu_instance_get_vm_id(vgpu_instance):
 * calls nvmlVgpuInstanceGetVmID into an 80-byte buffer and returns the
 * 2-tuple (vm_id_string, vm_id_type). Returns a new reference, or NULL
 * with an exception set. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_id(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_size;
  char __pyx_v_vmId[80];
  nvmlVgpuVmIdType_t __pyx_v_vmIdType[1];
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  nvmlReturn_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_vm_id", 0);

  /* "cuda/bindings/_nvml.pyx":25658
 *         tuple[str, VgpuVmIdType]: A tuple of (id, id_type).
 *     """
 *     cdef unsigned int size = 80             # <<<<<<<<<<<<<<
 *     cdef char[80] vmId
 *     cdef nvmlVgpuVmIdType_t[1] vmIdType
 */
  __pyx_v_size = 80;

  /* "cuda/bindings/_nvml.pyx":25661
 *     cdef char[80] vmId
 *     cdef nvmlVgpuVmIdType_t[1] vmIdType
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetVmID(<nvmlVgpuInstance_t>vgpu_instance, vmId, size, vmIdType)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25662
 *     cdef nvmlVgpuVmIdType_t[1] vmIdType
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetVmID(<nvmlVgpuInstance_t>vgpu_instance, vmId, size, vmIdType)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 *     return (cpython.PyUnicode_FromString(vmId), vmIdType[0])
 */
        __pyx_t_1 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmID(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_vmId, __pyx_v_size, __pyx_v_vmIdType); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25662, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_1;
      }

      /* "cuda/bindings/_nvml.pyx":25661
 *     cdef char[80] vmId
 *     cdef nvmlVgpuVmIdType_t[1] vmIdType
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetVmID(<nvmlVgpuInstance_t>vgpu_instance, vmId, size, vmIdType)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25663
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetVmID(<nvmlVgpuInstance_t>vgpu_instance, vmId, size, vmIdType)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (cpython.PyUnicode_FromString(vmId), vmIdType[0])
 * 
 */
  __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_2 == ((int)1))) __PYX_ERR(0, 25663, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25664
 *         __status__ = nvmlVgpuInstanceGetVmID(<nvmlVgpuInstance_t>vgpu_instance, vmId, size, vmIdType)
 *     check_status(__status__)
 *     return (cpython.PyUnicode_FromString(vmId), vmIdType[0])             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyUnicode_FromString(__pyx_v_vmId); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyLong_From_nvmlVgpuVmIdType_t((__pyx_v_vmIdType[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25664, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 25664, __pyx_L1_error);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  /* BUGFIX: the .pyx declares the function `cpdef str`, but it actually returns a
   * 2-tuple (see the docstring: tuple[str, VgpuVmIdType]). The generated
   * PyUnicode_CheckExact coercion check therefore raised
   * TypeError("str expected, got tuple") on every successful call. The bogus
   * coercion is removed; the tuple is returned as-is. */
  __pyx_r = ((PyObject*)__pyx_t_5);
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25649
 * 
 * 
 * cpdef str vgpu_instance_get_vm_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the VM ID associated with a vGPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_vm_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_671vgpu_instance_get_vm_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_670vgpu_instance_get_vm_id, "vgpu_instance_get_vm_id(unsigned int vgpu_instance) -> str\n\nRetrieve the VM ID associated with a vGPU instance.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n\nReturns:\n    tuple[str, VgpuVmIdType]: A tuple of (id, id_type).");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_671vgpu_instance_get_vm_id = {"vgpu_instance_get_vm_id", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_671vgpu_instance_get_vm_id, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_670vgpu_instance_get_vm_id};
/* Argument-unpacking wrapper for vgpu_instance_get_vm_id: parses the single
 * positional-or-keyword `vgpu_instance` argument and dispatches to the _pf_ stub. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_671vgpu_instance_get_vm_id(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_vm_id (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`; since unlikely() expands to
     * __builtin_expect(!!(x), 0), which yields 0 or 1, the comparison could
     * never be true and a negative (error) kwargs length went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25649, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25649, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_vm_id", 0) < (0)) __PYX_ERR(0, 25649, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_vm_id", 1, 1, 1, i); __PYX_ERR(0, 25649, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25649, __pyx_L3_error)
    }
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25649, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_vm_id", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25649, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_vm_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_670vgpu_instance_get_vm_id(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation stub for vgpu_instance_get_vm_id.
 * Forwards to the cpdef C fast path (skip_dispatch=1) and propagates its
 * result or exception. Generated by Cython; the GOTREF/XGIVEREF refcount
 * bookkeeping is order-sensitive and must not be rearranged. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_670vgpu_instance_get_vm_id(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_vm_id", 0);
  __Pyx_XDECREF(__pyx_r);  /* no-op here (__pyx_r is NULL); standard generated pattern */
  /* Call the C entry point; returns a new reference, or NULL with an exception set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_id(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;  /* transfer ownership of the reference to the return slot */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_vm_id", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25667
 * 
 * 
 * cpdef object gpu_instance_get_creatable_vgpus(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the currently creatable vGPU types on a specific GPU Instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_673gpu_instance_get_creatable_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * `gpu_instance_get_creatable_vgpus(intptr_t gpu_instance)`
 * (cuda/bindings/_nvml.pyx:25667).  Two-pass NVML size-query pattern:
 *   pass 1: call nvmlGpuInstanceGetCreatableVgpus with vgpuTypeIds == NULL
 *           to obtain the required vgpuCount;
 *   pass 2: allocate a Cython `view.array` of vgpuCount unsigned ints,
 *           attach it to the result object's `vgpu_type_ids`, and call
 *           again to fill it.
 * Returns a new reference to a VgpuTypeIdInfo_v1 instance, or NULL with a
 * Python exception set.  Generated by Cython; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_creatable_vgpus(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *__pyx_v_pVgpus = 0;
  nvmlVgpuTypeIdInfo_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_vgpuTypeIds = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_creatable_vgpus", 0);

  /* "cuda/bindings/_nvml.pyx":25677
 *     """
 * 
 *     cdef VgpuTypeIdInfo_v1 pVgpus = VgpuTypeIdInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuTypeIdInfo_v1_t *ptr = <nvmlVgpuTypeIdInfo_v1_t *>pVgpus._get_ptr()
 * 
 */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25677, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pVgpus = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25678
 * 
 *     cdef VgpuTypeIdInfo_v1 pVgpus = VgpuTypeIdInfo_v1()
 *     cdef nvmlVgpuTypeIdInfo_v1_t *ptr = <nvmlVgpuTypeIdInfo_v1_t *>pVgpus._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     # Get size of array
 */
  /* `_get_ptr()` hands back the address of the wrapped C struct as an
   * intptr_t; `ptr` aliases pVgpus's internal storage for the NVML calls. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)__pyx_v_pVgpus->__pyx_vtab)->_get_ptr(__pyx_v_pVgpus); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25678, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuTypeIdInfo_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25681
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25682
 *     # Get size of array
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.vgpuCount = 0
 *         ptr.vgpuTypeIds = NULL
 */
        /* NVML versioned-struct convention: size in the low bits, API
         * version (1) in bits 24+ — 0x1000000 == (1 << 24). */
        __pyx_v_ptr->version = ((sizeof(nvmlVgpuTypeIdInfo_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25683
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0             # <<<<<<<<<<<<<<
 *         ptr.vgpuTypeIds = NULL
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)
 */
        __pyx_v_ptr->vgpuCount = 0;

        /* "cuda/bindings/_nvml.pyx":25684
 *         ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0
 *         ptr.vgpuTypeIds = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status_size(__status__)
 */
        /* NULL array => size-query call: NVML fills vgpuCount only. */
        __pyx_v_ptr->vgpuTypeIds = NULL;

        /* "cuda/bindings/_nvml.pyx":25685
 *         ptr.vgpuCount = 0
 *         ptr.vgpuTypeIds = NULL
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetCreatableVgpus(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25685, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25681
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25686
 *         ptr.vgpuTypeIds = NULL
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.vgpuCount == 0:
 */
  /* check_status_size tolerates the "insufficient size" status expected
   * from the size-query pass; other failures raise (returns 1 on error). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25686, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25688
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuCount == 0:             # <<<<<<<<<<<<<<
 *         return pVgpus
 * 
 */
  __pyx_t_7 = (__pyx_v_ptr->vgpuCount == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25689
 * 
 *     if ptr.vgpuCount == 0:
 *         return pVgpus             # <<<<<<<<<<<<<<
 * 
 *     cdef view.array vgpuTypeIds = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
    /* Nothing creatable: return the empty info struct without a 2nd call. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_pVgpus);
    __pyx_r = ((PyObject *)__pyx_v_pVgpus);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25688
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuCount == 0:             # <<<<<<<<<<<<<<
 *         return pVgpus
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25691
 *         return pVgpus
 * 
 *     cdef view.array vgpuTypeIds = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     pVgpus.vgpu_type_ids = vgpuTypeIds
 * 
 */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->vgpuCount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 25691, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_8 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25691, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_9, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25691, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_8, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25691, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25691, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25691, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25691, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuTypeIds = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25692
 * 
 *     cdef view.array vgpuTypeIds = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     pVgpus.vgpu_type_ids = vgpuTypeIds             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* Attaching the array via the property is presumed to also point
   * ptr->vgpuTypeIds at the array's buffer for the fill call below —
   * NOTE(review): setter is defined elsewhere in this file; confirm. */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_pVgpus), __pyx_mstate_global->__pyx_n_u_vgpu_type_ids, ((PyObject *)__pyx_v_vgpuTypeIds)) < (0)) __PYX_ERR(0, 25692, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25694
 *     pVgpus.vgpu_type_ids = vgpuTypeIds
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25695
 * 
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetCreatableVgpus(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25695, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25694
 *     pVgpus.vgpu_type_ids = vgpuTypeIds
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25696
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetCreatableVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return pVgpus
 */
  /* Second pass must fully succeed: strict status check this time. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25696, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25698
 *     check_status(__status__)
 * 
 *     return pVgpus             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pVgpus);
  __pyx_r = ((PyObject *)__pyx_v_pVgpus);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25667
 * 
 * 
 * cpdef object gpu_instance_get_creatable_vgpus(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the currently creatable vGPU types on a specific GPU Instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_creatable_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pVgpus);
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuTypeIds);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_673gpu_instance_get_creatable_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_672gpu_instance_get_creatable_vgpus, "gpu_instance_get_creatable_vgpus(intptr_t gpu_instance)\n\nQuery the currently creatable vGPU types on a specific GPU Instance.\n\nArgs:\n    gpu_instance (GpuInstance): The identifier of the target GPU Instance.\n\nReturns:\n    VgpuTypeIdInfo_v1: The vGPU type ID information structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_673gpu_instance_get_creatable_vgpus = {"gpu_instance_get_creatable_vgpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_673gpu_instance_get_creatable_vgpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_672gpu_instance_get_creatable_vgpus};
/* Argument-unpacking wrapper for `gpu_instance_get_creatable_vgpus`.
 * Accepts exactly one positional-or-keyword argument `gpu_instance`,
 * converts it to intptr_t, and delegates to the pf-level body.
 * Generated by Cython; the keyword-length check below was corrected by
 * hand (see note).  Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_673gpu_instance_get_creatable_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_creatable_vgpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: the comparison must be inside unlikely().  The previous form
     * `if (unlikely(__pyx_kwds_len) < 0)` normalizes the value to 0/1 via
     * __builtin_expect(!!(x), 0) before comparing, so the `< 0` branch was
     * dead and a failing __Pyx_NumKwargs_FASTCALL went undetected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25667, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25667, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_creatable_vgpus", 0) < (0)) __PYX_ERR(0, 25667, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_creatable_vgpus", 1, 1, 1, i); __PYX_ERR(0, 25667, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25667, __pyx_L3_error)
    }
    /* NOTE(review): converts via PyLong_AsSsize_t — assumes intptr_t and
     * Py_ssize_t have the same width on supported platforms; confirm. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25667, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_creatable_vgpus", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25667, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_creatable_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_672gpu_instance_get_creatable_vgpus(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level body for `gpu_instance_get_creatable_vgpus`: called from the
 * argument-unpacking wrapper with the already-converted intptr_t handle.
 * Forwards to the C-level cpdef implementation (skip_dispatch=1) and
 * propagates its result or exception.  Generated by Cython; do not edit by
 * hand. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_672gpu_instance_get_creatable_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_creatable_vgpus", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C fast-path; returns a new reference or NULL on error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_creatable_vgpus(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_creatable_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25701
 * 
 * 
 * cpdef object gpu_instance_get_active_vgpus(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the active vGPU instances within a GPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_675gpu_instance_get_active_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of the cpdef function
 * `gpu_instance_get_active_vgpus(intptr_t gpu_instance)`
 * (cuda/bindings/_nvml.pyx:25701).  Mirrors gpu_instance_get_creatable_vgpus:
 *   pass 1: call nvmlGpuInstanceGetActiveVgpus with vgpuInstances == NULL to
 *           obtain the required vgpuCount;
 *   pass 2: allocate a Cython `view.array` of vgpuCount unsigned ints,
 *           attach it as `vgpu_instances`, and call again to fill it.
 * Returns a new reference to an ActiveVgpuInstanceInfo_v1 instance, or NULL
 * with a Python exception set.  Generated by Cython; do not edit by hand. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_active_vgpus(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *__pyx_v_activeVgpuInfo = 0;
  nvmlActiveVgpuInstanceInfo_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_vgpuInstances = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_active_vgpus", 0);

  /* "cuda/bindings/_nvml.pyx":25710
 *         ActiveVgpuInstanceInfo: The vGPU instance ID information structure.
 *     """
 *     cdef ActiveVgpuInstanceInfo_v1 activeVgpuInfo = ActiveVgpuInstanceInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>activeVgpuInfo._get_ptr()
 * 
 */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25710, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_activeVgpuInfo = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25711
 *     """
 *     cdef ActiveVgpuInstanceInfo_v1 activeVgpuInfo = ActiveVgpuInstanceInfo_v1()
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>activeVgpuInfo._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* `_get_ptr()` hands back the address of the wrapped C struct as an
   * intptr_t; `ptr` aliases activeVgpuInfo's internal storage. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)__pyx_v_activeVgpuInfo->__pyx_vtab)->_get_ptr(__pyx_v_activeVgpuInfo); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25711, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlActiveVgpuInstanceInfo_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25713
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>activeVgpuInfo._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25714
 * 
 *     with nogil:
 *         ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.vgpuCount = 0
 *         ptr.vgpuInstances = NULL
 */
        /* NVML versioned-struct convention: size in the low bits, API
         * version (1) in bits 24+ — 0x1000000 == (1 << 24). */
        __pyx_v_ptr->version = ((sizeof(nvmlActiveVgpuInstanceInfo_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25715
 *     with nogil:
 *         ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0             # <<<<<<<<<<<<<<
 *         ptr.vgpuInstances = NULL
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)
 */
        __pyx_v_ptr->vgpuCount = 0;

        /* "cuda/bindings/_nvml.pyx":25716
 *         ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0
 *         ptr.vgpuInstances = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status_size(__status__)
 */
        /* NULL array => size-query call: NVML fills vgpuCount only. */
        __pyx_v_ptr->vgpuInstances = NULL;

        /* "cuda/bindings/_nvml.pyx":25717
 *         ptr.vgpuCount = 0
 *         ptr.vgpuInstances = NULL
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetActiveVgpus(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25717, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25713
 *     cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr = <nvmlActiveVgpuInstanceInfo_v1_t *>activeVgpuInfo._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24)
 *         ptr.vgpuCount = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25718
 *         ptr.vgpuInstances = NULL
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.vgpuCount == 0:
 */
  /* check_status_size tolerates the "insufficient size" status expected
   * from the size-query pass; other failures raise (returns 1 on error). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25718, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25720
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuCount == 0:             # <<<<<<<<<<<<<<
 *         return activeVgpuInfo
 * 
 */
  __pyx_t_7 = (__pyx_v_ptr->vgpuCount == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25721
 * 
 *     if ptr.vgpuCount == 0:
 *         return activeVgpuInfo             # <<<<<<<<<<<<<<
 * 
 *     cdef view.array vgpuInstances = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
    /* No active vGPUs: return the empty info struct without a 2nd call. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_activeVgpuInfo);
    __pyx_r = ((PyObject *)__pyx_v_activeVgpuInfo);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25720
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuCount == 0:             # <<<<<<<<<<<<<<
 *         return activeVgpuInfo
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25723
 *         return activeVgpuInfo
 * 
 *     cdef view.array vgpuInstances = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     activeVgpuInfo.vgpu_instances = vgpuInstances
 * 
 */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->vgpuCount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 25723, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_8 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25723, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_9, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25723, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_8, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25723, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25723, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25723, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25723, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuInstances = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25724
 * 
 *     cdef view.array vgpuInstances = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     activeVgpuInfo.vgpu_instances = vgpuInstances             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* Attaching the array via the property is presumed to also point
   * ptr->vgpuInstances at the array's buffer for the fill call below —
   * NOTE(review): setter is defined elsewhere in this file; confirm. */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_activeVgpuInfo), __pyx_mstate_global->__pyx_n_u_vgpu_instances, ((PyObject *)__pyx_v_vgpuInstances)) < (0)) __PYX_ERR(0, 25724, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25726
 *     activeVgpuInfo.vgpu_instances = vgpuInstances
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25727
 * 
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetActiveVgpus(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25727, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25726
 *     activeVgpuInfo.vgpu_instances = vgpuInstances
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25728
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetActiveVgpus(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return activeVgpuInfo
 */
  /* Second pass must fully succeed: strict status check this time. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25728, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25730
 *     check_status(__status__)
 * 
 *     return activeVgpuInfo             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_activeVgpuInfo);
  __pyx_r = ((PyObject *)__pyx_v_activeVgpuInfo);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25701
 * 
 * 
 * cpdef object gpu_instance_get_active_vgpus(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the active vGPU instances within a GPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_active_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_activeVgpuInfo);
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuInstances);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for `gpu_instance_get_active_vgpus`.
 * Unpacks the single argument `gpu_instance` (accepted positionally or by
 * keyword, converted to intptr_t) and delegates to the __pyx_pf_* shim below.
 * NOTE(review): generated code — do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx instead. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_675gpu_instance_get_active_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_674gpu_instance_get_active_vgpus, "gpu_instance_get_active_vgpus(intptr_t gpu_instance)\n\nRetrieve the active vGPU instances within a GPU instance.\n\nArgs:\n    gpu_instance (GpuInstance): The identifier of the target GPU Instance.\n\nReturns:\n    ActiveVgpuInstanceInfo: The vGPU instance ID information structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_675gpu_instance_get_active_vgpus = {"gpu_instance_get_active_vgpus", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_675gpu_instance_get_active_vgpus, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_674gpu_instance_get_active_vgpus};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_675gpu_instance_get_active_vgpus(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds borrowed/new refs to the parsed arguments; released on
   * every exit path below. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_active_vgpus (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): the branch hint wraps the value, not the comparison
     * (`unlikely(x) < 0` vs `unlikely(x < 0)`) — same truth value, harmless;
     * emitted this way by the generator. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25701, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect any positional args first, then merge kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25701, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_active_vgpus", 0) < (0)) __PYX_ERR(0, 25701, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_active_vgpus", 1, 1, 1, i); __PYX_ERR(0, 25701, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25701, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (NVML handle passed as an integer). */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25701, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_active_vgpus", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25701, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: drop any argument refs collected so far, record traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_active_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_674gpu_instance_get_active_vgpus(__pyx_self, __pyx_v_gpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin shim between the Python wrapper and the C-level cpdef implementation:
 * forwards `gpu_instance` with __pyx_skip_dispatch=1 (call the C function
 * directly, no Python-level override lookup) and passes its result through. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_674gpu_instance_get_active_vgpus(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_active_vgpus", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_active_vgpus(__pyx_v_gpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_active_vgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25733
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_type_creatable_placements(intptr_t gpu_instance, unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Query the creatable vGPU placement ID of the vGPU type within a GPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_677gpu_instance_get_vgpu_type_creatable_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C-level implementation of `gpu_instance_get_vgpu_type_creatable_placements`
 * (the cpdef fast path). Two-pass NVML size query:
 *   1) call nvmlGpuInstanceGetVgpuTypeCreatablePlacements with count=0 and
 *      placementIds=NULL to learn the required array length;
 *   2) allocate a C-contiguous `view.array` of unsigned int, attach it to the
 *      result object's `placement_ids` attribute, and call again to fill it.
 * Both NVML calls run with the GIL released. Returns a new reference to a
 * VgpuCreatablePlacementInfo_v1 instance, or NULL on error.
 * NOTE(review): `vgpu_type_id` is CYTHON_UNUSED and is never forwarded to the
 * NVML call — confirm against the .pyx source whether the GPU-instance
 * variant intentionally ignores the type ID. Also the generated docstring
 * advertises `VgpuPlacementList_v2` as the return type while this code builds
 * a VgpuCreatablePlacementInfo_v1; fix in the .pyx, not here.
 * NOTE(review): generated code — do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx instead. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_type_creatable_placements(intptr_t __pyx_v_gpu_instance, CYTHON_UNUSED unsigned int __pyx_v_vgpu_type_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *__pyx_v_pCreatablePlacementInfo = 0;
  nvmlVgpuCreatablePlacementInfo_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_placementIds = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_type_creatable_placements", 0);

  /* "cuda/bindings/_nvml.pyx":25744
 *     """
 * 
 *     cdef VgpuCreatablePlacementInfo_v1 pCreatablePlacementInfo = VgpuCreatablePlacementInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t *ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>pCreatablePlacementInfo._get_ptr()
 * 
 */
  /* Instantiate the Python-level result wrapper (vectorcall, no arguments). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25744, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pCreatablePlacementInfo = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25745
 * 
 *     cdef VgpuCreatablePlacementInfo_v1 pCreatablePlacementInfo = VgpuCreatablePlacementInfo_v1()
 *     cdef nvmlVgpuCreatablePlacementInfo_v1_t *ptr = <nvmlVgpuCreatablePlacementInfo_v1_t *>pCreatablePlacementInfo._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     # Get size of array
 */
  /* Raw pointer to the wrapper's backing NVML struct (via vtable call). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)__pyx_v_pCreatablePlacementInfo->__pyx_vtab)->_get_ptr(__pyx_v_pCreatablePlacementInfo); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25745, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuCreatablePlacementInfo_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25748
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24)
 *         ptr.count = 0
 */
  /* Pass 1 (GIL released): size query with count=0 / placementIds=NULL. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25749
 *     # Get size of array
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.count = 0
 *         ptr.placementIds = NULL
 */
        /* 0x1000000 == (1 << 24): struct version 1 encoded in the top byte. */
        __pyx_v_ptr->version = ((sizeof(nvmlVgpuCreatablePlacementInfo_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25750
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24)
 *         ptr.count = 0             # <<<<<<<<<<<<<<
 *         ptr.placementIds = NULL
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)
 */
        __pyx_v_ptr->count = 0;

        /* "cuda/bindings/_nvml.pyx":25751
 *         ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24)
 *         ptr.count = 0
 *         ptr.placementIds = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)
 *     check_status_size(__status__)
 */
        __pyx_v_ptr->placementIds = NULL;

        /* "cuda/bindings/_nvml.pyx":25752
 *         ptr.count = 0
 *         ptr.placementIds = NULL
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuTypeCreatablePlacements(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25752, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25748
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24)
 *         ptr.count = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25753
 *         ptr.placementIds = NULL
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.count == 0:
 */
  /* Size-query variant of the status check (tolerates the "insufficient
   * size" return that the first pass is expected to produce). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25753, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25755
 *     check_status_size(__status__)
 * 
 *     if ptr.count == 0:             # <<<<<<<<<<<<<<
 *         return pCreatablePlacementInfo
 * 
 */
  /* No placements: return the wrapper as-is, skipping the second pass. */
  __pyx_t_7 = (__pyx_v_ptr->count == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25756
 * 
 *     if ptr.count == 0:
 *         return pCreatablePlacementInfo             # <<<<<<<<<<<<<<
 * 
 *     cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_pCreatablePlacementInfo);
    __pyx_r = ((PyObject *)__pyx_v_pCreatablePlacementInfo);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25755
 *     check_status_size(__status__)
 * 
 *     if ptr.count == 0:             # <<<<<<<<<<<<<<
 *         return pCreatablePlacementInfo
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25758
 *         return pCreatablePlacementInfo
 * 
 *     cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     pCreatablePlacementInfo.placement_ids = placementIds
 * 
 */
  /* Allocate the output buffer: view.array(shape=(count,), itemsize=
   * sizeof(unsigned int), format="I", mode="c") built via vectorcall with
   * four keyword arguments. */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->count); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 25758, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_8 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25758, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_9, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25758, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_8, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25758, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25758, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25758, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25758, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_placementIds = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25759
 * 
 *     cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     pCreatablePlacementInfo.placement_ids = placementIds             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* Attaching the array via the Python attribute also wires ptr->placementIds
   * to the array's buffer (property setter on the wrapper type), so pass 2
   * writes directly into it. */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_pCreatablePlacementInfo), __pyx_mstate_global->__pyx_n_u_placement_ids, ((PyObject *)__pyx_v_placementIds)) < (0)) __PYX_ERR(0, 25759, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25761
 *     pCreatablePlacementInfo.placement_ids = placementIds
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)
 */
  /* Pass 2 (GIL released): fill the now-sized placement-ID array. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25762
 * 
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuTypeCreatablePlacements(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25762, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25761
 *     pCreatablePlacementInfo.placement_ids = placementIds
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25763
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(<GpuInstance>gpu_instance, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return pCreatablePlacementInfo
 */
  /* Strict status check this time: any non-success raises. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25763, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25765
 *     check_status(__status__)
 * 
 *     return pCreatablePlacementInfo             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pCreatablePlacementInfo);
  __pyx_r = ((PyObject *)__pyx_v_pCreatablePlacementInfo);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25733
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_type_creatable_placements(intptr_t gpu_instance, unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Query the creatable vGPU placement ID of the vGPU type within a GPU instance.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_type_creatable_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pCreatablePlacementInfo);
  __Pyx_XDECREF((PyObject *)__pyx_v_placementIds);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Cython-generated CPython entry point for
 * `gpu_instance_get_vgpu_type_creatable_placements`. Unpacks two arguments
 * (`gpu_instance` → intptr_t, `vgpu_type_id` → unsigned int), accepted
 * positionally or by keyword, then delegates to the __pyx_pf_* shim below.
 * NOTE(review): generated code — do not hand-edit; regenerate from
 * cuda/bindings/_nvml.pyx instead. The docstring's stated return type
 * (`VgpuPlacementList_v2`) disagrees with the implementation, which returns
 * a VgpuCreatablePlacementInfo_v1 — fix in the .pyx source, not here. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_677gpu_instance_get_vgpu_type_creatable_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_676gpu_instance_get_vgpu_type_creatable_placements, "gpu_instance_get_vgpu_type_creatable_placements(intptr_t gpu_instance, unsigned int vgpu_type_id)\n\nQuery the creatable vGPU placement ID of the vGPU type within a GPU instance.\n\nArgs:\n    gpu_instance (GpuInstance): The identifier of the target GPU Instance.\n    vgpu_type_id (unsigned int): The vGPU type ID.\n\nReturns:\n    VgpuPlacementList_v2: The vGPU placement list structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_677gpu_instance_get_vgpu_type_creatable_placements = {"gpu_instance_get_vgpu_type_creatable_placements", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_677gpu_instance_get_vgpu_type_creatable_placements, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_676gpu_instance_get_vgpu_type_creatable_placements};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_677gpu_instance_get_vgpu_type_creatable_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_vgpu_type_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds refs to the parsed arguments; released on every exit. */
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_type_creatable_placements (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): hint wraps the value, not the comparison — harmless,
     * generator-emitted. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25733, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: gather positionals (fallthrough switch), then kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25733, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25733, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_vgpu_type_creatable_placements", 0) < (0)) __PYX_ERR(0, 25733, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_type_creatable_placements", 1, 2, 2, i); __PYX_ERR(0, 25733, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25733, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25733, __pyx_L3_error)
    }
    /* Python int → intptr_t handle, Python int → unsigned int type ID. */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25733, __pyx_L3_error)
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25733, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_vgpu_type_creatable_placements", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25733, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error exit: release collected argument refs, record traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_type_creatable_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_676gpu_instance_get_vgpu_type_creatable_placements(__pyx_self, __pyx_v_gpu_instance, __pyx_v_vgpu_type_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Thin shim between the Python wrapper and the C-level cpdef implementation:
 * forwards both arguments with __pyx_skip_dispatch=1 (direct C call, no
 * Python-level override lookup) and passes the result through. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_676gpu_instance_get_vgpu_type_creatable_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_vgpu_type_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_vgpu_type_creatable_placements", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_type_creatable_placements(__pyx_v_gpu_instance, __pyx_v_vgpu_type_id, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_vgpu_type_creatable_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25768
 * 
 * 
 * cpdef object device_get_vgpu_type_creatable_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode):             # <<<<<<<<<<<<<<
 *     """Query the creatable vGPU placement ID of the vGPU type within a GPU instance.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_679device_get_vgpu_type_creatable_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_creatable_placements(intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id, unsigned int __pyx_v_mode, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *__pyx_v_pPlacementList = 0;
  nvmlVgpuPlacementList_v2_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_placementIds = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_type_creatable_placements", 0);

  /* "cuda/bindings/_nvml.pyx":25780
 *     """
 * 
 *     cdef VgpuPlacementList_v2 pPlacementList = VgpuPlacementList_v2()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPlacementList_v2_t *ptr = <nvmlVgpuPlacementList_v2_t *>pPlacementList._get_ptr()
 * 
*/
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25780, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pPlacementList = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25781
 * 
 *     cdef VgpuPlacementList_v2 pPlacementList = VgpuPlacementList_v2()
 *     cdef nvmlVgpuPlacementList_v2_t *ptr = <nvmlVgpuPlacementList_v2_t *>pPlacementList._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     # Get size of array
*/
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)__pyx_v_pPlacementList->__pyx_vtab)->_get_ptr(__pyx_v_pPlacementList); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25781, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuPlacementList_v2_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25784
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24)
 *         ptr.count = 0
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25785
 *     # Get size of array
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24)             # <<<<<<<<<<<<<<
 *         ptr.count = 0
 *         ptr.placementIds = NULL
*/
        __pyx_v_ptr->version = ((sizeof(nvmlVgpuPlacementList_v2_t)) | 0x2000000);

        /* "cuda/bindings/_nvml.pyx":25786
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24)
 *         ptr.count = 0             # <<<<<<<<<<<<<<
 *         ptr.placementIds = NULL
 *         ptr.mode = mode
*/
        __pyx_v_ptr->count = 0;

        /* "cuda/bindings/_nvml.pyx":25787
 *         ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24)
 *         ptr.count = 0
 *         ptr.placementIds = NULL             # <<<<<<<<<<<<<<
 *         ptr.mode = mode
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)
*/
        __pyx_v_ptr->placementIds = NULL;

        /* "cuda/bindings/_nvml.pyx":25788
 *         ptr.count = 0
 *         ptr.placementIds = NULL
 *         ptr.mode = mode             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)
 *     check_status_size(__status__)
*/
        __pyx_v_ptr->mode = __pyx_v_mode;

        /* "cuda/bindings/_nvml.pyx":25789
 *         ptr.placementIds = NULL
 *         ptr.mode = mode
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeCreatablePlacements(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25789, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25784
 * 
 *     # Get size of array
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24)
 *         ptr.count = 0
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25790
 *         ptr.mode = mode
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.count == 0:
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25790, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25792
 *     check_status_size(__status__)
 * 
 *     if ptr.count == 0:             # <<<<<<<<<<<<<<
 *         return pPlacementList
 * 
*/
  __pyx_t_7 = (__pyx_v_ptr->count == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25793
 * 
 *     if ptr.count == 0:
 *         return pPlacementList             # <<<<<<<<<<<<<<
 * 
 *     cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")
*/
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_pPlacementList);
    __pyx_r = ((PyObject *)__pyx_v_pPlacementList);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25792
 *     check_status_size(__status__)
 * 
 *     if ptr.count == 0:             # <<<<<<<<<<<<<<
 *         return pPlacementList
 * 
*/
  }

  /* "cuda/bindings/_nvml.pyx":25795
 *         return pPlacementList
 * 
 *     cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")             # <<<<<<<<<<<<<<
 *     pPlacementList.placement_ids = placementIds
 * 
*/
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->count); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_8);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 25795, __pyx_L1_error);
  __pyx_t_8 = 0;
  __pyx_t_8 = __Pyx_PyLong_FromSize_t((sizeof(unsigned int))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25795, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25795, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_9, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25795, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_8, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25795, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_I, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25795, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25795, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25795, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_placementIds = ((struct __pyx_array_obj *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25796
 * 
 *     cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")
 *     pPlacementList.placement_ids = placementIds             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
*/
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_pPlacementList), __pyx_mstate_global->__pyx_n_u_placement_ids, ((PyObject *)__pyx_v_placementIds)) < (0)) __PYX_ERR(0, 25796, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25798
 *     pPlacementList.placement_ids = placementIds
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25799
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeCreatablePlacements(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), ((nvmlVgpuTypeId_t)__pyx_v_vgpu_type_id), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25799, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25798
 *     pPlacementList.placement_ids = placementIds
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25800
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(<Device>device, <nvmlVgpuTypeId_t>vgpu_type_id, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return pPlacementList
*/
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25800, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25802
 *     check_status(__status__)
 * 
 *     return pPlacementList             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pPlacementList);
  __pyx_r = ((PyObject *)__pyx_v_pPlacementList);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25768
 * 
 * 
 * cpdef object device_get_vgpu_type_creatable_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode):             # <<<<<<<<<<<<<<
 *     """Query the creatable vGPU placement ID of the vGPU type within a GPU instance.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_type_creatable_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pPlacementList);
  __Pyx_XDECREF((PyObject *)__pyx_v_placementIds);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython-visible entry point for
 * cuda.bindings._nvml.device_get_vgpu_type_creatable_placements(device, vgpu_type_id, mode).
 *
 * Generated by Cython -- do not edit by hand.  This wrapper:
 *   1. collects exactly three arguments (positional and/or keyword:
 *      "device", "vgpu_type_id", "mode") into values[],
 *   2. converts them to C types (intptr_t, unsigned int, unsigned int),
 *   3. forwards to the cpdef dispatcher __pyx_pf_..._678...,
 *   4. releases the owned argument references on every exit path.
 * Returns a new reference on success, NULL with an exception set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_679device_get_vgpu_type_creatable_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_678device_get_vgpu_type_creatable_placements, "device_get_vgpu_type_creatable_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode)\n\nQuery the creatable vGPU placement ID of the vGPU type within a GPU instance.\n\nArgs:\n    device (Device): The identifier of the target device.\n    vgpu_type_id (unsigned int): The vGPU type ID.\n    mode (unsigned int): The placement mode. 0: Heterogeneous, 1: Homogeneous.\n\nReturns:\n    VgpuPlacementList_v2: The vGPU placement list structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_679device_get_vgpu_type_creatable_placements = {"device_get_vgpu_type_creatable_placements", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_679device_get_vgpu_type_creatable_placements, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_678device_get_vgpu_type_creatable_placements};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_679device_get_vgpu_type_creatable_placements(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_vgpu_type_id;
  unsigned int __pyx_v_mode;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned references to the three incoming argument objects; released in
   * the __pyx_L3_error path and in the normal-exit cleanup loop below. */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_type_creatable_placements (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_vgpu_type_id,&__pyx_mstate_global->__pyx_n_u_mode,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25768, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: gather the positionals first
       * (fall-through switch), then merge keywords via ParseKeywords and
       * verify no required argument slot was left empty. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25768, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_type_creatable_placements", 0) < (0)) __PYX_ERR(0, 25768, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_type_creatable_placements", 1, 3, 3, i); __PYX_ERR(0, 25768, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25768, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25768, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25768, __pyx_L3_error)
    }
    /* Convert the Python argument objects to the C parameter types.
     * A sentinel value of -1 plus a pending exception signals conversion
     * failure. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25768, __pyx_L3_error)
    __pyx_v_vgpu_type_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_vgpu_type_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25768, __pyx_L3_error)
    __pyx_v_mode = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_mode == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25768, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_type_creatable_placements", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 25768, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-handling failure: drop whatever argument refs were taken,
   * record a traceback entry, and propagate NULL. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_type_creatable_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the cpdef dispatcher with the converted C arguments. */
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_678device_get_vgpu_type_creatable_placements(__pyx_self, __pyx_v_device, __pyx_v_vgpu_type_id, __pyx_v_mode);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatcher for device_get_vgpu_type_creatable_placements.
 * Calls the C-level implementation (__pyx_f_...) with __pyx_skip_dispatch=1
 * so the implementation does not re-enter Python dispatch, then returns the
 * resulting object (new reference) or NULL with a traceback entry appended.
 * Generated by Cython -- do not edit by hand.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_678device_get_vgpu_type_creatable_placements(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_vgpu_type_id, unsigned int __pyx_v_mode) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_type_creatable_placements", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_creatable_placements(__pyx_v_device, __pyx_v_vgpu_type_id, __pyx_v_mode, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_type_creatable_placements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25805
 * 
 * 
 * cpdef object vgpu_instance_get_metadata(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its
 *     associated VM such as the currently installed NVIDIA guest driver version, together with host driver version and
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_681vgpu_instance_get_metadata(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef vgpu_instance_get_metadata(vgpu_instance).
 *
 * Flow (mirrors the echoed .pyx lines below):
 *   1. Instantiate a VgpuMetadata wrapper object.
 *   2. Set bufferSize[0] = sizeof(nvmlVgpuMetadata_t).
 *   3. Obtain the wrapper's internal buffer pointer via its _get_ptr vtab slot.
 *   4. Release the GIL and call nvmlVgpuInstanceGetMetadata(...); the
 *      _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel combined with a pending
 *      Python error indicates the underlying NVML symbol failed to load.
 *   5. check_status_size() raises on a non-success status (returns 1 on error).
 *   6. Return the populated VgpuMetadata (new reference), or NULL on error.
 * Generated by Cython -- do not edit by hand.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_metadata(unsigned int __pyx_v_vgpu_instance, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_vgpuMetadata = 0;
  unsigned int __pyx_v_bufferSize[1];
  nvmlVgpuMetadata_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4[1];
  intptr_t __pyx_t_5;
  nvmlReturn_t __pyx_t_6;
  int __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_metadata", 0);

  /* "cuda/bindings/_nvml.pyx":25816
 *         VgpuMetadata: Metadata.
 *     """
 *     cdef VgpuMetadata vgpuMetadata = VgpuMetadata()             # <<<<<<<<<<<<<<
 *     cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuMetadata_t)]
 *     cdef nvmlVgpuMetadata_t *ptr = <nvmlVgpuMetadata_t *>vgpuMetadata._get_ptr()
 */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25816, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuMetadata = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25817
 *     """
 *     cdef VgpuMetadata vgpuMetadata = VgpuMetadata()
 *     cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuMetadata_t)]             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuMetadata_t *ptr = <nvmlVgpuMetadata_t *>vgpuMetadata._get_ptr()
 * 
 */
  __pyx_t_4[0] = (sizeof(nvmlVgpuMetadata_t));
  memcpy(&(__pyx_v_bufferSize[0]), __pyx_t_4, sizeof(__pyx_v_bufferSize[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25818
 *     cdef VgpuMetadata vgpuMetadata = VgpuMetadata()
 *     cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuMetadata_t)]
 *     cdef nvmlVgpuMetadata_t *ptr = <nvmlVgpuMetadata_t *>vgpuMetadata._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  __pyx_t_5 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_vgpuMetadata->__pyx_vtab)->_get_ptr(__pyx_v_vgpuMetadata); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25818, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuMetadata_t *)__pyx_t_5);

  /* "cuda/bindings/_nvml.pyx":25820
 *     cdef nvmlVgpuMetadata_t *ptr = <nvmlVgpuMetadata_t *>vgpuMetadata._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetMetadata(<nvmlVgpuInstance_t>vgpu_instance, ptr, bufferSize)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25821
 * 
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetMetadata(<nvmlVgpuInstance_t>vgpu_instance, ptr, bufferSize)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_6 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMetadata(((nvmlVgpuInstance_t)__pyx_v_vgpu_instance), __pyx_v_ptr, __pyx_v_bufferSize); if (unlikely(__pyx_t_6 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25821, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_6;
      }

      /* "cuda/bindings/_nvml.pyx":25820
 *     cdef nvmlVgpuMetadata_t *ptr = <nvmlVgpuMetadata_t *>vgpuMetadata._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlVgpuInstanceGetMetadata(<nvmlVgpuInstance_t>vgpu_instance, ptr, bufferSize)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Reacquire the GIL before propagating the error. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25822
 *     with nogil:
 *         __status__ = nvmlVgpuInstanceGetMetadata(<nvmlVgpuInstance_t>vgpu_instance, ptr, bufferSize)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return vgpuMetadata
 */
  __pyx_t_7 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_7 == ((int)1))) __PYX_ERR(0, 25822, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25824
 *     check_status_size(__status__)
 * 
 *     return vgpuMetadata             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_vgpuMetadata);
  __pyx_r = ((PyObject *)__pyx_v_vgpuMetadata);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25805
 * 
 * 
 * cpdef object vgpu_instance_get_metadata(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its
 *     associated VM such as the currently installed NVIDIA guest driver version, together with host driver version and
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_metadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuMetadata);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython-visible entry point for
 * cuda.bindings._nvml.vgpu_instance_get_metadata(vgpu_instance).
 *
 * Generated by Cython -- do not edit by hand.  Collects the single
 * "vgpu_instance" argument (positional or keyword), converts it to
 * unsigned int, and delegates to the cpdef dispatcher __pyx_pf_..._680....
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_681vgpu_instance_get_metadata(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_680vgpu_instance_get_metadata, "vgpu_instance_get_metadata(unsigned int vgpu_instance)\n\nReturns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its\nassociated VM such as the currently installed NVIDIA guest driver version, together with host driver version and\nan opaque data section containing internal state.\n\nArgs:\n    vgpu_instance (unsigned int): The identifier of the target vGPU instance.\n\nReturns:\n    VgpuMetadata: Metadata.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_681vgpu_instance_get_metadata = {"vgpu_instance_get_metadata", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_681vgpu_instance_get_metadata, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_680vgpu_instance_get_metadata};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_681vgpu_instance_get_metadata(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  unsigned int __pyx_v_vgpu_instance;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* Owned reference to the single incoming argument; released on all exits. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("vgpu_instance_get_metadata (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_instance,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25805, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: positional first, then merge keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25805, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "vgpu_instance_get_metadata", 0) < (0)) __PYX_ERR(0, 25805, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_metadata", 1, 1, 1, i); __PYX_ERR(0, 25805, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25805, __pyx_L3_error)
    }
    /* Convert the Python argument to unsigned int; -1 + pending error = failure. */
    __pyx_v_vgpu_instance = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_vgpu_instance == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25805, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("vgpu_instance_get_metadata", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25805, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_metadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_680vgpu_instance_get_metadata(__pyx_self, __pyx_v_vgpu_instance);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * cpdef dispatcher for vgpu_instance_get_metadata.
 * Forwards to the C-level implementation with __pyx_skip_dispatch=1 and
 * returns its result (new reference), or NULL after appending a traceback
 * entry.  Generated by Cython -- do not edit by hand.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_680vgpu_instance_get_metadata(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_vgpu_instance) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("vgpu_instance_get_metadata", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_metadata(__pyx_v_vgpu_instance, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25805, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.vgpu_instance_get_metadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25827
 * 
 * 
 * cpdef object device_get_vgpu_metadata(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns a vGPU metadata structure for the physical GPU indicated by device. The structure contains
 *     information about the GPU and the currently installed NVIDIA host driver version that's controlling it,
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_683device_get_vgpu_metadata(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C-level implementation of cpdef device_get_vgpu_metadata(device).
 *
 * Structurally parallel to vgpu_instance_get_metadata above:
 *   1. Instantiate a VgpuPgpuMetadata wrapper object.
 *   2. Set bufferSize[0] = sizeof(nvmlVgpuPgpuMetadata_t).
 *   3. Obtain the wrapper's internal buffer pointer via its _get_ptr vtab slot.
 *   4. Release the GIL and call nvmlDeviceGetVgpuMetadata(...); the
 *      _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel combined with a pending
 *      Python error indicates the underlying NVML symbol failed to load.
 *   5. check_status_size() raises on a non-success status (returns 1 on error).
 *   6. Return the populated VgpuPgpuMetadata (new reference), or NULL on error.
 * Generated by Cython -- do not edit by hand.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_metadata(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_pgpuMetadata = 0;
  unsigned int __pyx_v_bufferSize[1];
  nvmlVgpuPgpuMetadata_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  unsigned int __pyx_t_4[1];
  intptr_t __pyx_t_5;
  nvmlReturn_t __pyx_t_6;
  int __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_metadata", 0);

  /* "cuda/bindings/_nvml.pyx":25838
 *         VgpuPgpuMetadata: Metadata.
 *     """
 *     cdef VgpuPgpuMetadata pgpuMetadata = VgpuPgpuMetadata()             # <<<<<<<<<<<<<<
 *     cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuPgpuMetadata_t)]
 *     cdef nvmlVgpuPgpuMetadata_t *ptr = <nvmlVgpuPgpuMetadata_t *>pgpuMetadata._get_ptr()
 */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25838, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_pgpuMetadata = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25839
 *     """
 *     cdef VgpuPgpuMetadata pgpuMetadata = VgpuPgpuMetadata()
 *     cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuPgpuMetadata_t)]             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuMetadata_t *ptr = <nvmlVgpuPgpuMetadata_t *>pgpuMetadata._get_ptr()
 * 
 */
  __pyx_t_4[0] = (sizeof(nvmlVgpuPgpuMetadata_t));
  memcpy(&(__pyx_v_bufferSize[0]), __pyx_t_4, sizeof(__pyx_v_bufferSize[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25840
 *     cdef VgpuPgpuMetadata pgpuMetadata = VgpuPgpuMetadata()
 *     cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuPgpuMetadata_t)]
 *     cdef nvmlVgpuPgpuMetadata_t *ptr = <nvmlVgpuPgpuMetadata_t *>pgpuMetadata._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  __pyx_t_5 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_pgpuMetadata->__pyx_vtab)->_get_ptr(__pyx_v_pgpuMetadata); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25840, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuPgpuMetadata_t *)__pyx_t_5);

  /* "cuda/bindings/_nvml.pyx":25842
 *     cdef nvmlVgpuPgpuMetadata_t *ptr = <nvmlVgpuPgpuMetadata_t *>pgpuMetadata._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuMetadata(<Device>device, ptr, bufferSize)
 *     check_status_size(__status__)
 */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25843
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuMetadata(<Device>device, ptr, bufferSize)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_6 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuMetadata(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr, __pyx_v_bufferSize); if (unlikely(__pyx_t_6 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25843, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_6;
      }

      /* "cuda/bindings/_nvml.pyx":25842
 *     cdef nvmlVgpuPgpuMetadata_t *ptr = <nvmlVgpuPgpuMetadata_t *>pgpuMetadata._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuMetadata(<Device>device, ptr, bufferSize)
 *     check_status_size(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          /* Reacquire the GIL before propagating the error. */
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25844
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuMetadata(<Device>device, ptr, bufferSize)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return pgpuMetadata
 */
  __pyx_t_7 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_7 == ((int)1))) __PYX_ERR(0, 25844, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25846
 *     check_status_size(__status__)
 * 
 *     return pgpuMetadata             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_pgpuMetadata);
  __pyx_r = ((PyObject *)__pyx_v_pgpuMetadata);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25827
 * 
 * 
 * cpdef object device_get_vgpu_metadata(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns a vGPU metadata structure for the physical GPU indicated by device. The structure contains
 *     information about the GPU and the currently installed NVIDIA host driver version that's controlling it,
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_metadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_pgpuMetadata);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython-visible wrapper for the cpdef function `device_get_vgpu_metadata`.
 * Unpacks the single argument `device` (accepted positionally or by keyword,
 * converted to intptr_t) and forwards to the "pf" entry point
 * __pyx_pf_..._682device_get_vgpu_metadata.
 * Returns a new reference, or NULL with a Python exception set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_683device_get_vgpu_metadata(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_682device_get_vgpu_metadata, "device_get_vgpu_metadata(intptr_t device)\n\nReturns a vGPU metadata structure for the physical GPU indicated by device. The structure contains\ninformation about the GPU and the currently installed NVIDIA host driver version that's controlling it,\ntogether with an opaque data section containing internal state.\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    VgpuPgpuMetadata: Metadata.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_683device_get_vgpu_metadata = {"device_get_vgpu_metadata", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_683device_get_vgpu_metadata, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_682device_get_vgpu_metadata};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_683device_get_vgpu_metadata(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_metadata (wrapper)", 0);
  /* Non-fastcall builds receive args as a tuple; recover the count here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  Under GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is 0 or 1
     * and therefore never < 0, so a negative (error) return from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison belongs
     * inside the branch-hint macro. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25827, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25827, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_metadata", 0) < (0)) __PYX_ERR(0, 25827, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_metadata", 1, 1, 1, i); __PYX_ERR(0, 25827, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25827, __pyx_L3_error)
    }
    /* NOTE(review): conversion via PyLong_AsSsize_t assumes
     * sizeof(intptr_t) == sizeof(Py_ssize_t), which holds on the platforms
     * Cython targets. */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25827, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_metadata", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25827, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop the borrowed-into-owned argument refs. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_metadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_682device_get_vgpu_metadata(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * "pf" entry point for `device_get_vgpu_metadata`: thin trampoline that calls
 * the shared cpdef implementation (__pyx_f_...) with __pyx_skip_dispatch=1
 * (no Python-level override lookup) and propagates its result or exception.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_682device_get_vgpu_metadata(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_metadata", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_metadata(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_metadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25849
 * 
 * 
 * cpdef object get_vgpu_compatibility(VgpuMetadata vgpu_metadata, VgpuPgpuMetadata pgpu_metadata):             # <<<<<<<<<<<<<<
 *     """Takes a vGPU instance metadata structure read from vgpu_instance_get_metadata() and a vGPU metadata structure
 *     for a physical GPU read from device_get_vgpu_metadata, and returns compatibility information of the vGPU instance
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_685get_vgpu_compatibility(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/*
 * C implementation of the cpdef `get_vgpu_compatibility`: allocates a
 * VgpuPgpuCompatibility result object, extracts the raw nvml struct pointers
 * from the two metadata wrapper objects, calls nvmlGetVgpuCompatibility with
 * the GIL released, raises on a non-success status, and returns the result
 * object (new reference) on success, or NULL with an exception set.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_get_vgpu_compatibility(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_vgpu_metadata, struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_pgpu_metadata, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *__pyx_v_compatibilityInfo = 0;
  nvmlVgpuPgpuCompatibility_t *__pyx_v_ptr;
  nvmlVgpuMetadata_t *__pyx_v_vgpu_metadata_ptr;
  nvmlVgpuPgpuMetadata_t *__pyx_v_pgpu_metadata_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_vgpu_compatibility", 0);

  /* "cuda/bindings/_nvml.pyx":25861
 *         VgpuPgpuCompatibility: Compatibility information.
 *     """
 *     cdef VgpuPgpuCompatibility compatibilityInfo = VgpuPgpuCompatibility()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuCompatibility_t *ptr = <nvmlVgpuPgpuCompatibility_t *>compatibilityInfo._get_ptr()
 *     cdef nvmlVgpuMetadata_t *vgpu_metadata_ptr = <nvmlVgpuMetadata_t *>vgpu_metadata._get_ptr()
 */
  /* Construct the output wrapper object via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25861, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_compatibilityInfo = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25862
 *     """
 *     cdef VgpuPgpuCompatibility compatibilityInfo = VgpuPgpuCompatibility()
 *     cdef nvmlVgpuPgpuCompatibility_t *ptr = <nvmlVgpuPgpuCompatibility_t *>compatibilityInfo._get_ptr()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuMetadata_t *vgpu_metadata_ptr = <nvmlVgpuMetadata_t *>vgpu_metadata._get_ptr()
 *     cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = <nvmlVgpuPgpuMetadata_t *>pgpu_metadata._get_ptr()
 */
  /* _get_ptr() returns the underlying struct address as an intptr_t;
   * the vtable call can still raise, hence the PyErr_Occurred() checks. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)__pyx_v_compatibilityInfo->__pyx_vtab)->_get_ptr(__pyx_v_compatibilityInfo); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25862, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuPgpuCompatibility_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25863
 *     cdef VgpuPgpuCompatibility compatibilityInfo = VgpuPgpuCompatibility()
 *     cdef nvmlVgpuPgpuCompatibility_t *ptr = <nvmlVgpuPgpuCompatibility_t *>compatibilityInfo._get_ptr()
 *     cdef nvmlVgpuMetadata_t *vgpu_metadata_ptr = <nvmlVgpuMetadata_t *>vgpu_metadata._get_ptr()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = <nvmlVgpuPgpuMetadata_t *>pgpu_metadata._get_ptr()
 * 
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuMetadata *)__pyx_v_vgpu_metadata->__pyx_vtab)->_get_ptr(__pyx_v_vgpu_metadata); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25863, __pyx_L1_error)
  __pyx_v_vgpu_metadata_ptr = ((nvmlVgpuMetadata_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25864
 *     cdef nvmlVgpuPgpuCompatibility_t *ptr = <nvmlVgpuPgpuCompatibility_t *>compatibilityInfo._get_ptr()
 *     cdef nvmlVgpuMetadata_t *vgpu_metadata_ptr = <nvmlVgpuMetadata_t *>vgpu_metadata._get_ptr()
 *     cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = <nvmlVgpuPgpuMetadata_t *>pgpu_metadata._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)__pyx_v_pgpu_metadata->__pyx_vtab)->_get_ptr(__pyx_v_pgpu_metadata); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25864, __pyx_L1_error)
  __pyx_v_pgpu_metadata_ptr = ((nvmlVgpuPgpuMetadata_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25866
 *     cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = <nvmlVgpuPgpuMetadata_t *>pgpu_metadata._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetVgpuCompatibility(vgpu_metadata_ptr, pgpu_metadata_ptr, ptr)
 *     check_status(__status__)
 */
  /* Release the GIL around the (potentially slow) NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25867
 * 
 *     with nogil:
 *         __status__ = nvmlGetVgpuCompatibility(vgpu_metadata_ptr, pgpu_metadata_ptr, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        /* Sentinel status signals the binding's lazy loader failed; it must
         * re-acquire the GIL before inspecting the Python error state. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuCompatibility(__pyx_v_vgpu_metadata_ptr, __pyx_v_pgpu_metadata_ptr, __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25867, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25866
 *     cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = <nvmlVgpuPgpuMetadata_t *>pgpu_metadata._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetVgpuCompatibility(vgpu_metadata_ptr, pgpu_metadata_ptr, ptr)
 *     check_status(__status__)
 */
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25868
 *     with nogil:
 *         __status__ = nvmlGetVgpuCompatibility(vgpu_metadata_ptr, pgpu_metadata_ptr, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return compatibilityInfo
 */
  /* check_status raises the mapped Python exception for non-SUCCESS codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25868, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25870
 *     check_status(__status__)
 * 
 *     return compatibilityInfo             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_compatibilityInfo);
  __pyx_r = ((PyObject *)__pyx_v_compatibilityInfo);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25849
 * 
 * 
 * cpdef object get_vgpu_compatibility(VgpuMetadata vgpu_metadata, VgpuPgpuMetadata pgpu_metadata):             # <<<<<<<<<<<<<<
 *     """Takes a vGPU instance metadata structure read from vgpu_instance_get_metadata() and a vGPU metadata structure
 *     for a physical GPU read from device_get_vgpu_metadata, and returns compatibility information of the vGPU instance
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_compatibility", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop the local owned reference; __pyx_r already holds its own INCREF. */
  __Pyx_XDECREF((PyObject *)__pyx_v_compatibilityInfo);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/*
 * CPython-visible wrapper for the cpdef function `get_vgpu_compatibility`.
 * Unpacks the two arguments `vgpu_metadata` and `pgpu_metadata` (positional
 * or keyword), type-checks them against the extension types, and forwards to
 * the "pf" entry point.  Returns a new reference, or NULL with an exception
 * set.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_685get_vgpu_compatibility(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_684get_vgpu_compatibility, "get_vgpu_compatibility(VgpuMetadata vgpu_metadata, VgpuPgpuMetadata pgpu_metadata)\n\nTakes a vGPU instance metadata structure read from vgpu_instance_get_metadata() and a vGPU metadata structure\nfor a physical GPU read from device_get_vgpu_metadata, and returns compatibility information of the vGPU instance\nand the physical GPU.\n\nArgs:\n    vgpu_metadata (VgpuMetadata): The vGPU instance metadata.\n    pgpu_metadata (VgpuPgpuMetadata): The physical GPU metadata.\n\nReturns:\n    VgpuPgpuCompatibility: Compatibility information.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_685get_vgpu_compatibility = {"get_vgpu_compatibility", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_685get_vgpu_compatibility, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_684get_vgpu_compatibility};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_685get_vgpu_compatibility(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_vgpu_metadata = 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_pgpu_metadata = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_vgpu_compatibility (wrapper)", 0);
  /* Non-fastcall builds receive args as a tuple; recover the count here. */
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_vgpu_metadata,&__pyx_mstate_global->__pyx_n_u_pgpu_metadata,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  Under GCC/Clang,
     * unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is 0 or 1
     * and therefore never < 0, so a negative (error) return from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison belongs
     * inside the branch-hint macro. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25849, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then parse kwargs. */
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25849, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "get_vgpu_compatibility", 0) < (0)) __PYX_ERR(0, 25849, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("get_vgpu_compatibility", 1, 2, 2, i); __PYX_ERR(0, 25849, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly two positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25849, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25849, __pyx_L3_error)
    }
    __pyx_v_vgpu_metadata = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)values[0]);
    __pyx_v_pgpu_metadata = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)values[1]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("get_vgpu_compatibility", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25849, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error during unpacking: drop the collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_compatibility", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Raise TypeError unless the arguments are the expected extension types. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_vgpu_metadata), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, 1, "vgpu_metadata", 0))) __PYX_ERR(0, 25849, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pgpu_metadata), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, 1, "pgpu_metadata", 0))) __PYX_ERR(0, 25849, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_684get_vgpu_compatibility(__pyx_self, __pyx_v_vgpu_metadata, __pyx_v_pgpu_metadata);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * "pf" entry point for `get_vgpu_compatibility`: thin trampoline that calls
 * the shared cpdef implementation (__pyx_f_...) with __pyx_skip_dispatch=1
 * (no Python-level override lookup) and propagates its result or exception.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_684get_vgpu_compatibility(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *__pyx_v_vgpu_metadata, struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *__pyx_v_pgpu_metadata) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_vgpu_compatibility", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_get_vgpu_compatibility(__pyx_v_vgpu_metadata, __pyx_v_pgpu_metadata, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_compatibility", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25873
 * 
 * 
 * cpdef tuple get_vgpu_version():             # <<<<<<<<<<<<<<
 *     """Query the ranges of supported vGPU versions.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_687get_vgpu_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/*
 * C implementation of the cpdef `get_vgpu_version`: allocates two VgpuVersion
 * wrapper objects ("supported" and "current"), passes their raw struct
 * pointers to nvmlGetVgpuVersion with the GIL released, raises on a
 * non-success status, and returns the 2-tuple (supported, current) as a new
 * reference, or NULL with an exception set.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_get_vgpu_version(CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_supported = 0;
  nvmlVgpuVersion_t *__pyx_v_supported_ptr;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *__pyx_v_current = 0;
  nvmlVgpuVersion_t *__pyx_v_current_ptr;
  nvmlReturn_t __pyx_v___status__;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_vgpu_version", 0);

  /* "cuda/bindings/_nvml.pyx":25879
 *         tuple: A tuple of (VgpuVersion supported, VgpuVersion current).
 *     """
 *     cdef VgpuVersion supported = VgpuVersion()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuVersion_t *supported_ptr = <nvmlVgpuVersion_t *>supported._get_ptr()
 *     cdef VgpuVersion current = VgpuVersion()
 */
  /* Construct the first output wrapper via a zero-argument vectorcall. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25879, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_supported = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25880
 *     """
 *     cdef VgpuVersion supported = VgpuVersion()
 *     cdef nvmlVgpuVersion_t *supported_ptr = <nvmlVgpuVersion_t *>supported._get_ptr()             # <<<<<<<<<<<<<<
 *     cdef VgpuVersion current = VgpuVersion()
 *     cdef nvmlVgpuVersion_t *current_ptr = <nvmlVgpuVersion_t *>current._get_ptr()
 */
  /* _get_ptr() returns the underlying struct address as an intptr_t;
   * the vtable call can still raise, hence the PyErr_Occurred() checks. */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_supported->__pyx_vtab)->_get_ptr(__pyx_v_supported); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25880, __pyx_L1_error)
  __pyx_v_supported_ptr = ((nvmlVgpuVersion_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25881
 *     cdef VgpuVersion supported = VgpuVersion()
 *     cdef nvmlVgpuVersion_t *supported_ptr = <nvmlVgpuVersion_t *>supported._get_ptr()
 *     cdef VgpuVersion current = VgpuVersion()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuVersion_t *current_ptr = <nvmlVgpuVersion_t *>current._get_ptr()
 * 
 */
  /* Construct the second output wrapper the same way. */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25881, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_current = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25882
 *     cdef nvmlVgpuVersion_t *supported_ptr = <nvmlVgpuVersion_t *>supported._get_ptr()
 *     cdef VgpuVersion current = VgpuVersion()
 *     cdef nvmlVgpuVersion_t *current_ptr = <nvmlVgpuVersion_t *>current._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion *)__pyx_v_current->__pyx_vtab)->_get_ptr(__pyx_v_current); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25882, __pyx_L1_error)
  __pyx_v_current_ptr = ((nvmlVgpuVersion_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25884
 *     cdef nvmlVgpuVersion_t *current_ptr = <nvmlVgpuVersion_t *>current._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetVgpuVersion(supported_ptr, current_ptr)
 * 
 */
  /* Release the GIL around the (potentially slow) NVML library call. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25885
 * 
 *     with nogil:
 *         __status__ = nvmlGetVgpuVersion(supported_ptr, current_ptr)             # <<<<<<<<<<<<<<
 * 
 *     check_status(__status__)
 */
        /* Sentinel status signals the binding's lazy loader failed; it must
         * re-acquire the GIL before inspecting the Python error state. */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuVersion(__pyx_v_supported_ptr, __pyx_v_current_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25885, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25884
 *     cdef nvmlVgpuVersion_t *current_ptr = <nvmlVgpuVersion_t *>current._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGetVgpuVersion(supported_ptr, current_ptr)
 * 
 */
      /* The GIL is restored on both the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25887
 *         __status__ = nvmlGetVgpuVersion(supported_ptr, current_ptr)
 * 
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 *     return (supported, current)
 * 
 */
  /* check_status raises the mapped Python exception for non-SUCCESS codes. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25887, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25888
 * 
 *     check_status(__status__)
 *     return (supported, current)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Pack both wrapper objects into the result tuple (tuple steals refs
   * that are explicitly INCREF'd/GIVEREF'd first). */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25888, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF((PyObject *)__pyx_v_supported);
  __Pyx_GIVEREF((PyObject *)__pyx_v_supported);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_supported)) != (0)) __PYX_ERR(0, 25888, __pyx_L1_error);
  __Pyx_INCREF((PyObject *)__pyx_v_current);
  __Pyx_GIVEREF((PyObject *)__pyx_v_current);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_current)) != (0)) __PYX_ERR(0, 25888, __pyx_L1_error);
  __pyx_r = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25873
 * 
 * 
 * cpdef tuple get_vgpu_version():             # <<<<<<<<<<<<<<
 *     """Query the ranges of supported vGPU versions.
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  /* Drop local owned references; the tuple holds its own. */
  __Pyx_XDECREF((PyObject *)__pyx_v_supported);
  __Pyx_XDECREF((PyObject *)__pyx_v_current);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_687get_vgpu_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_686get_vgpu_version, "get_vgpu_version() -> tuple\n\nQuery the ranges of supported vGPU versions.\n\nReturns:\n    tuple: A tuple of (VgpuVersion supported, VgpuVersion current).");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_687get_vgpu_version = {"get_vgpu_version", (PyCFunction)__pyx_pw_4cuda_8bindings_5_nvml_687get_vgpu_version, METH_NOARGS, __pyx_doc_4cuda_8bindings_5_nvml_686get_vgpu_version};
/*
 * METH_NOARGS wrapper for `get_vgpu_version`: takes no Python arguments and
 * delegates directly to the "pf" entry point.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_687get_vgpu_version(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_vgpu_version (wrapper)", 0);
  /* NOTE(review): `__pyx_args`/`__pyx_nargs` are not parameters of this
   * METH_NOARGS function, so __Pyx_KwValues_VARARGS must be a macro that
   * discards its arguments (e.g. expands to NULL) for this to compile —
   * confirm against the utility-code definition elsewhere in this file. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_686get_vgpu_version(__pyx_self);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * "pf" entry point for `get_vgpu_version`: thin trampoline that calls the
 * shared cpdef implementation (__pyx_f_...) with __pyx_skip_dispatch=1
 * (no Python-level override lookup) and propagates its result or exception.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_686get_vgpu_version(CYTHON_UNUSED PyObject *__pyx_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_vgpu_version", 0);
  __Pyx_XDECREF(__pyx_r);
  /* Delegate to the C implementation; NULL signals a raised exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_get_vgpu_version(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25873, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.get_vgpu_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25891
 * 
 * 
 * cpdef object device_get_vgpu_instances_utilization_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """
 *     Retrieves recent utilization for vGPU instances running on a physical GPU (device).
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_689device_get_vgpu_instances_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for the cpdef function
 * `device_get_vgpu_instances_utilization_info(intptr_t device)`.
 *
 * Two-pass NVML query pattern (visible below):
 *   1. Call nvmlDeviceGetVgpuInstancesUtilizationInfo with
 *      vgpuUtilArray == NULL so the driver reports the required
 *      vgpuInstanceCount (status validated by check_status_size, which
 *      tolerates the size-query return code).
 *   2. Allocate a VgpuInstanceUtilizationInfo_v1 wrapper of that count,
 *      attach it to the info object, and call NVML again to fill it.
 * Returns the VgpuInstancesUtilizationInfo_v1 Python object, or NULL with
 * an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_instances_utilization_info(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *__pyx_v_vgpuUtilInfo = 0;
  nvmlVgpuInstancesUtilizationInfo_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v_vgpuUtilArray = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_instances_utilization_info", 0);

  /* "cuda/bindings/_nvml.pyx":25901
 *         VgpuInstancesUtilizationInfo_v1: The vGPU instances utilization information structure.
 *     """
 *     cdef VgpuInstancesUtilizationInfo_v1 vgpuUtilInfo = VgpuInstancesUtilizationInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr = <nvmlVgpuInstancesUtilizationInfo_t *>vgpuUtilInfo._get_ptr()
 * 
 */
  /* Construct the Python-side result wrapper (zero-arg vectorcall). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25901, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuUtilInfo = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25902
 *     """
 *     cdef VgpuInstancesUtilizationInfo_v1 vgpuUtilInfo = VgpuInstancesUtilizationInfo_v1()
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr = <nvmlVgpuInstancesUtilizationInfo_t *>vgpuUtilInfo._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* NOTE(review): the .pyx cast uses nvmlVgpuInstancesUtilizationInfo_t while
   * the variable is nvmlVgpuInstancesUtilizationInfo_v1_t* -- presumably the
   * two names are aliased by an NVML typedef; confirm against nvml.h (the
   * sibling ..._processes_... function below casts to the _v1_t type). */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)__pyx_v_vgpuUtilInfo->__pyx_vtab)->_get_ptr(__pyx_v_vgpuUtilInfo); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25902, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuInstancesUtilizationInfo_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25904
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr = <nvmlVgpuInstancesUtilizationInfo_t *>vgpuUtilInfo._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuInstanceCount = 0
 */
  /* Pass 1 (GIL released): size query -- NULL array, count 0; NVML fills in
   * the required vgpuInstanceCount. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25905
 * 
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.vgpuInstanceCount = 0
 *         ptr.vgpuUtilArray = NULL
 */
        /* NVML versioned-struct convention: struct size | (version << 24). */
        __pyx_v_ptr->version = ((sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25906
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuInstanceCount = 0             # <<<<<<<<<<<<<<
 *         ptr.vgpuUtilArray = NULL
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)
 */
        __pyx_v_ptr->vgpuInstanceCount = 0;

        /* "cuda/bindings/_nvml.pyx":25907
 *         ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuInstanceCount = 0
 *         ptr.vgpuUtilArray = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)
 *     check_status_size(__status__)
 */
        __pyx_v_ptr->vgpuUtilArray = NULL;

        /* "cuda/bindings/_nvml.pyx":25908
 *         ptr.vgpuInstanceCount = 0
 *         ptr.vgpuUtilArray = NULL
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuInstancesUtilizationInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25908, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25904
 *     cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr = <nvmlVgpuInstancesUtilizationInfo_t *>vgpuUtilInfo._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuInstanceCount = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25909
 *         ptr.vgpuUtilArray = NULL
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.vgpuInstanceCount == 0:
 */
  /* check_status_size: raises (returns 1) unless the status is acceptable
   * for a size query. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25909, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25911
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuInstanceCount == 0:             # <<<<<<<<<<<<<<
 *         return vgpuUtilInfo
 * 
 */
  /* No vGPU instances: return the (empty) info object without a second call. */
  __pyx_t_7 = (__pyx_v_ptr->vgpuInstanceCount == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25912
 * 
 *     if ptr.vgpuInstanceCount == 0:
 *         return vgpuUtilInfo             # <<<<<<<<<<<<<<
 * 
 *     cdef VgpuInstanceUtilizationInfo_v1 vgpuUtilArray = VgpuInstanceUtilizationInfo_v1(ptr.vgpuInstanceCount)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_vgpuUtilInfo);
    __pyx_r = ((PyObject *)__pyx_v_vgpuUtilInfo);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25911
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuInstanceCount == 0:             # <<<<<<<<<<<<<<
 *         return vgpuUtilInfo
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25914
 *         return vgpuUtilInfo
 * 
 *     cdef VgpuInstanceUtilizationInfo_v1 vgpuUtilArray = VgpuInstanceUtilizationInfo_v1(ptr.vgpuInstanceCount)             # <<<<<<<<<<<<<<
 *     vgpuUtilInfo.vgpu_util_array = vgpuUtilArray
 * 
 */
  /* Allocate the per-instance array wrapper sized by the reported count. */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->vgpuInstanceCount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25914, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_8};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25914, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuUtilArray = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25915
 * 
 *     cdef VgpuInstanceUtilizationInfo_v1 vgpuUtilArray = VgpuInstanceUtilizationInfo_v1(ptr.vgpuInstanceCount)
 *     vgpuUtilInfo.vgpu_util_array = vgpuUtilArray             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* Attaching the array via the Python attribute also points ptr->vgpuUtilArray
   * at its storage -- presumably done by the property setter; confirm in the
   * VgpuInstancesUtilizationInfo_v1 class definition. */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_vgpuUtilInfo), __pyx_mstate_global->__pyx_n_u_vgpu_util_array, ((PyObject *)__pyx_v_vgpuUtilArray)) < (0)) __PYX_ERR(0, 25915, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25917
 *     vgpuUtilInfo.vgpu_util_array = vgpuUtilArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)
 *     check_status(__status__)
 */
  /* Pass 2 (GIL released): fill the allocated array. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25918
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuInstancesUtilizationInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25918, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25917
 *     vgpuUtilInfo.vgpu_util_array = vgpuUtilArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25919
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(<Device>device, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return vgpuUtilInfo
 */
  /* Strict status check for the fill pass (size-query codes not tolerated). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25919, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25921
 *     check_status(__status__)
 * 
 *     return vgpuUtilInfo             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_vgpuUtilInfo);
  __pyx_r = ((PyObject *)__pyx_v_vgpuUtilInfo);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25891
 * 
 * 
 * cpdef object device_get_vgpu_instances_utilization_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """
 *     Retrieves recent utilization for vGPU instances running on a physical GPU (device).
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_instances_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuUtilInfo);
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuUtilArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* Fastcall wrapper for `device_get_vgpu_instances_utilization_info(device)`:
 * parses exactly one positional-or-keyword argument (`device`), converts it
 * to intptr_t, and forwards to the `__pyx_pf_...688...` shim. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_689device_get_vgpu_instances_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_688device_get_vgpu_instances_utilization_info, "device_get_vgpu_instances_utilization_info(intptr_t device)\n\nRetrieves recent utilization for vGPU instances running on a physical GPU (device).\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    VgpuInstancesUtilizationInfo_v1: The vGPU instances utilization information structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_689device_get_vgpu_instances_utilization_info = {"device_get_vgpu_instances_utilization_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_689device_get_vgpu_instances_utilization_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_688device_get_vgpu_instances_utilization_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_689device_get_vgpu_instances_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_instances_utilization_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` parenthesizes oddly but is
     * equivalent to `unlikely(__pyx_kwds_len < 0)` in effect for the branch. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25891, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword path: collect positional slots first, then parse keywords. */
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25891, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_instances_utilization_info", 0) < (0)) __PYX_ERR(0, 25891, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_instances_utilization_info", 1, 1, 1, i); __PYX_ERR(0, 25891, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25891, __pyx_L3_error)
    }
    /* Convert the Python int to intptr_t (via Py_ssize_t conversion). */
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25891, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_instances_utilization_info", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25891, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_instances_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_688device_get_vgpu_instances_utilization_info(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python-level implementation shim for
 * `device_get_vgpu_instances_utilization_info`: delegates to the cpdef C
 * entry point with __pyx_skip_dispatch=1 and maps a NULL result to a
 * traceback entry + NULL return. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_688device_get_vgpu_instances_utilization_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_instances_utilization_info", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_instances_utilization_info(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_instances_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25924
 * 
 * 
 * cpdef object device_get_vgpu_processes_utilization_info(intptr_t device, unsigned int last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """
 *     Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device).
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_691device_get_vgpu_processes_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C entry point for the cpdef function
 * `device_get_vgpu_processes_utilization_info(intptr_t device,
 *  unsigned int last_seen_time_stamp)`.
 *
 * Same two-pass NVML pattern as the instances variant above:
 *   1. Call nvmlDeviceGetVgpuProcessesUtilizationInfo with
 *      vgpuProcUtilArray == NULL (and lastSeenTimeStamp set) to obtain the
 *      required vgpuProcessCount (status checked by check_status_size).
 *   2. Allocate a VgpuProcessUtilizationInfo_v1 wrapper of that count,
 *      attach it, and call NVML again to fill it.
 * Returns the VgpuProcessesUtilizationInfo_v1 Python object, or NULL with
 * an exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_processes_utilization_info(intptr_t __pyx_v_device, unsigned int __pyx_v_last_seen_time_stamp, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *__pyx_v_vgpuProcUtilInfo = 0;
  nvmlVgpuProcessesUtilizationInfo_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v_vgpuProcUtilArray = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_processes_utilization_info", 0);

  /* "cuda/bindings/_nvml.pyx":25934
 *         VgpuProcessesUtilizationInfo: The vGPU processes utilization information structure.
 *     """
 *     cdef VgpuProcessesUtilizationInfo_v1 vgpuProcUtilInfo = VgpuProcessesUtilizationInfo_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>vgpuProcUtilInfo._get_ptr()
 * 
 */
  /* Construct the Python-side result wrapper (zero-arg vectorcall). */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25934, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuProcUtilInfo = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25935
 *     """
 *     cdef VgpuProcessesUtilizationInfo_v1 vgpuProcUtilInfo = VgpuProcessesUtilizationInfo_v1()
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>vgpuProcUtilInfo._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)__pyx_v_vgpuProcUtilInfo->__pyx_vtab)->_get_ptr(__pyx_v_vgpuProcUtilInfo); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25935, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlVgpuProcessesUtilizationInfo_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":25937
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>vgpuProcUtilInfo._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuProcessCount = 0
 */
  /* Pass 1 (GIL released): size query -- NULL array, count 0, timestamp set;
   * NVML fills in the required vgpuProcessCount. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25938
 * 
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.vgpuProcessCount = 0
 *         ptr.vgpuProcUtilArray = NULL
 */
        /* NVML versioned-struct convention: struct size | (version << 24). */
        __pyx_v_ptr->version = ((sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":25939
 *     with nogil:
 *         ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuProcessCount = 0             # <<<<<<<<<<<<<<
 *         ptr.vgpuProcUtilArray = NULL
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 */
        __pyx_v_ptr->vgpuProcessCount = 0;

        /* "cuda/bindings/_nvml.pyx":25940
 *         ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuProcessCount = 0
 *         ptr.vgpuProcUtilArray = NULL             # <<<<<<<<<<<<<<
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)
 */
        __pyx_v_ptr->vgpuProcUtilArray = NULL;

        /* "cuda/bindings/_nvml.pyx":25941
 *         ptr.vgpuProcessCount = 0
 *         ptr.vgpuProcUtilArray = NULL
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)
 *     check_status_size(__status__)
 */
        __pyx_v_ptr->lastSeenTimeStamp = __pyx_v_last_seen_time_stamp;

        /* "cuda/bindings/_nvml.pyx":25942
 *         ptr.vgpuProcUtilArray = NULL
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessesUtilizationInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25942, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25937
 *     cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr = <nvmlVgpuProcessesUtilizationInfo_v1_t *>vgpuProcUtilInfo._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24)
 *         ptr.vgpuProcessCount = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25943
 *         ptr.lastSeenTimeStamp = last_seen_time_stamp
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if ptr.vgpuProcessCount == 0:
 */
  /* check_status_size: raises (returns 1) unless the status is acceptable
   * for a size query. */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25943, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25945
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuProcessCount == 0:             # <<<<<<<<<<<<<<
 *         return vgpuProcUtilInfo
 * 
 */
  /* No vGPU processes: return the (empty) info object without a second call. */
  __pyx_t_7 = (__pyx_v_ptr->vgpuProcessCount == 0);
  if (__pyx_t_7) {

    /* "cuda/bindings/_nvml.pyx":25946
 * 
 *     if ptr.vgpuProcessCount == 0:
 *         return vgpuProcUtilInfo             # <<<<<<<<<<<<<<
 * 
 *     cdef VgpuProcessUtilizationInfo_v1 vgpuProcUtilArray = VgpuProcessUtilizationInfo_v1(ptr.vgpuProcessCount)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_vgpuProcUtilInfo);
    __pyx_r = ((PyObject *)__pyx_v_vgpuProcUtilInfo);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25945
 *     check_status_size(__status__)
 * 
 *     if ptr.vgpuProcessCount == 0:             # <<<<<<<<<<<<<<
 *         return vgpuProcUtilInfo
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":25948
 *         return vgpuProcUtilInfo
 * 
 *     cdef VgpuProcessUtilizationInfo_v1 vgpuProcUtilArray = VgpuProcessUtilizationInfo_v1(ptr.vgpuProcessCount)             # <<<<<<<<<<<<<<
 *     vgpuProcUtilInfo.vgpu_proc_util_array = vgpuProcUtilArray
 * 
 */
  /* Allocate the per-process array wrapper sized by the reported count. */
  __pyx_t_2 = NULL;
  __pyx_t_8 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->vgpuProcessCount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_8};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25948, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_vgpuProcUtilArray = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":25949
 * 
 *     cdef VgpuProcessUtilizationInfo_v1 vgpuProcUtilArray = VgpuProcessUtilizationInfo_v1(ptr.vgpuProcessCount)
 *     vgpuProcUtilInfo.vgpu_proc_util_array = vgpuProcUtilArray             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* Attaching the array via the Python attribute also points
   * ptr->vgpuProcUtilArray at its storage -- presumably done by the property
   * setter; confirm in the VgpuProcessesUtilizationInfo_v1 class definition. */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_vgpuProcUtilInfo), __pyx_mstate_global->__pyx_n_u_vgpu_proc_util_array, ((PyObject *)__pyx_v_vgpuProcUtilArray)) < (0)) __PYX_ERR(0, 25949, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25951
 *     vgpuProcUtilInfo.vgpu_proc_util_array = vgpuProcUtilArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)
 *     check_status(__status__)
 */
  /* Pass 2 (GIL released): fill the allocated array. */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25952
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessesUtilizationInfo(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25952, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":25951
 *     vgpuProcUtilInfo.vgpu_proc_util_array = vgpuProcUtilArray
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25953
 *     with nogil:
 *         __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(<Device>device, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return vgpuProcUtilInfo
 */
  /* Strict status check for the fill pass (size-query codes not tolerated). */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 25953, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25955
 *     check_status(__status__)
 * 
 *     return vgpuProcUtilInfo             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_vgpuProcUtilInfo);
  __pyx_r = ((PyObject *)__pyx_v_vgpuProcUtilInfo);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25924
 * 
 * 
 * cpdef object device_get_vgpu_processes_utilization_info(intptr_t device, unsigned int last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """
 *     Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device).
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_processes_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuProcUtilInfo);
  __Pyx_XDECREF((PyObject *)__pyx_v_vgpuProcUtilArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_691device_get_vgpu_processes_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python docstring and method-table entry for
 * `device_get_vgpu_processes_utilization_info`.
 * Fix: the docstring's Args section omitted the `last_seen_time_stamp`
 * parameter even though the signature line and the implementation
 * (ptr.lastSeenTimeStamp = last_seen_time_stamp) take it; documented per
 * the NVML lastSeenTimeStamp semantics (return samples newer than this). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_690device_get_vgpu_processes_utilization_info, "device_get_vgpu_processes_utilization_info(intptr_t device, unsigned int last_seen_time_stamp)\n\nRetrieves recent utilization for processes running on vGPU instances on a physical GPU (device).\n\nArgs:\n    device (Device): The identifier of the target device.\n    last_seen_time_stamp (unsigned int): Return only samples with timestamps greater than this value.\n\nReturns:\n    VgpuProcessesUtilizationInfo: The vGPU processes utilization information structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_691device_get_vgpu_processes_utilization_info = {"device_get_vgpu_processes_utilization_info", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_691device_get_vgpu_processes_utilization_info, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_690device_get_vgpu_processes_utilization_info};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_691device_get_vgpu_processes_utilization_info(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_last_seen_time_stamp;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_vgpu_processes_utilization_info (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_last_seen_time_stamp,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25924, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25924, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_vgpu_processes_utilization_info", 0) < (0)) __PYX_ERR(0, 25924, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_vgpu_processes_utilization_info", 1, 2, 2, i); __PYX_ERR(0, 25924, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25924, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25924, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25924, __pyx_L3_error)
    __pyx_v_last_seen_time_stamp = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_last_seen_time_stamp == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25924, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_vgpu_processes_utilization_info", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25924, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_processes_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_690device_get_vgpu_processes_utilization_info(__pyx_self, __pyx_v_device, __pyx_v_last_seen_time_stamp);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatch stub: receives the C-typed arguments already unpacked by
   the Python wrapper and forwards them to the shared C implementation
   (__pyx_f_..._device_get_vgpu_processes_utilization_info) with
   __pyx_skip_dispatch=1, i.e. no Python-level override lookup.
   Returns a new reference, or NULL with an exception set (a traceback
   frame for this call site is added on the error path). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_690device_get_vgpu_processes_utilization_info(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_last_seen_time_stamp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_vgpu_processes_utilization_info", 0);
  __Pyx_XDECREF(__pyx_r);  /* no-op here (__pyx_r is NULL); kept by codegen symmetry */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_processes_utilization_info(__pyx_v_device, __pyx_v_last_seen_time_stamp, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25924, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;  /* transfer ownership of the result to the caller */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_vgpu_processes_utilization_info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25958
 * 
 * 
 * cpdef object device_get_gpu_instances(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get GPU instances for given profile ID.
 * 
 */

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_693device_get_gpu_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of device_get_gpu_instances: two-pass NVML query.
   First call (buffer = NULL) obtains the handle count, second call fills a
   cython.view.array of that size with nvmlGpuInstance_t handles.
   Both NVML calls run with the GIL released. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instances(intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_gpuInstances = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instances", 0);

  /* "cuda/bindings/_nvml.pyx":25968
 *         array: An array of GPU instance handles.
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, NULL, count)
*/
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25969
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, NULL, count)
 *     check_status_size(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25970
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, NULL, count)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
*/
        /* sizing pass: NULL buffer, NVML writes the required count */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstances(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, NULL, __pyx_v_count); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25970, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25969
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, NULL, count)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25971
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, NULL, count)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if count[0] == 0:
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25971, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25973
 *     check_status_size(__status__)
 * 
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 * 
*/
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":25974
 * 
 *     if count[0] == 0:
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]             # <<<<<<<<<<<<<<
 * 
 *     cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
*/
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25974, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25974, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25974, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25974, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25974, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25974, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25974, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25974, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    /* BUGFIX: the empty [:0] slice was computed and immediately
       discarded, so the count==0 case fell through to
       view.array(shape=(0,), ...) below, which rejects a zero-sized
       dimension and raised instead of returning an empty result.
       Return the empty slice here, as the dead expression intended. */
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":25973
 *     check_status_size(__status__)
 * 
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 * 
*/
  }

  /* "cuda/bindings/_nvml.pyx":25976
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 * 
 *     cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, <nvmlGpuInstance_t *>gpuInstances.data, count)
*/
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25976, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 25976, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 25976, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25976, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25976, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 25976, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 25976, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 25976, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 25976, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25976, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_gpuInstances = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":25977
 * 
 *     cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, <nvmlGpuInstance_t *>gpuInstances.data, count)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25978
 *     cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, <nvmlGpuInstance_t *>gpuInstances.data, count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        /* fill pass: same call with the allocated handle buffer */
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstances(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_profile_id, ((nvmlGpuInstance_t *)__pyx_v_gpuInstances->data), __pyx_v_count); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25978, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25977
 * 
 *     cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, <nvmlGpuInstance_t *>gpuInstances.data, count)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25979
 *     with nogil:
 *         __status__ = nvmlDeviceGetGpuInstances(<Device>device, profile_id, <nvmlGpuInstance_t *>gpuInstances.data, count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return gpuInstances
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25979, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25981
 *     check_status(__status__)
 * 
 *     return gpuInstances             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_gpuInstances);
  __pyx_r = ((PyObject *)__pyx_v_gpuInstances);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25958
 * 
 * 
 * cpdef object device_get_gpu_instances(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get GPU instances for given profile ID.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_gpuInstances);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper: METH_FASTCALL|METH_KEYWORDS entry point that unpacks
   (device, profile_id), converts them to C types and forwards to the
   pf_ dispatch stub. Returns a new reference or NULL with an exception set. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_693device_get_gpu_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_692device_get_gpu_instances, "device_get_gpu_instances(intptr_t device, unsigned int profile_id)\n\nGet GPU instances for given profile ID.\n\nArgs:\n    device (Device): The identifier of the target device.\n    profile_id (unsigned int): The GPU instance profile ID. See device_get_gpu_instance_profile_info().\n\nReturns:\n    array: An array of GPU instance handles.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_693device_get_gpu_instances = {"device_get_gpu_instances", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_693device_get_gpu_instances, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_692device_get_gpu_instances};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_693device_get_gpu_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_gpu_instances (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`. With GCC/Clang,
       unlikely(x) expands to __builtin_expect(!!(x), 0) whose value is
       0 or 1, so that comparison could never be true and the error
       check was dead code. The comparison belongs inside the hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25958, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25958, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25958, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_gpu_instances", 0) < (0)) __PYX_ERR(0, 25958, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_gpu_instances", 1, 2, 2, i); __PYX_ERR(0, 25958, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25958, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25958, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25958, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25958, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_gpu_instances", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25958, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_692device_get_gpu_instances(__pyx_self, __pyx_v_device, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cpdef dispatch stub: receives the C-typed arguments already unpacked by
   the Python wrapper and forwards them to the shared C implementation
   (__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instances) with
   __pyx_skip_dispatch=1, i.e. no Python-level override lookup.
   Returns a new reference, or NULL with an exception set (a traceback
   frame for this call site is added on the error path). */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_692device_get_gpu_instances(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_gpu_instances", 0);
  __Pyx_XDECREF(__pyx_r);  /* no-op here (__pyx_r is NULL); kept by codegen symmetry */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instances(__pyx_v_device, __pyx_v_profile_id, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;  /* transfer ownership of the result to the caller */
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_gpu_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":25984
 * 
 * 
 * cpdef object gpu_instance_get_compute_instances(intptr_t gpu_instance, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get Compute instances for given profile ID.
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_695gpu_instance_get_compute_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instances(intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id, CYTHON_UNUSED int __pyx_skip_dispatch) {
  unsigned int __pyx_v_count[1];
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_array_obj *__pyx_v_computeInstances = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  unsigned int __pyx_t_1[1];
  nvmlReturn_t __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  size_t __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instances", 0);

  /* "cuda/bindings/_nvml.pyx":25994
 *         array: An array of Compute instance handles.
 *     """
 *     cdef unsigned int[1] count = [0]             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, NULL, count)
*/
  __pyx_t_1[0] = 0;
  memcpy(&(__pyx_v_count[0]), __pyx_t_1, sizeof(__pyx_v_count[0]) * (1));

  /* "cuda/bindings/_nvml.pyx":25995
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, NULL, count)
 *     check_status_size(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":25996
 *     cdef unsigned int[1] count = [0]
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, NULL, count)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstances(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, NULL, __pyx_v_count); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25996, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":25995
 *     """
 *     cdef unsigned int[1] count = [0]
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, NULL, count)
 *     check_status_size(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":25997
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, NULL, count)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     if count[0] == 0:
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 25997, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":25999
 *     check_status_size(__status__)
 * 
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 * 
*/
  __pyx_t_4 = ((__pyx_v_count[0]) == 0);
  if (__pyx_t_4) {

    /* "cuda/bindings/_nvml.pyx":26000
 * 
 *     if count[0] == 0:
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]             # <<<<<<<<<<<<<<
 * 
 *     cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
*/
    __pyx_t_6 = NULL;
    __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 26000, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_8 = 1;
    {
      PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_6, NULL};
      __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26000, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_9);
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_mstate_global->__pyx_tuple[10], __pyx_t_9, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 26000, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_9, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 26000, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_9, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 26000, __pyx_L1_error)
      if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_9, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 26000, __pyx_L1_error)
      __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9);
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26000, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_5);
    }
    __pyx_t_9 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_t_5), 0, 0, NULL, NULL, &__pyx_mstate_global->__pyx_slice[1], 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26000, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;

    /* "cuda/bindings/_nvml.pyx":25999
 *     check_status_size(__status__)
 * 
 *     if count[0] == 0:             # <<<<<<<<<<<<<<
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 * 
*/
  }

  /* "cuda/bindings/_nvml.pyx":26002
 *         view.array(shape=(1,), itemsize=sizeof(intptr_t), format="i", mode="c")[:0]
 * 
 *     cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")             # <<<<<<<<<<<<<<
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, <nvmlComputeInstance_t *>computeInstances.data, count)
*/
  __pyx_t_5 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int((__pyx_v_count[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 26002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 26002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 26002, __pyx_L1_error);
  __pyx_t_7 = 0;
  __pyx_t_7 = __Pyx_PyLong_FromSize_t((sizeof(intptr_t))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 26002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 4 : 0)] = {__pyx_t_5, NULL};
    __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26002, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_shape, __pyx_t_6, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 26002, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_itemsize, __pyx_t_7, __pyx_t_10, __pyx_callargs+1, 1) < (0)) __PYX_ERR(0, 26002, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_format, __pyx_mstate_global->__pyx_n_u_i, __pyx_t_10, __pyx_callargs+1, 2) < (0)) __PYX_ERR(0, 26002, __pyx_L1_error)
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_mode, __pyx_mstate_global->__pyx_n_u_c, __pyx_t_10, __pyx_callargs+1, 3) < (0)) __PYX_ERR(0, 26002, __pyx_L1_error)
    __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder((PyObject*)__pyx_mstate_global->__pyx_array_type, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26002, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_9);
  }
  __pyx_v_computeInstances = ((struct __pyx_array_obj *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "cuda/bindings/_nvml.pyx":26003
 * 
 *     cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, <nvmlComputeInstance_t *>computeInstances.data, count)
 *     check_status(__status__)
*/
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":26004
 *     cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, <nvmlComputeInstance_t *>computeInstances.data, count)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
*/
        __pyx_t_2 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstances(((__pyx_t_4cuda_8bindings_5_nvml_GpuInstance)__pyx_v_gpu_instance), __pyx_v_profile_id, ((nvmlComputeInstance_t *)__pyx_v_computeInstances->data), __pyx_v_count); if (unlikely(__pyx_t_2 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26004, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_2;
      }

      /* "cuda/bindings/_nvml.pyx":26003
 * 
 *     cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="i", mode="c")
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, <nvmlComputeInstance_t *>computeInstances.data, count)
 *     check_status(__status__)
*/
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":26005
 *     with nogil:
 *         __status__ = nvmlGpuInstanceGetComputeInstances(<GpuInstance>gpu_instance, profile_id, <nvmlComputeInstance_t *>computeInstances.data, count)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return computeInstances
*/
  __pyx_t_3 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_3 == ((int)1))) __PYX_ERR(0, 26005, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":26007
 *     check_status(__status__)
 * 
 *     return computeInstances             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_computeInstances);
  __pyx_r = ((PyObject *)__pyx_v_computeInstances);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":25984
 * 
 * 
 * cpdef object gpu_instance_get_compute_instances(intptr_t gpu_instance, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get Compute instances for given profile ID.
 * 
*/

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_computeInstances);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for gpu_instance_get_compute_instances:
 * unpacks (gpu_instance: intptr_t, profile_id: unsigned int) from positional
 * and/or keyword arguments, then delegates to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_695gpu_instance_get_compute_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_694gpu_instance_get_compute_instances, "gpu_instance_get_compute_instances(intptr_t gpu_instance, unsigned int profile_id)\n\nGet Compute instances for given profile ID.\n\nArgs:\n    gpu_instance (GpuInstance): The identifier of the target GPU Instance.\n    profile_id (unsigned int): The Compute instance profile ID.\n\nReturns:\n    array: An array of Compute instance handles.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_695gpu_instance_get_compute_instances = {"gpu_instance_get_compute_instances", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_695gpu_instance_get_compute_instances, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_694gpu_instance_get_compute_instances};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_695gpu_instance_get_compute_instances(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_gpu_instance;
  unsigned int __pyx_v_profile_id;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instances (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_gpu_instance,&__pyx_mstate_global->__pyx_n_u_profile_id,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: was `unlikely(__pyx_kwds_len) < 0`, which is always false because
     * unlikely() collapses its argument to 0/1 via __builtin_expect(!!(x), 0);
     * a negative (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 25984, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25984, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "gpu_instance_get_compute_instances", 0) < (0)) __PYX_ERR(0, 25984, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instances", 1, 2, 2, i); __PYX_ERR(0, 25984, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25984, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25984, __pyx_L3_error)
    }
    /* intptr_t is converted via PyLong_AsSsize_t (same width on supported platforms) */
    __pyx_v_gpu_instance = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_gpu_instance == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 25984, __pyx_L3_error)
    __pyx_v_profile_id = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_profile_id == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25984, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gpu_instance_get_compute_instances", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25984, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_694gpu_instance_get_compute_instances(__pyx_self, __pyx_v_gpu_instance, __pyx_v_profile_id);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatcher for the cpdef function `gpu_instance_get_compute_instances`:
 * forwards the already-converted C arguments to the C-level implementation
 * (__pyx_f_...) with __pyx_skip_dispatch=1, and propagates the resulting
 * Python object or exception back to the Python wrapper. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_694gpu_instance_get_compute_instances(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_gpu_instance, unsigned int __pyx_v_profile_id) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gpu_instance_get_compute_instances", 0);
  __Pyx_XDECREF(__pyx_r);
  /* call the C implementation; NULL return signals a pending exception */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instances(__pyx_v_gpu_instance, __pyx_v_profile_id, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.gpu_instance_get_compute_instances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/_nvml.pyx":26010
 * 
 * 
 * cpdef object device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the counts of SRAM unique uncorrected ECC errors
 * 
*/

static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_697device_get_sram_unique_uncorrected_ecc_error_counts(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* C implementation of the cpdef function device_get_sram_unique_uncorrected_ecc_error_counts.
 * Flow (two-call NVML sizing pattern, visible below):
 *   1. allocate an EccSramUniqueUncorrectedErrorCounts_v1 wrapper and take its raw pointer,
 *   2. first nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts call with entries=NULL /
 *      entryCount=0 (checked with check_status_size) to learn the entry count,
 *   3. allocate an EccSramUniqueUncorrectedErrorEntry_v1 of that count and attach it as
 *      errorCounts.entries (NOTE(review): the attribute setter presumably rewires
 *      ptr->entries to the new buffer before the refill call — confirm in the .pyx),
 *   4. second call to fill the entries (checked with check_status), then return errorCounts. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml_device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t __pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *__pyx_v_errorCounts = 0;
  nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *__pyx_v_ptr;
  nvmlReturn_t __pyx_v___status__;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v_entries = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  intptr_t __pyx_t_4;
  nvmlReturn_t __pyx_t_5;
  int __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_sram_unique_uncorrected_ecc_error_counts", 0);

  /* "cuda/bindings/_nvml.pyx":26020
 *     """
 * 
 *     cdef EccSramUniqueUncorrectedErrorCounts_v1 errorCounts = EccSramUniqueUncorrectedErrorCounts_v1()             # <<<<<<<<<<<<<<
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>errorCounts._get_ptr()
 * 
 */
  /* instantiate the result wrapper object via a zero-argument vectorcall */
  __pyx_t_2 = NULL;
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26020, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_errorCounts = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":26021
 * 
 *     cdef EccSramUniqueUncorrectedErrorCounts_v1 errorCounts = EccSramUniqueUncorrectedErrorCounts_v1()
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>errorCounts._get_ptr()             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
  /* _get_ptr() returns the wrapper's underlying struct address as intptr_t */
  __pyx_t_4 = ((struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)__pyx_v_errorCounts->__pyx_vtab)->_get_ptr(__pyx_v_errorCounts); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 26021, __pyx_L1_error)
  __pyx_v_ptr = ((nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *)__pyx_t_4);

  /* "cuda/bindings/_nvml.pyx":26023
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>errorCounts._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24)
 *         ptr.entryCount = 0
 */
  /* release the GIL around the blocking NVML sizing call */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":26024
 * 
 *     with nogil:
 *         ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24)             # <<<<<<<<<<<<<<
 *         ptr.entryCount = 0
 *         ptr.entries = NULL
 */
        /* NVML versioned-struct convention: size in low bits, version 1 in bits 24+ */
        __pyx_v_ptr->version = ((sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t)) | 0x1000000);

        /* "cuda/bindings/_nvml.pyx":26025
 *     with nogil:
 *         ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24)
 *         ptr.entryCount = 0             # <<<<<<<<<<<<<<
 *         ptr.entries = NULL
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)
 */
        __pyx_v_ptr->entryCount = 0;

        /* "cuda/bindings/_nvml.pyx":26026
 *         ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24)
 *         ptr.entryCount = 0
 *         ptr.entries = NULL             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)
 *     check_status_size(__status__)
 */
        /* NULL entries: this first call only queries the required entryCount */
        __pyx_v_ptr->entries = NULL;

        /* "cuda/bindings/_nvml.pyx":26027
 *         ptr.entryCount = 0
 *         ptr.entries = NULL
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status_size(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26027, __pyx_L4_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":26023
 *     cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr = <nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *>errorCounts._get_ptr()
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24)
 *         ptr.entryCount = 0
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":26028
 *         ptr.entries = NULL
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)
 *     check_status_size(__status__)             # <<<<<<<<<<<<<<
 * 
 *     cdef EccSramUniqueUncorrectedErrorEntry_v1 entries = EccSramUniqueUncorrectedErrorEntry_v1(ptr.entryCount)
 */
  /* check_status_size: status check variant used after a sizing query */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status_size(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 26028, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":26030
 *     check_status_size(__status__)
 * 
 *     cdef EccSramUniqueUncorrectedErrorEntry_v1 entries = EccSramUniqueUncorrectedErrorEntry_v1(ptr.entryCount)             # <<<<<<<<<<<<<<
 *     errorCounts.entries = entries
 * 
 */
  /* allocate the entries buffer wrapper sized by the reported entryCount */
  __pyx_t_2 = NULL;
  __pyx_t_7 = __Pyx_PyLong_From_unsigned_int(__pyx_v_ptr->entryCount); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 26030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_3 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_7};
    __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_callargs+__pyx_t_3, (2-__pyx_t_3) | (__pyx_t_3*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26030, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_entries = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/_nvml.pyx":26031
 * 
 *     cdef EccSramUniqueUncorrectedErrorEntry_v1 entries = EccSramUniqueUncorrectedErrorEntry_v1(ptr.entryCount)
 *     errorCounts.entries = entries             # <<<<<<<<<<<<<<
 * 
 *     if ptr.entryCount == 0:
 */
  /* attribute assignment goes through the Python-level setter on errorCounts */
  if (__Pyx_PyObject_SetAttrStr(((PyObject *)__pyx_v_errorCounts), __pyx_mstate_global->__pyx_n_u_entries, ((PyObject *)__pyx_v_entries)) < (0)) __PYX_ERR(0, 26031, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":26033
 *     errorCounts.entries = entries
 * 
 *     if ptr.entryCount == 0:             # <<<<<<<<<<<<<<
 *         return errorCounts
 * 
 */
  /* no entries reported: skip the second (fill) call entirely */
  __pyx_t_8 = (__pyx_v_ptr->entryCount == 0);
  if (__pyx_t_8) {

    /* "cuda/bindings/_nvml.pyx":26034
 * 
 *     if ptr.entryCount == 0:
 *         return errorCounts             # <<<<<<<<<<<<<<
 * 
 *     with nogil:
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF((PyObject *)__pyx_v_errorCounts);
    __pyx_r = ((PyObject *)__pyx_v_errorCounts);
    goto __pyx_L0;

    /* "cuda/bindings/_nvml.pyx":26033
 *     errorCounts.entries = entries
 * 
 *     if ptr.entryCount == 0:             # <<<<<<<<<<<<<<
 *         return errorCounts
 * 
 */
  }

  /* "cuda/bindings/_nvml.pyx":26036
 *         return errorCounts
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)
 *     check_status(__status__)
 */
  /* second NVML call, again without the GIL, to populate the entries buffer */
  {
      PyThreadState * _save;
      _save = PyEval_SaveThread();
      __Pyx_FastGIL_Remember();
      /*try:*/ {

        /* "cuda/bindings/_nvml.pyx":26037
 * 
 *     with nogil:
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)             # <<<<<<<<<<<<<<
 *     check_status(__status__)
 * 
 */
        __pyx_t_5 = __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(((__pyx_t_4cuda_8bindings_5_nvml_Device)__pyx_v_device), __pyx_v_ptr); if (unlikely(__pyx_t_5 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26037, __pyx_L8_error)
        __pyx_v___status__ = __pyx_t_5;
      }

      /* "cuda/bindings/_nvml.pyx":26036
 *         return errorCounts
 * 
 *     with nogil:             # <<<<<<<<<<<<<<
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)
 *     check_status(__status__)
 */
      /*finally:*/ {
        /*normal exit:*/{
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L9;
        }
        __pyx_L8_error: {
          __Pyx_FastGIL_Forget();
          PyEval_RestoreThread(_save);
          goto __pyx_L1_error;
        }
        __pyx_L9:;
      }
  }

  /* "cuda/bindings/_nvml.pyx":26038
 *     with nogil:
 *         __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(<Device>device, ptr)
 *     check_status(__status__)             # <<<<<<<<<<<<<<
 * 
 *     return errorCounts
 */
  __pyx_t_6 = __pyx_f_4cuda_8bindings_5_nvml_check_status(__pyx_v___status__, 0); if (unlikely(__pyx_t_6 == ((int)1))) __PYX_ERR(0, 26038, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":26040
 *     check_status(__status__)
 * 
 *     return errorCounts             # <<<<<<<<<<<<<<
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_errorCounts);
  __pyx_r = ((PyObject *)__pyx_v_errorCounts);
  goto __pyx_L0;

  /* "cuda/bindings/_nvml.pyx":26010
 * 
 * 
 * cpdef object device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the counts of SRAM unique uncorrected ECC errors
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_sram_unique_uncorrected_ecc_error_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_errorCounts);
  __Pyx_XDECREF((PyObject *)__pyx_v_entries);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_697device_get_sram_unique_uncorrected_ecc_error_counts(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_696device_get_sram_unique_uncorrected_ecc_error_counts, "device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t device)\n\nRetrieves the counts of SRAM unique uncorrected ECC errors\n\nArgs:\n    device (Device): The identifier of the target device.\n\nReturns:\n    EccSramUniqueUncorrectedErrorCounts_v1: The ECC SRAM unique uncorrected error counts structure.");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_697device_get_sram_unique_uncorrected_ecc_error_counts = {"device_get_sram_unique_uncorrected_ecc_error_counts", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_697device_get_sram_unique_uncorrected_ecc_error_counts, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_696device_get_sram_unique_uncorrected_ecc_error_counts};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_697device_get_sram_unique_uncorrected_ecc_error_counts(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  intptr_t __pyx_v_device;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("device_get_sram_unique_uncorrected_ecc_error_counts (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26010, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26010, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "device_get_sram_unique_uncorrected_ecc_error_counts", 0) < (0)) __PYX_ERR(0, 26010, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("device_get_sram_unique_uncorrected_ecc_error_counts", 1, 1, 1, i); __PYX_ERR(0, 26010, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26010, __pyx_L3_error)
    }
    __pyx_v_device = PyLong_AsSsize_t(values[0]); if (unlikely((__pyx_v_device == ((intptr_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 26010, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("device_get_sram_unique_uncorrected_ecc_error_counts", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 26010, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_sram_unique_uncorrected_ecc_error_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_696device_get_sram_unique_uncorrected_ecc_error_counts(__pyx_self, __pyx_v_device);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Dispatcher for the cpdef function `device_get_sram_unique_uncorrected_ecc_error_counts`:
 * forwards the already-converted device handle to the C-level implementation
 * (__pyx_f_...) with __pyx_skip_dispatch=1, and propagates the resulting
 * Python object or exception back to the Python wrapper. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_696device_get_sram_unique_uncorrected_ecc_error_counts(CYTHON_UNUSED PyObject *__pyx_self, intptr_t __pyx_v_device) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("device_get_sram_unique_uncorrected_ecc_error_counts", 0);
  __Pyx_XDECREF(__pyx_r);
  /* call the C implementation; NULL return signals a pending exception */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_5_nvml_device_get_sram_unique_uncorrected_ecc_error_counts(__pyx_v_device, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26010, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.device_get_sram_unique_uncorrected_ecc_error_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for the generated pickle helper
 * __pyx_unpickle_ProcessInfo: unpacks (__pyx_type, __pyx_checksum: long,
 * __pyx_state: tuple), type-checks __pyx_state as a tuple, and delegates
 * to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_699__pyx_unpickle_ProcessInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_698__pyx_unpickle_ProcessInfo, "__pyx_unpickle_ProcessInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_699__pyx_unpickle_ProcessInfo = {"__pyx_unpickle_ProcessInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_699__pyx_unpickle_ProcessInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_698__pyx_unpickle_ProcessInfo};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_699__pyx_unpickle_ProcessInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessInfo (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* fix: was `unlikely(__pyx_kwds_len) < 0`, which is always false because
     * unlikely() collapses its argument to 0/1 via __builtin_expect(!!(x), 0);
     * a negative (error) return from __Pyx_NumKwargs_FASTCALL was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_ProcessInfo", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessInfo", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessInfo", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* __pyx_state must be a tuple (or None, per the `1` allow-none flag) */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_698__pyx_unpickle_ProcessInfo(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Unpickle implementation for the `ProcessInfo` extension type (generated
 * by Cython's auto-pickling support).
 *
 * Steps:
 *   1. Verify `__pyx_checksum` against the known layout checksums for the
 *      type's `_data` field; raise on mismatch.
 *   2. Create a fresh, uninitialized instance via
 *      `ProcessInfo.__new__(__pyx_type)` (bypasses `__init__`).
 *   3. If `__pyx_state` is not None, restore the instance state via the
 *      companion `__set_state` helper.
 *
 * Returns: new reference to the unpickled object, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_698__pyx_unpickle_ProcessInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessInfo", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_ProcessInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = ProcessInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
*/
  /* Reject pickles produced for an incompatible class layout (returns -1
   * with an exception set if the checksum matches none of the candidates). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessInfo.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)
*/
  /* Fast vectorcall of the (possibly subclass-overridden) `__new__` method,
   * looked up on the ProcessInfo type object and called with `__pyx_type`. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = ProcessInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessInfo__set_state(ProcessInfo __pyx_result, __pyx_state: tuple):
*/
    /* Redundant with the `is not None` branch above, but kept by the code
     * generator as the standard not-None guard for the C-level call. */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessInfo__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_ProcessInfo__set_state(ProcessInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
*/
  /* Success: hand back a new reference to the reconstructed instance. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessInfo__set_state(ProcessInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level state-restore helper for `ProcessInfo` unpickling.
 *
 * Assigns `__pyx_state[0]` to `__pyx_result._data`, then (via
 * __Pyx_UpdateUnpickledDict with start index 1) merges any remaining state
 * entries into the instance `__dict__`.  Caller guarantees `__pyx_state`
 * is a non-None tuple.  Returns Py_None on success, 0/NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessInfo__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessInfo__set_state(ProcessInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/
  /* Fetch state[0] (new reference), then swap it into the `_data` slot,
   * dropping the reference to the previous value. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_ProcessInfo__set_state(ProcessInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
*/
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_ProcessInfo__set_state(<ProcessInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessInfo__set_state(ProcessInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessInfo__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessDetail_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* CPython wrapper for `__pyx_unpickle_ProcessDetail_v1(__pyx_type,
 * __pyx_checksum, __pyx_state)`.
 *
 * Parses positional/keyword arguments (fastcall or tuple+dict depending on
 * CYTHON_METH_FASTCALL), converts `__pyx_checksum` to a C long, type-checks
 * `__pyx_state` as a tuple, and forwards to the `_700` implementation.
 * Every entry of `values[]` holds an owned reference and is released on all
 * exit paths.  Returns NULL with an exception set on failure.
 *
 * Fix: the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; since unlikely(x) expands to
 * `__builtin_expect(!!(x), 0)` (value 0 or 1), that comparison was always
 * false and a negative count from __Pyx_NumKwargs_FASTCALL went undetected.
 * The comparison now sits inside the macro. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_701__pyx_unpickle_ProcessDetail_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_700__pyx_unpickle_ProcessDetail_v1, "__pyx_unpickle_ProcessDetail_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_701__pyx_unpickle_ProcessDetail_v1 = {"__pyx_unpickle_ProcessDetail_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_701__pyx_unpickle_ProcessDetail_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_700__pyx_unpickle_ProcessDetail_v1};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_701__pyx_unpickle_ProcessDetail_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessDetail_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside unlikely(): see the header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call: collect the positional arguments first (fallthrough
       * switch), then let __Pyx_ParseKeywords fill in the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_ProcessDetail_v1", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessDetail_v1", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional call with exactly 3 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessDetail_v1", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessDetail_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_700__pyx_unpickle_ProcessDetail_v1(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Unpickle implementation for the `ProcessDetail_v1` extension type
 * (generated by Cython's auto-pickling support).
 *
 * Validates `__pyx_checksum` against the known layout checksums for the
 * type's `_data` field, allocates a fresh instance via
 * `ProcessDetail_v1.__new__(__pyx_type)` (bypasses `__init__`), and, when
 * `__pyx_state` is not None, restores state through the companion
 * `__set_state` helper.
 *
 * Returns: new reference to the unpickled object, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_700__pyx_unpickle_ProcessDetail_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessDetail_v1", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_ProcessDetail_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = ProcessDetail_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
*/
  /* Reject pickles produced for an incompatible class layout. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessDetail_v1.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)
*/
  /* Fast vectorcall of `__new__` looked up on the ProcessDetail_v1 type. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessDetail_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = ProcessDetail_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessDetail_v1__set_state(ProcessDetail_v1 __pyx_result, __pyx_state: tuple):
*/
    /* Redundant with the `is not None` branch above; standard generated
     * not-None guard for the C-level call. */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessDetail_v1__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessDetail_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_ProcessDetail_v1__set_state(ProcessDetail_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
*/
  /* Success: hand back a new reference to the reconstructed instance. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessDetail_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessDetail_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessDetail_v1__set_state(ProcessDetail_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level state-restore helper for `ProcessDetail_v1` unpickling.
 *
 * Assigns `__pyx_state[0]` to `__pyx_result._data`, then (via
 * __Pyx_UpdateUnpickledDict with start index 1) merges any remaining state
 * entries into the instance `__dict__`.  Caller guarantees `__pyx_state`
 * is a non-None tuple.  Returns Py_None on success, 0/NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessDetail_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessDetail_v1__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessDetail_v1__set_state(ProcessDetail_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/
  /* Fetch state[0] (new reference), then swap it into the `_data` slot,
   * dropping the reference to the previous value. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_ProcessDetail_v1__set_state(ProcessDetail_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
*/
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_ProcessDetail_v1__set_state(<ProcessDetail_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessDetail_v1__set_state(ProcessDetail_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessDetail_v1__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_BridgeChipInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* CPython wrapper for `__pyx_unpickle_BridgeChipInfo(__pyx_type,
 * __pyx_checksum, __pyx_state)`.
 *
 * Parses positional/keyword arguments (fastcall or tuple+dict depending on
 * CYTHON_METH_FASTCALL), converts `__pyx_checksum` to a C long, type-checks
 * `__pyx_state` as a tuple, and forwards to the `_702` implementation.
 * Every entry of `values[]` holds an owned reference and is released on all
 * exit paths.  Returns NULL with an exception set on failure.
 *
 * Fix: the keyword-count error check previously read
 * `if (unlikely(__pyx_kwds_len) < 0)`; since unlikely(x) expands to
 * `__builtin_expect(!!(x), 0)` (value 0 or 1), that comparison was always
 * false and a negative count from __Pyx_NumKwargs_FASTCALL went undetected.
 * The comparison now sits inside the macro. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_703__pyx_unpickle_BridgeChipInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_702__pyx_unpickle_BridgeChipInfo, "__pyx_unpickle_BridgeChipInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_703__pyx_unpickle_BridgeChipInfo = {"__pyx_unpickle_BridgeChipInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_703__pyx_unpickle_BridgeChipInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_702__pyx_unpickle_BridgeChipInfo};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_703__pyx_unpickle_BridgeChipInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_BridgeChipInfo (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* Comparison moved inside unlikely(): see the header comment. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call: collect the positional arguments first (fallthrough
       * switch), then let __Pyx_ParseKeywords fill in the rest. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_BridgeChipInfo", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BridgeChipInfo", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Purely positional call with exactly 3 arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BridgeChipInfo", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_BridgeChipInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_702__pyx_unpickle_BridgeChipInfo(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Unpickle implementation for the `BridgeChipInfo` extension type
 * (generated by Cython's auto-pickling support).
 *
 * Validates `__pyx_checksum` against the known layout checksums for the
 * type's `_data` field, allocates a fresh instance via
 * `BridgeChipInfo.__new__(__pyx_type)` (bypasses `__init__`), and, when
 * `__pyx_state` is not None, restores state through the companion
 * `__set_state` helper.
 *
 * Returns: new reference to the unpickled object, or NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_702__pyx_unpickle_BridgeChipInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_BridgeChipInfo", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_BridgeChipInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = BridgeChipInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
*/
  /* Reject pickles produced for an incompatible class layout. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = BridgeChipInfo.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)
*/
  /* Fast vectorcall of `__new__` looked up on the BridgeChipInfo type. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = BridgeChipInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = BridgeChipInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_BridgeChipInfo__set_state(BridgeChipInfo __pyx_result, __pyx_state: tuple):
*/
    /* Redundant with the `is not None` branch above; standard generated
     * not-None guard for the C-level call. */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_BridgeChipInfo__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = BridgeChipInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_BridgeChipInfo__set_state(BridgeChipInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
*/
  /* Success: hand back a new reference to the reconstructed instance. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_BridgeChipInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

  /* function exit code */
  /* Error path: release temporaries, record the traceback, return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_BridgeChipInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_BridgeChipInfo__set_state(BridgeChipInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* Cython-generated unpickle helper for BridgeChipInfo (code left
 * byte-identical; comments added for review).
 *
 * Restores an instance during unpickling: sets the cdef '_data'
 * attribute from __pyx_state[0], then merges any remaining state
 * entries (index >= 1) into the instance __dict__ via
 * __Pyx_UpdateUnpickledDict.
 *
 * @param __pyx_v___pyx_result  freshly created BridgeChipInfo instance
 * @param __pyx_v___pyx_state   state tuple produced by __reduce__
 * @return Py_None (new reference) on success; NULL with an exception
 *         set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_BridgeChipInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_BridgeChipInfo__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_BridgeChipInfo__set_state(BridgeChipInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new reference). */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Swap the new value into the '_data' slot: transfer ownership of
   * the new reference, then drop the reference to the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_BridgeChipInfo__set_state(BridgeChipInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge __pyx_state[1:] into the instance __dict__ (returns -1 on error). */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_BridgeChipInfo__set_state(<BridgeChipInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_BridgeChipInfo__set_state(BridgeChipInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_BridgeChipInfo__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ClkMonFaultInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper for __pyx_unpickle_ClkMonFaultInfo.
 *
 * Unpacks (__pyx_type, __pyx_checksum, __pyx_state) from either the
 * METH_FASTCALL argument vector or a classic args tuple (plus optional
 * keywords), converts __pyx_checksum to C long, type-checks __pyx_state
 * as a tuple (None allowed), and delegates to the implementation
 * function. Returns the unpickled instance, or NULL with an exception
 * set.
 *
 * FIX vs. generated code: the keyword-count error check read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With unlikely(x) defined as __builtin_expect(!!(x), 0) this collapses
 * to (0-or-1) < 0, which is never true, so a -1 error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored on GCC/Clang builds.
 * The comparison now sits inside unlikely(), matching every other
 * error check in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_705__pyx_unpickle_ClkMonFaultInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_704__pyx_unpickle_ClkMonFaultInfo, "__pyx_unpickle_ClkMonFaultInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_705__pyx_unpickle_ClkMonFaultInfo = {"__pyx_unpickle_ClkMonFaultInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_705__pyx_unpickle_ClkMonFaultInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_704__pyx_unpickle_ClkMonFaultInfo};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_705__pyx_unpickle_ClkMonFaultInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ClkMonFaultInfo (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so the -1 error return is detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect up to 3 positional args, then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_ClkMonFaultInfo", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three arguments are required; reject any slot left unfilled. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ClkMonFaultInfo", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly 3 positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ClkMonFaultInfo", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ClkMonFaultInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* __pyx_state must be a tuple (or None); then delegate to the impl. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_704__pyx_unpickle_ClkMonFaultInfo(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_ClkMonFaultInfo (code left
 * byte-identical; comments added for review).
 *
 * Validates the pickle checksum against the known attribute-layout
 * hashes, allocates a new instance via ClkMonFaultInfo.__new__(__pyx_type),
 * applies __pyx_state through the __set_state helper when it is not
 * None, and returns the reconstructed object.
 *
 * @return new reference to the instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_704__pyx_unpickle_ClkMonFaultInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ClkMonFaultInfo", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_ClkMonFaultInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = ClkMonFaultInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the three accepted layout hashes. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ClkMonFaultInfo.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)
 */
  /* Call ClkMonFaultInfo.__new__(__pyx_type) via the vectorcall method helper. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ClkMonFaultInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = ClkMonFaultInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_ClkMonFaultInfo__set_state(ClkMonFaultInfo __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Apply the pickled state to the new instance. */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ClkMonFaultInfo__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ClkMonFaultInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_ClkMonFaultInfo__set_state(ClkMonFaultInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ClkMonFaultInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ClkMonFaultInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ClkMonFaultInfo__set_state(ClkMonFaultInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* Cython-generated unpickle helper for ClkMonFaultInfo (code left
 * byte-identical; comments added for review).
 *
 * Restores an instance during unpickling: sets the cdef '_data'
 * attribute from __pyx_state[0], then merges any remaining state
 * entries (index >= 1) into the instance __dict__ via
 * __Pyx_UpdateUnpickledDict.
 *
 * @param __pyx_v___pyx_result  freshly created ClkMonFaultInfo instance
 * @param __pyx_v___pyx_state   state tuple produced by __reduce__
 * @return Py_None (new reference) on success; NULL with an exception
 *         set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ClkMonFaultInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ClkMonFaultInfo__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_ClkMonFaultInfo__set_state(ClkMonFaultInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new reference). */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Swap the new value into the '_data' slot: transfer ownership of
   * the new reference, then drop the reference to the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_ClkMonFaultInfo__set_state(ClkMonFaultInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge __pyx_state[1:] into the instance __dict__ (returns -1 on error). */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_ClkMonFaultInfo__set_state(<ClkMonFaultInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ClkMonFaultInfo__set_state(ClkMonFaultInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ClkMonFaultInfo__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessUtilizationSample(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper for __pyx_unpickle_ProcessUtilizationSample.
 *
 * Unpacks (__pyx_type, __pyx_checksum, __pyx_state) from either the
 * METH_FASTCALL argument vector or a classic args tuple (plus optional
 * keywords), converts __pyx_checksum to C long, type-checks __pyx_state
 * as a tuple (None allowed), and delegates to the implementation
 * function. Returns the unpickled instance, or NULL with an exception
 * set.
 *
 * FIX vs. generated code: the keyword-count error check read
 *   if (unlikely(__pyx_kwds_len) < 0)
 * With unlikely(x) defined as __builtin_expect(!!(x), 0) this collapses
 * to (0-or-1) < 0, which is never true, so a -1 error return from
 * __Pyx_NumKwargs_FASTCALL was silently ignored on GCC/Clang builds.
 * The comparison now sits inside unlikely(), matching every other
 * error check in this file. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_707__pyx_unpickle_ProcessUtilizationSample(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_706__pyx_unpickle_ProcessUtilizationSample, "__pyx_unpickle_ProcessUtilizationSample(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_707__pyx_unpickle_ProcessUtilizationSample = {"__pyx_unpickle_ProcessUtilizationSample", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_707__pyx_unpickle_ProcessUtilizationSample, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_706__pyx_unpickle_ProcessUtilizationSample};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_707__pyx_unpickle_ProcessUtilizationSample(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessUtilizationSample (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so the -1 error return is detected. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: collect up to 3 positional args, then let
       * __Pyx_ParseKeywords fill the remaining slots. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_ProcessUtilizationSample", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three arguments are required; reject any slot left unfilled. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessUtilizationSample", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly 3 positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessUtilizationSample", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: release any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessUtilizationSample", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* __pyx_state must be a tuple (or None); then delegate to the impl. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_706__pyx_unpickle_ProcessUtilizationSample(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_ProcessUtilizationSample (code left
 * byte-identical; comments added for review).
 *
 * Validates the pickle checksum against the known attribute-layout
 * hashes, allocates a new instance via
 * ProcessUtilizationSample.__new__(__pyx_type), applies __pyx_state
 * through the __set_state helper when it is not None, and returns the
 * reconstructed object.
 *
 * @return new reference to the instance, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_706__pyx_unpickle_ProcessUtilizationSample(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessUtilizationSample", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_ProcessUtilizationSample(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = ProcessUtilizationSample.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the three accepted layout hashes. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessUtilizationSample.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)
 */
  /* Call ProcessUtilizationSample.__new__(__pyx_type) via the vectorcall method helper. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessUtilizationSample.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = ProcessUtilizationSample.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationSample__set_state(ProcessUtilizationSample __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Apply the pickled state to the new instance. */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationSample__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessUtilizationSample.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_ProcessUtilizationSample__set_state(ProcessUtilizationSample __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessUtilizationSample(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessUtilizationSample", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationSample__set_state(ProcessUtilizationSample __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* Cython-generated unpickle helper for ProcessUtilizationSample (code
 * left byte-identical; comments added for review).
 *
 * Restores an instance during unpickling: sets the cdef '_data'
 * attribute from __pyx_state[0], then merges any remaining state
 * entries (index >= 1) into the instance __dict__ via
 * __Pyx_UpdateUnpickledDict.
 *
 * @param __pyx_v___pyx_result  freshly created ProcessUtilizationSample instance
 * @param __pyx_v___pyx_state   state tuple produced by __reduce__
 * @return Py_None (new reference) on success; NULL with an exception
 *         set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationSample__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessUtilizationSample__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationSample__set_state(ProcessUtilizationSample __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new reference). */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Swap the new value into the '_data' slot: transfer ownership of
   * the new reference, then drop the reference to the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_ProcessUtilizationSample__set_state(ProcessUtilizationSample __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge __pyx_state[1:] into the instance __dict__ (returns -1 on error). */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_ProcessUtilizationSample__set_state(<ProcessUtilizationSample> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationSample__set_state(ProcessUtilizationSample __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessUtilizationSample__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Python-level wrapper for __pyx_unpickle_ProcessUtilizationInfo_v1.
 * Unpacks the (__pyx_type, __pyx_checksum, __pyx_state) arguments from
 * either the fastcall vector or the legacy args tuple, converts
 * __pyx_checksum to C long, type-checks __pyx_state as a tuple, and
 * delegates to the implementation function (suffix _708). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_709__pyx_unpickle_ProcessUtilizationInfo_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_708__pyx_unpickle_ProcessUtilizationInfo_v1, "__pyx_unpickle_ProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_709__pyx_unpickle_ProcessUtilizationInfo_v1 = {"__pyx_unpickle_ProcessUtilizationInfo_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_709__pyx_unpickle_ProcessUtilizationInfo_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_708__pyx_unpickle_ProcessUtilizationInfo_v1};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_709__pyx_unpickle_ProcessUtilizationInfo_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* borrowed/owned argument slots; every exit path XDECREFs all three */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessUtilizationInfo_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely().  The original
       `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
       __builtin_expect(!!(x), 0) against 0 and can never be true, so a
       negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* positional args first (fall through collects all provided ones),
         then keywords fill/validate the rest */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_ProcessUtilizationInfo_v1", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessUtilizationInfo_v1", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ProcessUtilizationInfo_v1", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessUtilizationInfo_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_708__pyx_unpickle_ProcessUtilizationInfo_v1(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_ProcessUtilizationInfo_v1: rebuilds a
 * ProcessUtilizationInfo_v1 instance during unpickling.  Validates the
 * pickle checksum against the three accepted values, allocates the object
 * via `ProcessUtilizationInfo_v1.__new__(__pyx_type)`, and, if a state
 * tuple was supplied, restores state via the __set_state helper.
 * Returns a new reference to the rebuilt object, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_708__pyx_unpickle_ProcessUtilizationInfo_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessUtilizationInfo_v1", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_ProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = ProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced by an incompatible class layout ('_data' is
     the pickled attribute set the checksums were derived from). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessUtilizationInfo_v1.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = ProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(ProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationInfo_v1__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(ProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessUtilizationInfo_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(ProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* State-restore helper for ProcessUtilizationInfo_v1 unpickling: assigns
 * __pyx_state[0] to the instance's `_data` slot and, via
 * __Pyx_UpdateUnpickledDict, merges __pyx_state[1] (when present) into the
 * instance __dict__.  Returns Py_None on success, NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ProcessUtilizationInfo_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ProcessUtilizationInfo_v1__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(ProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* swap the new value into the `_data` C slot, releasing the old one */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(ProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(<ProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(ProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ProcessUtilizationInfo_v1__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Python-level wrapper for __pyx_unpickle_VgpuProcessUtilizationInfo_v1.
 * Unpacks the (__pyx_type, __pyx_checksum, __pyx_state) arguments from
 * either the fastcall vector or the legacy args tuple, converts
 * __pyx_checksum to C long, type-checks __pyx_state as a tuple, and
 * delegates to the implementation function (suffix _710). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_711__pyx_unpickle_VgpuProcessUtilizationInfo_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_710__pyx_unpickle_VgpuProcessUtilizationInfo_v1, "__pyx_unpickle_VgpuProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_711__pyx_unpickle_VgpuProcessUtilizationInfo_v1 = {"__pyx_unpickle_VgpuProcessUtilizationInfo_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_711__pyx_unpickle_VgpuProcessUtilizationInfo_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_710__pyx_unpickle_VgpuProcessUtilizationInfo_v1};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_711__pyx_unpickle_VgpuProcessUtilizationInfo_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* borrowed/owned argument slots; every exit path XDECREFs all three */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuProcessUtilizationInfo_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely().  The original
       `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
       __builtin_expect(!!(x), 0) against 0 and can never be true, so a
       negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* positional args first (fall through collects all provided ones),
         then keywords fill/validate the rest */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_VgpuProcessUtilizationInfo_v1", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_VgpuProcessUtilizationInfo_v1", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_VgpuProcessUtilizationInfo_v1", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuProcessUtilizationInfo_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_710__pyx_unpickle_VgpuProcessUtilizationInfo_v1(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_VgpuProcessUtilizationInfo_v1: rebuilds a
 * VgpuProcessUtilizationInfo_v1 instance during unpickling.  Validates the
 * pickle checksum against the three accepted values, allocates the object
 * via `VgpuProcessUtilizationInfo_v1.__new__(__pyx_type)`, and, if a state
 * tuple was supplied, restores state via the __set_state helper.
 * Returns a new reference to the rebuilt object, or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_710__pyx_unpickle_VgpuProcessUtilizationInfo_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuProcessUtilizationInfo_v1", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_VgpuProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = VgpuProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced by an incompatible class layout ('_data' is
     the pickled attribute set the checksums were derived from). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuProcessUtilizationInfo_v1.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = VgpuProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(VgpuProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuProcessUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(VgpuProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuProcessUtilizationInfo_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(VgpuProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* State-restore helper for VgpuProcessUtilizationInfo_v1 unpickling: assigns
 * __pyx_state[0] to the instance's `_data` slot and, via
 * __Pyx_UpdateUnpickledDict, merges __pyx_state[1] (when present) into the
 * instance __dict__.  Returns Py_None on success, NULL on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(VgpuProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* swap the new value into the `_data` C slot, releasing the old one */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(VgpuProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(<VgpuProcessUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(VgpuProcessUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuSchedulerLogEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Python-level wrapper for __pyx_unpickle_VgpuSchedulerLogEntry.
 * Unpacks the (__pyx_type, __pyx_checksum, __pyx_state) arguments from
 * either the fastcall vector or the legacy args tuple, converts
 * __pyx_checksum to C long, type-checks __pyx_state as a tuple, and
 * delegates to the implementation function (suffix _712). */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_713__pyx_unpickle_VgpuSchedulerLogEntry(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_712__pyx_unpickle_VgpuSchedulerLogEntry, "__pyx_unpickle_VgpuSchedulerLogEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_713__pyx_unpickle_VgpuSchedulerLogEntry = {"__pyx_unpickle_VgpuSchedulerLogEntry", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_713__pyx_unpickle_VgpuSchedulerLogEntry, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_712__pyx_unpickle_VgpuSchedulerLogEntry};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_713__pyx_unpickle_VgpuSchedulerLogEntry(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* borrowed/owned argument slots; every exit path XDECREFs all three */
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuSchedulerLogEntry (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely().  The original
       `unlikely(__pyx_kwds_len) < 0` compares the 0/1 result of
       __builtin_expect(!!(x), 0) against 0 and can never be true, so a
       negative (error) keyword count was silently ignored. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* positional args first (fall through collects all provided ones),
         then keywords fill/validate the rest */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_VgpuSchedulerLogEntry", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_VgpuSchedulerLogEntry", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_VgpuSchedulerLogEntry", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuSchedulerLogEntry", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_712__pyx_unpickle_VgpuSchedulerLogEntry(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __pyx_unpickle_VgpuSchedulerLogEntry (Cython-generated
 * pickle reconstructor).  Verifies the pickle checksum, allocates a new
 * instance via VgpuSchedulerLogEntry.__new__(__pyx_type), restores state
 * from the __pyx_state tuple when it is not None, and returns the new
 * object.  Returns NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_712__pyx_unpickle_VgpuSchedulerLogEntry(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuSchedulerLogEntry", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_VgpuSchedulerLogEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = VgpuSchedulerLogEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced from an incompatible class layout (checksum
   * helper raises and returns -1 on mismatch). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuSchedulerLogEntry.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)
 */
  /* Call VgpuSchedulerLogEntry.__new__(__pyx_type) via the vectorcall
   * fast-path method helper. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuSchedulerLogEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = VgpuSchedulerLogEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuSchedulerLogEntry__set_state(VgpuSchedulerLogEntry __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Restore the instance state (_data plus any __dict__ entries). */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuSchedulerLogEntry__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuSchedulerLogEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_VgpuSchedulerLogEntry__set_state(VgpuSchedulerLogEntry __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuSchedulerLogEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuSchedulerLogEntry", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuSchedulerLogEntry__set_state(VgpuSchedulerLogEntry __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/*
 * State-restore helper for VgpuSchedulerLogEntry unpickling: assigns
 * __pyx_state[0] to the instance's _data slot (replacing the previous
 * reference) and merges any trailing dict entries into the instance
 * __dict__ via __Pyx_UpdateUnpickledDict.  Returns None on success,
 * NULL with an exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuSchedulerLogEntry__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuSchedulerLogEntry__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuSchedulerLogEntry__set_state(VgpuSchedulerLogEntry __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new ref) and swap it into the _data slot;
   * the old _data reference is dropped after the new one is acquired. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_VgpuSchedulerLogEntry__set_state(VgpuSchedulerLogEntry __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_VgpuSchedulerLogEntry__set_state(<VgpuSchedulerLogEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuSchedulerLogEntry__set_state(VgpuSchedulerLogEntry __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuSchedulerLogEntry__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_HwbcEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper declarations for __pyx_unpickle_HwbcEntry: forward
 * prototype (signature depends on CYTHON_METH_FASTCALL), docstring, and
 * the PyMethodDef entry used to register the function on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_715__pyx_unpickle_HwbcEntry(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_714__pyx_unpickle_HwbcEntry, "__pyx_unpickle_HwbcEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_715__pyx_unpickle_HwbcEntry = {"__pyx_unpickle_HwbcEntry", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_715__pyx_unpickle_HwbcEntry, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_714__pyx_unpickle_HwbcEntry};
/*
 * Python wrapper for __pyx_unpickle_HwbcEntry: unpacks the three arguments
 * (__pyx_type, __pyx_checksum, __pyx_state) from positional and/or keyword
 * form, converts the checksum to C long, type-checks __pyx_state as a
 * tuple, and delegates to the implementation function.  Returns NULL with
 * an exception set on any argument error.
 *
 * FIX: the keyword-count error check was generated as
 *     if (unlikely(__pyx_kwds_len) < 0)
 * but unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only
 * 0 or 1, so that comparison is always false and a negative (error) result
 * from __Pyx_NumKwargs_FASTCALL (e.g. PyDict_Size failing in the
 * non-FASTCALL path) would be silently ignored.  Corrected to
 *     if (unlikely(__pyx_kwds_len < 0)).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_715__pyx_unpickle_HwbcEntry(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_HwbcEntry (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so a negative error return is
     * actually detected (unlikely() collapses its argument to 0/1). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then let
       * __Pyx_ParseKeywords fill the remaining slots by name. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_HwbcEntry", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_HwbcEntry", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional call with exactly three arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_HwbcEntry", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_HwbcEntry", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_714__pyx_unpickle_HwbcEntry(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __pyx_unpickle_HwbcEntry (Cython-generated pickle
 * reconstructor).  Verifies the pickle checksum, allocates a new instance
 * via HwbcEntry.__new__(__pyx_type), restores state from the __pyx_state
 * tuple when it is not None, and returns the new object.  Returns NULL
 * with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_714__pyx_unpickle_HwbcEntry(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_HwbcEntry", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_HwbcEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = HwbcEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced from an incompatible class layout (checksum
   * helper raises and returns -1 on mismatch). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = HwbcEntry.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)
 */
  /* Call HwbcEntry.__new__(__pyx_type) via the vectorcall fast-path
   * method helper. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = HwbcEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = HwbcEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_HwbcEntry__set_state(HwbcEntry __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Restore the instance state (_data plus any __dict__ entries). */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_HwbcEntry__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = HwbcEntry.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_HwbcEntry__set_state(HwbcEntry __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_HwbcEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_HwbcEntry", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_HwbcEntry__set_state(HwbcEntry __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/*
 * State-restore helper for HwbcEntry unpickling: assigns __pyx_state[0]
 * to the instance's _data slot (replacing the previous reference) and
 * merges any trailing dict entries into the instance __dict__ via
 * __Pyx_UpdateUnpickledDict.  Returns None on success, NULL with an
 * exception set on failure.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_HwbcEntry__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_HwbcEntry__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_HwbcEntry__set_state(HwbcEntry __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new ref) and swap it into the _data slot;
   * the old _data reference is dropped after the new one is acquired. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_HwbcEntry__set_state(HwbcEntry __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_HwbcEntry__set_state(<HwbcEntry> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_HwbcEntry__set_state(HwbcEntry __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_HwbcEntry__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_UnitFanInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper declarations for __pyx_unpickle_UnitFanInfo: forward
 * prototype (signature depends on CYTHON_METH_FASTCALL), docstring, and
 * the PyMethodDef entry used to register the function on the module. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_717__pyx_unpickle_UnitFanInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_716__pyx_unpickle_UnitFanInfo, "__pyx_unpickle_UnitFanInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_717__pyx_unpickle_UnitFanInfo = {"__pyx_unpickle_UnitFanInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_717__pyx_unpickle_UnitFanInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_716__pyx_unpickle_UnitFanInfo};
/*
 * Python wrapper for __pyx_unpickle_UnitFanInfo: unpacks the three
 * arguments (__pyx_type, __pyx_checksum, __pyx_state) from positional
 * and/or keyword form, converts the checksum to C long, type-checks
 * __pyx_state as a tuple, and delegates to the implementation function.
 * Returns NULL with an exception set on any argument error.
 *
 * FIX: the keyword-count error check was generated as
 *     if (unlikely(__pyx_kwds_len) < 0)
 * but unlikely(x) expands to __builtin_expect(!!(x), 0), which yields only
 * 0 or 1, so that comparison is always false and a negative (error) result
 * from __Pyx_NumKwargs_FASTCALL (e.g. PyDict_Size failing in the
 * non-FASTCALL path) would be silently ignored.  Corrected to
 *     if (unlikely(__pyx_kwds_len < 0)).
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_717__pyx_unpickle_UnitFanInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_UnitFanInfo (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: comparison moved inside unlikely() so a negative error return is
     * actually detected (unlikely() collapses its argument to 0/1). */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional + keyword call: collect positionals, then let
       * __Pyx_ParseKeywords fill the remaining slots by name. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_UnitFanInfo", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_UnitFanInfo", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Pure positional call with exactly three arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_UnitFanInfo", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_UnitFanInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_716__pyx_unpickle_UnitFanInfo(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of __pyx_unpickle_UnitFanInfo (Cython-generated pickle
 * reconstructor).  Verifies the pickle checksum, allocates a new instance
 * via UnitFanInfo.__new__(__pyx_type), restores state from the
 * __pyx_state tuple when it is not None, and returns the new object.
 * Returns NULL with a Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_716__pyx_unpickle_UnitFanInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_UnitFanInfo", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_UnitFanInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = UnitFanInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced from an incompatible class layout (checksum
   * helper raises and returns -1 on mismatch). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = UnitFanInfo.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)
 */
  /* Call UnitFanInfo.__new__(__pyx_type) via the vectorcall fast-path
   * method helper. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = UnitFanInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = UnitFanInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_UnitFanInfo__set_state(UnitFanInfo __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Restore the instance state (_data plus any __dict__ entries). */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_UnitFanInfo__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = UnitFanInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_UnitFanInfo__set_state(UnitFanInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_UnitFanInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_UnitFanInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_UnitFanInfo__set_state(UnitFanInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level unpickling helper for UnitFanInfo.
 * Restores instance state from the __pyx_state tuple produced by __reduce__:
 * stores a new reference to __pyx_state[0] into self->_data (releasing the
 * previous reference), then calls __Pyx_UpdateUnpickledDict, which
 * presumably merges the remaining state (starting at index 1) into the
 * instance __dict__ -- helper defined elsewhere in the generated preamble,
 * TODO confirm.
 * Returns a new reference to Py_None on success, NULL on error.
 * NOTE(review): the GOTREF/GIVEREF/DECREF sequence below is RefNanny
 * bookkeeping; its exact order must not be changed. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_UnitFanInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_UnitFanInfo__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_UnitFanInfo__set_state(UnitFanInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] as a new reference and swap it into self->_data. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_UnitFanInfo__set_state(UnitFanInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge any extra unpickled state into the instance; -1 signals error. */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_UnitFanInfo__set_state(<UnitFanInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_UnitFanInfo__set_state(UnitFanInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_UnitFanInfo__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_EncoderSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the module-level pickle helper
 * __pyx_unpickle_EncoderSessionInfo(__pyx_type, __pyx_checksum, __pyx_state).
 * Accepts the three arguments positionally or by keyword, converts
 * __pyx_checksum to a C long, type-checks __pyx_state as a tuple (or None),
 * and forwards to the implementation function __pyx_pf_..._718. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_719__pyx_unpickle_EncoderSessionInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_718__pyx_unpickle_EncoderSessionInfo, "__pyx_unpickle_EncoderSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_719__pyx_unpickle_EncoderSessionInfo = {"__pyx_unpickle_EncoderSessionInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_719__pyx_unpickle_EncoderSessionInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_718__pyx_unpickle_EncoderSessionInfo};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_719__pyx_unpickle_EncoderSessionInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_EncoderSessionInfo (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  Under the common
     * definition `unlikely(x) == __builtin_expect(!!(x), 0)` the normalized
     * value `!!(x)` is 0 or 1 and never negative, so the error from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison must
     * be inside the branch-hint macro.  (On compilers where unlikely(x)
     * expands to plain (x), both forms behave identically.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_EncoderSessionInfo", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three parameters are required: any still-missing slot is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_EncoderSessionInfo", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_EncoderSessionInfo", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_EncoderSessionInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* __pyx_state must be a tuple (the final 1 permits None). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_718__pyx_unpickle_EncoderSessionInfo(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_EncoderSessionInfo(__pyx_type,
 * __pyx_checksum, __pyx_state): verifies the pickle layout checksum against
 * the three accepted values, allocates a new instance via
 * EncoderSessionInfo.__new__(__pyx_type), restores state through the
 * companion __set_state helper when __pyx_state is not None, and returns
 * the new instance (new reference) or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_718__pyx_unpickle_EncoderSessionInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_EncoderSessionInfo", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_EncoderSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = EncoderSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced for an incompatible class layout (-1 = error). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = EncoderSessionInfo.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)
 */
  /* Vectorcall EncoderSessionInfo.__new__(__pyx_type) without running __init__. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = EncoderSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = EncoderSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_EncoderSessionInfo__set_state(EncoderSessionInfo __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EncoderSessionInfo__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = EncoderSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_EncoderSessionInfo__set_state(EncoderSessionInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  /* Hand out a new reference to the reconstructed instance. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_EncoderSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_EncoderSessionInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_EncoderSessionInfo__set_state(EncoderSessionInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level unpickling helper for EncoderSessionInfo.
 * Stores a new reference to __pyx_state[0] into self->_data (releasing the
 * old reference), then calls __Pyx_UpdateUnpickledDict, which presumably
 * merges the remaining state (from index 1) into the instance __dict__ --
 * helper defined elsewhere in the generated preamble, TODO confirm.
 * Returns a new reference to Py_None on success, NULL on error.
 * NOTE(review): the GOTREF/GIVEREF/DECREF ordering below is RefNanny
 * bookkeeping and must be preserved exactly. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EncoderSessionInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_EncoderSessionInfo__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_EncoderSessionInfo__set_state(EncoderSessionInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] as a new reference and swap it into self->_data. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_EncoderSessionInfo__set_state(EncoderSessionInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge any extra unpickled state into the instance; -1 signals error. */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_EncoderSessionInfo__set_state(<EncoderSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_EncoderSessionInfo__set_state(EncoderSessionInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_EncoderSessionInfo__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_FBCSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Argument-unpacking wrapper for the module-level pickle helper
 * __pyx_unpickle_FBCSessionInfo(__pyx_type, __pyx_checksum, __pyx_state).
 * Accepts the three arguments positionally or by keyword, converts
 * __pyx_checksum to a C long, type-checks __pyx_state as a tuple (or None),
 * and forwards to the implementation function __pyx_pf_..._720. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_721__pyx_unpickle_FBCSessionInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_720__pyx_unpickle_FBCSessionInfo, "__pyx_unpickle_FBCSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_721__pyx_unpickle_FBCSessionInfo = {"__pyx_unpickle_FBCSessionInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_721__pyx_unpickle_FBCSessionInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_720__pyx_unpickle_FBCSessionInfo};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_721__pyx_unpickle_FBCSessionInfo(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_FBCSessionInfo (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`.  Under the common
     * definition `unlikely(x) == __builtin_expect(!!(x), 0)` the normalized
     * value `!!(x)` is 0 or 1 and never negative, so the error from
     * __Pyx_NumKwargs_FASTCALL was silently ignored.  The comparison must
     * be inside the branch-hint macro.  (On compilers where unlikely(x)
     * expands to plain (x), both forms behave identically.) */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_FBCSessionInfo", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three parameters are required: any still-missing slot is an error. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FBCSessionInfo", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FBCSessionInfo", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: drop any references already collected. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_FBCSessionInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* __pyx_state must be a tuple (the final 1 permits None). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_720__pyx_unpickle_FBCSessionInfo(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_FBCSessionInfo(__pyx_type,
 * __pyx_checksum, __pyx_state): verifies the pickle layout checksum against
 * the three accepted values, allocates a new instance via
 * FBCSessionInfo.__new__(__pyx_type), restores state through the companion
 * __set_state helper when __pyx_state is not None, and returns the new
 * instance (new reference) or NULL on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_720__pyx_unpickle_FBCSessionInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_FBCSessionInfo", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_FBCSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = FBCSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles produced for an incompatible class layout (-1 = error). */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = FBCSessionInfo.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)
 */
  /* Vectorcall FBCSessionInfo.__new__(__pyx_type) without running __init__. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = FBCSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = FBCSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_FBCSessionInfo__set_state(FBCSessionInfo __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FBCSessionInfo__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = FBCSessionInfo.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_FBCSessionInfo__set_state(FBCSessionInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  /* Hand out a new reference to the reconstructed instance. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_FBCSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_FBCSessionInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_FBCSessionInfo__set_state(FBCSessionInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level unpickling helper for FBCSessionInfo.
 * Stores a new reference to __pyx_state[0] into self->_data (releasing the
 * old reference), then calls __Pyx_UpdateUnpickledDict, which presumably
 * merges the remaining state (from index 1) into the instance __dict__ --
 * helper defined elsewhere in the generated preamble, TODO confirm.
 * Returns a new reference to Py_None on success, NULL on error.
 * NOTE(review): the GOTREF/GIVEREF/DECREF ordering below is RefNanny
 * bookkeeping and must be preserved exactly. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FBCSessionInfo__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_FBCSessionInfo__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_FBCSessionInfo__set_state(FBCSessionInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] as a new reference and swap it into self->_data. */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_FBCSessionInfo__set_state(FBCSessionInfo __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge any extra unpickled state into the instance; -1 signals error. */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_FBCSessionInfo__set_state(<FBCSessionInfo> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_FBCSessionInfo__set_state(FBCSessionInfo __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_FBCSessionInfo__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_GpuInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Cython-generated Python wrapper for __pyx_unpickle_GpuInstancePlacement.
 * Unpacks the (__pyx_type, __pyx_checksum, __pyx_state) arguments from a
 * fastcall vector or argument tuple, converts the checksum to a C long,
 * type-checks __pyx_state as a tuple, and forwards to the "pf"
 * implementation below.  Returns the reconstructed object or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_723__pyx_unpickle_GpuInstancePlacement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_722__pyx_unpickle_GpuInstancePlacement, "__pyx_unpickle_GpuInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_723__pyx_unpickle_GpuInstancePlacement = {"__pyx_unpickle_GpuInstancePlacement", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_723__pyx_unpickle_GpuInstancePlacement, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_722__pyx_unpickle_GpuInstancePlacement};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_723__pyx_unpickle_GpuInstancePlacement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_GpuInstancePlacement (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was "if (unlikely(__pyx_kwds_len) < 0)".  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), a 0/1 value, so the old form compared
     * 0-or-1 with 0 and the negative-length check could never fire.  The
     * comparison belongs inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call path: collect positional args first, then merge
       * keyword arguments and verify all three are present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_GpuInstancePlacement", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_GpuInstancePlacement", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly three arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_GpuInstancePlacement", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_GpuInstancePlacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_722__pyx_unpickle_GpuInstancePlacement(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_GpuInstancePlacement(type, checksum, state):
 * verifies the pickle checksum against the known field layout, creates the
 * instance via GpuInstancePlacement.__new__(type), applies the saved state
 * (when not None) through the __set_state helper, and returns the new
 * object.  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_722__pyx_unpickle_GpuInstancePlacement(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_GpuInstancePlacement", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_GpuInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = GpuInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the accepted layouts. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = GpuInstancePlacement.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)
 */
  /* Call GpuInstancePlacement.__new__(__pyx_type) via the fastcall
   * method-call helper; __pyx_t_3 temporarily owns the type object. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = GpuInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = GpuInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_GpuInstancePlacement__set_state(GpuInstancePlacement __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Restore the instance fields from the state tuple; result discarded. */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GpuInstancePlacement__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = GpuInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_GpuInstancePlacement__set_state(GpuInstancePlacement __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_GpuInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  /* Error path releases temporaries; both paths release the local result
   * reference (the return value holds its own reference). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_GpuInstancePlacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_GpuInstancePlacement__set_state(GpuInstancePlacement __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* __pyx_unpickle_GpuInstancePlacement__set_state: restores an instance from
 * its pickle state tuple.  Stores state[0] into the _data slot and then
 * calls __Pyx_UpdateUnpickledDict(result, state, 1) -- the index 1 is
 * presumably the position of the instance __dict__ within the state tuple
 * (behavior defined by the __Pyx_UpdateUnpickledDict helper; confirm there).
 * Returns None on success, NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GpuInstancePlacement__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_GpuInstancePlacement__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_GpuInstancePlacement__set_state(GpuInstancePlacement __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Steal the new reference into the _data slot, releasing the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_GpuInstancePlacement__set_state(GpuInstancePlacement __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_GpuInstancePlacement__set_state(<GpuInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_GpuInstancePlacement__set_state(GpuInstancePlacement __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_GpuInstancePlacement__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ComputeInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Cython-generated Python wrapper for __pyx_unpickle_ComputeInstancePlacement.
 * Unpacks the (__pyx_type, __pyx_checksum, __pyx_state) arguments from a
 * fastcall vector or argument tuple, converts the checksum to a C long,
 * type-checks __pyx_state as a tuple, and forwards to the "pf"
 * implementation below.  Returns the reconstructed object or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_725__pyx_unpickle_ComputeInstancePlacement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_724__pyx_unpickle_ComputeInstancePlacement, "__pyx_unpickle_ComputeInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_725__pyx_unpickle_ComputeInstancePlacement = {"__pyx_unpickle_ComputeInstancePlacement", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_725__pyx_unpickle_ComputeInstancePlacement, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_724__pyx_unpickle_ComputeInstancePlacement};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_725__pyx_unpickle_ComputeInstancePlacement(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ComputeInstancePlacement (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was "if (unlikely(__pyx_kwds_len) < 0)".  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), a 0/1 value, so the old form compared
     * 0-or-1 with 0 and the negative-length check could never fire.  The
     * comparison belongs inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call path: collect positional args first, then merge
       * keyword arguments and verify all three are present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_ComputeInstancePlacement", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ComputeInstancePlacement", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly three arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ComputeInstancePlacement", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ComputeInstancePlacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_724__pyx_unpickle_ComputeInstancePlacement(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_ComputeInstancePlacement(type, checksum,
 * state): verifies the pickle checksum against the known field layout,
 * creates the instance via ComputeInstancePlacement.__new__(type), applies
 * the saved state (when not None) through the __set_state helper, and
 * returns the new object.  Returns NULL with an exception set on error. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_724__pyx_unpickle_ComputeInstancePlacement(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ComputeInstancePlacement", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_ComputeInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = ComputeInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the accepted layouts. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ComputeInstancePlacement.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)
 */
  /* Call ComputeInstancePlacement.__new__(__pyx_type) via the fastcall
   * method-call helper; __pyx_t_3 temporarily owns the type object. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ComputeInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = ComputeInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_ComputeInstancePlacement__set_state(ComputeInstancePlacement __pyx_result, __pyx_state: tuple):
 */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    /* Restore the instance fields from the state tuple; result discarded. */
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ComputeInstancePlacement__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = ComputeInstancePlacement.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_ComputeInstancePlacement__set_state(ComputeInstancePlacement __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ComputeInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  /* Error path releases temporaries; both paths release the local result
   * reference (the return value holds its own reference). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ComputeInstancePlacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ComputeInstancePlacement__set_state(ComputeInstancePlacement __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* __pyx_unpickle_ComputeInstancePlacement__set_state: restores an instance
 * from its pickle state tuple.  Stores state[0] into the _data slot and then
 * calls __Pyx_UpdateUnpickledDict(result, state, 1) -- the index 1 is
 * presumably the position of the instance __dict__ within the state tuple
 * (behavior defined by the __Pyx_UpdateUnpickledDict helper; confirm there).
 * Returns None on success, NULL with an exception set on error. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_ComputeInstancePlacement__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_ComputeInstancePlacement__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_ComputeInstancePlacement__set_state(ComputeInstancePlacement __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* Steal the new reference into the _data slot, releasing the old value. */
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_ComputeInstancePlacement__set_state(ComputeInstancePlacement __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_ComputeInstancePlacement__set_state(<ComputeInstancePlacement> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_ComputeInstancePlacement__set_state(ComputeInstancePlacement __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_ComputeInstancePlacement__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/* Cython-generated Python wrapper for
 * __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1.  Unpacks the
 * (__pyx_type, __pyx_checksum, __pyx_state) arguments from a fastcall
 * vector or argument tuple, converts the checksum to a C long, type-checks
 * __pyx_state as a tuple, and forwards to the "pf" implementation.
 * Returns the reconstructed object or NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_727__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_726__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, "__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_727__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1 = {"__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_727__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_726__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_727__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* BUGFIX: was "if (unlikely(__pyx_kwds_len) < 0)".  unlikely(x) expands
     * to __builtin_expect(!!(x), 0), a 0/1 value, so the old form compared
     * 0-or-1 with 0 and the negative-length check could never fire.  The
     * comparison belongs inside the unlikely() hint. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword call path: collect positional args first, then merge
       * keyword arguments and verify all three are present. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Positional-only fast path: exactly three arguments. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-unpacking failure: drop any references collected so far. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_726__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the module-level pickle helper
 * __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1.
 * Verifies the pickle checksum, creates a new instance via
 * __pyx_type.__new__(__pyx_type), and, when __pyx_state is not None,
 * restores instance state through the generated __set_state helper.
 * Returns a new reference to the reconstructed object, or NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_726__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = EccSramUniqueUncorrectedErrorEntry_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
*/
  /* Reject pickles produced by an incompatible layout of the class. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = EccSramUniqueUncorrectedErrorEntry_v1.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)
*/
  /* Vectorcall of EccSramUniqueUncorrectedErrorEntry_v1.__new__(__pyx_type). */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = EccSramUniqueUncorrectedErrorEntry_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = EccSramUniqueUncorrectedErrorEntry_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(EccSramUniqueUncorrectedErrorEntry_v1 __pyx_result, __pyx_state: tuple):
*/
    /* Generated 'not None' guard for the cdef call below (redundant with
     * the surrounding if, but kept by the code generator). */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = EccSramUniqueUncorrectedErrorEntry_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(EccSramUniqueUncorrectedErrorEntry_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
*/
  /* Return the reconstructed object (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

  /* function exit code */
  /* Error exit: drop temporaries, record traceback, fall through to the
   * common cleanup which releases the local object and returns NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(EccSramUniqueUncorrectedErrorEntry_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/*
 * State-restoration helper for unpickling EccSramUniqueUncorrectedErrorEntry_v1:
 * assigns __pyx_result._data = __pyx_state[0] and then calls
 * __Pyx_UpdateUnpickledDict to merge any remaining state (past index 1)
 * into the instance. Returns None on success, or NULL (0) with a Python
 * exception set on failure. Errors from indexing __pyx_state propagate.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(EccSramUniqueUncorrectedErrorEntry_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/
  /* Fetch __pyx_state[0], then swap it into the _data slot (release the
   * old reference after acquiring the new one). */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(EccSramUniqueUncorrectedErrorEntry_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
*/
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(<EccSramUniqueUncorrectedErrorEntry_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(EccSramUniqueUncorrectedErrorEntry_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_Sample(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/*
 * Python-callable wrapper for __pyx_unpickle_Sample: parses the three
 * arguments (__pyx_type, __pyx_checksum, __pyx_state) from positional or
 * keyword form, type-checks __pyx_state as a tuple, and forwards to the
 * implementation function. Returns NULL with an exception set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_729__pyx_unpickle_Sample(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_728__pyx_unpickle_Sample, "__pyx_unpickle_Sample(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_729__pyx_unpickle_Sample = {"__pyx_unpickle_Sample", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_729__pyx_unpickle_Sample, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_728__pyx_unpickle_Sample};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_729__pyx_unpickle_Sample(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_Sample (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the error check must wrap the whole comparison.  `unlikely(x)`
     * expands to `__builtin_expect(!!(x), 0)`, whose value is only 0 or 1
     * and can never be < 0, so the previous `unlikely(__pyx_kwds_len) < 0`
     * silently ignored a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL, continuing with a pending exception. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positional args first, then let
       * __Pyx_ParseKeywords fill in the rest and reject duplicates. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_Sample", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Sample", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Sample", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_Sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_728__pyx_unpickle_Sample(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the module-level pickle helper __pyx_unpickle_Sample.
 * Verifies the pickle checksum, creates a new instance via
 * __pyx_type.__new__(__pyx_type), and, when __pyx_state is not None,
 * restores instance state through the generated __set_state helper.
 * Returns a new reference to the reconstructed object, or NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_728__pyx_unpickle_Sample(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Sample", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_Sample(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = Sample.__new__(__pyx_type)
 *     if __pyx_state is not None:
*/
  /* Reject pickles produced by an incompatible layout of the class. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = Sample.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)
*/
  /* Vectorcall of Sample.__new__(__pyx_type). */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = Sample.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = Sample.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_Sample__set_state(Sample __pyx_result, __pyx_state: tuple):
*/
    /* Generated 'not None' guard for the cdef call below (redundant with
     * the surrounding if, but kept by the code generator). */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_Sample__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = Sample.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_Sample__set_state(Sample __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
*/
  /* Return the reconstructed object (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_Sample(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

  /* function exit code */
  /* Error exit: drop temporaries, record traceback, fall through to the
   * common cleanup which releases the local object and returns NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_Sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Sample__set_state(Sample __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/*
 * State-restoration helper for unpickling Sample: assigns
 * __pyx_result._data = __pyx_state[0] and then calls
 * __Pyx_UpdateUnpickledDict to merge any remaining state (past index 1)
 * into the instance. Returns None on success, or NULL (0) with a Python
 * exception set on failure. Errors from indexing __pyx_state propagate.
 */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_Sample__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Sample__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_Sample__set_state(Sample __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/
  /* Fetch __pyx_state[0], then swap it into the _data slot (release the
   * old reference after acquiring the new one). */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_Sample__set_state(Sample __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
*/
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_Sample__set_state(<Sample> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Sample__set_state(Sample __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_Sample__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuInstanceUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
/*
 * Python-callable wrapper for __pyx_unpickle_VgpuInstanceUtilizationInfo_v1:
 * parses the three arguments (__pyx_type, __pyx_checksum, __pyx_state) from
 * positional or keyword form, type-checks __pyx_state as a tuple, and
 * forwards to the implementation function. Returns NULL with an exception
 * set on failure.
 */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_731__pyx_unpickle_VgpuInstanceUtilizationInfo_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_730__pyx_unpickle_VgpuInstanceUtilizationInfo_v1, "__pyx_unpickle_VgpuInstanceUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_731__pyx_unpickle_VgpuInstanceUtilizationInfo_v1 = {"__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_731__pyx_unpickle_VgpuInstanceUtilizationInfo_v1, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_730__pyx_unpickle_VgpuInstanceUtilizationInfo_v1};
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_731__pyx_unpickle_VgpuInstanceUtilizationInfo_v1(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuInstanceUtilizationInfo_v1 (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the error check must wrap the whole comparison.  `unlikely(x)`
     * expands to `__builtin_expect(!!(x), 0)`, whose value is only 0 or 1
     * and can never be < 0, so the previous `unlikely(__pyx_kwds_len) < 0`
     * silently ignored a negative (error) result from
     * __Pyx_NumKwargs_FASTCALL, continuing with a pending exception. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keyword (or mixed) call: collect positional args first, then let
       * __Pyx_ParseKeywords fill in the rest and reject duplicates. */
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Argument-parsing failure: release any collected argument references. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_730__pyx_unpickle_VgpuInstanceUtilizationInfo_v1(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Implementation of the module-level pickle helper
 * __pyx_unpickle_VgpuInstanceUtilizationInfo_v1.
 * Verifies the pickle checksum, creates a new instance via
 * __pyx_type.__new__(__pyx_type), and, when __pyx_state is not None,
 * restores instance state through the generated __set_state helper.
 * Returns a new reference to the reconstructed object, or NULL with a
 * Python exception set on failure.
 */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_730__pyx_unpickle_VgpuInstanceUtilizationInfo_v1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_VgpuInstanceUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = VgpuInstanceUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
*/
  /* Reject pickles produced by an incompatible layout of the class. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuInstanceUtilizationInfo_v1.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)
*/
  /* Vectorcall of VgpuInstanceUtilizationInfo_v1.__new__(__pyx_type). */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuInstanceUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = VgpuInstanceUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(VgpuInstanceUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
*/
    /* Generated 'not None' guard for the cdef call below (redundant with
     * the surrounding if, but kept by the code generator). */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = VgpuInstanceUtilizationInfo_v1.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
*/
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(VgpuInstanceUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
*/
  /* Return the reconstructed object (new reference). */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuInstanceUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

  /* function exit code */
  /* Error exit: drop temporaries, record traceback, fall through to the
   * common cleanup which releases the local object and returns NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuInstanceUtilizationInfo_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(VgpuInstanceUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level unpickling helper: restore a VgpuInstanceUtilizationInfo_v1
 * instance from its pickled state tuple.
 *   __pyx_result -- freshly constructed instance to populate
 *   __pyx_state  -- state tuple; slot 0 holds the new value for ->_data and
 *                   any remaining slots (index 1 onward) are merged back by
 *                   __Pyx_UpdateUnpickledDict (presumably into the instance
 *                   __dict__ -- NOTE(review): confirm against the helper).
 * Returns a new reference to Py_None on success, or 0/NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(VgpuInstanceUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new reference) ... */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  /* ... then swap it into ->_data, dropping the reference to the old value. */
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(VgpuInstanceUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge the remaining state entries (from index 1) back onto the result. */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(<VgpuInstanceUtilizationInfo_v1> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(VgpuInstanceUtilizationInfo_v1 __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_FieldValue(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_733__pyx_unpickle_FieldValue(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_732__pyx_unpickle_FieldValue, "__pyx_unpickle_FieldValue(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_733__pyx_unpickle_FieldValue = {"__pyx_unpickle_FieldValue", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_733__pyx_unpickle_FieldValue, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_732__pyx_unpickle_FieldValue};
/* Argument-unpacking wrapper for __pyx_unpickle_FieldValue(__pyx_type,
 * __pyx_checksum, __pyx_state).  Accepts positional and keyword arguments
 * under either the FASTCALL or the classic tuple/dict calling convention
 * (selected at compile time), converts __pyx_checksum to a C long, checks
 * that __pyx_state is a tuple (or None), then dispatches to the
 * __pyx_pf_..._732 implementation.  Returns NULL with an exception set on
 * bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_733__pyx_unpickle_FieldValue(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_FieldValue (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely().  With the GCC/Clang
       definition unlikely(x) == __builtin_expect(!!(x), 0), the original
       "if (unlikely(__pyx_kwds_len) < 0)" compared the normalised 0/1 value
       against 0 and was therefore always false, silently discarding a
       negative (error) return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_FieldValue", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three parameters are required; report any still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FieldValue", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FieldValue", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_FieldValue", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_732__pyx_unpickle_FieldValue(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_FieldValue: validate the pickle checksum,
 * construct a fresh instance via <__pyx_type>.__new__, and -- if a state
 * tuple was supplied -- restore the instance through the __set_state helper.
 * Returns a new reference to the rebuilt object, or NULL with an exception
 * set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_732__pyx_unpickle_FieldValue(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_FieldValue", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_FieldValue(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = FieldValue.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the known layouts. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = FieldValue.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)
 */
  /* Call FieldValue.__new__(__pyx_type) to allocate without running __init__. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = FieldValue.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  /* Only restore state when a (non-None) state tuple was provided. */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = FieldValue.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_FieldValue__set_state(FieldValue __pyx_result, __pyx_state: tuple):
 */
    /* Redundant with the branch condition above (generated 'not None' guard
       for the C function argument); kept for safety. */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FieldValue__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = FieldValue.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_FieldValue__set_state(FieldValue __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_FieldValue(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_FieldValue", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_FieldValue__set_state(FieldValue __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level unpickling helper: restore a FieldValue instance from its pickled
 * state tuple.
 *   __pyx_result -- freshly constructed instance to populate
 *   __pyx_state  -- state tuple; slot 0 holds the new value for ->_data and
 *                   any remaining slots (index 1 onward) are merged back by
 *                   __Pyx_UpdateUnpickledDict (presumably into the instance
 *                   __dict__ -- NOTE(review): confirm against the helper).
 * Returns a new reference to Py_None on success, or 0/NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_FieldValue__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_FieldValue__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_FieldValue__set_state(FieldValue __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new reference) ... */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  /* ... then swap it into ->_data, dropping the reference to the old value. */
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_FieldValue__set_state(FieldValue __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge the remaining state entries (from index 1) back onto the result. */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_FieldValue__set_state(<FieldValue> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_FieldValue__set_state(FieldValue __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_FieldValue__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_GridLicensableFeature(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_735__pyx_unpickle_GridLicensableFeature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_5_nvml_734__pyx_unpickle_GridLicensableFeature, "__pyx_unpickle_GridLicensableFeature(__pyx_type, long __pyx_checksum, tuple __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_5_nvml_735__pyx_unpickle_GridLicensableFeature = {"__pyx_unpickle_GridLicensableFeature", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_735__pyx_unpickle_GridLicensableFeature, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_734__pyx_unpickle_GridLicensableFeature};
/* Argument-unpacking wrapper for __pyx_unpickle_GridLicensableFeature(
 * __pyx_type, __pyx_checksum, __pyx_state).  Accepts positional and keyword
 * arguments under either the FASTCALL or the classic tuple/dict calling
 * convention (selected at compile time), converts __pyx_checksum to a C
 * long, checks that __pyx_state is a tuple (or None), then dispatches to
 * the __pyx_pf_..._734 implementation.  Returns NULL with an exception set
 * on bad arguments. */
static PyObject *__pyx_pw_4cuda_8bindings_5_nvml_735__pyx_unpickle_GridLicensableFeature(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[3] = {0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_GridLicensableFeature (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_type,&__pyx_mstate_global->__pyx_n_u_pyx_checksum,&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* FIX: the comparison must sit inside unlikely().  With the GCC/Clang
       definition unlikely(x) == __builtin_expect(!!(x), 0), the original
       "if (unlikely(__pyx_kwds_len) < 0)" compared the normalised 0/1 value
       against 0 and was therefore always false, silently discarding a
       negative (error) return from __Pyx_NumKwargs_FASTCALL. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(1, 4, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  3:
        values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2:
        values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1:
        values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__pyx_unpickle_GridLicensableFeature", 0) < (0)) __PYX_ERR(1, 4, __pyx_L3_error)
      /* All three parameters are required; report any still missing. */
      for (Py_ssize_t i = __pyx_nargs; i < 3; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_GridLicensableFeature", 1, 3, 3, i); __PYX_ERR(1, 4, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 3)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(1, 4, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(1, 4, __pyx_L3_error)
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyLong_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 4, __pyx_L3_error)
    __pyx_v___pyx_state = ((PyObject*)values[2]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_GridLicensableFeature", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 4, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_GridLicensableFeature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v___pyx_state), (&PyTuple_Type), 1, "__pyx_state", 1))) __PYX_ERR(1, 4, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_5_nvml_734__pyx_unpickle_GridLicensableFeature(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_GridLicensableFeature: validate the
 * pickle checksum, construct a fresh instance via <__pyx_type>.__new__, and
 * -- if a state tuple was supplied -- restore the instance through the
 * __set_state helper.  Returns a new reference to the rebuilt object, or
 * NULL with an exception set on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_5_nvml_734__pyx_unpickle_GridLicensableFeature(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_GridLicensableFeature", 0);

  /* "(tree fragment)":6
 * def __pyx_unpickle_GridLicensableFeature(__pyx_type, long __pyx_checksum, tuple __pyx_state):
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')             # <<<<<<<<<<<<<<
 *     __pyx_result = GridLicensableFeature.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
  /* Reject pickles whose checksum matches none of the known layouts. */
  __pyx_t_1 = __Pyx_CheckUnpickleChecksum(__pyx_v___pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, __pyx_k_data_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 6, __pyx_L1_error)

  /* "(tree fragment)":7
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = GridLicensableFeature.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)
 */
  /* Call GridLicensableFeature.__new__(__pyx_type) to allocate without
     running __init__. */
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
  __Pyx_INCREF(__pyx_t_3);
  __pyx_t_4 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v___pyx_type};
    __pyx_t_2 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_new, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
  }
  __pyx_v___pyx_result = __pyx_t_2;
  __pyx_t_2 = 0;

  /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = GridLicensableFeature.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  /* Only restore state when a (non-None) state tuple was provided. */
  __pyx_t_5 = (__pyx_v___pyx_state != ((PyObject*)Py_None));
  if (__pyx_t_5) {

    /* "(tree fragment)":9
 *     __pyx_result = GridLicensableFeature.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_GridLicensableFeature__set_state(GridLicensableFeature __pyx_result, __pyx_state: tuple):
 */
    /* Redundant with the branch condition above (generated 'not None' guard
       for the C function argument); kept for safety. */
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "cannot pass None into a C function argument that is declared 'not None'");
      __PYX_ERR(1, 9, __pyx_L1_error)
    }
    __pyx_t_2 = __pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GridLicensableFeature__set_state(((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)__pyx_v___pyx_result), __pyx_v___pyx_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* "(tree fragment)":8
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 *     __pyx_result = GridLicensableFeature.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_GridLicensableFeature__set_state(GridLicensableFeature __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_GridLicensableFeature(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_GridLicensableFeature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":11
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_GridLicensableFeature__set_state(GridLicensableFeature __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
*/

/* C-level unpickling helper: restore a GridLicensableFeature instance from
 * its pickled state tuple.
 *   __pyx_result -- freshly constructed instance to populate
 *   __pyx_state  -- state tuple; slot 0 holds the new value for ->_data and
 *                   any remaining slots (index 1 onward) are merged back by
 *                   __Pyx_UpdateUnpickledDict (presumably into the instance
 *                   __dict__ -- NOTE(review): confirm against the helper).
 * Returns a new reference to Py_None on success, or 0/NULL with a Python
 * exception set on failure. */
static PyObject *__pyx_f_4cuda_8bindings_5_nvml___pyx_unpickle_GridLicensableFeature__set_state(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_GridLicensableFeature__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_GridLicensableFeature__set_state(GridLicensableFeature __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */
  /* Fetch __pyx_state[0] (new reference) ... */
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->_data);
  /* ... then swap it into ->_data, dropping the reference to the old value. */
  __Pyx_DECREF(__pyx_v___pyx_result->_data);
  __pyx_v___pyx_result->_data = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_GridLicensableFeature__set_state(GridLicensableFeature __pyx_result, __pyx_state: tuple):
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)             # <<<<<<<<<<<<<<
 */
  /* Merge the remaining state entries (from index 1) back onto the result. */
  __pyx_t_2 = __Pyx_UpdateUnpickledDict(((PyObject *)__pyx_v___pyx_result), __pyx_v___pyx_state, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)

  /* "(tree fragment)":11
 *         __pyx_unpickle_GridLicensableFeature__set_state(<GridLicensableFeature> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_GridLicensableFeature__set_state(GridLicensableFeature __pyx_result, __pyx_state: tuple):             # <<<<<<<<<<<<<<
 *     __pyx_result._data = __pyx_state[0]
 *     __Pyx_UpdateUnpickledDict(__pyx_result, __pyx_state, 1)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings._nvml.__pyx_unpickle_GridLicensableFeature__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* #### Code section: module_exttypes ### */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfoExt_v1 __pyx_vtable_4cuda_8bindings_5_nvml_PciInfoExt_v1;

/* tp_new slot for PciInfoExt_v1: allocate the extension object, install the
 * C vtable pointer, and initialise the _owner field to a new reference to
 * None.  Returns the new object, or 0 on allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfoExt_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc slot for PciInfoExt_v1: run tp_finalize (if the type defines
 * one and the object is not already finalized), untrack from the GC, call
 * the user __dealloc__, release the _owner reference, and free the object
 * via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfoExt_v1) {
      /* The finalizer may resurrect the object; if so, abandon dealloc. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the __dealloc__ call, and bump
       the refcount so the object looks alive while user code runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  /* Drop the _owner reference before freeing the memory. */
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference from each instance to the type. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PciInfoExt_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for PciInfoExt_v1.  Each getter forwards to the Cython
 * property __get__ wrapper, and each setter forwards to __set__ when a value
 * is supplied.  Attribute deletion (v == NULL) raises NotImplementedError
 * because the generated properties define no __del__.  `ptr` is read-only
 * (getter only; see the getset table below). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_domain(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_domain(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6domain_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_3bus_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* "device_" carries a trailing underscore in the Python API, presumably to
 * avoid clashing with another name — TODO confirm against the .pyx source. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_device_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_device_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7device__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_device_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_device_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13pci_device_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_sub_system_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_sub_system_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17pci_sub_system_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_base_class(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_base_class(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_10base_class_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_sub_class(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_sub_class(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_9sub_class_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_6bus_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for PciInfoExt_v1: the public from_data/from_ptr constructors
 * plus the pickle support methods generated by Cython. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_PciInfoExt_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13PciInfoExt_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table for PciInfoExt_v1.  `ptr` has no setter (read-only); every
 * other entry pairs the getter/setter thunks defined above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_PciInfoExt_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"domain", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_domain, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_domain, PyDoc_STR("int: The PCI domain on which the device's bus resides, 0 to 0xffffffff."), 0},
  {"bus", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus, PyDoc_STR("int: The bus on which the device resides, 0 to 0xff."), 0},
  {"device_", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_device_, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_device_, PyDoc_STR("int: The device's id on the bus, 0 to 31."), 0},
  {"pci_device_id", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_device_id, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_device_id, PyDoc_STR("int: The combined 16-bit device id and 16-bit vendor id."), 0},
  {"pci_sub_system_id", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_sub_system_id, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_pci_sub_system_id, PyDoc_STR("int: The 32-bit Sub System Device ID."), 0},
  {"base_class", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_base_class, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_base_class, PyDoc_STR("int: The 8-bit PCI base class code."), 0},
  {"sub_class", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_sub_class, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_sub_class, PyDoc_STR("int: The 8-bit PCI sub class code."), 0},
  {"bus_id", __pyx_getprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus_id, __pyx_setprop_4cuda_8bindings_5_nvml_13PciInfoExt_v1_bus_id, PyDoc_STR("~_numpy.int8: (array of length 32).The tuple domain:bus:device.function PCI identifier (& NULL terminator)"), 0},
  {0, 0, 0, 0, 0}
};
/* Two alternative type definitions follow: a PyType_Spec/PyType_Slot pair
 * (heap type, used under the Limited API / type-specs build) and a fully
 * static PyTypeObject.  Both wire up the same slot functions. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_doc, (void *)PyDoc_STR("PciInfoExt_v1()\n\nEmpty-initialize an instance of `nvmlPciInfoExt_v1_t`.\n\n\n.. seealso:: `nvmlPciInfoExt_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfoExt_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1_spec = {
  "cuda.bindings._nvml.PciInfoExt_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1_slots,
};
#else

/* Static-type path: only nb_int is populated in the number protocol. */
static PyNumberMethods __pyx_tp_as_number_PciInfoExt_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: assignment only; no length or item retrieval here. */
static PyMappingMethods __pyx_tp_as_mapping_PciInfoExt_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""PciInfoExt_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_PciInfoExt_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_PciInfoExt_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("PciInfoExt_v1()\n\nEmpty-initialize an instance of `nvmlPciInfoExt_v1_t`.\n\n\n.. seealso:: `nvmlPciInfoExt_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13PciInfoExt_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_PciInfoExt_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for PciInfo; filled in during module initialisation. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PciInfo __pyx_vtable_4cuda_8bindings_5_nvml_PciInfo;

/* tp_new slot for PciInfo: allocate, install the vtable pointer, and set
 * _owner to None.  Args/kwargs are unused; __init__ handles arguments. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfo;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for PciInfo; same shape as the PciInfoExt_v1 dealloc:
 * optional finalize, GC untrack, user __dealloc__ under a temporary
 * refcount bump with exception state saved, release _owner, free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfo) {
      /* Finalizer may resurrect the object; abort deallocation if so. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);  /* heap-type instances own a reference to their type */
  #endif
}

/* tp_traverse: visit the type and the single PyObject* field _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_PciInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: reset _owner to None (not NULL) to break reference cycles. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_PciInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: `obj[i] = v` -> __setitem__; deletion (v == NULL)
 * raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PciInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: __eq__ only; Py_NE negates the __eq__ result, all other
 * operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PciInfo(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for PciInfo.  Getters forward to the Cython __get__
 * wrappers; setters forward to __set__ and raise NotImplementedError for
 * attribute deletion (v == NULL).  `ptr` is read-only. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id_legacy(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id_legacy(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13bus_id_legacy_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_domain(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6domain_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_domain(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6domain_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_bus(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3bus_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_bus(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_3bus_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_device_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7device__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_device_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7device__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_pci_device_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_pci_device_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13pci_device_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_pci_sub_system_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_pci_sub_system_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17pci_sub_system_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_6bus_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for PciInfo: from_data/from_ptr plus pickle support. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_PciInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PciInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table for PciInfo; `ptr` is read-only, the rest are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_PciInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"bus_id_legacy", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id_legacy, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id_legacy, PyDoc_STR("~_numpy.int8: (array of length 16)."), 0},
  {"domain", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_domain, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_domain, PyDoc_STR("int: "), 0},
  {"bus", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_bus, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_bus, PyDoc_STR("int: "), 0},
  {"device_", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_device_, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_device_, PyDoc_STR("int: "), 0},
  {"pci_device_id", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_pci_device_id, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_pci_device_id, PyDoc_STR("int: "), 0},
  {"pci_sub_system_id", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_pci_sub_system_id, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_pci_sub_system_id, PyDoc_STR("int: "), 0},
  {"bus_id", __pyx_getprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id, __pyx_setprop_4cuda_8bindings_5_nvml_7PciInfo_bus_id, PyDoc_STR("~_numpy.int8: (array of length 32)."), 0},
  {0, 0, 0, 0, 0}
};
/* As with PciInfoExt_v1: a PyType_Spec/PyType_Slot definition for the
 * type-specs build, and a static PyTypeObject otherwise. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_PciInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_doc, (void *)PyDoc_STR("PciInfo()\n\nEmpty-initialize an instance of `nvmlPciInfo_t`.\n\n\n.. seealso:: `nvmlPciInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_PciInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_PciInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_PciInfo_spec = {
  "cuda.bindings._nvml.PciInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_PciInfo_slots,
};
#else

/* Static-type path: only nb_int is populated in the number protocol. */
static PyNumberMethods __pyx_tp_as_number_PciInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: assignment only. */
static PyMappingMethods __pyx_tp_as_mapping_PciInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PciInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_PciInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""PciInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PciInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_PciInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_PciInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("PciInfo()\n\nEmpty-initialize an instance of `nvmlPciInfo_t`.\n\n\n.. seealso:: `nvmlPciInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_PciInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_PciInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_PciInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_PciInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_PciInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_7PciInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_PciInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Utilization __pyx_vtable_4cuda_8bindings_5_nvml_Utilization;

static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Utilization(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_Utilization;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Utilization(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Utilization) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_Utilization(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_Utilization(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for Utilization: item assignment delegates to __setitem__;
 * item deletion (v == NULL) is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Utilization(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_11__setitem__(o, i, v);
}

/* tp_richcompare for Utilization: only == and != are supported; != is derived
 * by negating the __eq__ result. All other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Utilization(PyObject *o1, PyObject *o2, int op) {
  PyObject *ret;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  ret = __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_9__eq__(o1, o2);
  if (likely(ret && ret != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(ret);
    Py_DECREF(ret);
    if (unlikely(truth < 0)) return NULL;
    ret = truth ? Py_False : Py_True;
    Py_INCREF(ret);
  }
  return ret;
}

/* Property accessors for Utilization. Getters delegate to the generated
 * __get__ wrappers; setters reject deletion (v == NULL) and otherwise
 * delegate to the generated __set__ wrappers. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11Utilization_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3ptr_1__get__(o);
  return value;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11Utilization_gpu(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3gpu_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_11Utilization_gpu(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_3gpu_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11Utilization_memory(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_6memory_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_11Utilization_memory(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_6memory_3__set__(o, v);
}

/* Method table for Utilization: from_data / from_ptr factory methods plus the
 * generated __reduce_cython__ / __setstate_cython__ wrappers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_Utilization[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11Utilization_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for Utilization: read-only "ptr" plus read/write "gpu" and "memory". */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_Utilization[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_11Utilization_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"gpu", __pyx_getprop_4cuda_8bindings_5_nvml_11Utilization_gpu, __pyx_setprop_4cuda_8bindings_5_nvml_11Utilization_gpu, PyDoc_STR("int: "), 0},
  {"memory", __pyx_getprop_4cuda_8bindings_5_nvml_11Utilization_memory, __pyx_setprop_4cuda_8bindings_5_nvml_11Utilization_memory, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used to create the Utilization type via PyType_FromSpec
 * (Limited API / type-specs builds). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_Utilization_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_doc, (void *)PyDoc_STR("Utilization()\n\nEmpty-initialize an instance of `nvmlUtilization_t`.\n\n\n.. seealso:: `nvmlUtilization_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_Utilization},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11Utilization_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_Utilization},
  {0, 0},
};
/* Type spec: fully-qualified name, instance size, itemsize 0, flags (GC, subclassable). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_Utilization_spec = {
  "cuda.bindings._nvml.Utilization",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_Utilization_slots,
};
#else

/* Number protocol for Utilization: only nb_int (__int__) is provided. */
static PyNumberMethods __pyx_tp_as_number_Utilization = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for Utilization: only item assignment is provided. */
static PyMappingMethods __pyx_tp_as_mapping_Utilization = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Utilization, /*mp_ass_subscript*/
};

/* Statically-initialized type object for non-type-specs (full C API) builds. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_Utilization = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Utilization", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Utilization, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_Utilization, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_Utilization, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("Utilization()\n\nEmpty-initialize an instance of `nvmlUtilization_t`.\n\n\n.. seealso:: `nvmlUtilization_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_Utilization, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_Utilization, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_Utilization, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_Utilization, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_Utilization, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_11Utilization_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_Utilization, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory __pyx_vtable_4cuda_8bindings_5_nvml_Memory;

/* tp_new for Memory: allocate the instance, install the vtable, and
 * initialize _owner to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Memory(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *self;
  PyObject *o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  self = (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)o;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_Memory;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return o;
}

/* tp_dealloc for Memory: run tp_finalize (if present), invoke the user-level
 * __dealloc__, drop the _owner reference, then release the object's storage. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* PEP 442: call the finalizer first; if it resurrected the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  /* Stop GC tracking before tearing down fields. */
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily bump the refcount so __dealloc__ runs on a "live" object;
     * save and restore any in-flight exception around the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_6Memory_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs builds) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for Memory: visit the type (heap types) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_Memory(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)o;
  int result = __Pyx_call_type_traverse(o, 1, v, a);
  if (result) return result;
  if (self->_owner) {
    result = (*v)(self->_owner, a);
    if (result) return result;
  }
  return 0;
}

/* GC clear for Memory: reset _owner to None, then drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_Memory(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for Memory: item assignment delegates to __setitem__;
 * item deletion (v == NULL) is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Memory(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_6Memory_11__setitem__(o, i, v);
}

/* tp_richcompare for Memory: only == and != are supported; != is derived
 * by negating the __eq__ result. All other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Memory(PyObject *o1, PyObject *o2, int op) {
  PyObject *ret;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Memory_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  ret = __pyx_pw_4cuda_8bindings_5_nvml_6Memory_9__eq__(o1, o2);
  if (likely(ret && ret != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(ret);
    Py_DECREF(ret);
    if (unlikely(truth < 0)) return NULL;
    ret = truth ? Py_False : Py_True;
    Py_INCREF(ret);
  }
  return ret;
}

/* Property accessors for Memory. Getters delegate to the generated __get__
 * wrappers; setters reject deletion (v == NULL) and otherwise delegate to
 * the generated __set__ wrappers. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Memory_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_6Memory_3ptr_1__get__(o);
  return value;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Memory_total(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_6Memory_5total_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_6Memory_total(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_6Memory_5total_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Memory_free(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4free_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_6Memory_free(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4free_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Memory_used(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4used_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_6Memory_used(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_6Memory_4used_3__set__(o, v);
}

/* Method table for Memory: from_data / from_ptr factory methods plus the
 * generated __reduce_cython__ / __setstate_cython__ wrappers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_Memory[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Memory_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for Memory: read-only "ptr" plus read/write "total", "free", "used". */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_Memory[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_6Memory_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"total", __pyx_getprop_4cuda_8bindings_5_nvml_6Memory_total, __pyx_setprop_4cuda_8bindings_5_nvml_6Memory_total, PyDoc_STR("int: "), 0},
  {"free", __pyx_getprop_4cuda_8bindings_5_nvml_6Memory_free, __pyx_setprop_4cuda_8bindings_5_nvml_6Memory_free, PyDoc_STR("int: "), 0},
  {"used", __pyx_getprop_4cuda_8bindings_5_nvml_6Memory_used, __pyx_setprop_4cuda_8bindings_5_nvml_6Memory_used, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used to create the Memory type via PyType_FromSpec
 * (Limited API / type-specs builds). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_Memory_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_doc, (void *)PyDoc_STR("Memory()\n\nEmpty-initialize an instance of `nvmlMemory_t`.\n\n\n.. seealso:: `nvmlMemory_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_Memory},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Memory_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_Memory},
  {0, 0},
};
/* Type spec: fully-qualified name, instance size, itemsize 0, flags (GC, subclassable). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_Memory_spec = {
  "cuda.bindings._nvml.Memory",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_Memory_slots,
};
#else

/* Number protocol for Memory: only nb_int (__int__) is provided. */
static PyNumberMethods __pyx_tp_as_number_Memory = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Memory_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for Memory: only item assignment is provided. */
static PyMappingMethods __pyx_tp_as_mapping_Memory = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Memory, /*mp_ass_subscript*/
};

/* Statically-initialized type object for non-type-specs (full C API) builds. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_Memory = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Memory", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Memory_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_Memory, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_Memory, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("Memory()\n\nEmpty-initialize an instance of `nvmlMemory_t`.\n\n\n.. seealso:: `nvmlMemory_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_Memory, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_Memory, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_Memory, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_Memory, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_Memory, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_6Memory_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_Memory, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Memory_v2 __pyx_vtable_4cuda_8bindings_5_nvml_Memory_v2;

/* tp_new for Memory_v2: allocate the instance, install the vtable, and
 * initialize _owner to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Memory_v2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *self;
  PyObject *o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  self = (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)o;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_Memory_v2;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return o;
}

/* tp_dealloc for Memory_v2: run tp_finalize (if present), invoke the user-level
 * __dealloc__, drop the _owner reference, then release the object's storage. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* PEP 442: call the finalizer first; if it resurrected the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory_v2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  /* Stop GC tracking before tearing down fields. */
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily bump the refcount so __dealloc__ runs on a "live" object;
     * save and restore any in-flight exception around the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs builds) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for Memory_v2: visit the type (heap types) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_Memory_v2(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)o;
  int result = __Pyx_call_type_traverse(o, 1, v, a);
  if (result) return result;
  if (self->_owner) {
    result = (*v)(self->_owner, a);
    if (result) return result;
  }
  return 0;
}

/* GC clear for Memory_v2: reset _owner to None, then drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_Memory_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for Memory_v2: item assignment delegates to __setitem__;
 * item deletion (v == NULL) is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Memory_v2(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_11__setitem__(o, i, v);
}

/* tp_richcompare for Memory_v2: only == and != are supported; != is derived
 * by negating the __eq__ result. All other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Memory_v2(PyObject *o1, PyObject *o2, int op) {
  PyObject *ret;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  ret = __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_9__eq__(o1, o2);
  if (likely(ret && ret != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(ret);
    Py_DECREF(ret);
    if (unlikely(truth < 0)) return NULL;
    ret = truth ? Py_False : Py_True;
    Py_INCREF(ret);
  }
  return ret;
}

/* Property accessors for Memory_v2. Getters delegate to the generated __get__
 * wrappers; setters reject deletion (v == NULL) and otherwise delegate to
 * the generated __set__ wrappers. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_3ptr_1__get__(o);
  return value;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7version_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_total(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5total_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_total(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5total_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_free(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4free_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_free(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4free_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_used(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4used_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_used(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    /* Attribute deletion is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_4used_3__set__(o, v);
}

/* Method table for Memory_v2: from_data / from_ptr factory methods plus the
 * generated __reduce_cython__ / __setstate_cython__ wrappers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_Memory_v2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9Memory_v2_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for Memory_v2: read-only "ptr" plus read/write "version",
 * "total", "free", "used". */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_Memory_v2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_version, __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_version, PyDoc_STR("int: "), 0},
  {"total", __pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_total, __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_total, PyDoc_STR("int: "), 0},
  {"free", __pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_free, __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_free, PyDoc_STR("int: "), 0},
  {"used", __pyx_getprop_4cuda_8bindings_5_nvml_9Memory_v2_used, __pyx_setprop_4cuda_8bindings_5_nvml_9Memory_v2_used, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used to create the Memory_v2 type via PyType_FromSpec
 * (Limited API / type-specs builds). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_Memory_v2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_doc, (void *)PyDoc_STR("Memory_v2()\n\nEmpty-initialize an instance of `nvmlMemory_v2_t`.\n\n\n.. seealso:: `nvmlMemory_v2_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_Memory_v2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_Memory_v2},
  {0, 0},
};
/* Type spec: fully-qualified name, instance size, itemsize 0, flags (GC, subclassable). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_Memory_v2_spec = {
  "cuda.bindings._nvml.Memory_v2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_Memory_v2_slots,
};
#else

/* Number protocol for Memory_v2: only nb_int (__int__) is provided. */
static PyNumberMethods __pyx_tp_as_number_Memory_v2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for Memory_v2: only item assignment is provided. */
static PyMappingMethods __pyx_tp_as_mapping_Memory_v2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Memory_v2, /*mp_ass_subscript*/
};

/* Statically allocated type object for cuda.bindings._nvml.Memory_v2.
 * Compiled only when CYTHON_USE_TYPE_SPECS is disabled; otherwise the
 * PyType_Spec above is used instead. The trailing slots are guarded by
 * version/implementation checks, so field order must not be changed. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_Memory_v2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Memory_v2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Memory_v2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_Memory_v2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_Memory_v2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("Memory_v2()\n\nEmpty-initialize an instance of `nvmlMemory_v2_t`.\n\n\n.. seealso:: `nvmlMemory_v2_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_Memory_v2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_Memory_v2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_Memory_v2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_Memory_v2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_Memory_v2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_9Memory_v2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_Memory_v2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BAR1Memory __pyx_vtable_4cuda_8bindings_5_nvml_BAR1Memory;

/* tp_new for BAR1Memory: allocate the instance, install the vtable
 * pointer, and initialize the _owner field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_BAR1Memory(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  {
    struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *self =
        (struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)obj;
    self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_BAR1Memory;
    Py_INCREF(Py_None);
    self->_owner = Py_None;
  }
  return obj;
}

/* Deallocator for BAR1Memory.
 * Runs tp_finalize first (may resurrect the object), untracks from the
 * GC, calls the user-level __dealloc__ with the refcount temporarily
 * bumped so the call cannot trigger a recursive dealloc, drops the
 * _owner reference, then frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BAR1Memory(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BAR1Memory) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    /* Preserve any pending exception across the __dealloc__ call. */
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Instances of heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for BAR1Memory: visit the type object (for heap types)
 * and the single Python-object field, _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_BAR1Memory(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear for BAR1Memory: reset _owner to None, releasing the old
 * reference only after the new value is in place. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_BAR1Memory(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *)o;
  PyObject *old = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old);
  return 0;
}

/* mp_ass_subscript shim for BAR1Memory: delegates assignment to
 * __setitem__; deletion (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BAR1Memory(PyObject *o, PyObject *i, PyObject *v) {
  __Pyx_TypeName tn;
  if (v)
    return __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_11__setitem__(o, i, v);
  tn = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
  PyErr_Format(PyExc_NotImplementedError,
    "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tn);
  __Pyx_DECREF_TypeName(tn);
  return -1;
}

/* Rich comparison for BAR1Memory: Py_EQ delegates to __eq__; Py_NE is
 * synthesized by inverting __eq__'s result; all other ops return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_BAR1Memory(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9__eq__(o1, o2);
  if (op == Py_NE) {
    PyObject *eq = __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9__eq__(o1, o2);
    if (likely(eq && eq != Py_NotImplemented)) {
      int truth = __Pyx_PyObject_IsTrue(eq);
      Py_DECREF(eq);
      if (unlikely(truth < 0)) return NULL;
      eq = truth ? Py_False : Py_True;
      Py_INCREF(eq);
    }
    return eq;
  }
  return __Pyx_NewRef(Py_NotImplemented);
}

/* Getter for the read-only "ptr" property of BAR1Memory. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_3ptr_1__get__(o);
  return value;
}

/* Getter for the "bar1total" property of BAR1Memory. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1total(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_1__get__(o);
  return value;
}

/* Setter for the "bar1total" property; deletion is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1total(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1total_3__set__(o, v);
}

/* Getter for the "bar1free" property of BAR1Memory. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1free(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_1__get__(o);
  return value;
}

/* Setter for the "bar1free" property; deletion is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1free(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_8bar1free_3__set__(o, v);
}

/* Getter for the "bar1_used" property of BAR1Memory. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1_used(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_1__get__(o);
  return value;
}

/* Setter for the "bar1_used" property; deletion is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1_used(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_9bar1_used_3__set__(o, v);
}

/* Method table for BAR1Memory: the from_data/from_ptr alternate
 * constructors plus the Cython-generated pickle-protocol helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_BAR1Memory[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10BAR1Memory_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for BAR1Memory: "ptr" is read-only (no setter); the
 * field properties delegate to the generated get/set wrappers above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_BAR1Memory[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"bar1total", __pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1total, __pyx_setprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1total, PyDoc_STR("int: "), 0},
  {"bar1free", __pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1free, __pyx_setprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1free, PyDoc_STR("int: "), 0},
  {"bar1_used", __pyx_getprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1_used, __pyx_setprop_4cuda_8bindings_5_nvml_10BAR1Memory_bar1_used, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for BAR1Memory; consumed by PyType_FromSpec when
 * CYTHON_USE_TYPE_SPECS is enabled. Terminated by the {0, 0} sentinel. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_BAR1Memory_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_doc, (void *)PyDoc_STR("BAR1Memory()\n\nEmpty-initialize an instance of `nvmlBAR1Memory_t`.\n\n\n.. seealso:: `nvmlBAR1Memory_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_BAR1Memory},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_BAR1Memory},
  {0, 0},
};
/* Heap-type spec for cuda.bindings._nvml.BAR1Memory; paired with the
 * slot table above when CYTHON_USE_TYPE_SPECS is enabled. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_BAR1Memory_spec = {
  "cuda.bindings._nvml.BAR1Memory",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_BAR1Memory_slots,
};
#else

/* Number protocol table for BAR1Memory: only nb_int (__int__) is
 * implemented; every other numeric slot is disabled. */
static PyNumberMethods __pyx_tp_as_number_BAR1Memory = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table for BAR1Memory: only item assignment is
 * exposed (the shim rejects deletion); no length or subscript lookup. */
static PyMappingMethods __pyx_tp_as_mapping_BAR1Memory = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BAR1Memory, /*mp_ass_subscript*/
};

/* Statically allocated type object for cuda.bindings._nvml.BAR1Memory.
 * Compiled only when CYTHON_USE_TYPE_SPECS is disabled; otherwise the
 * PyType_Spec above is used instead. The trailing slots are guarded by
 * version/implementation checks, so field order must not be changed. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_BAR1Memory = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""BAR1Memory", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_BAR1Memory, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_BAR1Memory, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("BAR1Memory()\n\nEmpty-initialize an instance of `nvmlBAR1Memory_t`.\n\n\n.. seealso:: `nvmlBAR1Memory_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_10BAR1Memory_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_BAR1Memory, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessInfo __pyx_vtable_4cuda_8bindings_5_nvml_ProcessInfo;

/* tp_new for ProcessInfo: allocate the instance, install the vtable
 * pointer, and initialize the _data field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  {
    struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *self =
        (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)obj;
    self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessInfo;
    Py_INCREF(Py_None);
    self->_data = Py_None;
  }
  return obj;
}

/* Deallocator for ProcessInfo.
 * Runs tp_finalize first (may resurrect the object), untracks from the
 * GC, drops the _data reference, then frees the memory. Unlike
 * BAR1Memory, there is no user-level __dealloc__ to call here. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Instances of heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for ProcessInfo: visit the type object (for heap types)
 * and the single Python-object field, _data. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_data) {
    err = v(self->_data, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear for ProcessInfo: reset _data to None, releasing the old
 * reference only after the new value is in place. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *)o;
  PyObject *old = (PyObject *)self->_data;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old);
  return 0;
}

/* sq_item shim for ProcessInfo: implements integer indexing (obj[i])
 * by boxing the index and delegating to the mapping mp_subscript. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessInfo(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript shim for ProcessInfo: delegates assignment to
 * __setitem__; deletion (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessInfo(PyObject *o, PyObject *i, PyObject *v) {
  __Pyx_TypeName tn;
  if (v)
    return __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_13__setitem__(o, i, v);
  tn = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
  PyErr_Format(PyExc_NotImplementedError,
    "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tn);
  __Pyx_DECREF_TypeName(tn);
  return -1;
}

/* Rich comparison for ProcessInfo: Py_EQ delegates to __eq__; Py_NE is
 * synthesized by inverting __eq__'s result; all other ops return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessInfo(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_9__eq__(o1, o2);
  if (op == Py_NE) {
    PyObject *eq = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_9__eq__(o1, o2);
    if (likely(eq && eq != Py_NotImplemented)) {
      int truth = __Pyx_PyObject_IsTrue(eq);
      Py_DECREF(eq);
      if (unlikely(truth < 0)) return NULL;
      eq = truth ? Py_False : Py_True;
      Py_INCREF(eq);
    }
    return eq;
  }
  return __Pyx_NewRef(Py_NotImplemented);
}

/* Getter for the read-only "ptr" property of ProcessInfo. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3ptr_1__get__(o);
  return value;
}

/* Getter for the "pid" property of ProcessInfo. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_pid(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_1__get__(o);
  return value;
}

/* Setter for the "pid" property; deletion is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3pid_3__set__(o, v);
}

/* Getter for the "used_gpu_memory" property of ProcessInfo. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_used_gpu_memory(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_1__get__(o);
  return value;
}

/* Setter for the "used_gpu_memory" property; deletion is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_used_gpu_memory(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15used_gpu_memory_3__set__(o, v);
}

/* Getter for the "gpu_instance_id" property of ProcessInfo. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_gpu_instance_id(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_1__get__(o);
  return value;
}

/* Setter for the "gpu_instance_id" property; deletion is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_gpu_instance_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15gpu_instance_id_3__set__(o, v);
}

/* Getter for the "compute_instance_id" property of ProcessInfo. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_compute_instance_id(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_1__get__(o);
  return value;
}

/* Setter for the "compute_instance_id" property; deletion is
 * unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_compute_instance_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19compute_instance_id_3__set__(o, v);
}

/* Getter for the read-only "_data" property of ProcessInfo. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo__data(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5_data_1__get__(o);
  return value;
}

/* Method table for ProcessInfo: the from_data/from_ptr alternate
 * constructors plus the Cython-generated pickle-protocol helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ProcessInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11ProcessInfo_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for ProcessInfo: "ptr" and "_data" are read-only (no
 * setter); the field properties delegate to the generated wrappers. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ProcessInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_pid, __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_pid, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"used_gpu_memory", __pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_used_gpu_memory, __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_used_gpu_memory, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"gpu_instance_id", __pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_gpu_instance_id, __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_gpu_instance_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"compute_instance_id", __pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo_compute_instance_id, __pyx_setprop_4cuda_8bindings_5_nvml_11ProcessInfo_compute_instance_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_11ProcessInfo__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for ProcessInfo; consumed by PyType_FromSpec when
 * CYTHON_USE_TYPE_SPECS is enabled. Unlike the plain-struct types above,
 * ProcessInfo also exposes __len__ and __getitem__ (array semantics).
 * Terminated by the {0, 0} sentinel. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ProcessInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_doc, (void *)PyDoc_STR("ProcessInfo(size=1)\n\nEmpty-initialize an array of `nvmlProcessInfo_t`.\n\nThe resulting object is of length `size` and of dtype `process_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ProcessInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessInfo},
  {0, 0},
};
/* Heap-type spec for cuda.bindings._nvml.ProcessInfo; paired with the
 * slot table above when CYTHON_USE_TYPE_SPECS is enabled. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ProcessInfo_spec = {
  "cuda.bindings._nvml.ProcessInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ProcessInfo_slots,
};
#else

/* Number protocol table for ProcessInfo: only nb_int (__int__) is
 * implemented; every other numeric slot is disabled. */
static PyNumberMethods __pyx_tp_as_number_ProcessInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol table for ProcessInfo: __len__ and integer indexing
 * (via the sq_item shim); all other sequence slots are disabled. */
static PySequenceMethods __pyx_tp_as_sequence_ProcessInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_ProcessInfo, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol table for ProcessInfo: __len__, __getitem__, and
 * __setitem__ (deletion is rejected by the shim). */
static PyMappingMethods __pyx_tp_as_mapping_ProcessInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessInfo, /*mp_ass_subscript*/
};

/* Statically allocated type object for cuda.bindings._nvml.ProcessInfo.
 * Compiled only when CYTHON_USE_TYPE_SPECS is disabled; otherwise the
 * PyType_Spec above is used instead. The trailing slots are guarded by
 * version/implementation checks, so field order must not be changed. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ProcessInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ProcessInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ProcessInfo, /*tp_as_number*/
  &__pyx_tp_as_sequence_ProcessInfo, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ProcessInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ProcessInfo(size=1)\n\nEmpty-initialize an array of `nvmlProcessInfo_t`.\n\nThe resulting object is of length `size` and of dtype `process_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_11ProcessInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ProcessInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetail_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ProcessDetail_v1;

/* tp_new for ProcessDetail_v1: allocate the instance, install the
 * vtable pointer, and initialize the _data field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  {
    struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *self =
        (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)obj;
    self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetail_v1;
    Py_INCREF(Py_None);
    self->_data = Py_None;
  }
  return obj;
}

/* Deallocator for ProcessDetail_v1.
 * Runs tp_finalize first (may resurrect the object), untracks from the
 * GC, drops the _data reference, then frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetail_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Instances of heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for ProcessDetail_v1: visit the type object (for heap
 * types) and the single Python-object field, _data. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_data) {
    err = v(self->_data, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear for ProcessDetail_v1: reset _data to None, releasing the old
 * reference only after the new value is in place. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *)o;
  PyObject *old = (PyObject *)self->_data;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old);
  return 0;
}

/* sq_item shim for ProcessDetail_v1: implements integer indexing
 * (obj[i]) by boxing the index and delegating to mp_subscript. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for ProcessDetail_v1: v != NULL means item assignment
 * (dispatch to __setitem__); v == NULL means 'del obj[i]', which this type
 * does not support. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for ProcessDetail_v1: only __eq__ is implemented in the
 * Cython class; Py_NE is synthesized by negating the __eq__ result (unless
 * __eq__ returned NotImplemented), and all other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessDetail_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        /* Invert the truth value of the __eq__ result. */
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for ProcessDetail_v1: each getter/setter forwards to the
 * corresponding Cython-generated __get__/__set__ wrapper. Setters reject
 * attribute deletion (v == NULL) with NotImplementedError("__del__"). */

/* Getter for read-only 'ptr'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3ptr_1__get__(o);
}

/* Getter for 'pid'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_pid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_1__get__(o);
}

/* Setter for 'pid'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3pid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'used_gpu_memory'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_memory(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_1__get__(o);
}

/* Setter for 'used_gpu_memory'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_memory(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15used_gpu_memory_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'gpu_instance_id'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_gpu_instance_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_1__get__(o);
}

/* Setter for 'gpu_instance_id'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_gpu_instance_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15gpu_instance_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'compute_instance_id'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_compute_instance_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_1__get__(o);
}

/* Setter for 'compute_instance_id'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_compute_instance_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19compute_instance_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'used_gpu_cc_protected_memory'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_cc_protected_memory(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_1__get__(o);
}

/* Setter for 'used_gpu_cc_protected_memory'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_cc_protected_memory(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_28used_gpu_cc_protected_memory_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for read-only internal '_data'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5_data_1__get__(o);
}

/* Method table for ProcessDetail_v1 (FASTCALL|KEYWORDS calling convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ProcessDetail_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16ProcessDetail_v1_20__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Get/set descriptor table for ProcessDetail_v1; entries with a NULL setter
 * ('ptr', '_data') are read-only properties. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ProcessDetail_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_pid, __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_pid, PyDoc_STR("Union[~_numpy.uint32, int]: Process ID."), 0},
  {"used_gpu_memory", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_memory, __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_memory, PyDoc_STR("Union[~_numpy.uint64, int]: Amount of used GPU memory in bytes. Under WDDM, NVML_VALUE_NOT_AVAILABLE is always reported because Windows KMD manages all the memory and not the NVIDIA driver"), 0},
  {"gpu_instance_id", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_gpu_instance_id, __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_gpu_instance_id, PyDoc_STR("Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is."), 0},
  {"compute_instance_id", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_compute_instance_id, __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_compute_instance_id, PyDoc_STR("Union[~_numpy.uint32, int]: If MIG is enabled, stores a valid compute instance ID. computeInstanceId."), 0},
  {"used_gpu_cc_protected_memory", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_cc_protected_memory, __pyx_setprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1_used_gpu_cc_protected_memory, PyDoc_STR("Union[~_numpy.uint64, int]: Amount of used GPU conf compute protected memory in bytes."), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_16ProcessDetail_v1__data, 0, 0, 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot array for ProcessDetail_v1, used when the type is created via
 * PyType_FromSpec (CYTHON_USE_TYPE_SPECS builds). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ProcessDetail_v1(size=1)\n\nEmpty-initialize an array of `nvmlProcessDetail_v1_t`.\n\nThe resulting object is of length `size` and of dtype `process_detail_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessDetail_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetail_v1},
  {0, 0},  /* sentinel */
};
/* PyType_Spec for ProcessDetail_v1 (name, basicsize, itemsize=0, flags,
 * slots). CHECKTYPES/HAVE_NEWBUFFER are Py2-era flag names — presumably
 * defined to 0 by Cython's compatibility shims; verify if porting. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1_spec = {
  "cuda.bindings._nvml.ProcessDetail_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1_slots,
};
#else

/* Number protocol for ProcessDetail_v1 (static-PyTypeObject builds): only
 * nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_ProcessDetail_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol for ProcessDetail_v1: length via __len__, item access
 * via the sq_item shim (which delegates to __getitem__). */
static PySequenceMethods __pyx_tp_as_sequence_ProcessDetail_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol for ProcessDetail_v1: __len__, __getitem__, __setitem__. */
static PyMappingMethods __pyx_tp_as_mapping_ProcessDetail_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*mp_ass_subscript*/
};

/* Statically-allocated PyTypeObject for ProcessDetail_v1, used when
 * CYTHON_USE_TYPE_SPECS is off. Mirrors the slot array above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ProcessDetail_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ProcessDetail_v1, /*tp_as_number*/
  &__pyx_tp_as_sequence_ProcessDetail_v1, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ProcessDetail_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ProcessDetail_v1(size=1)\n\nEmpty-initialize an array of `nvmlProcessDetail_v1_t`.\n\nThe resulting object is of length `size` and of dtype `process_detail_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessDetail_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_16ProcessDetail_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetail_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Vtable storage for DeviceAttributes' C-level methods. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAttributes __pyx_vtable_4cuda_8bindings_5_nvml_DeviceAttributes;

/* tp_new for DeviceAttributes: allocate the instance, install the vtable
 * pointer, and initialize the owned '_owner' field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAttributes(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAttributes;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for DeviceAttributes: run tp_finalize (if any), untrack from the
 * GC, call the Cython __dealloc__ under a temporary refcount bump, drop
 * '_owner', then free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAttributes(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAttributes) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save any pending exception and temporarily resurrect the object
     * (refcount 0 -> 1) so __dealloc__ can safely touch it; restore both
     * afterwards. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types: release the instance's reference to its type. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for DeviceAttributes: visit the type (Cython helper) and the
 * single owned PyObject field '_owner'. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceAttributes(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear for DeviceAttributes: swap '_owner' to None (keeping the slot a
 * valid object) before dropping the old value, breaking reference cycles. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceAttributes(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for DeviceAttributes: assignment dispatches to
 * __setitem__; deletion (v == NULL) is unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceAttributes(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for DeviceAttributes: __eq__ for Py_EQ; Py_NE synthesized
 * by negating __eq__ (unless NotImplemented); other ops NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceAttributes(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        /* Invert the truth value of the __eq__ result. */
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for DeviceAttributes: each getter/setter forwards to the
 * corresponding Cython-generated __get__/__set__ wrapper. Setters reject
 * attribute deletion (v == NULL) with NotImplementedError("__del__"). */

/* Getter for read-only 'ptr'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_3ptr_1__get__(o);
}

/* Getter for 'multiprocessor_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_multiprocessor_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_1__get__(o);
}

/* Setter for 'multiprocessor_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_multiprocessor_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20multiprocessor_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'shared_copy_engine_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_copy_engine_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_1__get__(o);
}

/* Setter for 'shared_copy_engine_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_copy_engine_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24shared_copy_engine_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'shared_decoder_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_decoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_1__get__(o);
}

/* Setter for 'shared_decoder_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_decoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_decoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'shared_encoder_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_encoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_1__get__(o);
}

/* Setter for 'shared_encoder_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_encoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_20shared_encoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'shared_jpeg_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_jpeg_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_1__get__(o);
}

/* Setter for 'shared_jpeg_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_jpeg_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17shared_jpeg_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'shared_ofa_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_ofa_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_1__get__(o);
}

/* Setter for 'shared_ofa_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_ofa_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_16shared_ofa_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'gpu_instance_slice_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_gpu_instance_slice_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_1__get__(o);
}

/* Setter for 'gpu_instance_slice_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_gpu_instance_slice_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_24gpu_instance_slice_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'compute_instance_slice_count'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_compute_instance_slice_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_1__get__(o);
}

/* Setter for 'compute_instance_slice_count'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_compute_instance_slice_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_28compute_instance_slice_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for 'memory_size_mb'. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_memory_size_mb(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_1__get__(o);
}

/* Setter for 'memory_size_mb'; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_memory_size_mb(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_14memory_size_mb_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for DeviceAttributes (FASTCALL|KEYWORDS calling convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_DeviceAttributes[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16DeviceAttributes_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Get/set descriptor table for DeviceAttributes; 'ptr' (NULL setter) is
 * read-only. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_DeviceAttributes[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"multiprocessor_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_multiprocessor_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_multiprocessor_count, PyDoc_STR("int: "), 0},
  {"shared_copy_engine_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_copy_engine_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_copy_engine_count, PyDoc_STR("int: "), 0},
  {"shared_decoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_decoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_decoder_count, PyDoc_STR("int: "), 0},
  {"shared_encoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_encoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_encoder_count, PyDoc_STR("int: "), 0},
  {"shared_jpeg_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_jpeg_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_jpeg_count, PyDoc_STR("int: "), 0},
  {"shared_ofa_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_ofa_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_shared_ofa_count, PyDoc_STR("int: "), 0},
  {"gpu_instance_slice_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_gpu_instance_slice_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_gpu_instance_slice_count, PyDoc_STR("int: "), 0},
  {"compute_instance_slice_count", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_compute_instance_slice_count, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_compute_instance_slice_count, PyDoc_STR("int: "), 0},
  {"memory_size_mb", __pyx_getprop_4cuda_8bindings_5_nvml_16DeviceAttributes_memory_size_mb, __pyx_setprop_4cuda_8bindings_5_nvml_16DeviceAttributes_memory_size_mb, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot array for DeviceAttributes (CYTHON_USE_TYPE_SPECS builds).
 * Unlike ProcessDetail_v1 there are no length/subscript-read slots here. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_doc, (void *)PyDoc_STR("DeviceAttributes()\n\nEmpty-initialize an instance of `nvmlDeviceAttributes_t`.\n\n\n.. seealso:: `nvmlDeviceAttributes_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_DeviceAttributes},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAttributes},
  {0, 0},  /* sentinel */
};
/* PyType_Spec for DeviceAttributes (name, basicsize, itemsize=0, flags,
 * slots). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes_spec = {
  "cuda.bindings._nvml.DeviceAttributes",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes_slots,
};
#else

/* Number protocol for DeviceAttributes (static-PyTypeObject builds): only
 * nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_DeviceAttributes = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for DeviceAttributes: only item assignment (__setitem__)
 * is implemented; no length or subscript read. */
static PyMappingMethods __pyx_tp_as_mapping_DeviceAttributes = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceAttributes, /*mp_ass_subscript*/
};

/* Statically-allocated PyTypeObject for DeviceAttributes, used when
 * CYTHON_USE_TYPE_SPECS is off. Mirrors the slot array above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""DeviceAttributes", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_DeviceAttributes, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_DeviceAttributes, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("DeviceAttributes()\n\nEmpty-initialize an instance of `nvmlDeviceAttributes_t`.\n\n\n.. seealso:: `nvmlDeviceAttributes_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_16DeviceAttributes_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAttributes, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Vtable storage for C2cModeInfo_v1's C-level methods. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_C2cModeInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_C2cModeInfo_v1;

/* tp_new for C2cModeInfo_v1: allocate the instance, install the vtable
 * pointer, and initialize the owned '_owner' field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_C2cModeInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyObject *o) {
  /* tp_dealloc slot: run tp_finalize (if present), call the generated
     __dealloc__ wrapper, release _owner, then free the object. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance to run first; if it resurrects the object,
     PyObject_CallFinalizerFromDealloc() returns nonzero and we must abort
     the deallocation. Only done when the dealloc slot is really ours. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_C2cModeInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user-level __dealloc__ call;
       the refcount is temporarily raised so the object appears alive while
       that code runs, then restored. Order here is deliberate. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (spec-built) own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyObject *o, visitproc v, void *a) {
  /* GC tp_traverse slot: visit the type object (for heap types) and the
     _owner reference so the cycle collector can see them. */
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyObject *o) {
  /* GC tp_clear slot: break the cycle through _owner. The field is reset
     to Py_None (not NULL) before the old value is released, keeping the
     "never NULL" invariant established in tp_new. */
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript slot: stores (v != NULL) delegate to the generated
     __setitem__ wrapper; deletion (v == NULL) is reported as unsupported. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_11__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_C2cModeInfo_v1(PyObject *o1, PyObject *o2, int op) {
  /* tp_richcompare slot: only == and != are defined; != is derived by
     negating the result of the generated __eq__. All other comparison
     operators return NotImplemented. */
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) and NotImplemented pass through unchanged. */
  return eq_result;
}

/* Property accessor shims for C2cModeInfo_v1: each forwards to the generated
   __get__/__set__ wrapper for the corresponding Cython property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_is_c2c_enabled(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_is_c2c_enabled(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* v == NULL means attribute deletion, which is not implemented. */
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14is_c2c_enabled_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for C2cModeInfo_v1 (fastcall + keywords calling convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_C2cModeInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table; "ptr" is read-only (no setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_C2cModeInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"is_c2c_enabled", __pyx_getprop_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_is_c2c_enabled, __pyx_setprop_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_is_c2c_enabled, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Spec-based type construction (CYTHON_USE_TYPE_SPECS branch): the same
   slots as the static PyTypeObject in the #else branch, expressed as a
   PyType_Slot table for PyType_FromSpec-style creation. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("C2cModeInfo_v1()\n\nEmpty-initialize an instance of `nvmlC2cModeInfo_v1_t`.\n\n\n.. seealso:: `nvmlC2cModeInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_C2cModeInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1_spec = {
  "cuda.bindings._nvml.C2cModeInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1_slots,
};
#else

/* tp_as_number table: only nb_int (__int__) is populated; every other
   numeric operation is left unsupported (0). */
static PyNumberMethods __pyx_tp_as_number_C2cModeInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* tp_as_mapping table: subscript assignment only; no length or lookup. */
static PyMappingMethods __pyx_tp_as_mapping_C2cModeInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*mp_ass_subscript*/
};

/* Statically-initialised PyTypeObject (non-spec branch); mirrors the
   PyType_Spec definition in the CYTHON_USE_TYPE_SPECS branch above.
   Trailing slots are guarded by version/implementation #ifs because the
   PyTypeObject layout grew across CPython/PyPy releases. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""C2cModeInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_C2cModeInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_C2cModeInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("C2cModeInfo_v1()\n\nEmpty-initialize an instance of `nvmlC2cModeInfo_v1_t`.\n\n\n.. seealso:: `nvmlC2cModeInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_C2cModeInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RowRemapperHistogramValues __pyx_vtable_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;

static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  /* tp_new slot: allocate the extension object, install the method vtable,
     and initialise _owner to Py_None (non-NULL placeholder; see tp_clear). */
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyObject *o) {
  /* tp_dealloc slot: run tp_finalize (if present), call the generated
     __dealloc__ wrapper, release _owner, then free the object. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* tp_finalize may resurrect the object; if PyObject_CallFinalizerFromDealloc
     reports that, deallocation must be abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception around __dealloc__; the refcount is
       temporarily raised so the object looks alive while user code runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (spec-built) own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyObject *o, visitproc v, void *a) {
  /* GC tp_traverse slot: visit the type object (for heap types) and _owner. */
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyObject *o) {
  /* GC tp_clear slot: break cycles through _owner, resetting it to Py_None
     (not NULL) before releasing the previous reference. */
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript slot: stores (v != NULL) delegate to the generated
     __setitem__ wrapper; deletion (v == NULL) is reported as unsupported. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_11__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_RowRemapperHistogramValues(PyObject *o1, PyObject *o2, int op) {
  /* tp_richcompare slot: only == and != are defined; != is derived by
     negating the result of the generated __eq__. All other comparison
     operators return NotImplemented. */
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) and NotImplemented pass through unchanged. */
  return eq_result;
}

/* Property accessor shims for RowRemapperHistogramValues: each getter/setter
   forwards to the generated __get__/__set__ wrapper of the corresponding
   Cython property. All setters reject attribute deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3ptr_1__get__(o);
}

/* "max_" — trailing underscore avoids shadowing the Python builtin max. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_max_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_max_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4max__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_high(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_high(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4high_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_partial(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_partial(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7partial_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_low(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_low(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_3low_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_none(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_none(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_4none_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for RowRemapperHistogramValues (fastcall + keywords). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_RowRemapperHistogramValues[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table; "ptr" is read-only (no setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_RowRemapperHistogramValues[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"max_", __pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_max_, __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_max_, PyDoc_STR("int: "), 0},
  {"high", __pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_high, __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_high, PyDoc_STR("int: "), 0},
  {"partial", __pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_partial, __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_partial, PyDoc_STR("int: "), 0},
  {"low", __pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_low, __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_low, PyDoc_STR("int: "), 0},
  {"none", __pyx_getprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_none, __pyx_setprop_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_none, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Spec-based type construction (CYTHON_USE_TYPE_SPECS branch): same slots
   as the static PyTypeObject in the #else branch, as a PyType_Slot table. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_doc, (void *)PyDoc_STR("RowRemapperHistogramValues()\n\nEmpty-initialize an instance of `nvmlRowRemapperHistogramValues_t`.\n\n\n.. seealso:: `nvmlRowRemapperHistogramValues_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_RowRemapperHistogramValues},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues_spec = {
  "cuda.bindings._nvml.RowRemapperHistogramValues",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues_slots,
};
#else

/* tp_as_number table: only nb_int (__int__) is populated; every other
   numeric operation is left unsupported (0). */
static PyNumberMethods __pyx_tp_as_number_RowRemapperHistogramValues = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* tp_as_mapping table: subscript assignment only; no length or lookup. */
static PyMappingMethods __pyx_tp_as_mapping_RowRemapperHistogramValues = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*mp_ass_subscript*/
};

/* Statically-initialised PyTypeObject (non-spec branch); mirrors the
   PyType_Spec definition in the CYTHON_USE_TYPE_SPECS branch above.
   Trailing slots are guarded by version/implementation #ifs because the
   PyTypeObject layout grew across CPython/PyPy releases. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""RowRemapperHistogramValues", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_RowRemapperHistogramValues, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_RowRemapperHistogramValues, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("RowRemapperHistogramValues()\n\nEmpty-initialize an instance of `nvmlRowRemapperHistogramValues_t`.\n\n\n.. seealso:: `nvmlRowRemapperHistogramValues_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipInfo __pyx_vtable_4cuda_8bindings_5_nvml_BridgeChipInfo;

static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  /* tp_new slot: allocate the extension object, install the method vtable,
     and initialise _data to Py_None (non-NULL placeholder; note this type
     holds a _data field, unlike the _owner-based types above). */
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipInfo;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipInfo(PyObject *o) {
  /* tp_dealloc slot: run tp_finalize (if present), release _data, then free.
     Unlike the types above, no user-level __dealloc__ wrapper is invoked. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* tp_finalize may resurrect the object; if PyObject_CallFinalizerFromDealloc
     reports that, deallocation must be abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (spec-built) own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_BridgeChipInfo(PyObject *o, visitproc v, void *a) {
  /* GC tp_traverse slot: visit the type object (for heap types) and _data. */
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_BridgeChipInfo(PyObject *o) {
  /* GC tp_clear slot: break cycles through _data, resetting it to Py_None
     (not NULL) before releasing the previous reference. */
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_BridgeChipInfo(PyObject *o, Py_ssize_t i) {
  /* sq_item slot: implements sequence indexing by boxing the index into a
     Python int and delegating to the type's mp_subscript slot, so sequence
     and mapping access share one __getitem__ implementation. The slot is
     fetched differently depending on whether direct slot access is allowed. */
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BridgeChipInfo(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript slot: stores (v != NULL) delegate to the generated
     __setitem__ wrapper; deletion (v == NULL) is reported as unsupported. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_13__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_BridgeChipInfo(PyObject *o1, PyObject *o2, int op) {
  /* tp_richcompare slot: only == and != are defined; != is derived by
     negating the result of the generated __eq__. All other comparison
     operators return NotImplemented. */
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) and NotImplemented pass through unchanged. */
  return eq_result;
}

/* Property accessor shims for BridgeChipInfo: each getter/setter forwards to
   the generated __get__/__set__ wrapper of the corresponding Cython
   property. Setters reject attribute deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_4type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_fw_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_fw_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_10fw_version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Read-only accessor for the internal _data attribute. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5_data_1__get__(o);
}

/* Method table for BridgeChipInfo (fastcall + keywords). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_BridgeChipInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14BridgeChipInfo_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table; "ptr" and "_data" are read-only (no setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_BridgeChipInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"type", __pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_type, __pyx_setprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_type, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"fw_version", __pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_fw_version, __pyx_setprop_4cuda_8bindings_5_nvml_14BridgeChipInfo_fw_version, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_14BridgeChipInfo__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for creating `BridgeChipInfo` as a heap type via
 * PyType_FromSpec (CYTHON_USE_TYPE_SPECS build). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_doc, (void *)PyDoc_STR("BridgeChipInfo(size=1)\n\nEmpty-initialize an array of `nvmlBridgeChipInfo_t`.\n\nThe resulting object is of length `size` and of dtype `bridge_chip_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlBridgeChipInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipInfo},
  {0, 0},
};
/* Type spec consumed by PyType_FromSpec; mirrors the static PyTypeObject
 * in the non-type-specs branch below. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo_spec = {
  "cuda.bindings._nvml.BridgeChipInfo", /* fully qualified name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo_slots,
};
#else

/* Number protocol for `BridgeChipInfo`: only __int__ is implemented. */
static PyNumberMethods __pyx_tp_as_number_BridgeChipInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol for `BridgeChipInfo`: __len__ and item access only. */
static PySequenceMethods __pyx_tp_as_sequence_BridgeChipInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_BridgeChipInfo, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol for `BridgeChipInfo`: len(), obj[i] and obj[i] = v. */
static PyMappingMethods __pyx_tp_as_mapping_BridgeChipInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BridgeChipInfo, /*mp_ass_subscript*/
};

/* Statically allocated type object for `BridgeChipInfo`, used when the
 * module is built without CYTHON_USE_TYPE_SPECS.  Field order follows
 * the PyTypeObject layout; do not reorder. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""BridgeChipInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_BridgeChipInfo, /*tp_as_number*/
  &__pyx_tp_as_sequence_BridgeChipInfo, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_BridgeChipInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("BridgeChipInfo(size=1)\n\nEmpty-initialize an array of `nvmlBridgeChipInfo_t`.\n\nThe resulting object is of length `size` and of dtype `bridge_chip_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlBridgeChipInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_14BridgeChipInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Value __pyx_vtable_4cuda_8bindings_5_nvml_Value;

/* tp_new for `Value`: allocate the instance, install the vtable pointer,
 * and initialise the `_owner` slot with an owned reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Value(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_Value *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)o;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_Value;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return o;
}

/* tp_dealloc for `Value`.  Statement order is critical:
 *   1. give tp_finalize a chance to run (it may resurrect the object),
 *   2. untrack from the GC before mutating fields,
 *   3. call the Cython-level __dealloc__ with the refcount temporarily
 *      bumped so any use of `o` inside __dealloc__ cannot retrigger
 *      deallocation, preserving a pending exception around the call,
 *   4. drop the owned `_owner` reference and release the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Value(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Value *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Value) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* object resurrected */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception across the user __dealloc__. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_5Value_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for `Value`: report the type object and the `_owner`
 * reference to the cycle collector. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_Value(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Value *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)o;
  int e = __Pyx_call_type_traverse(o, 1, v, a);
  if (e) return e;
  if (self->_owner) {
    e = (*v)(self->_owner, a);
    if (e) return e;
  }
  return 0;
}

/* tp_clear for `Value`: break a potential cycle through `_owner` by
 * resetting the slot to None before releasing the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_Value(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Value *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_Value *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  /* Install the replacement before the DECREF so the object is never
   * observed with a dangling slot. */
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for `Value`: `obj[i] = v` delegates to __setitem__;
 * item deletion (`del obj[i]`, v == NULL) is unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Value(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_11__setitem__(o, i, v);
}

/* tp_richcompare for `Value`: only == and != are implemented.
 * != is synthesized by negating __eq__'s result, except that
 * NotImplemented (or an error) from __eq__ is passed through unchanged. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Value(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_5Value_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_5Value_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      /* Ordering comparisons are not defined for this type. */
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* getset thunk: read-only `Value.ptr` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_3ptr_1__get__(o);
}

/* getset thunk: `Value.d_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_d_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_5d_val_1__get__(o);
}

/* getset thunk: `Value.d_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_d_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_5d_val_3__set__(o, v);
}

/* getset thunk: `Value.si_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_si_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6si_val_1__get__(o);
}

/* getset thunk: `Value.si_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_si_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6si_val_3__set__(o, v);
}

/* getset thunk: `Value.ui_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_ui_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ui_val_1__get__(o);
}

/* getset thunk: `Value.ui_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_ui_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ui_val_3__set__(o, v);
}

/* getset thunk: `Value.ul_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_ul_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ul_val_1__get__(o);
}

/* getset thunk: `Value.ul_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_ul_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6ul_val_3__set__(o, v);
}

/* getset thunk: `Value.ull_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_ull_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_7ull_val_1__get__(o);
}

/* getset thunk: `Value.ull_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_ull_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_7ull_val_3__set__(o, v);
}

/* getset thunk: `Value.sll_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_sll_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_7sll_val_1__get__(o);
}

/* getset thunk: `Value.sll_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_sll_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_7sll_val_3__set__(o, v);
}

/* getset thunk: `Value.us_val` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_5Value_us_val(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6us_val_1__get__(o);
}

/* getset thunk: `Value.us_val` setter; deletion (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_5Value_us_val(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_5Value_6us_val_3__set__(o, v);
}

/* Python-visible methods of `Value`; all entries use the fastcall-with-
 * keywords calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_Value[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_5Value_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_5Value_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* get/set descriptors for `Value`: `ptr` is read-only (NULL setter);
 * the union member accessors are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_Value[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"d_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_d_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_d_val, PyDoc_STR("float: "), 0},
  {"si_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_si_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_si_val, PyDoc_STR("int: "), 0},
  {"ui_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_ui_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_ui_val, PyDoc_STR("int: "), 0},
  {"ul_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_ul_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_ul_val, PyDoc_STR("int: "), 0},
  {"ull_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_ull_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_ull_val, PyDoc_STR("int: "), 0},
  {"sll_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_sll_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_sll_val, PyDoc_STR("int: "), 0},
  {"us_val", __pyx_getprop_4cuda_8bindings_5_nvml_5Value_us_val, __pyx_setprop_4cuda_8bindings_5_nvml_5Value_us_val, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for creating `Value` as a heap type via PyType_FromSpec
 * (CYTHON_USE_TYPE_SPECS build). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_Value_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_Value},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_5Value_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_5Value_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Value},
  {Py_tp_doc, (void *)PyDoc_STR("Value()\n\nEmpty-initialize an instance of `nvmlValue_t`.\n\n\n.. seealso:: `nvmlValue_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_Value},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_Value},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Value},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_Value},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_Value},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_5Value_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_Value},
  {0, 0},
};
/* Type spec consumed by PyType_FromSpec; mirrors the static PyTypeObject
 * in the non-type-specs branch below. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_Value_spec = {
  "cuda.bindings._nvml.Value", /* fully qualified name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Value), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_Value_slots,
};
#else

/* Number protocol for `Value`: only __int__ is implemented. */
static PyNumberMethods __pyx_tp_as_number_Value = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_5Value_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for `Value`: subscript assignment only (no len or
 * item read at the mapping level). */
static PyMappingMethods __pyx_tp_as_mapping_Value = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Value, /*mp_ass_subscript*/
};

/* Statically allocated type object for `Value`, used when the module is
 * built without CYTHON_USE_TYPE_SPECS.  Field order follows the
 * PyTypeObject layout; do not reorder. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_Value = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Value", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Value), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Value, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_5Value_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_Value, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_Value, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("Value()\n\nEmpty-initialize an instance of `nvmlValue_t`.\n\n\n.. seealso:: `nvmlValue_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_Value, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_Value, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_Value, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_Value, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_Value, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_5Value_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_Value, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod0 __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod0;

/* tp_new for `_py_anon_pod0`: allocate the instance, install the vtable
 * pointer, and initialise the `_owner` slot with an owned reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod0(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)o;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod0;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return o;
}

/* tp_dealloc for `_py_anon_pod0`.  Statement order is critical:
 *   1. give tp_finalize a chance to run (it may resurrect the object),
 *   2. untrack from the GC before mutating fields,
 *   3. call the Cython-level __dealloc__ with the refcount temporarily
 *      bumped so any use of `o` inside __dealloc__ cannot retrigger
 *      deallocation, preserving a pending exception around the call,
 *   4. drop the owned `_owner` reference and release the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod0(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod0) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* object resurrected */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception across the user __dealloc__. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for `_py_anon_pod0`: report the type object and the
 * `_owner` reference to the cycle collector. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod0(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *self = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)o;
  int e = __Pyx_call_type_traverse(o, 1, v, a);
  if (e) return e;
  if (self->_owner) {
    e = (*v)(self->_owner, a);
    if (e) return e;
  }
  return 0;
}

/* tp_clear for `_py_anon_pod0`: break a potential cycle through `_owner`
 * by resetting the slot to None before releasing the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod0(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *self = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  /* Install the replacement before the DECREF so the object is never
   * observed with a dangling slot. */
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for `_py_anon_pod0`: `obj[i] = v` delegates to
 * __setitem__; item deletion (`del obj[i]`, v == NULL) is unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod0(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_11__setitem__(o, i, v);
}

/* tp_richcompare for `_py_anon_pod0`: only == and != are implemented.
 * != is synthesized by negating __eq__'s result, except that
 * NotImplemented (or an error) from __eq__ is passed through unchanged. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod0(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      /* Ordering comparisons are not defined for this type. */
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* getset thunk: read-only `_py_anon_pod0.ptr` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_3ptr_1__get__(o);
}

/* getset thunk: `_py_anon_pod0.controller` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_controller(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_1__get__(o);
}

/* getset thunk: `_py_anon_pod0.controller` setter; deletion (v == NULL)
 * is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_controller(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_10controller_3__set__(o, v);
}

/* getset thunk: `_py_anon_pod0.default_min_temp` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_min_temp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_1__get__(o);
}

/* getset thunk: `_py_anon_pod0.default_min_temp` setter; deletion
 * (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_min_temp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_min_temp_3__set__(o, v);
}

/* getset thunk: `_py_anon_pod0.default_max_temp` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_max_temp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_1__get__(o);
}

/* getset thunk: `_py_anon_pod0.default_max_temp` setter; deletion
 * (v == NULL) is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_max_temp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_16default_max_temp_3__set__(o, v);
}

/* getset thunk: `_py_anon_pod0.current_temp` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_current_temp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_1__get__(o);
}

/* getset thunk: `_py_anon_pod0.current_temp` setter; deletion (v == NULL)
 * is unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_current_temp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_12current_temp_3__set__(o, v);
}

/* getset thunk: `_py_anon_pod0.target` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_target(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_1__get__(o);
}

/* getset thunk: `_py_anon_pod0.target` setter; deletion (v == NULL) is
 * unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_target(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_6target_3__set__(o, v);
}

/* Python-visible methods of `_py_anon_pod0`; all entries use the
 * fastcall-with-keywords calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod0[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod0_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* get/set descriptors for `_py_anon_pod0`: `ptr` is read-only
 * (NULL setter); the remaining fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod0[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"controller", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_controller, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_controller, PyDoc_STR("int: "), 0},
  {"default_min_temp", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_min_temp, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_min_temp, PyDoc_STR("int: "), 0},
  {"default_max_temp", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_max_temp, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_default_max_temp, PyDoc_STR("int: "), 0},
  {"current_temp", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_current_temp, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_current_temp, PyDoc_STR("int: "), 0},
  {"target", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_target, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod0_target, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for creating `_py_anon_pod0` as a heap type via
 * PyType_FromSpec (CYTHON_USE_TYPE_SPECS build). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_doc, (void *)PyDoc_STR("_py_anon_pod0()\n\nEmpty-initialize an instance of `_anon_pod0`.\n\n\n.. seealso:: `_anon_pod0`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod0},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod0},
  {0, 0},
};
/* Type spec consumed by PyType_FromSpec; mirrors the static PyTypeObject
 * in the non-type-specs branch below. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0_spec = {
  "cuda.bindings._nvml._py_anon_pod0", /* fully qualified name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number__py_anon_pod0 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PyMappingMethods for `_py_anon_pod0`: only subscript assignment
 * (obj[key] = value) is routed through the generated handler; length and
 * read-subscript slots are left empty here. */
static PyMappingMethods __pyx_tp_as_mapping__py_anon_pod0 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod0, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject for `_py_anon_pod0` (used when
 * CYTHON_USE_TYPE_SPECS is disabled — the #else branch of the conditional).
 * Slot layout must match the CPython headers exactly; trailing slots are
 * guarded by version/implementation preprocessor checks. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_py_anon_pod0", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number__py_anon_pod0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping__py_anon_pod0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("_py_anon_pod0()\n\nEmpty-initialize an instance of `_anon_pod0`.\n\n\n.. seealso:: `_anon_pod0`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod0_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod0, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_CoolerInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_CoolerInfo_v1;

/* tp_new for CoolerInfo_v1: allocate the extension-type instance, wire in
 * the class vtable pointer, and initialise `_owner` to a new reference to
 * None.  Constructor args are unused here (handled later by tp_init). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_CoolerInfo_v1;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  return self;
}

/* Deallocator for CoolerInfo_v1: runs tp_finalize (if defined) with a
 * resurrection check, untracks from the GC, invokes the Cython-level
 * __dealloc__ while the object is temporarily kept "alive", clears the
 * `_owner` reference, then releases the memory via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the finalizer resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_CoolerInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception around the user-level __dealloc__,
     * and bump the refcount so the object appears alive while it runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for CoolerInfo_v1: visit the type object (heap-type
 * bookkeeping) and the `_owner` reference, propagating the first non-zero
 * visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (inst->_owner) {
    err = v(inst->_owner, a);
    if (err)
      return err;
  }
  return 0;
}

/* GC clear for CoolerInfo_v1: swap `_owner` back to None before dropping
 * the old reference, so the field is never left dangling if the decref
 * re-enters Python code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *)o;
  PyObject *old_owner = (PyObject *)inst->_owner;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for CoolerInfo_v1: `obj[i] = v` dispatches to the
 * generated __setitem__; `del obj[i]` (v == NULL) is rejected with
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* Deletion path: report the fully-qualified type name in the error. */
    __Pyx_TypeName tn = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tn);
    __Pyx_DECREF_TypeName(tn);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for CoolerInfo_v1: only == and != are supported, both
 * derived from the generated __eq__ wrapper (!= is computed by negating
 * ==, unless __eq__ returned NotImplemented or failed). */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_CoolerInfo_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *res = __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* ---- Property thunks for CoolerInfo_v1 --------------------------------
 * Each getprop/setprop adapts the getter/setter signature required by
 * PyGetSetDef to the corresponding Cython-generated __get__/__set__
 * wrapper.  Setters reject attribute deletion (v == NULL) with
 * NotImplementedError("__del__"). */

/* Read-only `ptr` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_3ptr_1__get__(o);
}

/* `version` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_1__get__(o);
}

/* `version` setter; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `ind_ex` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_ind_ex(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_1__get__(o);
}

/* `ind_ex` setter; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_ind_ex(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6ind_ex_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `signal_type` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_signal_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_1__get__(o);
}

/* `signal_type` setter; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_signal_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_11signal_type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `target` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_target(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_1__get__(o);
}

/* `target` setter; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_target(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_6target_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for CoolerInfo_v1: the `from_data`/`from_ptr` constructors
 * plus the pickling helpers, all using the fastcall-with-keywords calling
 * convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_CoolerInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13CoolerInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Property table for CoolerInfo_v1, binding each exposed attribute to the
 * getprop/setprop thunks above (`ptr` is read-only: no setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_CoolerInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"ind_ex", __pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_ind_ex, __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_ind_ex, PyDoc_STR("int: the cooler index"), 0},
  {"signal_type", __pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_signal_type, __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_signal_type, PyDoc_STR("int: OUT: the cooler's control signal characteristics."), 0},
  {"target", __pyx_getprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_target, __pyx_setprop_4cuda_8bindings_5_nvml_13CoolerInfo_v1_target, PyDoc_STR("int: OUT: the target that cooler cools."), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for CoolerInfo_v1 (type-specs build path), pairing each
 * Py_tp_*/Py_nb_*/Py_mp_* slot id with its handler defined above. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("CoolerInfo_v1()\n\nEmpty-initialize an instance of `nvmlCoolerInfo_v1_t`.\n\n\n.. seealso:: `nvmlCoolerInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_CoolerInfo_v1},
  {0, 0}, /* sentinel */
};
/* PyType_Spec for `cuda.bindings._nvml.CoolerInfo_v1` (type-specs build
 * path): name, instance size, itemsize, flags, and the slot table above. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1_spec = {
  "cuda.bindings._nvml.CoolerInfo_v1", /* name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1), /* basicsize */
  0, /* itemsize: instances are fixed-size */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /* flags */
  __pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1_slots, /* slots */
};
#else

/* PyNumberMethods for CoolerInfo_v1 (static-PyTypeObject build path):
 * only nb_int (the __int__ wrapper) is populated. */
static PyNumberMethods __pyx_tp_as_number_CoolerInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PyMappingMethods for CoolerInfo_v1: only subscript assignment is wired
 * up here; length and read-subscript slots are left empty. */
static PyMappingMethods __pyx_tp_as_mapping_CoolerInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject for CoolerInfo_v1 (used when
 * CYTHON_USE_TYPE_SPECS is disabled).  Slot order must match the CPython
 * headers; trailing slots are version/implementation guarded. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""CoolerInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_CoolerInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_CoolerInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("CoolerInfo_v1()\n\nEmpty-initialize an instance of `nvmlCoolerInfo_v1_t`.\n\n\n.. seealso:: `nvmlCoolerInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13CoolerInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_CoolerInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_MarginTemperature_v1 __pyx_vtable_4cuda_8bindings_5_nvml_MarginTemperature_v1;

/* tp_new for MarginTemperature_v1: allocate the instance, install the
 * class vtable pointer, and set `_owner` to a fresh reference to None.
 * Constructor args are ignored here (tp_init handles them). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_MarginTemperature_v1;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  return self;
}

/* Deallocator for MarginTemperature_v1: runs tp_finalize (with resurrection
 * check), untracks from the GC, calls the Cython-level __dealloc__ while the
 * object is temporarily kept "alive", clears `_owner`, then frees memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the finalizer resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_MarginTemperature_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user-level __dealloc__
     * call, bumping the refcount so the object appears alive meanwhile. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for MarginTemperature_v1: visit the type object and the
 * `_owner` reference, returning the first non-zero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (inst->_owner) {
    err = v(inst->_owner, a);
    if (err)
      return err;
  }
  return 0;
}

/* GC clear for MarginTemperature_v1: reset `_owner` to None first, then
 * drop the old reference, so the field never dangles if the decref
 * re-enters Python code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *)o;
  PyObject *old_owner = (PyObject *)inst->_owner;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for MarginTemperature_v1: item assignment dispatches to
 * the generated __setitem__; item deletion (v == NULL) raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* Deletion path: include the fully-qualified type name in the error. */
    __Pyx_TypeName tn = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tn);
    __Pyx_DECREF_TypeName(tn);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for MarginTemperature_v1: == goes straight to the
 * generated __eq__; != negates the truth value of __eq__ (unless it
 * errored or returned NotImplemented); other ops are NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_MarginTemperature_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *res = __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* ---- Property thunks for MarginTemperature_v1 -------------------------
 * Adapters from the PyGetSetDef getter/setter signature to the generated
 * __get__/__set__ wrappers; setters reject deletion (v == NULL). */

/* Read-only `ptr` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_3ptr_1__get__(o);
}

/* `version` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_1__get__(o);
}

/* `version` setter; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `margin_temperature` getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_margin_temperature(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_1__get__(o);
}

/* `margin_temperature` setter; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_margin_temperature(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18margin_temperature_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for MarginTemperature_v1: `from_data`/`from_ptr`
 * constructors plus pickling helpers (fastcall-with-keywords convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_MarginTemperature_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20MarginTemperature_v1_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Property table for MarginTemperature_v1, binding each attribute to the
 * getprop/setprop thunks above (`ptr` is read-only: no setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_MarginTemperature_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"margin_temperature", __pyx_getprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_margin_temperature, __pyx_setprop_4cuda_8bindings_5_nvml_20MarginTemperature_v1_margin_temperature, PyDoc_STR("int: The margin temperature value."), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for MarginTemperature_v1 (type-specs build path), pairing
 * each slot id with its handler defined above. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_doc, (void *)PyDoc_STR("MarginTemperature_v1()\n\nEmpty-initialize an instance of `nvmlMarginTemperature_v1_t`.\n\n\n.. seealso:: `nvmlMarginTemperature_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_MarginTemperature_v1},
  {0, 0}, /* sentinel */
};
/* PyType_Spec for `cuda.bindings._nvml.MarginTemperature_v1` (type-specs
 * build path): name, instance size, itemsize, flags, slot table. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1_spec = {
  "cuda.bindings._nvml.MarginTemperature_v1", /* name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1), /* basicsize */
  0, /* itemsize: instances are fixed-size */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /* flags */
  __pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1_slots, /* slots */
};
#else

/* PyNumberMethods for MarginTemperature_v1 (static-PyTypeObject build
 * path): only nb_int (the __int__ wrapper) is populated. */
static PyNumberMethods __pyx_tp_as_number_MarginTemperature_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PyMappingMethods for MarginTemperature_v1: only subscript assignment is
 * wired up; length and read-subscript slots are left empty. */
static PyMappingMethods __pyx_tp_as_mapping_MarginTemperature_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject for MarginTemperature_v1 (used when
 * CYTHON_USE_TYPE_SPECS is disabled).  Slot order must match the CPython
 * headers; trailing slots are version/implementation guarded. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""MarginTemperature_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_MarginTemperature_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_MarginTemperature_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("MarginTemperature_v1()\n\nEmpty-initialize an instance of `nvmlMarginTemperature_v1_t`.\n\n\n.. seealso:: `nvmlMarginTemperature_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_20MarginTemperature_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_MarginTemperature_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonFaultInfo __pyx_vtable_4cuda_8bindings_5_nvml_ClkMonFaultInfo;

/* tp_new for ClkMonFaultInfo: allocate the instance, install the class
 * vtable pointer, and set `_data` to a fresh reference to None.
 * Constructor args are ignored here (tp_init handles them). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonFaultInfo;
  Py_INCREF(Py_None);
  inst->_data = Py_None;
  return self;
}

/* Deallocator for ClkMonFaultInfo: runs tp_finalize (with resurrection
 * check), untracks from the GC, clears `_data`, then frees the memory.
 * Unlike CoolerInfo_v1/MarginTemperature_v1 above, this type has no
 * Cython-level __dealloc__ call here. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the finalizer resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonFaultInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for ClkMonFaultInfo: visit the type object and the `_data`
 * reference, returning the first non-zero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (inst->_data) {
    err = v(inst->_data, a);
    if (err)
      return err;
  }
  return 0;
}

/* GC clear for ClkMonFaultInfo: reset `_data` to None before dropping the
 * old reference, so the field never dangles if the decref re-enters
 * Python code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *)o;
  PyObject *old_data = (PyObject *)inst->_data;
  Py_INCREF(Py_None);
  inst->_data = Py_None;
  Py_XDECREF(old_data);
  return 0;
}

/* sq_item for ClkMonFaultInfo: implement sequence-style indexing by boxing
 * the C ssize_t index into a Python int and delegating to the type's
 * mp_subscript slot (looked up differently per build configuration). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for ClkMonFaultInfo: item assignment dispatches to the
 * generated __setitem__; item deletion (v == NULL) raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* Deletion path: include the fully-qualified type name in the error. */
    __Pyx_TypeName tn = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tn);
    __Pyx_DECREF_TypeName(tn);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_13__setitem__(o, i, v);
}

/* tp_richcompare for ClkMonFaultInfo: == goes straight to the generated
 * __eq__; != negates the truth value of __eq__ (unless it errored or
 * returned NotImplemented); all other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClkMonFaultInfo(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *res = __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property thunks for ClkMonFaultInfo: each getter forwards to the generated
 * property __get__ wrapper; each setter forwards to __set__ and rejects
 * attribute deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_api_domain(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_api_domain(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14clk_api_domain_3__set__(o, v);
  }
  else {
    /* NULL value means `del obj.clk_api_domain`, which is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_domain_fault_mask(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_domain_fault_mask(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21clk_domain_fault_mask_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Read-only accessor for the backing `_data` object (no setter installed). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5_data_1__get__(o);
}

/* Method table for ClkMonFaultInfo (sentinel-terminated). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ClkMonFaultInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset (property) table: `ptr` and `_data` are read-only, the struct
 * fields have both getter and setter thunks.  Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ClkMonFaultInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"clk_api_domain", __pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_api_domain, __pyx_setprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_api_domain, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"clk_domain_fault_mask", __pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_domain_fault_mask, __pyx_setprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_clk_domain_fault_mask, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_15ClkMonFaultInfo__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build of ClkMonFaultInfo: slot array plus PyType_Spec used with
 * PyType_FromSpec at module init.  Mirrors the legacy PyTypeObject below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_doc, (void *)PyDoc_STR("ClkMonFaultInfo(size=1)\n\nEmpty-initialize an array of `nvmlClkMonFaultInfo_t`.\n\nThe resulting object is of length `size` and of dtype `clk_mon_fault_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlClkMonFaultInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonFaultInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo_spec = {
  "cuda.bindings._nvml.ClkMonFaultInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo_slots,
};
#else

/* Legacy (non type-spec) slot tables for ClkMonFaultInfo; only the slots the
 * class implements are populated, everything else stays 0. */
static PyNumberMethods __pyx_tp_as_number_ClkMonFaultInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* len(obj) and obj[i] via the sequence protocol. */
static PySequenceMethods __pyx_tp_as_sequence_ClkMonFaultInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* __len__/__getitem__/__setitem__ via the mapping protocol. */
static PyMappingMethods __pyx_tp_as_mapping_ClkMonFaultInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*mp_ass_subscript*/
};

/* Statically initialized PyTypeObject for builds without PyType_Spec. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ClkMonFaultInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ClkMonFaultInfo, /*tp_as_number*/
  &__pyx_tp_as_sequence_ClkMonFaultInfo, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ClkMonFaultInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ClkMonFaultInfo(size=1)\n\nEmpty-initialize an array of `nvmlClkMonFaultInfo_t`.\n\nThe resulting object is of length `size` and of dtype `clk_mon_fault_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlClkMonFaultInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonFaultInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClockOffset_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ClockOffset_v1;

/* tp_new: allocate a fresh ClockOffset_v1 instance, wire up its vtable and
 * initialize the `_owner` slot to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ClockOffset_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ClockOffset_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for ClockOffset_v1.  Order matters: (1) give tp_finalize a
 * chance to run (it may resurrect the object and abort deallocation),
 * (2) untrack from the GC, (3) call the Cython-level __dealloc__ with the
 * refcount temporarily raised so code inside it cannot re-trigger
 * deallocation, (4) drop the `_owner` reference and free the storage. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClockOffset_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    /* Only run the finalizer when this exact dealloc owns the object. */
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClockOffset_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the user-level __dealloc__. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types: each instance holds a reference to its type. */
  Py_DECREF(tp);
  #endif
}

/* GC tp_traverse: visit the type (heap types participate in GC) and the
 * `_owner` reference, propagating the first non-zero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ClockOffset_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC tp_clear: release the `_owner` reference, replacing it with None so the
 * slot never dangles while the collector breaks reference cycles. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ClockOffset_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript thunk: delegate stores to the generated __setitem__;
 * deletion (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClockOffset_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* `del obj[i]` is not supported by this extension type. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are implemented; != is derived by negating
 * the __eq__ result.  Every other operator yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClockOffset_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  int truth;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_9__eq__(o1, o2);
  /* Propagate errors (NULL) and NotImplemented unchanged. */
  if (unlikely(!res) || res == Py_NotImplemented)
    return res;
  truth = __Pyx_PyObject_IsTrue(res);
  Py_DECREF(res);
  if (unlikely(truth < 0)) return NULL;
  res = truth ? Py_False : Py_True;
  Py_INCREF(res);
  return res;
}

/* Property thunks for ClockOffset_v1: each getter forwards to the generated
 * property __get__ wrapper; each setter forwards to __set__ and rejects
 * attribute deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7version_3__set__(o, v);
  }
  else {
    /* NULL value means attribute deletion, which is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_4type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_pstate(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_pstate(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_6pstate_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_clock_offset_m_hz(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_clock_offset_m_hz(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17clock_offset_m_hz_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_min_clock_offset_m_hz(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_min_clock_offset_m_hz(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21min_clock_offset_m_hz_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_max_clock_offset_m_hz(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_max_clock_offset_m_hz(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_21max_clock_offset_m_hz_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for ClockOffset_v1 (sentinel-terminated). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ClockOffset_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14ClockOffset_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset (property) table: `ptr` is read-only, the struct fields have both
 * getter and setter thunks.  Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ClockOffset_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"type", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_type, __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_type, PyDoc_STR("int: "), 0},
  {"pstate", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_pstate, __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_pstate, PyDoc_STR("int: "), 0},
  {"clock_offset_m_hz", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_clock_offset_m_hz, __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_clock_offset_m_hz, PyDoc_STR("int: "), 0},
  {"min_clock_offset_m_hz", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_min_clock_offset_m_hz, __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_min_clock_offset_m_hz, PyDoc_STR("int: "), 0},
  {"max_clock_offset_m_hz", __pyx_getprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_max_clock_offset_m_hz, __pyx_setprop_4cuda_8bindings_5_nvml_14ClockOffset_v1_max_clock_offset_m_hz, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build of ClockOffset_v1: slot array plus PyType_Spec used with
 * PyType_FromSpec at module init.  Mirrors the legacy PyTypeObject below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ClockOffset_v1()\n\nEmpty-initialize an instance of `nvmlClockOffset_v1_t`.\n\n\n.. seealso:: `nvmlClockOffset_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClockOffset_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1_spec = {
  "cuda.bindings._nvml.ClockOffset_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1_slots,
};
#else

/* Legacy (non type-spec) slot tables for ClockOffset_v1; only nb_int and the
 * mapping assignment slot are populated. */
static PyNumberMethods __pyx_tp_as_number_ClockOffset_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment (__setitem__) is exposed through the mapping
 * protocol for this type. */
static PyMappingMethods __pyx_tp_as_mapping_ClockOffset_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClockOffset_v1, /*mp_ass_subscript*/
};

/* Statically initialized PyTypeObject for builds without PyType_Spec. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ClockOffset_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ClockOffset_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ClockOffset_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ClockOffset_v1()\n\nEmpty-initialize an instance of `nvmlClockOffset_v1_t`.\n\n\n.. seealso:: `nvmlClockOffset_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_14ClockOffset_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ClockOffset_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;

/* tp_new: allocate a fresh FanSpeedInfo_v1 instance, wire up its vtable and
 * initialize the `_owner` slot to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for FanSpeedInfo_v1.  Order matters: (1) give tp_finalize a
 * chance to run (it may resurrect the object and abort deallocation),
 * (2) untrack from the GC, (3) call the Cython-level __dealloc__ with the
 * refcount temporarily raised so code inside it cannot re-trigger
 * deallocation, (4) drop the `_owner` reference and free the storage. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    /* Only run the finalizer when this exact dealloc owns the object. */
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the user-level __dealloc__. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types: each instance holds a reference to its type. */
  Py_DECREF(tp);
  #endif
}

/* GC tp_traverse: visit the type (heap types participate in GC) and the
 * `_owner` reference, propagating the first non-zero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC tp_clear: release the `_owner` reference, replacing it with None so the
 * slot never dangles while the collector breaks reference cycles. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript thunk: delegate stores to the generated __setitem__;
 * deletion (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* `del obj[i]` is not supported by this extension type. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are implemented; != is derived by negating
 * the __eq__ result.  Every other operator yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FanSpeedInfo_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  int truth;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_9__eq__(o1, o2);
  /* Propagate errors (NULL) and NotImplemented unchanged. */
  if (unlikely(!res) || res == Py_NotImplemented)
    return res;
  truth = __Pyx_PyObject_IsTrue(res);
  Py_DECREF(res);
  if (unlikely(truth < 0)) return NULL;
  res = truth ? Py_False : Py_True;
  Py_INCREF(res);
  return res;
}

/* Property thunks for FanSpeedInfo_v1: each getter forwards to the generated
 * property __get__ wrapper; each setter forwards to __set__ and rejects
 * attribute deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7version_3__set__(o, v);
  }
  else {
    /* NULL value means attribute deletion, which is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_fan(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_fan(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_3fan_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_speed(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_speed(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5speed_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for FanSpeedInfo_v1 (sentinel-terminated). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_FanSpeedInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset (property) table: `ptr` is read-only, the struct fields have both
 * getter and setter thunks.  Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_FanSpeedInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"fan", __pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_fan, __pyx_setprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_fan, PyDoc_STR("int: the fan index"), 0},
  {"speed", __pyx_getprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_speed, __pyx_setprop_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_speed, PyDoc_STR("int: OUT: the fan speed in RPM."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build path: the FanSpeedInfo_v1 type is created at module init
 * from a PyType_Spec. The slot entries below mirror the classic PyTypeObject
 * in the #else branch; the {0, 0} entry terminates the list. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("FanSpeedInfo_v1()\n\nEmpty-initialize an instance of `nvmlFanSpeedInfo_v1_t`.\n\n\n.. seealso:: `nvmlFanSpeedInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_FanSpeedInfo_v1},
  {0, 0},
};
/* PyType_Spec fields: fully-qualified name, basicsize, itemsize, flags,
 * slots. Flags enable GC support and subclassing (BASETYPE). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1_spec = {
  "cuda.bindings._nvml.FanSpeedInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1_slots,
};
#else

/* Classic (static PyTypeObject) build path. Only nb_int is populated;
 * every other numeric slot is 0 (not supported). */
static PyNumberMethods __pyx_tp_as_number_FanSpeedInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (__setitem__) is wired up;
 * length and subscript lookup are not provided here. */
static PyMappingMethods __pyx_tp_as_mapping_FanSpeedInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*mp_ass_subscript*/
};

/* Statically-initialized type object for FanSpeedInfo_v1. Field order is
 * dictated by the PyTypeObject layout; the trailing fields are guarded by
 * version/implementation #ifs to match the layout of the target CPython. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""FanSpeedInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_FanSpeedInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_FanSpeedInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("FanSpeedInfo_v1()\n\nEmpty-initialize an instance of `nvmlFanSpeedInfo_v1_t`.\n\n\n.. seealso:: `nvmlFanSpeedInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePerfModes_v1 __pyx_vtable_4cuda_8bindings_5_nvml_DevicePerfModes_v1;

/* tp_new for DevicePerfModes_v1: allocate the instance, attach the type's
 * vtable and initialize the _owner reference to None. Constructor args are
 * ignored here (handled later by tp_init). Returns NULL on allocation
 * failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePerfModes_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for DevicePerfModes_v1.
 * Order is critical: (1) optionally run tp_finalize via
 * PyObject_CallFinalizerFromDealloc (which may resurrect the object),
 * (2) untrack from GC, (3) call the user __dealloc__ with the refcount
 * temporarily bumped so the object cannot be re-entered/freed while the
 * callback runs, preserving any pending exception around the call,
 * (4) drop the _owner reference, (5) free via the type's tp_free and,
 * for heap types (type specs), drop the reference the instance held on
 * its type. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePerfModes_v1) {
      /* Only run the finalizer from the exact dealloc it was generated for;
       * a subclass with its own tp_dealloc handles finalization itself. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse for DevicePerfModes_v1: visit the type (for heap-type GC
 * support) and then the _owner reference. Returns the first non-zero
 * visitor result, or 0. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = (*v)(self->_owner, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear for DevicePerfModes_v1: swap _owner back to None before
 * releasing the old reference, so the field is never left dangling. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for DevicePerfModes_v1: item assignment delegates to
 * __setitem__; item deletion (v == NULL) is rejected with
 * NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for DevicePerfModes_v1: only __eq__ is user-defined;
 * Py_NE is synthesized by negating the truth value of __eq__'s result.
 * Any other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DevicePerfModes_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_9__eq__(o1, o2);
  }
  if (op == Py_NE) {
    PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_9__eq__(o1, o2);
    /* NULL (error) and NotImplemented propagate unchanged. */
    if (likely(eq_result && eq_result != Py_NotImplemented)) {
      int truth = __Pyx_PyObject_IsTrue(eq_result);
      Py_DECREF(eq_result);
      if (unlikely(truth < 0)) return NULL;
      eq_result = truth ? Py_False : Py_True;
      Py_INCREF(eq_result);
    }
    return eq_result;
  }
  return __Pyx_NewRef(Py_NotImplemented);
}

/* getset thunk: forward the `ptr` property read to the generated getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *result = __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3ptr_1__get__(o);
  return result;
}

/* getset thunk: forward the `version` property read to the generated getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *result = __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_1__get__(o);
  return result;
}

/* getset thunk: assignment to `version` delegates to the generated setter;
 * deletion (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7version_3__set__(o, v);
}

/* getset thunk: forward the `str` property read to the generated getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_str(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *result = __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_1__get__(o);
  return result;
}

/* getset thunk: assignment to `str` delegates to the generated setter;
 * deletion (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_str(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_3str_3__set__(o, v);
}

/* Method table for the DevicePerfModes_v1 extension type.
 * Exposes the from_data/from_ptr alternate constructors plus the
 * __reduce_cython__/__setstate_cython__ helpers; all entries use Cython's
 * METH_FASTCALL|METH_KEYWORDS calling convention. Zero-filled sentinel
 * terminates the array. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_DevicePerfModes_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for DevicePerfModes_v1.
 * `ptr` is read-only (setter slot is 0); `version` and `str` are read/write
 * and delegate to the Cython-generated __get__/__set__ wrappers.
 * Zero-filled sentinel terminates the array. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_DevicePerfModes_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"str", __pyx_getprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_str, __pyx_setprop_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_str, PyDoc_STR("~_numpy.int8: (array of length 2048).OUT: the performance modes string."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build path: the DevicePerfModes_v1 type is created at module
 * init from a PyType_Spec. The slot entries below mirror the classic
 * PyTypeObject in the #else branch; the {0, 0} entry terminates the list. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_doc, (void *)PyDoc_STR("DevicePerfModes_v1()\n\nEmpty-initialize an instance of `nvmlDevicePerfModes_v1_t`.\n\n\n.. seealso:: `nvmlDevicePerfModes_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePerfModes_v1},
  {0, 0},
};
/* PyType_Spec fields: fully-qualified name, basicsize, itemsize, flags,
 * slots. Flags enable GC support and subclassing (BASETYPE). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1_spec = {
  "cuda.bindings._nvml.DevicePerfModes_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1_slots,
};
#else

/* Classic (static PyTypeObject) build path. Only nb_int is populated;
 * every other numeric slot is 0 (not supported). */
static PyNumberMethods __pyx_tp_as_number_DevicePerfModes_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (__setitem__) is wired up;
 * length and subscript lookup are not provided here. */
static PyMappingMethods __pyx_tp_as_mapping_DevicePerfModes_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*mp_ass_subscript*/
};

/* Statically-initialized type object for DevicePerfModes_v1. Field order is
 * dictated by the PyTypeObject layout; the trailing fields are guarded by
 * version/implementation #ifs to match the layout of the target CPython. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""DevicePerfModes_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_DevicePerfModes_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_DevicePerfModes_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("DevicePerfModes_v1()\n\nEmpty-initialize an instance of `nvmlDevicePerfModes_v1_t`.\n\n\n.. seealso:: `nvmlDevicePerfModes_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_DevicePerfModes_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 __pyx_vtable_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;

/* tp_new for DeviceCurrentClockFreqs_v1: allocate the instance, attach the
 * type's vtable and initialize the _owner reference to None. Constructor
 * args are ignored here (handled later by tp_init). Returns NULL on
 * allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for DeviceCurrentClockFreqs_v1.
 * Order is critical: (1) optionally run tp_finalize via
 * PyObject_CallFinalizerFromDealloc (which may resurrect the object),
 * (2) untrack from GC, (3) call the user __dealloc__ with the refcount
 * temporarily bumped so the object cannot be re-entered/freed while the
 * callback runs, preserving any pending exception around the call,
 * (4) drop the _owner reference, (5) free via the type's tp_free and,
 * for heap types (type specs), drop the reference the instance held on
 * its type. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) {
      /* Only run the finalizer from the exact dealloc it was generated for;
       * a subclass with its own tp_dealloc handles finalization itself. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse for DeviceCurrentClockFreqs_v1: visit the type (for heap-type
 * GC support) and then the _owner reference. Returns the first non-zero
 * visitor result, or 0. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = (*v)(self->_owner, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear for DeviceCurrentClockFreqs_v1: swap _owner back to None before
 * releasing the old reference, so the field is never left dangling. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for DeviceCurrentClockFreqs_v1: item assignment delegates
 * to __setitem__; item deletion (v == NULL) is rejected with
 * NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for DeviceCurrentClockFreqs_v1: only __eq__ is
 * user-defined; Py_NE is synthesized by negating the truth value of
 * __eq__'s result. Any other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_9__eq__(o1, o2);
  }
  if (op == Py_NE) {
    PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_9__eq__(o1, o2);
    /* NULL (error) and NotImplemented propagate unchanged. */
    if (likely(eq_result && eq_result != Py_NotImplemented)) {
      int truth = __Pyx_PyObject_IsTrue(eq_result);
      Py_DECREF(eq_result);
      if (unlikely(truth < 0)) return NULL;
      eq_result = truth ? Py_False : Py_True;
      Py_INCREF(eq_result);
    }
    return eq_result;
  }
  return __Pyx_NewRef(Py_NotImplemented);
}

/* getset thunk: forward the `ptr` property read to the generated getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *result = __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3ptr_1__get__(o);
  return result;
}

/* getset thunk: forward the `version` property read to the generated getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *result = __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_1__get__(o);
  return result;
}

/* getset thunk: assignment to `version` delegates to the generated setter;
 * deletion (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7version_3__set__(o, v);
}

/* getset thunk: forward the `str` property read to the generated getter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_str(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *result = __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_1__get__(o);
  return result;
}

/* getset thunk: assignment to `str` delegates to the generated setter;
 * deletion (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_str(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_3str_3__set__(o, v);
}

/* Method table for the DeviceCurrentClockFreqs_v1 extension type.
 * Exposes the from_data/from_ptr alternate constructors plus the
 * __reduce_cython__/__setstate_cython__ helpers; all entries use Cython's
 * METH_FASTCALL|METH_KEYWORDS calling convention. Zero-filled sentinel
 * terminates the array. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for DeviceCurrentClockFreqs_v1.
 * `ptr` is read-only (setter slot is 0); `version` and `str` are read/write
 * and delegate to the Cython-generated __get__/__set__ wrappers.
 * Zero-filled sentinel terminates the array. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"str", __pyx_getprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_str, __pyx_setprop_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_str, PyDoc_STR("~_numpy.int8: (array of length 2048).OUT: the current clock frequency string."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build path: the DeviceCurrentClockFreqs_v1 type is created at
 * module init from a PyType_Spec. The slot entries below mirror the classic
 * PyTypeObject in the #else branch; the {0, 0} entry terminates the list. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_doc, (void *)PyDoc_STR("DeviceCurrentClockFreqs_v1()\n\nEmpty-initialize an instance of `nvmlDeviceCurrentClockFreqs_v1_t`.\n\n\n.. seealso:: `nvmlDeviceCurrentClockFreqs_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1},
  {0, 0},
};
/* PyType_Spec fields: fully-qualified name, basicsize, itemsize, flags,
 * slots. Flags enable GC support and subclassing (BASETYPE). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1_spec = {
  "cuda.bindings._nvml.DeviceCurrentClockFreqs_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1_slots,
};
#else

/* Classic (static PyTypeObject) build path. Only nb_int is populated;
 * every other numeric slot is 0 (not supported). */
static PyNumberMethods __pyx_tp_as_number_DeviceCurrentClockFreqs_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (__setitem__) is wired up;
 * length and subscript lookup are not provided here. */
static PyMappingMethods __pyx_tp_as_mapping_DeviceCurrentClockFreqs_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*mp_ass_subscript*/
};

/* Statically-initialized type object for DeviceCurrentClockFreqs_v1. Field
 * order is dictated by the PyTypeObject layout; the trailing fields are
 * guarded by version/implementation #ifs to match the layout of the target
 * CPython. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""DeviceCurrentClockFreqs_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_DeviceCurrentClockFreqs_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_DeviceCurrentClockFreqs_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("DeviceCurrentClockFreqs_v1()\n\nEmpty-initialize an instance of `nvmlDeviceCurrentClockFreqs_v1_t`.\n\n\n.. seealso:: `nvmlDeviceCurrentClockFreqs_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationSample __pyx_vtable_4cuda_8bindings_5_nvml_ProcessUtilizationSample;

/* tp_new for ProcessUtilizationSample: allocate the instance, attach the
 * type's vtable and initialize the _data reference to None. Constructor
 * args are ignored here (handled later by tp_init). Returns NULL on
 * allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationSample;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  return obj;
}

/* tp_dealloc for ProcessUtilizationSample.
 * Unlike the types above, this one has no user __dealloc__, so there is no
 * refcount-resurrection dance: optionally run tp_finalize (which may
 * resurrect the object), untrack from GC, drop the _data reference, free
 * via the type's tp_free and, for heap types (type specs), drop the
 * reference the instance held on its type. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationSample) {
      /* Only run the finalizer from the exact dealloc it was generated for;
       * a subclass with its own tp_dealloc handles finalization itself. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: GC visit callback.  First lets the Cython helper visit the
   type object itself (needed for heap types), then visits the single owned
   PyObject member, _data. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break reference cycles.  Cython resets _data to None (not NULL)
   so the attribute stays readable, and decrefs the old value only after the
   slot has been rewritten, keeping the object consistent at all times. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item: sequence indexing thunk.  Boxes the C index into a Python int and
   delegates to the type's mp_subscript so that obj[i] and obj[int(i)] share
   one implementation (__getitem__). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript: forwards obj[i] = v to the generated __setitem__ wrapper.
   Deletion (v == NULL, i.e. `del obj[i]`) is not implemented and raises
   NotImplementedError with the fully qualified type name. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only __eq__ is defined in the .pyx, so Py_EQ delegates
   directly and Py_NE is synthesized by negating __eq__'s truth value
   (NULL and NotImplemented results pass through unchanged).  All other
   comparison ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessUtilizationSample(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property get/set thunks for ProcessUtilizationSample.  Each getter/setter
   forwards to the corresponding Cython-generated __get__/__set__ wrapper;
   every setter rejects attribute deletion (v == NULL) with
   NotImplementedError("__del__").  `ptr` and `_data` are read-only. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_pid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3pid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_10time_stamp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_sm_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_sm_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7sm_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_mem_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_mem_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8mem_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_enc_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_enc_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8enc_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_dec_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_dec_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_8dec_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5_data_1__get__(o);
}

/* Method table for ProcessUtilizationSample: from_data / from_ptr factories
   plus the pickle helpers, all using the fastcall-with-keywords convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ProcessUtilizationSample[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* GetSet table for ProcessUtilizationSample.  `ptr` and `_data` have no
   setter (read-only); the struct-field properties are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ProcessUtilizationSample[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_pid, __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_pid, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_time_stamp, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"sm_util", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_sm_util, __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_sm_util, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"mem_util", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_mem_util, __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_mem_util, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"enc_util", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_enc_util, __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_enc_util, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"dec_util", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_dec_util, __pyx_setprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_dec_util, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_24ProcessUtilizationSample__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Heap-type definition (PyType_Spec path): the same slots as the static
   PyTypeObject below, expressed as Py_tp_* / Py_nb_* / Py_sq_* / Py_mp_*
   slot entries for PyType_FromSpec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_doc, (void *)PyDoc_STR("ProcessUtilizationSample(size=1)\n\nEmpty-initialize an array of `nvmlProcessUtilizationSample_t`.\n\nThe resulting object is of length `size` and of dtype `process_utilization_sample_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessUtilizationSample_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationSample},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample_spec = {
  "cuda.bindings._nvml.ProcessUtilizationSample",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample_slots,
};
#else

/* Static-type path: per-protocol method tables.  Only nb_int, the length
   slots, sq_item and the mapping subscript slots are populated; everything
   else is 0 (protocol not supported). */
static PyNumberMethods __pyx_tp_as_number_ProcessUtilizationSample = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PySequenceMethods __pyx_tp_as_sequence_ProcessUtilizationSample = {
  __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_ProcessUtilizationSample = {
  __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*mp_ass_subscript*/
};

/* Static PyTypeObject for ProcessUtilizationSample (non-type-spec builds).
   Mirrors the PyType_Spec above; trailing slots are guarded by version /
   implementation #ifs to match the struct layout of the target CPython. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ProcessUtilizationSample", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ProcessUtilizationSample, /*tp_as_number*/
  &__pyx_tp_as_sequence_ProcessUtilizationSample, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ProcessUtilizationSample, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ProcessUtilizationSample(size=1)\n\nEmpty-initialize an array of `nvmlProcessUtilizationSample_t`.\n\nThe resulting object is of length `size` and of dtype `process_utilization_sample_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessUtilizationSample_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationSample, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Cython-generated boilerplate for the ProcessUtilizationInfo_v1 extension
   type (structurally identical to the ProcessUtilizationSample type above). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;

/* tp_new: allocate a ProcessUtilizationInfo_v1 instance, install the vtable
   pointer, and initialize the _data slot to None (owned reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize once (resurrection-safe), untrack from the GC,
   drop _data, then free; heap-type builds also release the type reference. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    /* Only finalize when dealloc was not overridden by a subclass. */
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;  /* object was resurrected */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type object (heap-type requirement) and the single
   owned PyObject member, _data. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break cycles by resetting _data to None (not NULL), releasing
   the old reference only after the slot has been rewritten. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item: box the C index as a Python int and delegate to mp_subscript so
   sequence and mapping indexing share the __getitem__ implementation. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript: forwards item assignment to __setitem__; item deletion
   (v == NULL) raises NotImplementedError with the qualified type name. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ delegates to the generated __eq__; Py_NE negates its
   truth value (NULL / NotImplemented pass through); all other ops return
   NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property get/set thunks for ProcessUtilizationInfo_v1.  Each forwards to
   the corresponding Cython-generated __get__/__set__ wrapper; every setter
   rejects attribute deletion (v == NULL) with NotImplementedError("__del__").
   `ptr` and `_data` are read-only. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_10time_stamp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_pid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3pid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_sm_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_sm_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7sm_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_mem_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_mem_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8mem_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_enc_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_enc_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8enc_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_dec_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_dec_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8dec_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_jpg_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_jpg_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8jpg_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_ofa_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_ofa_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_8ofa_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5_data_1__get__(o);
}

/* Method table for ProcessUtilizationInfo_v1: from_data / from_ptr factories
   plus the pickle helpers, all using the fastcall-with-keywords convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* GetSet table for ProcessUtilizationInfo_v1.  `ptr` and `_data` have no
   setter (read-only); the struct-field properties are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_time_stamp, PyDoc_STR("Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_pid, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_pid, PyDoc_STR("Union[~_numpy.uint32, int]: PID of process."), 0},
  {"sm_util", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_sm_util, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_sm_util, PyDoc_STR("Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."), 0},
  {"mem_util", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_mem_util, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_mem_util, PyDoc_STR("Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."), 0},
  {"enc_util", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_enc_util, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_enc_util, PyDoc_STR("Union[~_numpy.uint32, int]: Encoder Util Value."), 0},
  {"dec_util", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_dec_util, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_dec_util, PyDoc_STR("Union[~_numpy.uint32, int]: Decoder Util Value."), 0},
  {"jpg_util", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_jpg_util, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_jpg_util, PyDoc_STR("Union[~_numpy.uint32, int]: Jpeg Util Value."), 0},
  {"ofa_util", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_ofa_util, __pyx_setprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_ofa_util, PyDoc_STR("Union[~_numpy.uint32, int]: Ofa Util Value."), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Heap-type definition (PyType_Spec path) for ProcessUtilizationInfo_v1:
   slot entries for PyType_FromSpec mirroring the static PyTypeObject below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ProcessUtilizationInfo_v1(size=1)\n\nEmpty-initialize an array of `nvmlProcessUtilizationInfo_v1_t`.\n\nThe resulting object is of length `size` and of dtype `process_utilization_info_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessUtilizationInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1_spec = {
  "cuda.bindings._nvml.ProcessUtilizationInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_ProcessUtilizationInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol table for ProcessUtilizationInfo_v1: implements only
 * len() (sq_length) and integer indexing (sq_item). */
static PySequenceMethods __pyx_tp_as_sequence_ProcessUtilizationInfo_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol table for ProcessUtilizationInfo_v1: len(),
 * obj[key] (__getitem__) and obj[key] = v (__setitem__). */
static PyMappingMethods __pyx_tp_as_mapping_ProcessUtilizationInfo_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for ProcessUtilizationInfo_v1, used on
 * builds where CYTHON_USE_TYPE_SPECS is disabled (#else branch); the
 * PyType_Spec-based definition covers the type-specs path instead.
 * Do not reorder fields: this is a positional initializer whose trailing
 * entries are preprocessor-conditional on the CPython/PyPy version. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ProcessUtilizationInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ProcessUtilizationInfo_v1, /*tp_as_number*/
  &__pyx_tp_as_sequence_ProcessUtilizationInfo_v1, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ProcessUtilizationInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ProcessUtilizationInfo_v1(size=1)\n\nEmpty-initialize an array of `nvmlProcessUtilizationInfo_v1_t`.\n\nThe resulting object is of length `size` and of dtype `process_utilization_info_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlProcessUtilizationInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for EccSramErrorStatus_v1 (filled in during module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 __pyx_vtable_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;

/* tp_new: allocate an EccSramErrorStatus_v1 instance, wire its Cython
 * vtable pointer, and initialize the `_owner` field to None (owned ref).
 * Constructor args are ignored here; they are handled by tp_init. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run the user __dealloc__, drop owned refs, then free.
 * Order is significant throughout — do not reorder. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance first; it may resurrect the object, in
   * which case PyObject_CallFinalizerFromDealloc returns nonzero and we
   * must abort deallocation. Only done when this exact dealloc owns o. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user __dealloc__, and
     * temporarily bump the refcount so code inside __dealloc__ that
     * touches `o` does not trigger a recursive dealloc at refcount 0. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) hold a reference from each instance. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type (via the Cython helper) and the `_owner`
 * reference so the cycle collector can reach both. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc != 0) {
    return rc;
  }
  if (self->_owner != NULL) {
    rc = (*v)(self->_owner, a);
    if (rc != 0) {
      return rc;
    }
  }
  return 0;
}

/* GC clear: swap the `_owner` reference out for Py_None before dropping
 * it, so the slot is never left dangling during cycle breaking. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *)o;
  PyObject *old_owner = self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward `obj[i] = v` to __setitem__; reject
 * `del obj[i]` (v == NULL) with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    /* Deletion path: build a helpful message with the qualified type name. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: Py_EQ delegates to __eq__; Py_NE is derived by negating
 * the __eq__ result; every other operator returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) or NotImplemented propagate unchanged. */
  return eq_result;
}

/* Property accessor thunks for EccSramErrorStatus_v1.  Each getter forwards
 * to the corresponding Cython-generated __get__ wrapper; each setter
 * forwards to __set__ when a value is supplied and rejects attribute
 * deletion (v == NULL) with NotImplementedError("__del__").  `ptr` is
 * read-only (getter only). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_parity(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_parity(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20aggregate_unc_parity_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_sec_ded(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_sec_ded(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_21aggregate_unc_sec_ded_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_cor(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_cor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13aggregate_cor_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_parity(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_parity(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19volatile_unc_parity_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_sec_ded(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_sec_ded(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20volatile_unc_sec_ded_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_cor(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_cor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12volatile_cor_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_l2(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_l2(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_l2_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_sm(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_sm(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_23aggregate_unc_bucket_sm_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_pcie(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_pcie(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_25aggregate_unc_bucket_pcie_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_mcu(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_mcu(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_24aggregate_unc_bucket_mcu_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_other(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_other(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_26aggregate_unc_bucket_other_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_b_threshold_exceeded(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_b_threshold_exceeded(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_20b_threshold_exceeded_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: from_data / from_ptr alternate constructors plus the
 * Cython pickle helpers (__reduce_cython__/__setstate_cython__). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Get/set descriptor table: `ptr` is read-only; every struct field is
 * read/write through the accessor thunks defined above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"aggregate_unc_parity", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_parity, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_parity, PyDoc_STR("int: aggregate uncorrectable parity error count"), 0},
  {"aggregate_unc_sec_ded", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_sec_ded, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_sec_ded, PyDoc_STR("int: aggregate uncorrectable SEC-DED error count"), 0},
  {"aggregate_cor", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_cor, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_cor, PyDoc_STR("int: aggregate correctable error count"), 0},
  {"volatile_unc_parity", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_parity, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_parity, PyDoc_STR("int: volatile uncorrectable parity error count"), 0},
  {"volatile_unc_sec_ded", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_sec_ded, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_unc_sec_ded, PyDoc_STR("int: volatile uncorrectable SEC-DED error count"), 0},
  {"volatile_cor", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_cor, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_volatile_cor, PyDoc_STR("int: volatile correctable error count"), 0},
  {"aggregate_unc_bucket_l2", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_l2, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_l2, PyDoc_STR("int: aggregate uncorrectable error count for L2 cache bucket"), 0},
  {"aggregate_unc_bucket_sm", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_sm, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_sm, PyDoc_STR("int: aggregate uncorrectable error count for SM bucket"), 0},
  {"aggregate_unc_bucket_pcie", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_pcie, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_pcie, PyDoc_STR("int: aggregate uncorrectable error count for PCIE bucket"), 0},
  {"aggregate_unc_bucket_mcu", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_mcu, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_mcu, PyDoc_STR("int: aggregate uncorrectable error count for Microcontroller bucket"), 0},
  {"aggregate_unc_bucket_other", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_other, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_aggregate_unc_bucket_other, PyDoc_STR("int: aggregate uncorrectable error count for Other bucket"), 0},
  {"b_threshold_exceeded", __pyx_getprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_b_threshold_exceeded, __pyx_setprop_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_b_threshold_exceeded, PyDoc_STR("int: if the error threshold of field diag is exceeded"), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot array for EccSramErrorStatus_v1 (type-specs build); mirrors
 * the static PyTypeObject in the #else branch below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_doc, (void *)PyDoc_STR("EccSramErrorStatus_v1()\n\nEmpty-initialize an instance of `nvmlEccSramErrorStatus_v1_t`.\n\n\n.. seealso:: `nvmlEccSramErrorStatus_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1},
  {0, 0},  /* sentinel */
};
/* PyType_Spec for EccSramErrorStatus_v1 (type-specs build). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1_spec = {
  "cuda.bindings._nvml.EccSramErrorStatus_v1", /* fully-qualified type name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1), /* basicsize */
  0, /* itemsize: fixed-size instances */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1_slots,
};
#else

/* Number protocol table for EccSramErrorStatus_v1 (non-type-specs build).
 * All slots are empty except nb_int, which forwards to the generated
 * __int__ wrapper. */
static PyNumberMethods __pyx_tp_as_number_EccSramErrorStatus_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table for EccSramErrorStatus_v1: only item assignment
 * is supported (no mp_length / mp_subscript). */
static PyMappingMethods __pyx_tp_as_mapping_EccSramErrorStatus_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for EccSramErrorStatus_v1, used on
 * builds where CYTHON_USE_TYPE_SPECS is disabled (#else branch); the
 * PyType_Spec-based definition covers the type-specs path instead.
 * Do not reorder fields: this is a positional initializer whose trailing
 * entries are preprocessor-conditional on the CPython/PyPy version. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""EccSramErrorStatus_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_EccSramErrorStatus_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_EccSramErrorStatus_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("EccSramErrorStatus_v1()\n\nEmpty-initialize an instance of `nvmlEccSramErrorStatus_v1_t`.\n\n\n.. seealso:: `nvmlEccSramErrorStatus_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for PlatformInfo_v2 (filled in during module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PlatformInfo_v2 __pyx_vtable_4cuda_8bindings_5_nvml_PlatformInfo_v2;

/* tp_new: allocate a PlatformInfo_v2 instance, wire its Cython vtable
 * pointer, and initialize the `_owner` field to None (owned ref).
 * Constructor args are ignored here; they are handled by tp_init. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_PlatformInfo_v2;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run the user __dealloc__, drop owned refs, then free.
 * Order is significant throughout — do not reorder. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance first; it may resurrect the object, in
   * which case PyObject_CallFinalizerFromDealloc returns nonzero and we
   * must abort deallocation. Only done when this exact dealloc owns o. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PlatformInfo_v2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user __dealloc__, and
     * temporarily bump the refcount so code inside __dealloc__ that
     * touches `o` does not trigger a recursive dealloc at refcount 0. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) hold a reference from each instance. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type (via the Cython helper) and the `_owner`
 * reference so the cycle collector can reach both. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc != 0) {
    return rc;
  }
  if (self->_owner != NULL) {
    rc = (*v)(self->_owner, a);
    if (rc != 0) {
      return rc;
    }
  }
  return 0;
}

/* GC clear: swap the `_owner` reference out for Py_None before dropping
 * it, so the slot is never left dangling during cycle breaking. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *)o;
  PyObject *old_owner = self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward `obj[i] = v` to __setitem__; reject
 * `del obj[i]` (v == NULL) with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    /* Deletion path: build a helpful message with the qualified type name. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11__setitem__(o, i, v);
}

/* tp_richcompare: Py_EQ delegates to __eq__; Py_NE is derived by negating
 * the __eq__ result; every other operator returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PlatformInfo_v2(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) or NotImplemented propagate unchanged. */
  return eq_result;
}

/* Property accessor thunks for PlatformInfo_v2.  Each getter forwards to
 * the corresponding Cython-generated __get__ wrapper; each setter forwards
 * to __set__ when a value is supplied and rejects attribute deletion
 * (v == NULL) with NotImplementedError("__del__").  `ptr` is read-only
 * (getter only). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_ib_guid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_ib_guid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7ib_guid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_chassis_serial_number(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_chassis_serial_number(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_21chassis_serial_number_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_slot_number(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_slot_number(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11slot_number_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_tray_ind_ex(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_tray_ind_ex(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_11tray_ind_ex_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_host_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_host_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7host_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_peer_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_peer_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9peer_type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_module_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_module_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_9module_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for PlatformInfo_v2: from_data/from_ptr constructors plus
 * the Cython pickling stubs, all exposed via the FASTCALL convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_PlatformInfo_v2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15PlatformInfo_v2_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table for PlatformInfo_v2; "ptr" is read-only (no setter), all
 * other entries pair the trampolines defined above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_PlatformInfo_v2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_version, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_version, PyDoc_STR("int: the API version number"), 0},
  {"ib_guid", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_ib_guid, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_ib_guid, PyDoc_STR("~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)"), 0},
  {"chassis_serial_number", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_chassis_serial_number, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_chassis_serial_number, PyDoc_STR("~_numpy.uint8: (array of length 16).Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero)"), 0},
  {"slot_number", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_slot_number, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_slot_number, PyDoc_STR("int: The slot number in the chassis containing this GPU (includes switches)"), 0},
  {"tray_ind_ex", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_tray_ind_ex, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_tray_ind_ex, PyDoc_STR("int: The tray index within the compute slots in the chassis containing this GPU (does not include switches)"), 0},
  {"host_id", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_host_id, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_host_id, PyDoc_STR("int: Index of the node within the slot containing this GPU."), 0},
  {"peer_type", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_peer_type, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_peer_type, PyDoc_STR("int: Platform indicated NVLink-peer type (e.g. switch present or not)"), 0},
  {"module_id", __pyx_getprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_module_id, __pyx_setprop_4cuda_8bindings_5_nvml_15PlatformInfo_v2_module_id, PyDoc_STR("int: ID of this GPU within the node."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API / heap type): PlatformInfo_v2 is described
 * by a PyType_Slot array + PyType_Spec and instantiated at module init. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_doc, (void *)PyDoc_STR("PlatformInfo_v2()\n\nEmpty-initialize an instance of `nvmlPlatformInfo_v2_t`.\n\n\n.. seealso:: `nvmlPlatformInfo_v2_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_PlatformInfo_v2},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2_spec = {
  "cuda.bindings._nvml.PlatformInfo_v2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2),
  0,
  /* NOTE(review): Py_TPFLAGS_CHECKTYPES / Py_TPFLAGS_HAVE_NEWBUFFER are
   * Python-2-era flags; Cython's preamble presumably defines them as 0 on
   * Python 3 — confirm in the utility-code section of this file. */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2_slots,
};
#else

/* Static-PyTypeObject path: same slots expressed as a fully spelled-out
 * PyNumberMethods / PyMappingMethods / PyTypeObject triple. */
static PyNumberMethods __pyx_tp_as_number_PlatformInfo_v2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_PlatformInfo_v2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""PlatformInfo_v2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_PlatformInfo_v2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_PlatformInfo_v2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("PlatformInfo_v2()\n\nEmpty-initialize an instance of `nvmlPlatformInfo_v2_t`.\n\n\n.. seealso:: `nvmlPlatformInfo_v2_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15PlatformInfo_v2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_PlatformInfo_v2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Per-type vtable instance for _py_anon_pod1 (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod1 __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod1;

/* tp_new: allocate the object, wire in the vtable pointer, and initialize
 * the _owner field to None (Cython keeps it non-NULL: None means
 * "no owner", a real object means it keeps the backing data alive). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize (if any) via the CPython finalization
 * protocol (which may resurrect the object), then call the user-level
 * __dealloc__ with the refcount temporarily bumped so decrefs inside it
 * cannot recursively re-enter deallocation.  Any pending exception is
 * saved/restored around __dealloc__.  Finally drop _owner and free the
 * memory through the type's tp_free (heap types also drop a type ref). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report GC-visible references — the type itself (needed for
 * heap types) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break reference cycles by resetting _owner back to None
 * (never NULL — Cython's invariant for this field) before dropping the
 * old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: dispatch obj[i] = v to __setitem__; deletion
 * (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only __eq__ is implemented in the .pyx source, so
 * Py_EQ delegates directly and Py_NE is derived by negating the __eq__
 * result (NotImplemented is passed through unchanged); every other
 * comparison returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property trampolines for _py_anon_pod1 (an anonymous POD sub-struct
 * exposed as its own Python type).  "ptr" is read-only; the remaining
 * fields have get/set pairs.  A NULL value in a setter (deletion) is
 * rejected with NotImplementedError("__del__"). */

/* _py_anon_pod1.ptr (read-only) */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_3ptr_1__get__(o);
}

/* _py_anon_pod1.b_is_present */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_b_is_present(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_b_is_present(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_12b_is_present_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* _py_anon_pod1.percentage */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_percentage(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_percentage(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_10percentage_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* _py_anon_pod1.inc_threshold */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_inc_threshold(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_inc_threshold(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13inc_threshold_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* _py_anon_pod1.dec_threshold */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_dec_threshold(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_dec_threshold(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13dec_threshold_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for _py_anon_pod1: constructors plus Cython pickling stubs. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table for _py_anon_pod1; "ptr" is read-only. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"b_is_present", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_b_is_present, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_b_is_present, PyDoc_STR("int: "), 0},
  {"percentage", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_percentage, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_percentage, PyDoc_STR("int: "), 0},
  {"inc_threshold", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_inc_threshold, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_inc_threshold, PyDoc_STR("int: "), 0},
  {"dec_threshold", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_dec_threshold, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod1_dec_threshold, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API / heap type) for _py_anon_pod1. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_doc, (void *)PyDoc_STR("_py_anon_pod1()\n\nEmpty-initialize an instance of `_anon_pod1`.\n\n\n.. seealso:: `_anon_pod1`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1_spec = {
  "cuda.bindings._nvml._py_anon_pod1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1_slots,
};
#else

/* Static-PyTypeObject path for _py_anon_pod1. */
static PyNumberMethods __pyx_tp_as_number__py_anon_pod1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping__py_anon_pod1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_py_anon_pod1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number__py_anon_pod1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping__py_anon_pod1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("_py_anon_pod1()\n\nEmpty-initialize an instance of `_anon_pod1`.\n\n\n.. seealso:: `_anon_pod1`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Per-type vtable instance for VgpuHeterogeneousMode_v1. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;

/* tp_new: allocate, install the vtable pointer, and set _owner to None
 * (Cython keeps this field non-NULL; a real object keeps backing data alive). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: finalize (may resurrect), run __dealloc__ with the refcount
 * bumped and any pending exception preserved, clear _owner, then free via
 * the type's tp_free; under type specs also drop the heap-type reference. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (heap types) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: reset _owner to None (never NULL) and drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: obj[i] = v → __setitem__; deletion is unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ delegates to the generated __eq__; Py_NE negates
 * its result (NotImplemented passes through); all other ops return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property trampolines for VgpuHeterogeneousMode_v1; "ptr" is read-only,
 * deletion of settable attributes raises NotImplementedError("__del__"). */

/* VgpuHeterogeneousMode_v1.ptr (read-only) */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_3ptr_1__get__(o);
}

/* VgpuHeterogeneousMode_v1.version */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* VgpuHeterogeneousMode_v1.mode (getter; setter follows) */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_1__get__(o);
}

/* Setter slot for VgpuHeterogeneousMode_v1.mode: forwards assignment to
 * the generated __set__ wrapper.  Attribute deletion (v == NULL) is
 * rejected with NotImplementedError("__del__"). */
static int __pyx_setprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_4mode_3__set__(o, v);
}

/* Method table for VgpuHeterogeneousMode_v1: constructors + pickling stubs. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table for VgpuHeterogeneousMode_v1; "ptr" is read-only. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"mode", __pyx_getprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_mode, __pyx_setprop_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_mode, PyDoc_STR("int: The vGPU heterogeneous mode."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API / heap type) for VgpuHeterogeneousMode_v1. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuHeterogeneousMode_v1()\n\nEmpty-initialize an instance of `nvmlVgpuHeterogeneousMode_v1_t`.\n\n\n.. seealso:: `nvmlVgpuHeterogeneousMode_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1_spec = {
  "cuda.bindings._nvml.VgpuHeterogeneousMode_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_VgpuHeterogeneousMode_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuHeterogeneousMode_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuHeterogeneousMode_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuHeterogeneousMode_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuHeterogeneousMode_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuHeterogeneousMode_v1()\n\nEmpty-initialize an instance of `nvmlVgpuHeterogeneousMode_v1_t`.\n\n\n.. seealso:: `nvmlVgpuHeterogeneousMode_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;

/* tp_new: allocate a VgpuPlacementId_v1 instance, attach its vtable and
 * initialize the _owner slot to None (owned reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for VgpuPlacementId_v1: optionally run tp_finalize, call the
 * user-level __dealloc__ with the refcount temporarily bumped (so helper
 * code running inside it cannot retrigger deallocation), then drop owned
 * references and free the object. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer when this function really is the tp_dealloc
     of o's type (i.e. not called via a subclass override) and it has not
     already run; the finalizer may resurrect o, in which case we bail out. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  /* Fetch the type before freeing: type-specs builds own a reference to
     the heap type which must be released after tp_free. */
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (for heap-type GC support) and the owned
 * _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err)
      return err;
  }
  return 0;
}

/* tp_clear: reset _owner to None (never NULL, so readers need no check)
 * and release the previous reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; item deletion
 * (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are defined. != is derived by negating
 * __eq__ unless it returned NotImplemented; other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPlacementId_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0))
      return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Getter thunk: VgpuPlacementId_v1.ptr (read-only). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_3ptr_1__get__(o);
}

/* Getter thunk: VgpuPlacementId_v1.version. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_1__get__(o);
}

/* Setter thunk: VgpuPlacementId_v1.version; deletion (v == NULL) raises. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter thunk: VgpuPlacementId_v1.placement_id. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_placement_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_1__get__(o);
}

/* Setter thunk: VgpuPlacementId_v1.placement_id; deletion (v == NULL) raises. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_placement_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12placement_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuPlacementId_v1: constructors from raw data/pointer
 * plus the standard Cython pickling helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuPlacementId_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuPlacementId_v1: read-only `ptr`, read/write
 * `version` and `placement_id`. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPlacementId_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"placement_id", __pyx_getprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_placement_id, __pyx_setprop_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_placement_id, PyDoc_STR("int: Placement ID of the active vGPU instance."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used when the type is created dynamically via PyType_FromSpec
 * (type-specs / limited-API style builds). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuPlacementId_v1()\n\nEmpty-initialize an instance of `nvmlVgpuPlacementId_v1_t`.\n\n\n.. seealso:: `nvmlVgpuPlacementId_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementId_v1},
  {0, 0},
};
/* Type spec pairing the slot table above with the instance size/flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1_spec = {
  "cuda.bindings._nvml.VgpuPlacementId_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1_slots,
};
#else

/* Number protocol: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuPlacementId_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only subscript assignment is routed. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuPlacementId_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject used on non-type-specs builds;
 * mirrors the slot/spec pair in the #if branch above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuPlacementId_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuPlacementId_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuPlacementId_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuPlacementId_v1()\n\nEmpty-initialize an instance of `nvmlVgpuPlacementId_v1_t`.\n\n\n.. seealso:: `nvmlVgpuPlacementId_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;

/* tp_new: allocate a VgpuPlacementList_v2 instance, attach its vtable and
 * initialize both owned slots (_owner, _refs) to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_INCREF(Py_None);
  self->_refs = (PyObject*)Py_None;
  return obj;
}

/* tp_dealloc for VgpuPlacementList_v2: optionally run tp_finalize, call the
 * user-level __dealloc__ with the refcount temporarily bumped (so helper
 * code running inside it cannot retrigger deallocation), then drop owned
 * references (_owner, _refs) and free the object. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer when this function really is the tp_dealloc
     of o's type and it has not already run; the finalizer may resurrect o,
     in which case we bail out. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  /* Fetch the type before freeing: type-specs builds own a reference to
     the heap type which must be released after tp_free. */
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (for heap-type GC support) and both owned
 * references (_owner, _refs). */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err)
      return err;
  }
  if (self->_refs) {
    err = v(self->_refs, a);
    if (err)
      return err;
  }
  return 0;
}

/* tp_clear: reset _owner and _refs to None (never NULL, so readers need
 * no check) and release the previous references. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *)o;
  PyObject *old_ref;
  old_ref = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_ref);
  old_ref = (PyObject *)self->_refs;
  Py_INCREF(Py_None);
  self->_refs = (PyObject*)Py_None;
  Py_XDECREF(old_ref);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; item deletion
 * (v == NULL) is not supported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are defined. != is derived by negating
 * __eq__ unless it returned NotImplemented; other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPlacementList_v2(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0))
      return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Getter thunk: VgpuPlacementList_v2.ptr (read-only). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_3ptr_1__get__(o);
}

/* Getter thunk: VgpuPlacementList_v2.version. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_1__get__(o);
}

/* Setter thunk: VgpuPlacementList_v2.version; deletion (v == NULL) raises. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter thunk: VgpuPlacementList_v2.placement_size. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_1__get__(o);
}

/* Setter thunk: VgpuPlacementList_v2.placement_size; deletion raises. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14placement_size_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter thunk: VgpuPlacementList_v2.placement_ids. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_ids(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_1__get__(o);
}

/* Setter thunk: VgpuPlacementList_v2.placement_ids; deletion raises. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_ids(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13placement_ids_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter thunk: VgpuPlacementList_v2.mode. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_1__get__(o);
}

/* Setter thunk: VgpuPlacementList_v2.mode; deletion (v == NULL) raises. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_4mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuPlacementList_v2: constructors from raw data/pointer
 * plus the standard Cython pickling helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuPlacementList_v2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuPlacementList_v2: read-only `ptr`, read/write
 * `version`, `placement_size`, `placement_ids` and `mode`. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPlacementList_v2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_version, __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"placement_size", __pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_size, __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_size, PyDoc_STR("int: OUT: The number of slots occupied by the vGPU type."), 0},
  {"placement_ids", __pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_ids, __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_placement_ids, PyDoc_STR("int: IN/OUT: Placement IDs for the vGPU type."), 0},
  {"mode", __pyx_getprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_mode, __pyx_setprop_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_mode, PyDoc_STR("int: IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used when the type is created dynamically via PyType_FromSpec
 * (type-specs / limited-API style builds). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuPlacementList_v2()\n\nEmpty-initialize an instance of `nvmlVgpuPlacementList_v2_t`.\n\n\n.. seealso:: `nvmlVgpuPlacementList_v2_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementList_v2},
  {0, 0},
};
/* Type spec pairing the slot table above with the instance size/flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2_spec = {
  "cuda.bindings._nvml.VgpuPlacementList_v2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2_slots,
};
#else

/* Number protocol: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuPlacementList_v2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only subscript assignment is routed. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuPlacementList_v2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject used on non-type-specs builds;
 * mirrors the slot/spec pair in the #if branch above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuPlacementList_v2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuPlacementList_v2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuPlacementList_v2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuPlacementList_v2()\n\nEmpty-initialize an instance of `nvmlVgpuPlacementList_v2_t`.\n\n\n.. seealso:: `nvmlVgpuPlacementList_v2_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;

/* tp_new: allocate a VgpuTypeBar1Info_v1 instance, attach its vtable and
 * initialize the _owner slot to None (owned reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript slot for VgpuTypeBar1Info_v1: `obj[i] = v` is forwarded to
 * the generated __setitem__ wrapper; `del obj[i]` (signalled by v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyObject *o, PyObject *i, PyObject *v) {
  __Pyx_TypeName o_type_name;
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_11__setitem__(o, i, v);
  /* Deletion path: report the fully qualified type name in the error. */
  o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
  PyErr_Format(PyExc_NotImplementedError,
    "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
  __Pyx_DECREF_TypeName(o_type_name);
  return -1;
}

/* tp_richcompare slot for VgpuTypeBar1Info_v1.  Only __eq__ is generated, so
 * Py_EQ delegates directly, Py_NE is derived by negating __eq__, and every
 * other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  int truth;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  /* Py_NE: run __eq__ and invert the truth value.  Errors (NULL) and
   * NotImplemented propagate unchanged. */
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_9__eq__(o1, o2);
  if (eq_result == NULL || eq_result == Py_NotImplemented)
    return eq_result;
  truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (truth < 0) return NULL;
  eq_result = truth ? Py_False : Py_True;
  Py_INCREF(eq_result);
  return eq_result;
}

/* Property trampolines for VgpuTypeBar1Info_v1: each getter/setter simply
 * forwards to the corresponding generated __get__/__set__ wrapper.  Setters
 * reject attribute deletion (v == NULL) with NotImplementedError. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7version_3__set__(o, v);
  }
  else {
    /* v == NULL means `del obj.version`, which this property does not allow. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_bar1size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_bar1size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_8bar1size_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: from_data/from_ptr constructors plus pickle support stubs. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table: `ptr` is read-only; `version` and `bar1size` are writable. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"bar1size", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_bar1size, __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_bar1size, PyDoc_STR("int: BAR1 size in megabytes."), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for VgpuTypeBar1Info_v1.  Two build flavors: the
 * PyType_Spec/slot form (heap type, Limited-API friendly) when
 * CYTHON_USE_TYPE_SPECS is set, otherwise a classic static PyTypeObject. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuTypeBar1Info_v1()\n\nEmpty-initialize an instance of `nvmlVgpuTypeBar1Info_v1_t`.\n\n\n.. seealso:: `nvmlVgpuTypeBar1Info_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1_spec = {
  "cuda.bindings._nvml.VgpuTypeBar1Info_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1_slots,
};
#else

/* Number protocol: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuTypeBar1Info_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: assignment only; no __len__ or __getitem__ for this type. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuTypeBar1Info_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuTypeBar1Info_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuTypeBar1Info_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuTypeBar1Info_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuTypeBar1Info_v1()\n\nEmpty-initialize an instance of `nvmlVgpuTypeBar1Info_v1_t`.\n\n\n.. seealso:: `nvmlVgpuTypeBar1Info_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the VgpuProcessUtilizationInfo_v1 vtable; presumably populated
 * during module init (standard Cython pattern). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;

/* tp_new: allocate a VgpuProcessUtilizationInfo_v1 instance, attach the shared
 * vtable pointer, and initialize _data to Py_None (never NULL).  Unlike
 * VgpuTypeBar1Info_v1 this type keeps its backing storage in `_data`. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: no user __dealloc__ exists for this type, so this only runs the
 * optional finalizer, releases _data, and frees the object. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a finalizer resurrects the object, CallFinalizerFromDealloc returns
   * nonzero and deallocation must be abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object; drop it after freeing. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report the type object and the _data reference to the GC. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* tp_clear for VgpuProcessUtilizationInfo_v1: drop the GC-visible reference
 * held in _data, resetting the slot to Py_None (not NULL) so the object is
 * never observed with an empty _data during a GC-triggered clear. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *)o;
  PyObject *old_data = self->_data;
  /* Install the None placeholder before releasing the old reference. */
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old_data);
  return 0;
}

/* sq_item shim: implements sequence indexing (obj[n] with an integer) by
 * boxing the index and delegating to the type's mp_subscript (__getitem__).
 * The slot is fetched directly or via PyType_GetSlot depending on build mode. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript slot for VgpuProcessUtilizationInfo_v1: forwards item
 * assignment to __setitem__; item deletion is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    /* Deletion path (`del obj[i]`): raise with the fully qualified type name. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13__setitem__(o, i, v);
}

/* tp_richcompare slot: only __eq__ is generated for this type, so Py_EQ
 * delegates directly, Py_NE negates __eq__'s result (propagating NULL and
 * NotImplemented unchanged), and all other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        /* Invert the equality result for !=. */
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property trampolines for VgpuProcessUtilizationInfo_v1.  Each getter/setter
 * forwards to the corresponding generated __get__/__set__ wrapper; every
 * setter rejects attribute deletion (v == NULL) with NotImplementedError —
 * the pattern is identical across all properties below. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_process_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_process_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_12process_name_3__set__(o, v);
  }
  else {
    /* v == NULL means `del obj.<prop>`, which these properties do not allow. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_10time_stamp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_vgpu_instance(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_vgpu_instance(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_13vgpu_instance_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_pid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3pid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_sm_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_sm_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7sm_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_mem_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_mem_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8mem_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_enc_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_enc_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8enc_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_dec_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_dec_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8dec_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_jpg_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_jpg_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8jpg_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_ofa_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_ofa_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_8ofa_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* _data is exposed read-only (no setter is generated for it). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5_data_1__get__(o);
}

/* Method table: from_data/from_ptr constructors plus pickle support stubs. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table: `ptr` and `_data` are read-only; all struct fields writable. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"process_name", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_process_name, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_process_name, PyDoc_STR("~_numpy.int8: (array of length 64).Name of process running within the vGPU VM."), 0},
  {"time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_time_stamp, PyDoc_STR("Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."), 0},
  {"vgpu_instance", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_vgpu_instance, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_vgpu_instance, PyDoc_STR("Union[~_numpy.uint32, int]: vGPU Instance"), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_pid, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_pid, PyDoc_STR("Union[~_numpy.uint32, int]: PID of process running within the vGPU VM."), 0},
  {"sm_util", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_sm_util, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_sm_util, PyDoc_STR("Union[~_numpy.uint32, int]: SM (3D/Compute) Util Value."), 0},
  {"mem_util", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_mem_util, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_mem_util, PyDoc_STR("Union[~_numpy.uint32, int]: Frame Buffer Memory Util Value."), 0},
  {"enc_util", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_enc_util, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_enc_util, PyDoc_STR("Union[~_numpy.uint32, int]: Encoder Util Value."), 0},
  {"dec_util", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_dec_util, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_dec_util, PyDoc_STR("Union[~_numpy.uint32, int]: Decoder Util Value."), 0},
  {"jpg_util", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_jpg_util, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_jpg_util, PyDoc_STR("Union[~_numpy.uint32, int]: Jpeg Util Value."), 0},
  {"ofa_util", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_ofa_util, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_ofa_util, PyDoc_STR("Union[~_numpy.uint32, int]: Ofa Util Value."), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* PyType_Spec flavor of the VgpuProcessUtilizationInfo_v1 type definition.
 * Unlike VgpuTypeBar1Info_v1 this type also implements __len__ and
 * __getitem__ (sequence/mapping protocols) since it models an array. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuProcessUtilizationInfo_v1(size=1)\n\nEmpty-initialize an array of `nvmlVgpuProcessUtilizationInfo_v1_t`.\n\nThe resulting object is of length `size` and of dtype `vgpu_process_utilization_info_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlVgpuProcessUtilizationInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1_slots,
};
#else

/* Legacy (non-spec) protocol tables for VgpuProcessUtilizationInfo_v1.
 * Number protocol: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuProcessUtilizationInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol: length and integer indexing (via the sq_item shim). */
static PySequenceMethods __pyx_tp_as_sequence_VgpuProcessUtilizationInfo_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol: length, subscript (__getitem__) and item assignment. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuProcessUtilizationInfo_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*mp_ass_subscript*/
};

/* Statically initialized PyTypeObject for
 * cuda.bindings._nvml.VgpuProcessUtilizationInfo_v1, used when
 * CYTHON_USE_TYPE_SPECS is 0 (otherwise the PyType_Spec path is compiled
 * instead).  The trailing fields are guarded by version/implementation
 * checks because the PyTypeObject layout differs across CPython releases. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuProcessUtilizationInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuProcessUtilizationInfo_v1, /*tp_as_number*/
  &__pyx_tp_as_sequence_VgpuProcessUtilizationInfo_v1, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuProcessUtilizationInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuProcessUtilizationInfo_v1(size=1)\n\nEmpty-initialize an array of `nvmlVgpuProcessUtilizationInfo_v1_t`.\n\nThe resulting object is of length `size` and of dtype `vgpu_process_utilization_info_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlVgpuProcessUtilizationInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the VgpuRuntimeState_v1 C-method vtable (populated during
 * module initialization; instances point at it via __pyx_vtab). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;

/* tp_new slot for VgpuRuntimeState_v1: allocate the instance, install the
 * vtable pointer, and initialize the owned-reference field `_owner` to None.
 * The args/kwargs tuples are unused here (handled later by tp_init). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for VgpuRuntimeState_v1.  Runs tp_finalize (if any),
 * untracks the object from the GC, invokes the user-level __dealloc__ with
 * the refcount temporarily bumped so the object appears alive, releases the
 * `_owner` reference, and finally frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Run tp_finalize first if present and not already run; it may resurrect
     the object, in which case deallocation is aborted. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    /* Preserve any pending exception across the __dealloc__ call, and keep
       the refcount non-zero while user code can observe the object. */
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to themselves from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot for VgpuRuntimeState_v1: visit the type object (via the
 * Cython helper) and the single owned Python reference `_owner`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err != 0)
    return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err != 0)
      return err;
  }
  return 0;
}

/* tp_clear slot for VgpuRuntimeState_v1: break the potential reference
 * cycle through `_owner`, resetting the field to None rather than leaving
 * it dangling. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript slot for VgpuRuntimeState_v1: forward item assignment to
 * the generated __setitem__ wrapper; item deletion (v == NULL) raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_11__setitem__(o, i, v);
  {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
  }
  return -1;
}

/* tp_richcompare slot for VgpuRuntimeState_v1: only equality is defined.
 * Py_EQ delegates to the generated __eq__; Py_NE negates the __eq__ result
 * (unless it is NotImplemented); all other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Getter for the read-only `ptr` property: forwards to the generated
 * property __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_3ptr_1__get__(o);
}

/* Getter for the `version` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_1__get__(o);
}

/* Setter for the `version` property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the `size_` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_size_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__1__get__(o);
}

/* Setter for the `size_` property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_size_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5size__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuRuntimeState_v1: alternative constructors
 * (from_data/from_ptr) plus the generated pickling stubs. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table for VgpuRuntimeState_v1: `ptr` is read-only;
 * `version` and `size_` are read/write struct fields. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"size_", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_size_, __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_size_, PyDoc_STR("int: OUT: The runtime state size of the vGPU instance."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec variant (CYTHON_USE_TYPE_SPECS): slot table for creating
 * VgpuRuntimeState_v1 via PyType_FromSpec-style APIs. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuRuntimeState_v1()\n\nEmpty-initialize an instance of `nvmlVgpuRuntimeState_v1_t`.\n\n\n.. seealso:: `nvmlVgpuRuntimeState_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1},
  {0, 0},
};
/* PyType_Spec combining the slot table above with name, size and flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1_spec = {
  "cuda.bindings._nvml.VgpuRuntimeState_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1_slots,
};
#else

/* PyNumberMethods table for VgpuRuntimeState_v1 (static-PyTypeObject build):
 * only nb_int is implemented, forwarding int(obj) to the generated __int__. */
static PyNumberMethods __pyx_tp_as_number_VgpuRuntimeState_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PyMappingMethods table: only item assignment is wired (to the helper that
 * forwards to __setitem__ and rejects deletion); no length or subscript. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuRuntimeState_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*mp_ass_subscript*/
};

/* Statically initialized PyTypeObject for
 * cuda.bindings._nvml.VgpuRuntimeState_v1 (non-type-spec build).  Trailing
 * fields are guarded because PyTypeObject layout varies across CPython
 * versions and implementations. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuRuntimeState_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuRuntimeState_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuRuntimeState_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuRuntimeState_v1()\n\nEmpty-initialize an instance of `nvmlVgpuRuntimeState_v1_t`.\n\n\n.. seealso:: `nvmlVgpuRuntimeState_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the _py_anon_pod2 C-method vtable (filled at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod2 __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod2;

/* tp_new slot for _py_anon_pod2: allocate the instance, install the vtable
 * pointer, and initialize the owned-reference field `_owner` to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod2;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for _py_anon_pod2.  Runs tp_finalize (if any), untracks
 * from the GC, invokes the user-level __dealloc__ with the refcount
 * temporarily bumped, releases `_owner`, and frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Run tp_finalize first if present and not already run; it may resurrect
     the object, in which case deallocation is aborted. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    /* Preserve any pending exception across the __dealloc__ call, and keep
       the refcount non-zero while user code can observe the object. */
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to themselves from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot for _py_anon_pod2: visit the type object (via the Cython
 * helper) and the single owned Python reference `_owner`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod2(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err != 0)
    return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err != 0)
      return err;
  }
  return 0;
}

/* tp_clear slot for _py_anon_pod2: break the potential reference cycle
 * through `_owner`, resetting the field to None. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript slot for _py_anon_pod2: forward item assignment to the
 * generated __setitem__ wrapper; item deletion (v == NULL) raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod2(PyObject *o, PyObject *i, PyObject *v) {
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_11__setitem__(o, i, v);
  {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
  }
  return -1;
}

/* tp_richcompare slot for _py_anon_pod2: only equality is defined.  Py_EQ
 * delegates to the generated __eq__; Py_NE negates the __eq__ result
 * (unless it is NotImplemented); all other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod2(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Getter for the read-only `ptr` property: forwards to the generated
 * property __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_3ptr_1__get__(o);
}

/* Getter for the `avg_factor` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_avg_factor(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_1__get__(o);
}

/* Setter for the `avg_factor` property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_avg_factor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_10avg_factor_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the `timeslice` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_timeslice(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_1__get__(o);
}

/* Setter for the `timeslice` property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_timeslice(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_9timeslice_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for _py_anon_pod2: alternative constructors
 * (from_data/from_ptr) plus the generated pickling stubs. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod2_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table for _py_anon_pod2: `ptr` is read-only;
 * `avg_factor` and `timeslice` are read/write fields. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"avg_factor", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_avg_factor, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_avg_factor, PyDoc_STR("int: "), 0},
  {"timeslice", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_timeslice, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod2_timeslice, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec variant (CYTHON_USE_TYPE_SPECS): slot table for creating
 * _py_anon_pod2 via PyType_FromSpec-style APIs. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_doc, (void *)PyDoc_STR("_py_anon_pod2()\n\nEmpty-initialize an instance of `_anon_pod2`.\n\n\n.. seealso:: `_anon_pod2`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod2},
  {0, 0},
};
/* PyType_Spec combining the slot table above with name, size and flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2_spec = {
  "cuda.bindings._nvml._py_anon_pod2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2_slots,
};
#else

/* PyNumberMethods table for _py_anon_pod2 (static-PyTypeObject build): only
 * nb_int is implemented, forwarding int(obj) to the generated __int__. */
static PyNumberMethods __pyx_tp_as_number__py_anon_pod2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PyMappingMethods table: only item assignment is wired (to the helper that
 * forwards to __setitem__ and rejects deletion); no length or subscript. */
static PyMappingMethods __pyx_tp_as_mapping__py_anon_pod2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod2, /*mp_ass_subscript*/
};

/* Statically initialized PyTypeObject for cuda.bindings._nvml._py_anon_pod2
 * (non-type-spec build).  Trailing fields are guarded because PyTypeObject
 * layout varies across CPython versions and implementations. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_py_anon_pod2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number__py_anon_pod2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping__py_anon_pod2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("_py_anon_pod2()\n\nEmpty-initialize an instance of `_anon_pod2`.\n\n\n.. seealso:: `_anon_pod2`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the _py_anon_pod3 C-method vtable (filled at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod3 __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod3;

/* tp_new slot for _py_anon_pod3: allocate the instance, install the vtable
 * pointer, and initialize the owned-reference field `_owner` to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod3(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod3;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for _py_anon_pod3.  Runs tp_finalize (if any), untracks
 * from the GC, invokes the user-level __dealloc__ with the refcount
 * temporarily bumped, releases `_owner`, and frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Run tp_finalize first if present and not already run; it may resurrect
     the object, in which case deallocation is aborted. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod3) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    /* Preserve any pending exception across the __dealloc__ call, and keep
       the refcount non-zero while user code can observe the object. */
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to themselves from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot for _py_anon_pod3: visit the type object (via the Cython
 * helper) and the single owned Python reference `_owner`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod3(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err != 0)
    return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err != 0)
      return err;
  }
  return 0;
}

/* tp_clear slot for _py_anon_pod3: break the potential reference cycle
 * through `_owner`, resetting the field to None. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript slot for _py_anon_pod3: forward item assignment to the
 * generated __setitem__ wrapper; item deletion (v == NULL) raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod3(PyObject *o, PyObject *i, PyObject *v) {
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_11__setitem__(o, i, v);
  {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
  }
  return -1;
}

/* tp_richcompare slot for _py_anon_pod3: only equality is defined.  Py_EQ
 * delegates to the generated __eq__; Py_NE negates the __eq__ result
 * (unless it is NotImplemented); all other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod3(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Getter for the read-only `ptr` property: forwards to the generated
 * property __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod3_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_3ptr_1__get__(o);
}

/* Getter for the `timeslice` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod3_timeslice(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_1__get__(o);
}

/* Setter for the `timeslice` property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod3_timeslice(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_9timeslice_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for _py_anon_pod3: from_data/from_ptr alternate constructors
 * plus the Cython-generated pickling helpers. Sentinel-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod3[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod3_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for _py_anon_pod3: "ptr" is read-only (no setter),
 * "timeslice" is read/write. Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod3[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod3_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"timeslice", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod3_timeslice, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod3_timeslice, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for _py_anon_pod3, emitted twice: as a PyType_Spec slot
 * array (heap type, limited-API-friendly) when CYTHON_USE_TYPE_SPECS is set,
 * otherwise as a classic statically allocated PyTypeObject. Both branches
 * wire up the same dealloc/repr/int/subscript/traverse/clear/richcompare/
 * methods/getset/init/new implementations defined above. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_doc, (void *)PyDoc_STR("_py_anon_pod3()\n\nEmpty-initialize an instance of `_anon_pod3`.\n\n\n.. seealso:: `_anon_pod3`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod3},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod3},
  {0, 0},
};
/* Spec consumed by PyType_FromSpec at module init. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3_spec = {
  "cuda.bindings._nvml._py_anon_pod3",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3_slots,
};
#else

/* Legacy static-type branch: only nb_int is populated. */
static PyNumberMethods __pyx_tp_as_number__py_anon_pod3 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only subscript assignment is supported on this POD wrapper. */
static PyMappingMethods __pyx_tp_as_mapping__py_anon_pod3 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod3, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_py_anon_pod3", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number__py_anon_pod3, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping__py_anon_pod3, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("_py_anon_pod3()\n\nEmpty-initialize an instance of `_anon_pod3`.\n\n\n.. seealso:: `_anon_pod3`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod3_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod3, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;

static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  /* tp_new slot: allocate the extension object, install the vtable and
   * initialize the _data field to None before __init__ runs. */
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;
  Py_INCREF(Py_None);
  obj->_data = Py_None;
  return self;
}

/* tp_dealloc slot. The statement order is significant: run tp_finalize (which
 * may resurrect the object), untrack from the GC, drop owned references, then
 * release the memory through the type's free slot. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the type defines a finalizer and it has not yet run, invoke it; a
   * nonzero return means the object was resurrected and must not be freed. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyObject *o, visitproc v, void *a) {
  /* tp_traverse slot for the cyclic GC: visit the type itself (heap types)
   * and the single owned PyObject* field, _data. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_data) {
    err = v(self->_data, a);
    if (err) return err;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyObject *o) {
  /* tp_clear slot: break reference cycles by replacing _data with None
   * (not NULL), so the field always holds a valid object. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *)o;
  PyObject *old = (PyObject *)self->_data;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old);
  return 0;
}

/* sq_item slot: implement integer indexing by boxing the index into a Python
 * int and delegating to the type's mapping subscript (__getitem__). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  /* Limited-API path: look the subscript slot up dynamically. */
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript slot: forward item assignment to the generated
   * __setitem__ wrapper; item deletion (v == NULL) is not supported. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry(PyObject *o1, PyObject *o2, int op) {
  /* tp_richcompare slot: only Py_EQ and Py_NE are implemented, both in terms
   * of the generated __eq__ wrapper; all other ops yield NotImplemented. */
  PyObject *eq;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  eq = __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9__eq__(o1, o2);
  if (likely(eq && eq != Py_NotImplemented)) {
    /* Invert the truth value of __eq__ to implement "!=". */
    int is_equal = __Pyx_PyObject_IsTrue(eq);
    Py_DECREF(eq);
    if (unlikely(is_equal < 0)) return NULL;
    eq = is_equal ? Py_False : Py_True;
    Py_INCREF(eq);
  }
  return eq;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  /* Read-only "ptr" property: trampoline to the generated __get__ wrapper. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_timestamp(PyObject *o, CYTHON_UNUSED void *x) {
  /* "timestamp" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_timestamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "timestamp" setter trampoline; attribute deletion (v == NULL) is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_9timestamp_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run_total(PyObject *o, CYTHON_UNUSED void *x) {
  /* "time_run_total" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run_total(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "time_run_total" setter trampoline; deletion is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14time_run_total_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run(PyObject *o, CYTHON_UNUSED void *x) {
  /* "time_run" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "time_run" setter trampoline; deletion is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_8time_run_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_sw_runlist_id(PyObject *o, CYTHON_UNUSED void *x) {
  /* "sw_runlist_id" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_sw_runlist_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "sw_runlist_id" setter trampoline; deletion is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_13sw_runlist_id_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_target_time_slice(PyObject *o, CYTHON_UNUSED void *x) {
  /* "target_time_slice" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_target_time_slice(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "target_time_slice" setter trampoline; deletion is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17target_time_slice_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_cumulative_preemption_time(PyObject *o, CYTHON_UNUSED void *x) {
  /* "cumulative_preemption_time" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_cumulative_preemption_time(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "cumulative_preemption_time" setter trampoline; deletion is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_26cumulative_preemption_time_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry__data(PyObject *o, CYTHON_UNUSED void *x) {
  /* Read-only "_data" property getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5_data_1__get__(o);
}

/* Method table for VgpuSchedulerLogEntry: from_data/from_ptr alternate
 * constructors plus the Cython-generated pickling helpers. Sentinel-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuSchedulerLogEntry: "ptr" and "_data" are read-only
 * (no setter); the struct fields are read/write. Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"timestamp", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_timestamp, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_timestamp, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"time_run_total", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run_total, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run_total, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"time_run", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_time_run, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"sw_runlist_id", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_sw_runlist_id, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_sw_runlist_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"target_time_slice", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_target_time_slice, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_target_time_slice, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"cumulative_preemption_time", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_cumulative_preemption_time, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_cumulative_preemption_time, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for VgpuSchedulerLogEntry, emitted twice: as a PyType_Spec
 * slot array (heap type) when CYTHON_USE_TYPE_SPECS is set, otherwise as a
 * classic statically allocated PyTypeObject. Unlike the anonymous-pod types,
 * this one also exposes sequence/mapping length and subscript (array-like). */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerLogEntry(size=1)\n\nEmpty-initialize an array of `nvmlVgpuSchedulerLogEntry_t`.\n\nThe resulting object is of length `size` and of dtype `vgpu_scheduler_log_entry_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlVgpuSchedulerLogEntry_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry},
  {0, 0},
};
/* Spec consumed by PyType_FromSpec at module init. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry_spec = {
  "cuda.bindings._nvml.VgpuSchedulerLogEntry",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry_slots,
};
#else

/* Legacy static-type branch: only nb_int is populated. */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerLogEntry = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* len() and integer indexing delegate to the generated wrappers. */
static PySequenceMethods __pyx_tp_as_sequence_VgpuSchedulerLogEntry = {
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerLogEntry = {
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerLogEntry", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerLogEntry, /*tp_as_number*/
  &__pyx_tp_as_sequence_VgpuSchedulerLogEntry, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerLogEntry, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerLogEntry(size=1)\n\nEmpty-initialize an array of `nvmlVgpuSchedulerLogEntry_t`.\n\nThe resulting object is of length `size` and of dtype `vgpu_scheduler_log_entry_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlVgpuSchedulerLogEntry_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod4 __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod4;

static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod4(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  /* tp_new slot: allocate the extension object, install the vtable and
   * initialize the _owner field to None before __init__ runs. */
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod4;
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc slot. Statement order is significant: run tp_finalize (which may
 * resurrect the object), untrack from the GC, call the user __dealloc__, drop
 * owned references, then release memory through the type's free slot. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod4(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* A nonzero return from PyObject_CallFinalizerFromDealloc means the object
   * was resurrected by its finalizer and must not be freed. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod4) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Invoke the Cython-level __dealloc__ with any live exception state saved
     * and the refcount temporarily bumped so the object appears alive to it. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod4(PyObject *o, visitproc v, void *a) {
  /* tp_traverse slot for the cyclic GC: visit the type itself (heap types)
   * and the single owned PyObject* field, _owner. */
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod4(PyObject *o) {
  /* tp_clear slot: break reference cycles by replacing _owner with None
   * (not NULL), so the field always holds a valid object. */
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *)o;
  PyObject *old = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod4(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript slot: forward item assignment to the generated
   * __setitem__ wrapper; item deletion (v == NULL) is not supported. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_11__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod4(PyObject *o1, PyObject *o2, int op) {
  /* tp_richcompare slot: only Py_EQ and Py_NE are implemented, both in terms
   * of the generated __eq__ wrapper; all other ops yield NotImplemented. */
  PyObject *eq;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  eq = __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9__eq__(o1, o2);
  if (likely(eq && eq != Py_NotImplemented)) {
    /* Invert the truth value of __eq__ to implement "!=". */
    int is_equal = __Pyx_PyObject_IsTrue(eq);
    Py_DECREF(eq);
    if (unlikely(is_equal < 0)) return NULL;
    eq = is_equal ? Py_False : Py_True;
    Py_INCREF(eq);
  }
  return eq;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  /* Read-only "ptr" property: trampoline to the generated __get__ wrapper. */
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_avg_factor(PyObject *o, CYTHON_UNUSED void *x) {
  /* "avg_factor" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_avg_factor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "avg_factor" setter trampoline; attribute deletion (v == NULL) is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_10avg_factor_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_frequency(PyObject *o, CYTHON_UNUSED void *x) {
  /* "frequency" getter trampoline. */
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_frequency(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* "frequency" setter trampoline; attribute deletion (v == NULL) is rejected. */
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_9frequency_3__set__(o, v);
}

/* Method table for _py_anon_pod4: from_data/from_ptr alternate constructors
 * plus the Cython-generated pickling helpers. Sentinel-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod4[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod4_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for _py_anon_pod4: "ptr" is read-only (no setter);
 * "avg_factor" and "frequency" are read/write. Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod4[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"avg_factor", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_avg_factor, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_avg_factor, PyDoc_STR("int: "), 0},
  {"frequency", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_frequency, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod4_frequency, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Heap-type definition for _py_anon_pod4 (CYTHON_USE_TYPE_SPECS branch):
 * slot array plus the PyType_Spec consumed by PyType_FromSpec at module init. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_doc, (void *)PyDoc_STR("_py_anon_pod4()\n\nEmpty-initialize an instance of `_anon_pod4`.\n\n\n.. seealso:: `_anon_pod4`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod4},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod4},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4_spec = {
  "cuda.bindings._nvml._py_anon_pod4",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4_slots,
};
#else
/* Static-PyTypeObject path: only nb_int is implemented; all other numeric
 * slots are left empty. */
static PyNumberMethods __pyx_tp_as_number__py_anon_pod4 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only (no length/subscript read here). */
static PyMappingMethods __pyx_tp_as_mapping__py_anon_pod4 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod4, /*mp_ass_subscript*/
};

/* Statically-initialized type object for `_py_anon_pod4`; used when
 * CYTHON_USE_TYPE_SPECS is off. Trailing slots are conditional on the
 * interpreter version/implementation being compiled against. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_py_anon_pod4", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number__py_anon_pod4, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping__py_anon_pod4, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("_py_anon_pod4()\n\nEmpty-initialize an instance of `_anon_pod4`.\n\n\n.. seealso:: `_anon_pod4`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod4_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod4, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-method vtable storage for `_py_anon_pod5` (filled in during module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml__py_anon_pod5 __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod5;

/* tp_new for `_py_anon_pod5`: allocate the object, wire up the vtable
 * pointer, and initialize the `_owner` slot to None (a strong reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod5(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod5;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for `_py_anon_pod5`: run tp_finalize (if any), call the
 * user-level __dealloc__, drop the `_owner` reference, then free storage. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod5(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance first; it may resurrect the object, in which
   * case deallocation must be abandoned (the early `return`). */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod5) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception, and temporarily bump the refcount so
     * the object appears alive while user __dealloc__ code runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for `_py_anon_pod5`: visit the type (for heap types) and the
 * `_owner` reference; propagate the first non-zero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod5(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc != 0)
    return rc;
  if (self->_owner != NULL) {
    rc = (*v)(self->_owner, a);
    if (rc != 0)
      return rc;
  }
  return 0;
}

/* GC clear for `_py_anon_pod5`: swap `_owner` back to None before dropping
 * the old reference, so the slot never holds a dangling pointer. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod5(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *)o;
  PyObject *old_owner = ((PyObject *)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for `_py_anon_pod5`: dispatch `obj[i] = v` to the
 * generated __setitem__; `del obj[i]` (v == NULL) is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod5(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    /* Deletion is not part of this type's contract. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_11__setitem__(o, i, v);
}

/* tp_richcompare for `_py_anon_pod5`: only == and != are supported; != is
 * synthesized by negating the generated __eq__. Other ops -> NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod5(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9__eq__(o1, o2);
  /* NULL (error) and NotImplemented are passed straight through. */
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int is_true = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(is_true < 0))
      return NULL;
    eq_result = is_true ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* getset getter thunk: `ptr` property (read-only; no matching setter). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod5_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_3ptr_1__get__(o);
}

/* getset getter thunk: `timeslice` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod5_timeslice(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_1__get__(o);
}

/* getset setter thunk: `timeslice` property. Assignment is forwarded to the
 * generated __set__; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod5_timeslice(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_9timeslice_3__set__(o, v);
}

/* Method table for `_py_anon_pod5`: constructors from raw data/pointer plus
 * the Cython pickle helpers. Terminated by an all-zero sentinel. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod5[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13_py_anon_pod5_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Property table for `_py_anon_pod5`: read-only `ptr` (setter slot is 0)
 * and read/write `timeslice`. Terminated by an all-zero sentinel. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod5[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod5_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"timeslice", __pyx_getprop_4cuda_8bindings_5_nvml_13_py_anon_pod5_timeslice, __pyx_setprop_4cuda_8bindings_5_nvml_13_py_anon_pod5_timeslice, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / type-spec path: build `_py_anon_pod5` as a heap type from a
 * PyType_Spec + slot array instead of a statically-initialized PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_doc, (void *)PyDoc_STR("_py_anon_pod5()\n\nEmpty-initialize an instance of `_anon_pod5`.\n\n\n.. seealso:: `_anon_pod5`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod5},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod5},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5_spec = {
  "cuda.bindings._nvml._py_anon_pod5", /* fully qualified name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5_slots,
};
#else
/* Static-PyTypeObject path: only nb_int is implemented; all other numeric
 * slots are left empty. */
static PyNumberMethods __pyx_tp_as_number__py_anon_pod5 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only (no length/subscript read here). */
static PyMappingMethods __pyx_tp_as_mapping__py_anon_pod5 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml__py_anon_pod5, /*mp_ass_subscript*/
};

/* Statically-initialized type object for `_py_anon_pod5`; used when
 * CYTHON_USE_TYPE_SPECS is off. Trailing slots are conditional on the
 * interpreter version/implementation being compiled against. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_py_anon_pod5", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number__py_anon_pod5, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping__py_anon_pod5, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("_py_anon_pod5()\n\nEmpty-initialize an instance of `_anon_pod5`.\n\n\n.. seealso:: `_anon_pod5`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13_py_anon_pod5_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml__py_anon_pod5, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-method vtable storage for `VgpuSchedulerCapabilities` (filled in during
 * module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;

/* tp_new for `VgpuSchedulerCapabilities`: allocate the object, wire up the
 * vtable pointer, and initialize `_owner` to None (a strong reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for `VgpuSchedulerCapabilities`: run tp_finalize (if any), call
 * the user-level __dealloc__, drop the `_owner` reference, then free storage. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance first; it may resurrect the object, in which
   * case deallocation must be abandoned (the early `return`). */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception, and temporarily bump the refcount so
     * the object appears alive while user __dealloc__ code runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal for `VgpuSchedulerCapabilities`: visit the type (for heap
 * types) and the `_owner` reference; propagate the first non-zero result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc != 0)
    return rc;
  if (self->_owner != NULL) {
    rc = (*v)(self->_owner, a);
    if (rc != 0)
      return rc;
  }
  return 0;
}

/* GC clear for `VgpuSchedulerCapabilities`: swap `_owner` back to None
 * before dropping the old reference, so the slot never dangles. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *)o;
  PyObject *old_owner = ((PyObject *)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for `VgpuSchedulerCapabilities`: dispatch `obj[i] = v` to
 * the generated __setitem__; `del obj[i]` (v == NULL) is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    /* Deletion is not part of this type's contract. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_11__setitem__(o, i, v);
}

/* tp_richcompare for `VgpuSchedulerCapabilities`: only == and != are
 * supported; != is synthesized by negating the generated __eq__. Other
 * comparison ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_9__eq__(o1, o2);
  /* NULL (error) and NotImplemented are passed straight through. */
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int is_true = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(is_true < 0))
      return NULL;
    eq_result = is_true ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* getset getter thunk: `ptr` property (read-only; no matching setter). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_3ptr_1__get__(o);
}

/* getset getter thunk: `supported_schedulers` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_supported_schedulers(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_1__get__(o);
}

/* getset setter thunk: `supported_schedulers` property. Assignment is
 * forwarded to the generated __set__; deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_supported_schedulers(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_20supported_schedulers_3__set__(o, v);
}

/* getset getter thunk: `max_timeslice` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_timeslice(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_1__get__(o);
}

/* getset setter thunk: `max_timeslice`; deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_timeslice(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13max_timeslice_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getset getter thunk: `min_timeslice` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_timeslice(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_1__get__(o);
}

/* getset setter thunk: `min_timeslice`; deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_timeslice(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13min_timeslice_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getset getter thunk: `is_arr_mode_supported` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_is_arr_mode_supported(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_1__get__(o);
}

/* getset setter thunk: `is_arr_mode_supported`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_is_arr_mode_supported(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21is_arr_mode_supported_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getset getter thunk: `max_frequency_for_arr` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_frequency_for_arr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_1__get__(o);
}

/* getset setter thunk: `max_frequency_for_arr`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_frequency_for_arr(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21max_frequency_for_arr_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getset getter thunk: `min_frequency_for_arr` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_frequency_for_arr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_1__get__(o);
}

/* getset setter thunk: `min_frequency_for_arr`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_frequency_for_arr(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_21min_frequency_for_arr_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getset getter thunk: `max_avg_factor_for_arr` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_avg_factor_for_arr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_1__get__(o);
}

/* getset setter thunk: `max_avg_factor_for_arr`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_avg_factor_for_arr(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22max_avg_factor_for_arr_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getset getter thunk: `min_avg_factor_for_arr` property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_avg_factor_for_arr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_1__get__(o);
}

/* getset setter thunk: `min_avg_factor_for_arr`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_avg_factor_for_arr(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_22min_avg_factor_for_arr_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for `VgpuSchedulerCapabilities`: constructors from raw
 * data/pointer plus the Cython pickle helpers. All-zero sentinel at the end. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Property table for `VgpuSchedulerCapabilities`: read-only `ptr` (setter
 * slot is 0) plus read/write accessors for each wrapped struct field. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"supported_schedulers", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_supported_schedulers, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_supported_schedulers, PyDoc_STR("~_numpy.uint32: (array of length 3)."), 0},
  {"max_timeslice", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_timeslice, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_timeslice, PyDoc_STR("int: "), 0},
  {"min_timeslice", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_timeslice, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_timeslice, PyDoc_STR("int: "), 0},
  {"is_arr_mode_supported", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_is_arr_mode_supported, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_is_arr_mode_supported, PyDoc_STR("int: "), 0},
  {"max_frequency_for_arr", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_frequency_for_arr, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_frequency_for_arr, PyDoc_STR("int: "), 0},
  {"min_frequency_for_arr", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_frequency_for_arr, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_frequency_for_arr, PyDoc_STR("int: "), 0},
  {"max_avg_factor_for_arr", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_avg_factor_for_arr, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_max_avg_factor_for_arr, PyDoc_STR("int: "), 0},
  {"min_avg_factor_for_arr", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_avg_factor_for_arr, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_min_avg_factor_for_arr, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / type-spec path: build `VgpuSchedulerCapabilities` as a heap
 * type from a PyType_Spec + slot array instead of a static PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerCapabilities()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerCapabilities_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerCapabilities_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities_spec = {
  "cuda.bindings._nvml.VgpuSchedulerCapabilities", /* fully qualified name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities_slots,
};
#else
/* Static-PyTypeObject path: only nb_int is implemented; all other numeric
 * slots are left empty. */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerCapabilities = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only (no length/subscript read here). */
static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerCapabilities = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*mp_ass_subscript*/
};

/* Statically-initialized type object for `VgpuSchedulerCapabilities`; used
 * when CYTHON_USE_TYPE_SPECS is off. Trailing slots are conditional on the
 * interpreter version/implementation being compiled against. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerCapabilities", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerCapabilities, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerCapabilities, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerCapabilities()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerCapabilities_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerCapabilities_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Zero-initialized vtable instance for VgpuLicenseExpiry; its function-pointer
 * entries are presumably populated during module init (not visible here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseExpiry __pyx_vtable_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;

/* tp_new slot for cuda.bindings._nvml.VgpuLicenseExpiry (Cython-generated).
 * Allocates the instance, wires in the C-method vtable, and initializes the
 * _owner field to None (with its own reference) before __init__ runs.  The
 * args/kwargs tuples are intentionally unused here; __init__ consumes them. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failure */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for VgpuLicenseExpiry (Cython-generated).
 * Order is deliberate: run tp_finalize first (it may resurrect the object),
 * untrack from the cyclic GC before touching fields, then invoke the
 * Python-level __dealloc__ with the refcount temporarily bumped and any
 * pending exception saved/restored around the call. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the type has a finalizer and it has not run yet, call it; a nonzero
   * return means the object was resurrected, so deallocation stops here. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily revive the object (refcount +1) so the user-level
     * __dealloc__ wrapper can execute safely, preserving any in-flight
     * exception across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-spec build) own a reference to their type object;
   * release it after the instance memory is freed. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot: reports references held by this object to the cyclic GC —
 * the type itself (via __Pyx_call_type_traverse, as needed for heap types)
 * and the _owner field. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear slot: breaks reference cycles by resetting _owner to None.  The
 * field is re-pointed (with a fresh None reference) BEFORE the old value is
 * released, so the object never exposes a stale _owner mid-collection. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript slot for VgpuLicenseExpiry: dispatches `obj[i] = v` to the
 * Cython-generated __setitem__ wrapper.  Deletion (`del obj[i]`, signalled by
 * v == NULL) is not implemented and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* v == NULL means subscript deletion, which this type rejects. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_11__setitem__(o, i, v);
}

/* tp_richcompare slot for VgpuLicenseExpiry.  Only equality is defined by the
 * type: Py_EQ forwards to the generated __eq__ wrapper, Py_NE is computed by
 * negating __eq__'s truth value, and every other operator yields
 * NotImplemented.  NULL (error) and NotImplemented results from __eq__ are
 * passed through unchanged. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuLicenseExpiry(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  int truth;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  /* Py_NE: invert the boolean value of __eq__. */
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_9__eq__(o1, o2);
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented) {
    return eq_result;
  }
  truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0)) return NULL;
  eq_result = truth ? Py_False : Py_True;
  Py_INCREF(eq_result);
  return eq_result;
}

/* Property thunks for VgpuLicenseExpiry.  Each getter/setter below forwards
 * to the corresponding Cython-generated __get__/__set__ wrapper for one field
 * of the wrapped nvmlVgpuLicenseExpiry_t struct ('ptr' is read-only; see the
 * PyGetSetDef table).  All setters reject property deletion (v == NULL) with
 * NotImplementedError("__del__"), mirroring a missing __del__. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_year(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_year(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4year_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_month(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_month(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5month_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_day(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_day(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3day_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_hour(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_hour(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4hour_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* 'min_' (not 'min') — presumably renamed to avoid shadowing Python's
 * builtin; the underlying C field name is not visible here. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_min_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_min_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_4min__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_sec(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_sec(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_3sec_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_status(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_status(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_6status_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuLicenseExpiry: from_data/from_ptr constructors plus the
 * pickling hooks, all using Cython's FASTCALL-with-keywords convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuLicenseExpiry[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for VgpuLicenseExpiry: 'ptr' is read-only (no setter); the
 * struct fields (year..status) are read/write via the thunks above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuLicenseExpiry[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"year", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_year, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_year, PyDoc_STR("int: "), 0},
  {"month", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_month, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_month, PyDoc_STR("int: "), 0},
  {"day", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_day, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_day, PyDoc_STR("int: "), 0},
  {"hour", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_hour, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_hour, PyDoc_STR("int: "), 0},
  {"min_", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_min_, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_min_, PyDoc_STR("int: "), 0},
  {"sec", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_sec, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_sec, PyDoc_STR("int: "), 0},
  {"status", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_status, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_status, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API builds): the VgpuLicenseExpiry type is created
 * at runtime from this slot table + spec instead of a static PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuLicenseExpiry()\n\nEmpty-initialize an instance of `nvmlVgpuLicenseExpiry_t`.\n\n\n.. seealso:: `nvmlVgpuLicenseExpiry_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseExpiry},
  {0, 0},  /* sentinel */
};
/* Spec: fully qualified name, instance size, itemsize 0, flags, slots. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry_spec = {
  "cuda.bindings._nvml.VgpuLicenseExpiry",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry_slots,
};
#else

/* Static-PyTypeObject path: the same type expressed as classic static tables.
 * Only nb_int (__int__) is populated; every other number slot is empty. */
static PyNumberMethods __pyx_tp_as_number_VgpuLicenseExpiry = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only; len() and reads are not provided. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuLicenseExpiry = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuLicenseExpiry", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuLicenseExpiry, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuLicenseExpiry, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  /* NOTE(review): CHECKTYPES/HAVE_NEWBUFFER are Python 2-era flags; presumably
   * defined to 0 on Python 3 by Cython's compat shims — confirm in preamble. */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuLicenseExpiry()\n\nEmpty-initialize an instance of `nvmlVgpuLicenseExpiry_t`.\n\n\n.. seealso:: `nvmlVgpuLicenseExpiry_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Zero-initialized vtable instance for GridLicenseExpiry; its function-pointer
 * entries are presumably populated during module init (not visible here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicenseExpiry __pyx_vtable_4cuda_8bindings_5_nvml_GridLicenseExpiry;

/* tp_new slot for cuda.bindings._nvml.GridLicenseExpiry (Cython-generated).
 * Allocates the instance, wires in the C-method vtable, and initializes the
 * _owner field to None (with its own reference) before __init__ runs. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failure */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicenseExpiry;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for GridLicenseExpiry (Cython-generated).
 * Order is deliberate: run tp_finalize first (it may resurrect the object),
 * untrack from the cyclic GC before touching fields, then invoke the
 * Python-level __dealloc__ with the refcount temporarily bumped and any
 * pending exception saved/restored around the call. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the type has a finalizer and it has not run yet, call it; a nonzero
   * return means the object was resurrected, so deallocation stops here. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicenseExpiry) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily revive the object (refcount +1) so the user-level
     * __dealloc__ wrapper can execute safely, preserving any in-flight
     * exception across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-spec build) own a reference to their type object;
   * release it after the instance memory is freed. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot: reports references held by this object to the cyclic GC —
 * the type itself (via __Pyx_call_type_traverse, as needed for heap types)
 * and the _owner field. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear slot: breaks reference cycles by resetting _owner to None.  The
 * field is re-pointed (with a fresh None reference) BEFORE the old value is
 * released, so the object never exposes a stale _owner mid-collection. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript slot for GridLicenseExpiry: dispatches `obj[i] = v` to the
 * Cython-generated __setitem__ wrapper.  Deletion (`del obj[i]`, signalled by
 * v == NULL) is not implemented and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* v == NULL means subscript deletion, which this type rejects. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_11__setitem__(o, i, v);
}

/* tp_richcompare slot for GridLicenseExpiry.  Only equality is defined by the
 * type: Py_EQ forwards to the generated __eq__ wrapper, Py_NE is computed by
 * negating __eq__'s truth value, and every other operator yields
 * NotImplemented.  NULL (error) and NotImplemented results from __eq__ are
 * passed through unchanged. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicenseExpiry(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  int truth;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  /* Py_NE: invert the boolean value of __eq__. */
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_9__eq__(o1, o2);
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented) {
    return eq_result;
  }
  truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0)) return NULL;
  eq_result = truth ? Py_False : Py_True;
  Py_INCREF(eq_result);
  return eq_result;
}

/* Property thunks for GridLicenseExpiry.  Each getter/setter below forwards
 * to the corresponding Cython-generated __get__/__set__ wrapper for one field
 * of the wrapped nvmlGridLicenseExpiry_t struct ('ptr' is read-only; see the
 * PyGetSetDef table).  All setters reject property deletion (v == NULL) with
 * NotImplementedError("__del__"), mirroring a missing __del__. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_year(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_year(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4year_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_month(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_month(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5month_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_day(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_day(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3day_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_hour(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_hour(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4hour_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* 'min_' (not 'min') — presumably renamed to avoid shadowing Python's
 * builtin; the underlying C field name is not visible here. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_min_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_min_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_4min__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_sec(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_sec(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_3sec_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_status(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_status(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_6status_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for GridLicenseExpiry: from_data/from_ptr constructors plus the
 * pickling hooks, all using Cython's FASTCALL-with-keywords convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GridLicenseExpiry[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17GridLicenseExpiry_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for GridLicenseExpiry: 'ptr' is read-only (no setter); the
 * struct fields (year..status) are read/write via the thunks above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GridLicenseExpiry[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"year", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_year, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_year, PyDoc_STR("int: "), 0},
  {"month", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_month, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_month, PyDoc_STR("int: "), 0},
  {"day", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_day, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_day, PyDoc_STR("int: "), 0},
  {"hour", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_hour, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_hour, PyDoc_STR("int: "), 0},
  {"min_", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_min_, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_min_, PyDoc_STR("int: "), 0},
  {"sec", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_sec, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_sec, PyDoc_STR("int: "), 0},
  {"status", __pyx_getprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_status, __pyx_setprop_4cuda_8bindings_5_nvml_17GridLicenseExpiry_status, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API builds): the GridLicenseExpiry type is created
 * at runtime from this slot table + spec instead of a static PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_doc, (void *)PyDoc_STR("GridLicenseExpiry()\n\nEmpty-initialize an instance of `nvmlGridLicenseExpiry_t`.\n\n\n.. seealso:: `nvmlGridLicenseExpiry_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicenseExpiry},
  {0, 0},  /* sentinel */
};
/* Spec: fully qualified name, instance size, itemsize 0, flags, slots. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry_spec = {
  "cuda.bindings._nvml.GridLicenseExpiry",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry_slots,
};
#else

/* Static-PyTypeObject path: the same type expressed as classic static tables.
 * Only nb_int (__int__) is populated; every other number slot is empty. */
static PyNumberMethods __pyx_tp_as_number_GridLicenseExpiry = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only; len() and reads are not provided. */
static PyMappingMethods __pyx_tp_as_mapping_GridLicenseExpiry = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GridLicenseExpiry", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GridLicenseExpiry, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GridLicenseExpiry, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  /* NOTE(review): CHECKTYPES/HAVE_NEWBUFFER are Python 2-era flags; presumably
   * defined to 0 on Python 3 by Cython's compat shims — confirm in preamble. */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GridLicenseExpiry()\n\nEmpty-initialize an instance of `nvmlGridLicenseExpiry_t`.\n\n\n.. seealso:: `nvmlGridLicenseExpiry_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_17GridLicenseExpiry_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GridLicenseExpiry, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Zero-initialized vtable instance for VgpuTypeIdInfo_v1; its function-pointer
 * entries are presumably populated during module init (not visible here). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;

/* tp_new slot for cuda.bindings._nvml.VgpuTypeIdInfo_v1 (Cython-generated).
 * Allocates the instance, wires in the C-method vtable, and initializes the
 * _owner field to None (with its own reference) before __init__ runs. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failure */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_vgpu_type_ids(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_vgpu_type_ids(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13vgpu_type_ids_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"vgpu_type_ids", __pyx_getprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_vgpu_type_ids, __pyx_setprop_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_vgpu_type_ids, PyDoc_STR("int: OUT: List of vGPU type IDs."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuTypeIdInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuTypeIdInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuTypeIdInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuTypeIdInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_VgpuTypeIdInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuTypeIdInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuTypeIdInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuTypeIdInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuTypeIdInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuTypeIdInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuTypeIdInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuTypeIdInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* ------------------------------------------------------------------------
 * VgpuTypeMaxInstance_v1 — Cython-generated CPython extension-type
 * machinery wrapping `nvmlVgpuTypeMaxInstance_v1_t` (see tp_doc below).
 * Same generated pattern as the other struct wrappers in this module.
 * ------------------------------------------------------------------------ */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;

/* tp_new: allocate the instance, install the vtable pointer, and
 * initialize the `_owner` slot to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize (if any), call the user __dealloc__ with the
 * refcount temporarily bumped and any pending exception saved/restored,
 * then clear `_owner` and free via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object; release it here. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse (GC support): visit the type and the `_owner` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear (GC support): reset `_owner` to None, dropping the old ref. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: `obj[i] = v` -> __setitem__; deletion raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ uses __eq__; Py_NE negates __eq__'s result
 * (NotImplemented passes through); everything else -> NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks: adapt getset-descriptor signatures to the generated
 * property wrappers; property deletion raises NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_vgpu_type_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_vgpu_type_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12vgpu_type_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_max_instance_per_gi(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_max_instance_per_gi(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19max_instance_per_gi_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: from_data / from_ptr constructors plus pickle support. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` read-only; `version`, `vgpu_type_id`, and
 * `max_instance_per_gi` read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"vgpu_type_id", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_vgpu_type_id, __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_vgpu_type_id, PyDoc_STR("int: IN: Handle to vGPU type."), 0},
  {"max_instance_per_gi", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_max_instance_per_gi, __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_max_instance_per_gi, PyDoc_STR("int: OUT: Maximum number of vGPU instances per GPU instance."), 0},
  {0, 0, 0, 0, 0}
};
/* Type registration: PyType_Spec (heap type) or static PyTypeObject,
 * depending on the build configuration; both expose the same slots. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuTypeMaxInstance_v1()\n\nEmpty-initialize an instance of `nvmlVgpuTypeMaxInstance_v1_t`.\n\n\n.. seealso:: `nvmlVgpuTypeMaxInstance_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1_spec = {
  "cuda.bindings._nvml.VgpuTypeMaxInstance_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_VgpuTypeMaxInstance_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuTypeMaxInstance_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuTypeMaxInstance_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuTypeMaxInstance_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuTypeMaxInstance_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuTypeMaxInstance_v1()\n\nEmpty-initialize an instance of `nvmlVgpuTypeMaxInstance_v1_t`.\n\n\n.. seealso:: `nvmlVgpuTypeMaxInstance_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* ------------------------------------------------------------------------
 * ActiveVgpuInstanceInfo_v1 — Cython-generated CPython extension-type
 * machinery wrapping `nvmlActiveVgpuInstanceInfo_v1_t`.
 * Same generated pattern as the other struct wrappers in this module.
 * ------------------------------------------------------------------------ */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;

/* tp_new: allocate the instance, install the vtable pointer, and
 * initialize the `_owner` slot to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize (if any), call the user __dealloc__ with the
 * refcount temporarily bumped and any pending exception saved/restored,
 * then clear `_owner` and free via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object; release it here. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse (GC support): visit the type and the `_owner` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear (GC support): reset `_owner` to None, dropping the old ref. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: `obj[i] = v` -> __setitem__; deletion raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ uses __eq__; Py_NE negates __eq__'s result
 * (NotImplemented passes through); everything else -> NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks: adapt getset-descriptor signatures to the generated
 * property wrappers; property deletion raises NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_vgpu_instances(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_vgpu_instances(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14vgpu_instances_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: from_data / from_ptr constructors plus pickle support. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` read-only; `version` and `vgpu_instances`
 * read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"vgpu_instances", __pyx_getprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_vgpu_instances, __pyx_setprop_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_vgpu_instances, PyDoc_STR("int: IN/OUT: list of active vGPU instances."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Spec slot list for ActiveVgpuInstanceInfo_v1 (used when the type
 * is created via PyType_FromSpec instead of a static PyTypeObject). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ActiveVgpuInstanceInfo_v1()\n\nEmpty-initialize an instance of `nvmlActiveVgpuInstanceInfo_v1_t`.\n\n\n.. seealso:: `nvmlActiveVgpuInstanceInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1},
  {0, 0},  /* sentinel */
};
/* PyType_Spec for ActiveVgpuInstanceInfo_v1: fully-qualified name,
 * basicsize, itemsize=0, flags, and the slot list above. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1_spec = {
  "cuda.bindings._nvml.ActiveVgpuInstanceInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1_slots,
};
#else

/* Number protocol for ActiveVgpuInstanceInfo_v1: only nb_int (__int__)
 * is implemented; every other numeric slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_ActiveVgpuInstanceInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (__setitem__) is wired up;
 * length and subscript lookup are not provided for this type. */
static PyMappingMethods __pyx_tp_as_mapping_ActiveVgpuInstanceInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for ActiveVgpuInstanceInfo_v1
 * (non-type-spec builds). Trailing fields are guarded by preprocessor
 * checks because the layout of PyTypeObject varies across CPython/PyPy
 * versions. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ActiveVgpuInstanceInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ActiveVgpuInstanceInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ActiveVgpuInstanceInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ActiveVgpuInstanceInfo_v1()\n\nEmpty-initialize an instance of `nvmlActiveVgpuInstanceInfo_v1_t`.\n\n\n.. seealso:: `nvmlActiveVgpuInstanceInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;

/* tp_new for VgpuCreatablePlacementInfo_v1: allocate the extension
 * object, install its vtable pointer, and start both object fields
 * (`_owner`, `_refs`) as owned references to Py_None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_INCREF(Py_None);
  self->_refs = (PyObject*)Py_None;
  return obj;
}

/* tp_dealloc for VgpuCreatablePlacementInfo_v1: runs the user
 * __dealloc__, clears the two object fields, then frees the object.
 * The refcount manipulation around the __dealloc__ call and the
 * PyErr_Fetch/Restore pair are order-critical (see inline notes). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a finalizer exists and has not run yet, let CPython invoke it;
   * it may resurrect the object, in which case we must return. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save any pending exception, temporarily bump the refcount so the
     * object looks alive while __dealloc__ runs, then restore both. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for VgpuCreatablePlacementInfo_v1: visit the type itself
 * (heap-type bookkeeping) and the two owned object fields. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = v(self->_owner, a);
    if (rc) return rc;
  }
  if (self->_refs) {
    rc = v(self->_refs, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear for VgpuCreatablePlacementInfo_v1: reset `_owner` and `_refs`
 * back to None, dropping each previously held reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *)o;
  PyObject *old_owner = self->_owner;
  self->_owner = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old_owner);
  PyObject *old_refs = (PyObject*)self->_refs;
  self->_refs = (PyObject*)Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old_refs);
  return 0;
}

/* mp_ass_subscript for VgpuCreatablePlacementInfo_v1: v != NULL means
 * assignment (delegated to __setitem__); v == NULL means deletion,
 * which this type does not support. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for VgpuCreatablePlacementInfo_v1: only == and != are
 * supported, both implemented via the generated __eq__; != is the
 * boolean negation of __eq__ unless __eq__ returned NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Property getter/setter C glue for VgpuCreatablePlacementInfo_v1.
 * Each getter delegates to the generated __get__ wrapper; each setter
 * delegates to __set__ when a value is supplied, and rejects attribute
 * deletion (v == NULL) with NotImplementedError. */

/* read-only: pointer address of the underlying struct */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7version_3__set__(o, v);
  }
  else {
    /* `del obj.version` is not supported */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_vgpu_type_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_vgpu_type_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12vgpu_type_id_3__set__(o, v);
  }
  else {
    /* deletion not supported */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5count_3__set__(o, v);
  }
  else {
    /* deletion not supported */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_placement_ids(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_placement_ids(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13placement_ids_3__set__(o, v);
  }
  else {
    /* deletion not supported */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuCreatablePlacementInfo_v1: from_data/from_ptr
 * constructors plus the generated pickle hooks. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for VgpuCreatablePlacementInfo_v1: `ptr` is read-only;
 * the struct fields have both getters and setters. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"vgpu_type_id", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_vgpu_type_id, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_vgpu_type_id, PyDoc_STR("int: IN: Handle to vGPU type."), 0},
  {"count", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_count, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_count, PyDoc_STR("int: IN/OUT: Count of the placement IDs."), 0},
  {"placement_ids", __pyx_getprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_placement_ids, __pyx_setprop_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_placement_ids, PyDoc_STR("int: IN/OUT: Placement IDs for the vGPU type."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Spec slot list for VgpuCreatablePlacementInfo_v1 (type-spec
 * builds only). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuCreatablePlacementInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuCreatablePlacementInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuCreatablePlacementInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1},
  {0, 0},  /* sentinel */
};
/* PyType_Spec for VgpuCreatablePlacementInfo_v1: qualified name,
 * basicsize, itemsize=0, flags, and the slot list above. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuCreatablePlacementInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1_slots,
};
#else

/* Number protocol for VgpuCreatablePlacementInfo_v1: only nb_int
 * (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuCreatablePlacementInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only (__setitem__). */
static PyMappingMethods __pyx_tp_as_mapping_VgpuCreatablePlacementInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for VgpuCreatablePlacementInfo_v1
 * (non-type-spec builds). Trailing fields are preprocessor-guarded to
 * match the PyTypeObject layout of the target CPython/PyPy version. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuCreatablePlacementInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuCreatablePlacementInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuCreatablePlacementInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuCreatablePlacementInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuCreatablePlacementInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuCreatablePlacementInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_HwbcEntry __pyx_vtable_4cuda_8bindings_5_nvml_HwbcEntry;

/* tp_new for HwbcEntry: allocate the extension object, install its
 * vtable pointer, and start `_data` as an owned reference to Py_None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_HwbcEntry(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_HwbcEntry;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  return obj;
}

/* tp_dealloc for HwbcEntry: run any pending finalizer (which may
 * resurrect the object), untrack from GC, drop the `_data` reference,
 * then free via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_HwbcEntry(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_HwbcEntry) {
      /* Finalizer may resurrect the object; bail out if it did. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for HwbcEntry: visit the type itself and the owned
 * `_data` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_HwbcEntry(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_data) {
    rc = v(self->_data, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear for HwbcEntry: reset `_data` back to None, dropping the
 * previously held reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_HwbcEntry(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *)o;
  PyObject *old_data = self->_data;
  self->_data = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old_data);
  return 0;
}

/* sq_item for HwbcEntry: implements sequence indexing by boxing the C
 * index into a Python int and delegating to the type's mp_subscript
 * slot (i.e. __getitem__). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_HwbcEntry(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  /* Limited-API build: look the subscript slot up dynamically. */
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for HwbcEntry: v != NULL means assignment
 * (delegated to __setitem__); v == NULL means deletion, which this
 * type does not support. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_HwbcEntry(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_13__setitem__(o, i, v);
}

/* tp_richcompare for HwbcEntry: only == and != are supported, both via
 * the generated __eq__; != is the boolean negation of __eq__ unless
 * __eq__ returned NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_HwbcEntry(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Property getter/setter C glue for HwbcEntry. Getters delegate to the
 * generated __get__ wrappers; setters delegate to __set__ when a value
 * is supplied and reject attribute deletion (v == NULL). */

/* read-only: pointer address of the underlying struct */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry_hwbc_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9HwbcEntry_hwbc_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7hwbc_id_3__set__(o, v);
  }
  else {
    /* deletion not supported */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry_firmware_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9HwbcEntry_firmware_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_16firmware_version_3__set__(o, v);
  }
  else {
    /* deletion not supported */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* read-only internal attribute exposing the backing `_data` object */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5_data_1__get__(o);
}

/* Method table for HwbcEntry: from_data/from_ptr constructors plus the
 * generated pickle hooks. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_HwbcEntry[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9HwbcEntry_20__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for HwbcEntry: `ptr` and `_data` are read-only;
 * `hwbc_id` and `firmware_version` are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_HwbcEntry[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"hwbc_id", __pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry_hwbc_id, __pyx_setprop_4cuda_8bindings_5_nvml_9HwbcEntry_hwbc_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"firmware_version", __pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry_firmware_version, __pyx_setprop_4cuda_8bindings_5_nvml_9HwbcEntry_firmware_version, PyDoc_STR("~_numpy.int8: (array of length 32)."), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_9HwbcEntry__data, 0, 0, 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Spec slot list for HwbcEntry (type-spec builds). Unlike the
 * two struct types above, this type also wires up the sequence and
 * mapping length/subscript slots (it models an array of structs). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_HwbcEntry_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_doc, (void *)PyDoc_STR("HwbcEntry(size=1)\n\nEmpty-initialize an array of `nvmlHwbcEntry_t`.\n\nThe resulting object is of length `size` and of dtype `hwbc_entry_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlHwbcEntry_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_HwbcEntry},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_HwbcEntry},
  {0, 0},  /* sentinel */
};
/* PyType_Spec for HwbcEntry: qualified name, basicsize, itemsize=0,
 * flags, and the slot list above. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_HwbcEntry_spec = {
  "cuda.bindings._nvml.HwbcEntry",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_HwbcEntry_slots,
};
#else

/* Number protocol for HwbcEntry: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_HwbcEntry = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol for HwbcEntry: length and item access only
 * (the instance models an array of nvmlHwbcEntry_t structs). */
static PySequenceMethods __pyx_tp_as_sequence_HwbcEntry = {
  __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_HwbcEntry, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol for HwbcEntry: length, subscript, and item
 * assignment (deletion rejected in the ass_subscript glue). */
static PyMappingMethods __pyx_tp_as_mapping_HwbcEntry = {
  __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_HwbcEntry, /*mp_ass_subscript*/
};

/* Statically-allocated PyTypeObject for HwbcEntry (used when type specs
 * are not in use). Slots are positional; preprocessor guards track slot
 * layout differences between CPython versions and PyPy. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_HwbcEntry = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""HwbcEntry", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_HwbcEntry, /*tp_as_number*/
  &__pyx_tp_as_sequence_HwbcEntry, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_HwbcEntry, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("HwbcEntry(size=1)\n\nEmpty-initialize an array of `nvmlHwbcEntry_t`.\n\nThe resulting object is of length `size` and of dtype `hwbc_entry_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlHwbcEntry_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_9HwbcEntry_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_HwbcEntry, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the LedState C-method vtable (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_LedState __pyx_vtable_4cuda_8bindings_5_nvml_LedState;

/* tp_new for LedState: allocate the instance, attach the vtable pointer,
 * and initialise the _owner field to a new reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_LedState(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_LedState;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for LedState.
 * Order matters: run an optional Python-level finalizer first, then the
 * generated __dealloc__ (with the refcount temporarily bumped so the object
 * appears alive while user code runs), then drop _owner and free memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_LedState(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* A subclass finalizer may resurrect the object; if it does, abort. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_LedState) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user __dealloc__ call, and
     * keep the refcount non-zero so it cannot re-trigger deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_8LedState_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for LedState: visit the type (heap types) and _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_LedState(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc)
    return rc;
  if (self->_owner) {
    rc = v(self->_owner, a);
    if (rc)
      return rc;
  }
  return 0;
}

/* tp_clear for LedState: reset _owner to None, dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_LedState(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for LedState: item assignment forwards to __setitem__;
 * deletion (v == NULL) is reported as unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_LedState(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_11__setitem__(o, i, v);
}

/* tp_richcompare for LedState: only == and != are defined; != is derived
 * by negating __eq__ unless it returned NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_LedState(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *res = __pyx_pw_4cuda_8bindings_5_nvml_8LedState_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property glue for LedState: thin C wrappers that forward to the
 * generated Cython property implementations. Setters reject deletion
 * (v == NULL) since `del obj.attr` is not supported. */

/* Getter for the read-only LedState.ptr property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8LedState_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_3ptr_1__get__(o);
}

/* Getter for LedState.cause. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8LedState_cause(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5cause_1__get__(o);
}

/* Setter for LedState.cause. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_8LedState_cause(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5cause_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for LedState.color. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8LedState_color(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5color_1__get__(o);
}

/* Setter for LedState.color. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_8LedState_color(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5color_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of LedState (fastcall + keywords convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_LedState[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8LedState_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for LedState: ptr is read-only; cause/color are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_LedState[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_8LedState_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"cause", __pyx_getprop_4cuda_8bindings_5_nvml_8LedState_cause, __pyx_setprop_4cuda_8bindings_5_nvml_8LedState_cause, PyDoc_STR("~_numpy.int8: (array of length 256)."), 0},
  {"color", __pyx_getprop_4cuda_8bindings_5_nvml_8LedState_color, __pyx_setprop_4cuda_8bindings_5_nvml_8LedState_color, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot list and spec used to create the LedState heap type on the
 * CYTHON_USE_TYPE_SPECS (limited-API compatible) build path. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_LedState_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_doc, (void *)PyDoc_STR("LedState()\n\nEmpty-initialize an instance of `nvmlLedState_t`.\n\n\n.. seealso:: `nvmlLedState_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_LedState},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8LedState_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_LedState},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_LedState_spec = {
  "cuda.bindings._nvml.LedState",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_LedState_slots,
};
#else

/* Number protocol table for LedState (static-PyTypeObject build path).
 * Only nb_int is implemented; entries are positional. */
static PyNumberMethods __pyx_tp_as_number_LedState = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_8LedState_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for LedState: only item assignment is supported
 * (no length or subscript-read at the mapping level). */
static PyMappingMethods __pyx_tp_as_mapping_LedState = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_LedState, /*mp_ass_subscript*/
};

/* Statically-allocated PyTypeObject for LedState (non-spec build path). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_LedState = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""LedState", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_LedState, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_8LedState_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_LedState, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_LedState, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("LedState()\n\nEmpty-initialize an instance of `nvmlLedState_t`.\n\n\n.. seealso:: `nvmlLedState_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_LedState, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_LedState, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_LedState, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_LedState, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_LedState, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_8LedState_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_LedState, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the UnitInfo C-method vtable (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitInfo __pyx_vtable_4cuda_8bindings_5_nvml_UnitInfo;

/* tp_new for UnitInfo: allocate the instance, attach the vtable pointer,
 * and initialise the _owner field to a new reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_UnitInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitInfo;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for UnitInfo.
 * Order matters: run an optional Python-level finalizer first, then the
 * generated __dealloc__ (with the refcount temporarily bumped so the object
 * appears alive while user code runs), then drop _owner and free memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* A subclass finalizer may resurrect the object; if it does, abort. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user __dealloc__ call, and
     * keep the refcount non-zero so it cannot re-trigger deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for UnitInfo: visit the type (heap types) and _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc)
    return rc;
  if (self->_owner) {
    rc = v(self->_owner, a);
    if (rc)
      return rc;
  }
  return 0;
}

/* tp_clear for UnitInfo: reset _owner to None, dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_UnitInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for UnitInfo: item assignment forwards to __setitem__;
 * deletion (v == NULL) is reported as unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_11__setitem__(o, i, v);
}

/* tp_richcompare for UnitInfo: only == and != are defined; != is derived
 * by negating __eq__ unless it returned NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitInfo(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *res = __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property glue for UnitInfo: thin C wrappers that forward to the
 * generated Cython property implementations. Setters reject deletion
 * (v == NULL) since `del obj.attr` is not supported. */

/* Getter for the read-only UnitInfo.ptr property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_3ptr_1__get__(o);
}

/* Getter for UnitInfo.name. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_4name_1__get__(o);
}

/* Setter for UnitInfo.name. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_4name_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for UnitInfo.id. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_2id_1__get__(o);
}

/* Setter for UnitInfo.id. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_2id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for UnitInfo.serial. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_serial(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_6serial_1__get__(o);
}

/* Setter for UnitInfo.serial. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_serial(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_6serial_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for UnitInfo.firmware_version. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_firmware_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_1__get__(o);
}

/* Setter for UnitInfo.firmware_version. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_firmware_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_16firmware_version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of UnitInfo (fastcall + keywords convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_UnitInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8UnitInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for UnitInfo: ptr is read-only; the rest are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_UnitInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"name", __pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_name, __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_name, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_id, __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_id, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {"serial", __pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_serial, __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_serial, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {"firmware_version", __pyx_getprop_4cuda_8bindings_5_nvml_8UnitInfo_firmware_version, __pyx_setprop_4cuda_8bindings_5_nvml_8UnitInfo_firmware_version, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot list and spec used to create the UnitInfo heap type on the
 * CYTHON_USE_TYPE_SPECS (limited-API compatible) build path. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_UnitInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_doc, (void *)PyDoc_STR("UnitInfo()\n\nEmpty-initialize an instance of `nvmlUnitInfo_t`.\n\n\n.. seealso:: `nvmlUnitInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_UnitInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_UnitInfo_spec = {
  "cuda.bindings._nvml.UnitInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_UnitInfo_slots,
};
#else

/* Number protocol table for UnitInfo (static-PyTypeObject build path).
 * Only nb_int is implemented; entries are positional. */
static PyNumberMethods __pyx_tp_as_number_UnitInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for UnitInfo: only item assignment is supported. */
static PyMappingMethods __pyx_tp_as_mapping_UnitInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitInfo, /*mp_ass_subscript*/
};

/* Statically-allocated PyTypeObject for UnitInfo (non-spec build path). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_UnitInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""UnitInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_UnitInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_UnitInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("UnitInfo()\n\nEmpty-initialize an instance of `nvmlUnitInfo_t`.\n\n\n.. seealso:: `nvmlUnitInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_UnitInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_UnitInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_UnitInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_8UnitInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_UnitInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the PSUInfo C-method vtable (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_PSUInfo __pyx_vtable_4cuda_8bindings_5_nvml_PSUInfo;

/* tp_new for PSUInfo: allocate the instance, attach the vtable pointer,
 * and initialise the _owner field to a new reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_PSUInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_PSUInfo;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for PSUInfo.
 * Order matters: run an optional Python-level finalizer first, then the
 * generated __dealloc__ (with the refcount temporarily bumped so the object
 * appears alive while user code runs), then drop _owner and free memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PSUInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* A subclass finalizer may resurrect the object; if it does, abort. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PSUInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the user __dealloc__ call, and
     * keep the refcount non-zero so it cannot re-trigger deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for PSUInfo: visit the type (heap types) and _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_PSUInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc)
    return rc;
  if (self->_owner) {
    rc = v(self->_owner, a);
    if (rc)
      return rc;
  }
  return 0;
}

/* tp_clear for PSUInfo: reset _owner to None, dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_PSUInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for PSUInfo: item assignment forwards to __setitem__;
 * deletion (v == NULL) is reported as unsupported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PSUInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_11__setitem__(o, i, v);
}

/* tp_richcompare for PSUInfo: only == and != are defined; != is derived
 * by negating __eq__ unless it returned NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PSUInfo(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *res = __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property glue for PSUInfo: thin C wrappers that forward to the
 * generated Cython property implementations. Setters reject deletion
 * (v == NULL) since `del obj.attr` is not supported. */

/* Getter for the read-only PSUInfo.ptr property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_3ptr_1__get__(o);
}

/* Getter for PSUInfo.state. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_state(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5state_1__get__(o);
}

/* Setter for PSUInfo.state. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_state(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5state_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for PSUInfo.current. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_current(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7current_1__get__(o);
}

/* Setter for PSUInfo.current. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_current(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7current_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for PSUInfo.voltage. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_voltage(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_1__get__(o);
}

/* Setter for PSUInfo.voltage. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_voltage(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7voltage_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for PSUInfo.power. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_power(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5power_1__get__(o);
}

/* Setter for PSUInfo.power. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_power(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5power_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of PSUInfo: alternate constructors plus pickle helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_PSUInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_7PSUInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for PSUInfo; `ptr` is read-only (NULL setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_PSUInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"state", __pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_state, __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_state, PyDoc_STR("~_numpy.int8: (array of length 256)."), 0},
  {"current", __pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_current, __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_current, PyDoc_STR("int: "), 0},
  {"voltage", __pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_voltage, __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_voltage, PyDoc_STR("int: "), 0},
  {"power", __pyx_getprop_4cuda_8bindings_5_nvml_7PSUInfo_power, __pyx_setprop_4cuda_8bindings_5_nvml_7PSUInfo_power, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build path: the PSUInfo type is created at runtime from this
   slot list and spec (heap type, Limited-API compatible). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_PSUInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_doc, (void *)PyDoc_STR("PSUInfo()\n\nEmpty-initialize an instance of `nvmlPSUInfo_t`.\n\n\n.. seealso:: `nvmlPSUInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_PSUInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_PSUInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_PSUInfo_spec = {
  "cuda.bindings._nvml.PSUInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_PSUInfo_slots,
};
#else

/* Classic build path: statically initialized slot tables and PyTypeObject. */
static PyNumberMethods __pyx_tp_as_number_PSUInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_PSUInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_PSUInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_PSUInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""PSUInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_PSUInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_PSUInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_PSUInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("PSUInfo()\n\nEmpty-initialize an instance of `nvmlPSUInfo_t`.\n\n\n.. seealso:: `nvmlPSUInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_PSUInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_PSUInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_PSUInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_PSUInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_PSUInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_7PSUInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_PSUInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for UnitFanInfo's vtable; NOTE(review): presumably populated during module init and referenced via __pyx_vtabptr_... in tp_new below. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanInfo __pyx_vtable_4cuda_8bindings_5_nvml_UnitFanInfo;

/* tp_new for UnitFanInfo: allocate the instance, install the vtable pointer,
   and initialize the _data slot to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanInfo;
  Py_INCREF(Py_None);
  obj->_data = Py_None;
  return self;
}

/* tp_dealloc for UnitFanInfo: optionally run tp_finalize, untrack from the GC,
   drop the _data reference, then free via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer when no subclass overrode tp_dealloc and the
     object has not already been finalized; a nonzero return means the
     finalizer resurrected the object, so deallocation must stop here. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for UnitFanInfo: visit the type (for heap types) and _data. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitFanInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_data != NULL) {
    err = v(self->_data, a);
    if (err) return err;
  }
  return 0;
}

/* tp_clear for UnitFanInfo: reset _data to None, releasing the old reference
   only after the slot holds a valid object (GC-safe ordering). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_UnitFanInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *)o;
  PyObject *old = (PyObject *)self->_data;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old);
  return 0;
}

/* sq_item shim for UnitFanInfo: implements obj[i] for a C integer index by
   boxing it as a Python int and delegating to the type's mp_subscript slot. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_UnitFanInfo(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  /* Limited-API path: look the slot up dynamically. */
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for UnitFanInfo: item assignment delegates to __setitem__;
   item deletion (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitFanInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_13__setitem__(o, i, v);
}

/* tp_richcompare for UnitFanInfo: only __eq__ is defined; != is derived by
   negating its result, every other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitFanInfo(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq = __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_9__eq__(o1, o2);
  /* NULL (error) and NotImplemented propagate unchanged. */
  if (likely(eq && eq != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq);
    Py_DECREF(eq);
    if (unlikely(truth < 0)) return NULL;
    eq = truth ? Py_False : Py_True;
    Py_INCREF(eq);
  }
  return eq;
}

/* Getter for UnitFanInfo.ptr (read-only property). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3ptr_1__get__(o);
  return value;
}

/* Getter for UnitFanInfo.speed. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo_speed(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_1__get__(o);
  return value;
}

/* Setter for UnitFanInfo.speed; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_11UnitFanInfo_speed(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5speed_3__set__(o, v);
}

/* Getter for UnitFanInfo.state. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo_state(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_1__get__(o);
  return value;
}

/* Setter for UnitFanInfo.state; attribute deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_11UnitFanInfo_state(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5state_3__set__(o, v);
}

/* Getter for the internal UnitFanInfo._data attribute (read-only). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo__data(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5_data_1__get__(o);
  return value;
}

/* Python-visible methods of UnitFanInfo: alternate constructors plus pickle helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_UnitFanInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11UnitFanInfo_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for UnitFanInfo; `ptr` and `_data` are read-only (NULL setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_UnitFanInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"speed", __pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo_speed, __pyx_setprop_4cuda_8bindings_5_nvml_11UnitFanInfo_speed, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"state", __pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo_state, __pyx_setprop_4cuda_8bindings_5_nvml_11UnitFanInfo_state, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_11UnitFanInfo__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build path: UnitFanInfo is created at runtime from this slot list
   and spec. Unlike PSUInfo, it also exposes sequence/mapping protocol slots. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_doc, (void *)PyDoc_STR("UnitFanInfo(size=1)\n\nEmpty-initialize an array of `nvmlUnitFanInfo_t`.\n\nThe resulting object is of length `size` and of dtype `unit_fan_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlUnitFanInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_UnitFanInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo_spec = {
  "cuda.bindings._nvml.UnitFanInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo_slots,
};
#else

/* Classic build path: statically initialized slot tables and PyTypeObject. */
static PyNumberMethods __pyx_tp_as_number_UnitFanInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PySequenceMethods __pyx_tp_as_sequence_UnitFanInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_UnitFanInfo, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_UnitFanInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitFanInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""UnitFanInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_UnitFanInfo, /*tp_as_number*/
  &__pyx_tp_as_sequence_UnitFanInfo, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_UnitFanInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("UnitFanInfo(size=1)\n\nEmpty-initialize an array of `nvmlUnitFanInfo_t`.\n\nThe resulting object is of length `size` and of dtype `unit_fan_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlUnitFanInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_11UnitFanInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for EventData's vtable; NOTE(review): presumably populated during module init and referenced via __pyx_vtabptr_... in tp_new below. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EventData __pyx_vtable_4cuda_8bindings_5_nvml_EventData;

/* tp_new for EventData: allocate the instance, install the vtable pointer,
   and initialize the _owner slot to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EventData(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_EventData;
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc for EventData: optionally run tp_finalize, untrack from the GC,
   invoke the user-level __dealloc__, drop the _owner reference, then free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EventData(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer when no subclass overrode tp_dealloc and the
     object has not already been finalized; a nonzero return means the
     finalizer resurrected the object, so deallocation must stop here. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EventData) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily bump the refcount (0 -> 1) so the Python-level __dealloc__
       can run safely on an object being destroyed; any pending exception is
       saved before and restored after the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_9EventData_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for EventData: visit the type (for heap types) and _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_EventData(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* tp_clear for EventData: reset _owner to None, releasing the old reference
   only after the slot holds a valid object (GC-safe ordering). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_EventData(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *)o;
  PyObject *old = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old);
  return 0;
}

/* mp_ass_subscript for EventData: item assignment delegates to __setitem__;
   item deletion (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EventData(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_11__setitem__(o, i, v);
}

/* tp_richcompare for EventData: only __eq__ is defined; != is derived by
   negating its result, every other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EventData(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq = __pyx_pw_4cuda_8bindings_5_nvml_9EventData_9__eq__(o1, o2);
  /* NULL (error) and NotImplemented propagate unchanged. */
  if (likely(eq && eq != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq);
    Py_DECREF(eq);
    if (unlikely(truth < 0)) return NULL;
    eq = truth ? Py_False : Py_True;
    Py_INCREF(eq);
  }
  return eq;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9EventData_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9EventData_device_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_7device__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_device_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_7device__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9EventData_event_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_event_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9EventData_event_data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_data_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_event_data(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_10event_data_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9EventData_gpu_instance_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_gpu_instance_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_15gpu_instance_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_9EventData_compute_instance_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_compute_instance_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_9EventData_19compute_instance_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of EventData: alternate constructors plus pickle helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_EventData[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_9EventData_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for EventData; `ptr` is read-only (NULL setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_EventData[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_9EventData_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"device_", __pyx_getprop_4cuda_8bindings_5_nvml_9EventData_device_, __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_device_, PyDoc_STR("int: "), 0},
  {"event_type", __pyx_getprop_4cuda_8bindings_5_nvml_9EventData_event_type, __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_event_type, PyDoc_STR("int: "), 0},
  {"event_data", __pyx_getprop_4cuda_8bindings_5_nvml_9EventData_event_data, __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_event_data, PyDoc_STR("int: "), 0},
  {"gpu_instance_id", __pyx_getprop_4cuda_8bindings_5_nvml_9EventData_gpu_instance_id, __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_gpu_instance_id, PyDoc_STR("int: "), 0},
  {"compute_instance_id", __pyx_getprop_4cuda_8bindings_5_nvml_9EventData_compute_instance_id, __pyx_setprop_4cuda_8bindings_5_nvml_9EventData_compute_instance_id, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build path: EventData is created at runtime from this slot list
   and spec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_EventData_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_doc, (void *)PyDoc_STR("EventData()\n\nEmpty-initialize an instance of `nvmlEventData_t`.\n\n\n.. seealso:: `nvmlEventData_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_EventData},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_9EventData_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_EventData},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_EventData_spec = {
  "cuda.bindings._nvml.EventData",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_EventData_slots,
};
#else

/* Classic build path: statically initialized slot tables (PyTypeObject follows). */
static PyNumberMethods __pyx_tp_as_number_EventData = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_9EventData_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_EventData = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EventData, /*mp_ass_subscript*/
};

/* Static PyTypeObject for EventData (non-type-specs build).  Mirrors the
   PyType_Slot list in the #if branch above; the preprocessor conditionals
   track slot-layout differences across CPython/PyPy versions. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_EventData = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""EventData", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EventData, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_9EventData_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_EventData, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_EventData, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("EventData()\n\nEmpty-initialize an instance of `nvmlEventData_t`.\n\n\n.. seealso:: `nvmlEventData_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_EventData, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_EventData, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_EventData, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_EventData, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_EventData, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_9EventData_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_EventData, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_AccountingStats __pyx_vtable_4cuda_8bindings_5_nvml_AccountingStats;

/* tp_new: allocate an AccountingStats instance, attach the class vtable and
 * initialize its _owner reference to None.  Constructor arguments are
 * ignored here; __init__ processes them. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_AccountingStats(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_AccountingStats;
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc for AccountingStats: runs the user-level __dealloc__, drops the
   _owner reference and releases the object's memory.  The statement order
   here is load-bearing; do not reorder. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_AccountingStats(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance to run (and possibly resurrect the object)
     first, but only when this exact generated dealloc is installed — a
     subclass with its own dealloc handles finalization itself. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_AccountingStats) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save any pending exception across the __dealloc__ call, and bump the
       refcount so the object cannot be recursively deallocated while the
       user __dealloc__ body runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for AccountingStats: visit the type object (for heap types)
 * and the single managed reference, _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_AccountingStats(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear for AccountingStats: swap _owner back to None (never NULL, so
 * other code can rely on the field being a valid object) and drop the old
 * reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_AccountingStats(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript shim for AccountingStats: item assignment delegates to
 * __setitem__; item deletion (v == NULL) is rejected with
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_AccountingStats(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_11__setitem__(o, i, v);
}

/* Rich comparison for AccountingStats: only == and != are supported; != is
 * synthesized by negating the result of the generated __eq__.  Everything
 * else answers NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_AccountingStats(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    /* Invert the truth value of __eq__; propagate errors and
       NotImplemented unchanged. */
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property accessor shims for AccountingStats.  Each getter/setter simply
   forwards to the corresponding Cython-generated __get__/__set__ wrapper.
   Setters reject attribute deletion (v == NULL) with NotImplementedError,
   since the generated properties define no __del__. */

/* ptr (read-only): pointer address of the underlying data. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_3ptr_1__get__(o);
}

/* gpu_utilization (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_gpu_utilization(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_gpu_utilization(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15gpu_utilization_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* memory_utilization (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_memory_utilization(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_memory_utilization(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_18memory_utilization_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* max_memory_usage (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_max_memory_usage(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_max_memory_usage(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_16max_memory_usage_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* time (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_time(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_4time_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_time(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_4time_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* start_time (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_start_time(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_start_time(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10start_time_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* is_running (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_is_running(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_is_running(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_10is_running_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for AccountingStats: from_data/from_ptr constructors plus
   pickle support helpers.  All use Cython's FASTCALL calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_AccountingStats[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15AccountingStats_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for AccountingStats: read-only `ptr` plus one read/write
   entry per nvmlAccountingStats_t field, wired to the shims above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_AccountingStats[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"gpu_utilization", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_gpu_utilization, __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_gpu_utilization, PyDoc_STR("int: "), 0},
  {"memory_utilization", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_memory_utilization, __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_memory_utilization, PyDoc_STR("int: "), 0},
  {"max_memory_usage", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_max_memory_usage, __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_max_memory_usage, PyDoc_STR("int: "), 0},
  {"time", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_time, __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_time, PyDoc_STR("int: "), 0},
  {"start_time", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_start_time, __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_start_time, PyDoc_STR("int: "), 0},
  {"is_running", __pyx_getprop_4cuda_8bindings_5_nvml_15AccountingStats_is_running, __pyx_setprop_4cuda_8bindings_5_nvml_15AccountingStats_is_running, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot list + PyType_Spec for AccountingStats (type-specs build):
   consumed by PyType_FromSpec at module init; mirrors the static
   PyTypeObject in the #else branch below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_AccountingStats_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_doc, (void *)PyDoc_STR("AccountingStats()\n\nEmpty-initialize an instance of `nvmlAccountingStats_t`.\n\n\n.. seealso:: `nvmlAccountingStats_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_AccountingStats},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_AccountingStats},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_AccountingStats_spec = {
  "cuda.bindings._nvml.AccountingStats",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_AccountingStats_slots,
};
#else

/* Number protocol table for AccountingStats (static-PyTypeObject build):
   only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_AccountingStats = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table for AccountingStats: only item assignment
   (__setitem__) is supported. */
static PyMappingMethods __pyx_tp_as_mapping_AccountingStats = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_AccountingStats, /*mp_ass_subscript*/
};

/* Static PyTypeObject for AccountingStats (non-type-specs build).  Mirrors
   the PyType_Slot list above; the preprocessor conditionals track
   slot-layout differences across CPython/PyPy versions. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_AccountingStats = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""AccountingStats", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_AccountingStats, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_AccountingStats, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_AccountingStats, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("AccountingStats()\n\nEmpty-initialize an instance of `nvmlAccountingStats_t`.\n\n\n.. seealso:: `nvmlAccountingStats_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_AccountingStats, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_AccountingStats, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_AccountingStats, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_AccountingStats, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_AccountingStats, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15AccountingStats_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_AccountingStats, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EncoderSessionInfo __pyx_vtable_4cuda_8bindings_5_nvml_EncoderSessionInfo;

/* tp_new: allocate an EncoderSessionInfo instance, attach the class vtable
 * and initialize its _data reference to None.  Constructor arguments are
 * ignored here; __init__ processes them. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_EncoderSessionInfo;
  Py_INCREF(Py_None);
  obj->_data = Py_None;
  return self;
}

/* tp_dealloc for EncoderSessionInfo: drops the _data reference and releases
   the object's memory.  Unlike AccountingStats, this type has no user
   __dealloc__ to invoke. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance to run (and possibly resurrect the object)
     first, but only when this exact generated dealloc is installed. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EncoderSessionInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for EncoderSessionInfo: visit the type object (for heap
 * types) and the single managed reference, _data. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_data) {
    err = v(self->_data, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear for EncoderSessionInfo: swap _data back to None (never NULL, so
 * other code can rely on the field being a valid object) and drop the old
 * reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *)o;
  PyObject *old_data = (PyObject *)self->_data;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old_data);
  return 0;
}

/* sq_item shim for EncoderSessionInfo: implement sequence-style indexing by
   boxing the C index into a Python int and delegating to the type's
   mp_subscript (__getitem__).  The #if picks direct slot access vs.
   PyType_GetSlot depending on the build configuration. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript shim for EncoderSessionInfo: item assignment delegates
 * to __setitem__; item deletion (v == NULL) is rejected with
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13__setitem__(o, i, v);
}

/* Rich comparison for EncoderSessionInfo: only == and != are supported;
 * != is synthesized by negating the result of the generated __eq__.
 * Everything else answers NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EncoderSessionInfo(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    /* Invert the truth value of __eq__; propagate errors and
       NotImplemented unchanged. */
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property accessor shims for EncoderSessionInfo.  Each getter/setter
   forwards to the corresponding Cython-generated __get__/__set__ wrapper.
   Setters reject attribute deletion (v == NULL) with NotImplementedError,
   since the generated properties define no __del__. */

/* ptr (read-only): pointer address of the underlying data. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3ptr_1__get__(o);
}

/* session_id (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_session_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_session_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10session_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* pid (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_pid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3pid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* vgpu_instance (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_vgpu_instance(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_vgpu_instance(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_13vgpu_instance_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* codec_type (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_codec_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_codec_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_10codec_type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* h_resolution (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_h_resolution(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_h_resolution(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12h_resolution_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* v_resolution (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_v_resolution(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_v_resolution(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_12v_resolution_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* average_fps (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_fps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_fps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11average_fps_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* average_latency (read/write). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_latency(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_latency(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15average_latency_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* _data (read-only): backing buffer object. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5_data_1__get__(o);
}

/* Method table for EncoderSessionInfo: from_data/from_ptr constructors plus
   pickle support helpers.  All use Cython's FASTCALL calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_EncoderSessionInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18EncoderSessionInfo_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for EncoderSessionInfo: read-only `ptr` and `_data` plus
   one read/write entry per nvmlEncoderSessionInfo_t field, wired to the
   shims above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_EncoderSessionInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"session_id", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_session_id, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_session_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_pid, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_pid, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"vgpu_instance", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_vgpu_instance, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_vgpu_instance, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"codec_type", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_codec_type, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_codec_type, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"h_resolution", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_h_resolution, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_h_resolution, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"v_resolution", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_v_resolution, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_v_resolution, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"average_fps", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_fps, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_fps, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"average_latency", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_latency, __pyx_setprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo_average_latency, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_18EncoderSessionInfo__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot list + PyType_Spec for EncoderSessionInfo (type-specs build):
   consumed by PyType_FromSpec at module init.  Unlike the scalar structs,
   this array-like type also wires up sequence/mapping length and item
   access (__len__/__getitem__). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_doc, (void *)PyDoc_STR("EncoderSessionInfo(size=1)\n\nEmpty-initialize an array of `nvmlEncoderSessionInfo_t`.\n\nThe resulting object is of length `size` and of dtype `encoder_session_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlEncoderSessionInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_EncoderSessionInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo_spec = {
  "cuda.bindings._nvml.EncoderSessionInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo_slots,
};
#else

/* Number protocol table for EncoderSessionInfo (static-PyTypeObject build):
   only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_EncoderSessionInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol table for EncoderSessionInfo: __len__ and indexed item
   access (via the sq_item shim that delegates to __getitem__). */
static PySequenceMethods __pyx_tp_as_sequence_EncoderSessionInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol table for EncoderSessionInfo: __len__, __getitem__ and
   __setitem__ are all supported (array-like struct wrapper). */
static PyMappingMethods __pyx_tp_as_mapping_EncoderSessionInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for EncoderSessionInfo (non-spec build
 * path).  Mirrors the PyType_Slot array above; trailing fields are guarded by
 * version/feature #ifs because PyTypeObject's layout varies across CPython
 * versions and PyPy. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""EncoderSessionInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_EncoderSessionInfo, /*tp_as_number*/
  &__pyx_tp_as_sequence_EncoderSessionInfo, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_EncoderSessionInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("EncoderSessionInfo(size=1)\n\nEmpty-initialize an array of `nvmlEncoderSessionInfo_t`.\n\nThe resulting object is of length `size` and of dtype `encoder_session_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlEncoderSessionInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18EncoderSessionInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_EncoderSessionInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for FBCStats (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCStats __pyx_vtable_4cuda_8bindings_5_nvml_FBCStats;

/* tp_new for FBCStats: allocate the instance, install the vtable pointer and
 * initialize the `_owner` attribute to None.  Ctor args are ignored here;
 * argument handling happens in tp_init (__init__). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FBCStats(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed: propagate NULL */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_FBCStats;
  p->_owner = Py_None; Py_INCREF(Py_None);  /* Cython initializes object fields to None, not NULL */
  return o;
}

/* tp_dealloc for FBCStats: run tp_finalize / the user __dealloc__, drop the
 * `_owner` reference, then free the instance via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCStats(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer from here when this exact dealloc owns the
   * slot (i.e. not a subclass override); the finalizer may resurrect o. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCStats) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save any pending exception, temporarily bump the refcount so the
     * Python-visible __dealloc__ can safely touch o, then restore both. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);  /* heap types own a reference held by each instance */
  #endif
}

/* GC traverse for FBCStats: visit the type (heap-type protocol) and the one
 * object-typed field `_owner`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_FBCStats(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear for FBCStats: reset `_owner` to None (Cython's convention, rather
 * than NULL) and release the old reference afterwards to avoid reentrancy
 * issues during collection. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_FBCStats(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for FBCStats: dispatch `obj[i] = v` to __setitem__;
 * `del obj[i]` (v == NULL) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FBCStats(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for FBCStats: only __eq__ is defined in the .pyx, so
 * Py_NE is synthesized by negating __eq__'s result; all other operators
 * return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FBCStats(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_9__eq__(o1, o2);
      /* NotImplemented (and NULL/error) pass through unchanged. */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property get/set thunks for FBCStats.  Each getter forwards to the
 * generated __get__ wrapper; each setter forwards to __set__ when a value is
 * supplied and raises NotImplementedError on attribute deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_sessions_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_8FBCStats_sessions_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_14sessions_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_average_fps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_8FBCStats_average_fps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_11average_fps_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_average_latency(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_8FBCStats_average_latency(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15average_latency_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for FBCStats (all FASTCALL with keyword support). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_FBCStats[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_8FBCStats_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table for FBCStats: `ptr` is read-only (no setter), the struct
 * fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_FBCStats[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"sessions_count", __pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_sessions_count, __pyx_setprop_4cuda_8bindings_5_nvml_8FBCStats_sessions_count, PyDoc_STR("int: "), 0},
  {"average_fps", __pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_average_fps, __pyx_setprop_4cuda_8bindings_5_nvml_8FBCStats_average_fps, PyDoc_STR("int: "), 0},
  {"average_latency", __pyx_getprop_4cuda_8bindings_5_nvml_8FBCStats_average_latency, __pyx_setprop_4cuda_8bindings_5_nvml_8FBCStats_average_latency, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot array + PyType_Spec for FBCStats (CYTHON_USE_TYPE_SPECS build
 * path).  Unlike EncoderSessionInfo, FBCStats has no length/subscript-get
 * slots — only item assignment via Py_mp_ass_subscript. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_FBCStats_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_doc, (void *)PyDoc_STR("FBCStats()\n\nEmpty-initialize an instance of `nvmlFBCStats_t`.\n\n\n.. seealso:: `nvmlFBCStats_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_FBCStats},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_FBCStats},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_FBCStats_spec = {
  "cuda.bindings._nvml.FBCStats", /* name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /* flags */
  __pyx_type_4cuda_8bindings_5_nvml_FBCStats_slots, /* slots */
};
#else

/* Protocol tables for FBCStats on the static-PyTypeObject build path.
 * Only nb_int and mp_ass_subscript are populated; FBCStats wraps a single
 * struct, so no sequence protocol is provided. */
static PyNumberMethods __pyx_tp_as_number_FBCStats = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only (no length, no subscript get). */
static PyMappingMethods __pyx_tp_as_mapping_FBCStats = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FBCStats, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for FBCStats (non-spec build path).
 * Mirrors the PyType_Slot array above; trailing fields are #if-guarded for
 * layout differences across CPython versions and PyPy. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_FBCStats = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""FBCStats", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCStats, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_FBCStats, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_FBCStats, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("FBCStats()\n\nEmpty-initialize an instance of `nvmlFBCStats_t`.\n\n\n.. seealso:: `nvmlFBCStats_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_FBCStats, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_FBCStats, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_FBCStats, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_FBCStats, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_FBCStats, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_8FBCStats_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_FBCStats, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for FBCSessionInfo (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FBCSessionInfo __pyx_vtable_4cuda_8bindings_5_nvml_FBCSessionInfo;

/* tp_new for FBCSessionInfo: allocate the instance, install the vtable
 * pointer and initialize the `_data` attribute to None.  Ctor args are
 * ignored here; argument handling happens in tp_init (__init__). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FBCSessionInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;  /* allocation failed: propagate NULL */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_FBCSessionInfo;
  p->_data = Py_None; Py_INCREF(Py_None);  /* Cython initializes object fields to None, not NULL */
  return o;
}

/* tp_dealloc for FBCSessionInfo: run tp_finalize if present, drop the
 * `_data` reference, then free via the type's tp_free.  No user __dealloc__
 * exists for this type, so there is no refcount-resurrection step here. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCSessionInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer from here when this exact dealloc owns the
   * slot (i.e. not a subclass override); the finalizer may resurrect o. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCSessionInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);  /* heap types own a reference held by each instance */
  #endif
}

/* GC traverse for FBCSessionInfo: visit the type (heap-type protocol) and
 * the one object-typed field `_data`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_FBCSessionInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* GC clear for FBCSessionInfo: reset `_data` to None (Cython's convention,
 * rather than NULL) and release the old reference afterwards to avoid
 * reentrancy issues during collection. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_FBCSessionInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item adapter for FBCSessionInfo: box the C index as a Python int and
 * forward to the type's mp_subscript, so integer indexing reuses the
 * mapping-protocol __getitem__ implementation. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_FBCSessionInfo(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for FBCSessionInfo: dispatch `obj[i] = v` to __setitem__;
 * `del obj[i]` (v == NULL) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FBCSessionInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for FBCSessionInfo: only __eq__ is defined in the .pyx, so
 * Py_NE is synthesized by negating __eq__'s result; all other operators
 * return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FBCSessionInfo(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_9__eq__(o1, o2);
      /* NotImplemented (and NULL/error) pass through unchanged. */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property get/set thunks for FBCSessionInfo.  Each getter forwards to the
 * generated __get__ wrapper; each setter forwards to __set__ when a value is
 * supplied and raises NotImplementedError on attribute deletion (v == NULL).
 * `ptr` and `_data` are getter-only (no setter thunk is generated). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_10session_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_pid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_pid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3pid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_vgpu_instance(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_vgpu_instance(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13vgpu_instance_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_display_ordinal(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_display_ordinal(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15display_ordinal_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12session_type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_flags(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_flags(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_13session_flags_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_max_resolution(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_max_resolution(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16h_max_resolution_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_max_resolution(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_max_resolution(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_16v_max_resolution_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_resolution(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_resolution(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12h_resolution_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_resolution(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_resolution(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_12v_resolution_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_fps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_fps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11average_fps_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_latency(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_latency(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15average_latency_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5_data_1__get__(o);
}

/* Method table for FBCSessionInfo (all FASTCALL with keyword support). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_FBCSessionInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_14FBCSessionInfo_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table for FBCSessionInfo: `ptr` and `_data` are read-only (no
 * setter); the struct fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_FBCSessionInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"session_id", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_id, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"pid", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_pid, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_pid, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"vgpu_instance", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_vgpu_instance, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_vgpu_instance, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"display_ordinal", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_display_ordinal, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_display_ordinal, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"session_type", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_type, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_type, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"session_flags", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_flags, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_session_flags, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"h_max_resolution", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_max_resolution, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_max_resolution, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"v_max_resolution", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_max_resolution, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_max_resolution, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"h_resolution", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_resolution, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_h_resolution, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"v_resolution", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_resolution, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_v_resolution, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"average_fps", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_fps, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_fps, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"average_latency", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_latency, __pyx_setprop_4cuda_8bindings_5_nvml_14FBCSessionInfo_average_latency, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_14FBCSessionInfo__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot array + PyType_Spec for FBCSessionInfo (CYTHON_USE_TYPE_SPECS
 * build path).  This array-like type exposes length, integer indexing and
 * full mapping get/set in addition to the common slots. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_doc, (void *)PyDoc_STR("FBCSessionInfo(size=1)\n\nEmpty-initialize an array of `nvmlFBCSessionInfo_t`.\n\nThe resulting object is of length `size` and of dtype `fbc_session_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlFBCSessionInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_FBCSessionInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo_spec = {
  "cuda.bindings._nvml.FBCSessionInfo", /* name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /* flags */
  __pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo_slots, /* slots */
};
#else

/* Number protocol for FBCSessionInfo: only __int__ is implemented
 * (returns via the nb_int slot); all other numeric slots are empty. */
static PyNumberMethods __pyx_tp_as_number_FBCSessionInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol for FBCSessionInfo: supports len() and integer indexing. */
static PySequenceMethods __pyx_tp_as_sequence_FBCSessionInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_FBCSessionInfo, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol for FBCSessionInfo: len(), obj[key] and obj[key] = value. */
static PyMappingMethods __pyx_tp_as_mapping_FBCSessionInfo = {
  __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FBCSessionInfo, /*mp_ass_subscript*/
};

/* Statically-initialized type object for FBCSessionInfo, used on the
 * non-type-specs build path (full C API). Mirrors the slot table in the
 * CYTHON_USE_TYPE_SPECS branch above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""FBCSessionInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_FBCSessionInfo, /*tp_as_number*/
  &__pyx_tp_as_sequence_FBCSessionInfo, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_FBCSessionInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("FBCSessionInfo(size=1)\n\nEmpty-initialize an array of `nvmlFBCSessionInfo_t`.\n\nThe resulting object is of length `size` and of dtype `fbc_session_info_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlFBCSessionInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_14FBCSessionInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_FBCSessionInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemCaps __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;

/* tp_new: allocate a ConfComputeSystemCaps instance, attach its vtable and
 * initialize the `_owner` field to Py_None (with its own reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for ConfComputeSystemCaps: run an eventual finalizer, call the
 * Cython-level __dealloc__, drop the `_owner` reference and free the object. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Give tp_finalize a chance to run first; if it resurrects the object,
   * PyObject_CallFinalizerFromDealloc() returns nonzero and we must abort. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily bump the refcount so __dealloc__ cannot re-trigger
     * deallocation; preserve any pending exception across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type (for heap-type builds) and the `_owner`
 * reference, which is the only PyObject* field this object holds. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear: reset `_owner` to None (keeping the field always valid) before
 * releasing the old reference, so the object stays safe mid-collection. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: `v == NULL` means `del o[i]`, which this type rejects;
 * otherwise delegate to the Cython-generated __setitem__. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_11__setitem__(o, i, v);
}

/* tp_richcompare: only __eq__ is implemented; Py_NE is derived by negating
 * the __eq__ result, every other operator yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeSystemCaps(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9__eq__(o1, o2);
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented)
    return eq_result;  /* propagate error or NotImplemented unchanged */
  int truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0)) return NULL;
  PyObject *inverted = truth ? Py_False : Py_True;
  Py_INCREF(inverted);
  return inverted;
}

/* getset glue for ConfComputeSystemCaps: each getter/setter below forwards to
 * the corresponding Cython-generated property implementation. Setters reject
 * deletion (v == NULL) since the underlying C struct fields cannot be unset. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_cpu_caps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_cpu_caps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_8cpu_caps_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_gpus_caps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_gpus_caps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_9gpus_caps_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for ConfComputeSystemCaps: the `from_data`/`from_ptr`
 * constructors plus pickling hooks, all using the fastcall convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeSystemCaps[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for ConfComputeSystemCaps: read-only `ptr` plus the
 * read/write struct fields `cpu_caps` and `gpus_caps`. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeSystemCaps[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"cpu_caps", __pyx_getprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_cpu_caps, __pyx_setprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_cpu_caps, PyDoc_STR("int: "), 0},
  {"gpus_caps", __pyx_getprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_gpus_caps, __pyx_setprop_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_gpus_caps, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot array + heap-type spec for ConfComputeSystemCaps (type-specs build).
 * Unlike FBCSessionInfo, this type has no sequence protocol or __len__. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_doc, (void *)PyDoc_STR("ConfComputeSystemCaps()\n\nEmpty-initialize an instance of `nvmlConfComputeSystemCaps_t`.\n\n\n.. seealso:: `nvmlConfComputeSystemCaps_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemCaps},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps_spec = {
  "cuda.bindings._nvml.ConfComputeSystemCaps",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps_slots,
};
#else

/* Number protocol for ConfComputeSystemCaps: only __int__ is implemented. */
static PyNumberMethods __pyx_tp_as_number_ConfComputeSystemCaps = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for ConfComputeSystemCaps: item assignment only
 * (no length or subscript read at the protocol level). */
static PyMappingMethods __pyx_tp_as_mapping_ConfComputeSystemCaps = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*mp_ass_subscript*/
};

/* Statically-initialized type object for ConfComputeSystemCaps
 * (non-type-specs build path). Mirrors the slot array above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ConfComputeSystemCaps", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ConfComputeSystemCaps, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ConfComputeSystemCaps, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ConfComputeSystemCaps()\n\nEmpty-initialize an instance of `nvmlConfComputeSystemCaps_t`.\n\n\n.. seealso:: `nvmlConfComputeSystemCaps_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeSystemState __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeSystemState;

/* tp_new: allocate a ConfComputeSystemState instance, attach its vtable and
 * initialize the `_owner` field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemState;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for ConfComputeSystemState: run an eventual finalizer, call the
 * Cython-level __dealloc__, drop the `_owner` reference and free the object. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If tp_finalize resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemState) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Refcount bump guards against recursive dealloc from __dealloc__;
     * a pending exception is preserved across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type (heap-type builds) and the `_owner` field,
 * the only PyObject* this object holds. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: swap `_owner` to None before decref'ing the old value, so the
 * field stays valid even if the decref runs arbitrary code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: delegate `o[i] = v` to __setitem__; `v == NULL`
 * (i.e. `del o[i]`) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only __eq__ is implemented; Py_NE negates the __eq__
 * result; all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeSystemState(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_9__eq__(o1, o2);
      /* NULL (error) and NotImplemented propagate unchanged. */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* getset glue for ConfComputeSystemState: each getter/setter forwards to the
 * corresponding Cython-generated property implementation. Setters reject
 * deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_environment(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_environment(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_11environment_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_cc_feature(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_cc_feature(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_10cc_feature_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_dev_tools_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_dev_tools_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14dev_tools_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for ConfComputeSystemState: `from_data`/`from_ptr`
 * constructors plus pickling hooks, all using the fastcall convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeSystemState[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeSystemState_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table: read-only `ptr` plus the read/write struct fields
 * `environment`, `cc_feature` and `dev_tools_mode`. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeSystemState[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"environment", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_environment, __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_environment, PyDoc_STR("int: "), 0},
  {"cc_feature", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_cc_feature, __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_cc_feature, PyDoc_STR("int: "), 0},
  {"dev_tools_mode", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_dev_tools_mode, __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeSystemState_dev_tools_mode, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot array + heap-type spec for ConfComputeSystemState (type-specs build). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_doc, (void *)PyDoc_STR("ConfComputeSystemState()\n\nEmpty-initialize an instance of `nvmlConfComputeSystemState_t`.\n\n\n.. seealso:: `nvmlConfComputeSystemState_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemState},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState_spec = {
  "cuda.bindings._nvml.ConfComputeSystemState",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState_slots,
};
#else

/* Number protocol for ConfComputeSystemState: only __int__ is implemented. */
static PyNumberMethods __pyx_tp_as_number_ConfComputeSystemState = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for ConfComputeSystemState: item assignment only. */
static PyMappingMethods __pyx_tp_as_mapping_ConfComputeSystemState = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*mp_ass_subscript*/
};

/* Statically-initialized type object for ConfComputeSystemState
 * (non-type-specs build path). Mirrors the slot array above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ConfComputeSystemState", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ConfComputeSystemState, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ConfComputeSystemState, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ConfComputeSystemState()\n\nEmpty-initialize an instance of `nvmlConfComputeSystemState_t`.\n\n\n.. seealso:: `nvmlConfComputeSystemState_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeSystemState_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeSystemState, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 __pyx_vtable_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;

/* tp_new: allocate a SystemConfComputeSettings_v1 instance, attach its vtable
 * and initialize the `_owner` field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for SystemConfComputeSettings_v1: run an eventual finalizer, call
 * the Cython-level __dealloc__, drop `_owner` and free the object. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If tp_finalize resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Refcount bump guards against recursive dealloc from __dealloc__;
     * a pending exception is preserved across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs build) own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot: visit the type object (for heap types) and the
 * single owned reference, _owner, so the cycle GC can see them. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear slot: break reference cycles by replacing _owner with None
 * (not NULL — the rest of the code assumes _owner is always a valid
 * object, as established in tp_new). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp); /* drop old reference only after the slot is reassigned */
  return 0;
}

/* mp_ass_subscript slot: v != NULL means `o[i] = v` (dispatch to the
 * Cython __setitem__ wrapper); v == NULL means `del o[i]`, which this
 * type does not implement, so raise NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare slot: only __eq__ is defined in the Cython class, so
 * Py_EQ delegates directly and Py_NE is synthesized by negating the
 * __eq__ result (unless it returned NotImplemented, which is passed
 * through unchanged). All other comparisons return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL; /* truth test raised */
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Getset thin wrappers for SystemConfComputeSettings_v1.
 * Each getter forwards to the corresponding Cython property __get__;
 * each setter forwards to __set__ when v != NULL, and rejects attribute
 * deletion (v == NULL, i.e. `del obj.attr`) with NotImplementedError
 * because the property defines no __del__. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_environment(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_environment(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_11environment_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_cc_feature(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_cc_feature(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_10cc_feature_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_dev_tools_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_dev_tools_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14dev_tools_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_multi_gpu_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_multi_gpu_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14multi_gpu_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for SystemConfComputeSettings_v1: from_data/from_ptr
 * constructors plus pickle support, all using the fastcall + keywords
 * calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Getset table: `ptr` is read-only (no setter); the struct fields are
 * read/write via the wrappers defined above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_version, PyDoc_STR("int: "), 0},
  {"environment", __pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_environment, __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_environment, PyDoc_STR("int: "), 0},
  {"cc_feature", __pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_cc_feature, __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_cc_feature, PyDoc_STR("int: "), 0},
  {"dev_tools_mode", __pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_dev_tools_mode, __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_dev_tools_mode, PyDoc_STR("int: "), 0},
  {"multi_gpu_mode", __pyx_getprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_multi_gpu_mode, __pyx_setprop_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_multi_gpu_mode, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
/* Type definition for SystemConfComputeSettings_v1: built either as a
 * heap type from a PyType_Spec (limited-API / type-specs builds) or as
 * a classic static PyTypeObject. Both variants wire up the same slots
 * defined above (dealloc, repr, __int__, item assignment, GC support,
 * richcompare, methods, getsets, init, new). */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_doc, (void *)PyDoc_STR("SystemConfComputeSettings_v1()\n\nEmpty-initialize an instance of `nvmlSystemConfComputeSettings_v1_t`.\n\n\n.. seealso:: `nvmlSystemConfComputeSettings_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1_spec = {
  "cuda.bindings._nvml.SystemConfComputeSettings_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_SystemConfComputeSettings_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_SystemConfComputeSettings_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""SystemConfComputeSettings_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_SystemConfComputeSettings_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_SystemConfComputeSettings_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("SystemConfComputeSettings_v1()\n\nEmpty-initialize an instance of `nvmlSystemConfComputeSettings_v1_t`.\n\n\n.. seealso:: `nvmlSystemConfComputeSettings_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;

/* tp_new slot for ConfComputeMemSizeInfo: allocate the instance, install
 * the class vtable pointer, and initialize _owner to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0; /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for ConfComputeMemSizeInfo.
 * Order matters: run tp_finalize (may resurrect; return early if so),
 * untrack from the GC, call the Cython __dealloc__ wrapper with the
 * refcount temporarily bumped and any pending exception preserved,
 * drop _owner, then free via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* resurrected */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type specs) hold a reference from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot: visit the type object (heap types) and _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear slot: break cycles by replacing _owner with None (not NULL,
 * preserving the "always a valid object" invariant set in tp_new). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp); /* drop old reference only after the slot is reassigned */
  return 0;
}

/* mp_ass_subscript slot: v != NULL dispatches `o[i] = v` to __setitem__;
 * v == NULL (`del o[i]`) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare slot: Py_EQ delegates to __eq__; Py_NE negates the
 * __eq__ result (NotImplemented passes through); everything else
 * returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL; /* truth test raised */
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Getset thin wrappers for ConfComputeMemSizeInfo: getters forward to
 * the Cython property __get__; setters forward to __set__ and reject
 * attribute deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_protected_mem_size_kib(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_protected_mem_size_kib(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_22protected_mem_size_kib_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_unprotected_mem_size_kib(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_unprotected_mem_size_kib(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_24unprotected_mem_size_kib_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for ConfComputeMemSizeInfo: from_data/from_ptr
 * constructors plus pickle support, all fastcall + keywords. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Getset table: `ptr` is read-only (no setter); struct fields are
 * read/write via the wrappers defined above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"protected_mem_size_kib", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_protected_mem_size_kib, __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_protected_mem_size_kib, PyDoc_STR("int: "), 0},
  {"unprotected_mem_size_kib", __pyx_getprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_unprotected_mem_size_kib, __pyx_setprop_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_unprotected_mem_size_kib, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
/* Type definition for ConfComputeMemSizeInfo: heap type from a
 * PyType_Spec (type-specs builds) or a classic static PyTypeObject,
 * both wiring up the same slots defined above. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_doc, (void *)PyDoc_STR("ConfComputeMemSizeInfo()\n\nEmpty-initialize an instance of `nvmlConfComputeMemSizeInfo_t`.\n\n\n.. seealso:: `nvmlConfComputeMemSizeInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo_spec = {
  "cuda.bindings._nvml.ConfComputeMemSizeInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_ConfComputeMemSizeInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_ConfComputeMemSizeInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ConfComputeMemSizeInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ConfComputeMemSizeInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ConfComputeMemSizeInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ConfComputeMemSizeInfo()\n\nEmpty-initialize an instance of `nvmlConfComputeMemSizeInfo_t`.\n\n\n.. seealso:: `nvmlConfComputeMemSizeInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;

/* tp_new slot for ConfComputeGpuCertificate: allocate the instance,
 * install the class vtable pointer, and initialize _owner to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0; /* allocation failed; exception already set */
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for ConfComputeGpuCertificate.
 * Order matters: run tp_finalize (may resurrect; return early if so),
 * untrack from the GC, call the Cython __dealloc__ wrapper with the
 * refcount temporarily bumped and any pending exception preserved,
 * drop _owner, then free via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* resurrected */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type specs) hold a reference from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse slot: visit the type object (heap types) and _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear slot: break cycles by replacing _owner with None (not NULL,
 * preserving the "always a valid object" invariant set in tp_new). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp); /* drop old reference only after the slot is reassigned */
  return 0;
}

/* mp_ass_subscript slot: v != NULL dispatches `o[i] = v` to __setitem__;
 * v == NULL (`del o[i]`) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare slot: Py_EQ delegates to __eq__; Py_NE negates the
 * __eq__ result (NotImplemented passes through); everything else
 * returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL; /* truth test raised */
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15cert_chain_size_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_27attestation_cert_chain_size_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_10cert_chain_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_22attestation_cert_chain_3__set__(o, v);
}

/* Method table for ConfComputeGpuCertificate: alternative constructors
 * (from_data / from_ptr) plus the Cython pickle helpers, all exposed via the
 * fastcall-with-keywords calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for ConfComputeGpuCertificate. `ptr` is read-only (no
 * setter); the remaining entries map struct fields to getter/setter thunks. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"cert_chain_size", __pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain_size, __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain_size, PyDoc_STR("int: "), 0},
  {"attestation_cert_chain_size", __pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain_size, __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain_size, PyDoc_STR("int: "), 0},
  {"cert_chain", __pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain, __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_cert_chain, PyDoc_STR("~_numpy.uint8: (array of length 4096)."), 0},
  {"attestation_cert_chain", __pyx_getprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain, __pyx_setprop_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_attestation_cert_chain, PyDoc_STR("~_numpy.uint8: (array of length 5120)."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for ConfComputeGpuCertificate, used on the PyType_FromSpec path
 * (CYTHON_USE_TYPE_SPECS); mirrors the static PyTypeObject in the #else branch. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_doc, (void *)PyDoc_STR("ConfComputeGpuCertificate()\n\nEmpty-initialize an instance of `nvmlConfComputeGpuCertificate_t`.\n\n\n.. seealso:: `nvmlConfComputeGpuCertificate_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate},
  {0, 0},  /* sentinel */
};
/* Type spec consumed together with the slot table above to build the
 * ConfComputeGpuCertificate heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate_spec = {
  "cuda.bindings._nvml.ConfComputeGpuCertificate",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate),
  0,  /* itemsize: not a variable-size object */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate_slots,
};
#else

/* Number protocol for ConfComputeGpuCertificate (non-type-specs build):
 * only __int__ is implemented; every other numeric slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_ConfComputeGpuCertificate = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (`obj[i] = v`) is supported;
 * length and subscript reads are not implemented. */
static PyMappingMethods __pyx_tp_as_mapping_ConfComputeGpuCertificate = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*mp_ass_subscript*/
};

/* Static PyTypeObject for ConfComputeGpuCertificate (non-type-specs build).
 * Trailing slots are guarded by version/implementation #ifs because the
 * PyTypeObject layout grew across CPython/PyPy releases. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ConfComputeGpuCertificate", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ConfComputeGpuCertificate, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ConfComputeGpuCertificate, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ConfComputeGpuCertificate()\n\nEmpty-initialize an instance of `nvmlConfComputeGpuCertificate_t`.\n\n\n.. seealso:: `nvmlConfComputeGpuCertificate_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Static vtable storage for the ConfComputeGpuAttestationReport extension type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;

/* tp_new: allocate a ConfComputeGpuAttestationReport instance, install the
 * vtable pointer and initialize the _owner field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;
  /* _owner starts as None; a real owner may be assigned later (e.g. from_ptr). */
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize (if any), call the user __dealloc__, drop the
 * _owner reference, then free the instance memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* tp_finalize may resurrect the object; in that case deallocation stops here. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception, and temporarily bump the refcount so the
     * object looks alive while the user-level __dealloc__ runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to themselves via their instances. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report the only owned PyObject reference (_owner) to the GC. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break reference cycles by resetting _owner to None before
 * dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyObject *o, PyObject *i, PyObject *v) {
  /* v == NULL signals `del obj[i]`; only item assignment is supported. */
  if (v != NULL) {
    return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_11__setitem__(o, i, v);
  }
  {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport(PyObject *o1, PyObject *o2, int op) {
  /* Only == and != are defined; != is derived by negating __eq__. */
  PyObject *res;
  if (op != Py_EQ && op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_9__eq__(o1, o2);
  if (op == Py_EQ)
    return res;
  /* Py_NE: invert the truth value unless __eq__ failed or returned NotImplemented. */
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_3ptr_1__get__(o);
  return value;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_is_cec_attestation_report_present(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_is_cec_attestation_report_present(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_33is_cec_attestation_report_present_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report_size(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_23attestation_report_size_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report_size(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_27cec_attestation_report_size_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_nonce(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_nonce(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5nonce_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18attestation_report_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_22cec_attestation_report_3__set__(o, v);
}

/* Method table for ConfComputeGpuAttestationReport: alternative constructors
 * (from_data / from_ptr) plus the Cython pickle helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for ConfComputeGpuAttestationReport. `ptr` is read-only (no
 * setter); the remaining entries map struct fields to getter/setter thunks. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"is_cec_attestation_report_present", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_is_cec_attestation_report_present, __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_is_cec_attestation_report_present, PyDoc_STR("int: "), 0},
  {"attestation_report_size", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report_size, __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report_size, PyDoc_STR("int: "), 0},
  {"cec_attestation_report_size", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report_size, __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report_size, PyDoc_STR("int: "), 0},
  {"nonce", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_nonce, __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_nonce, PyDoc_STR("~_numpy.uint8: (array of length 32)."), 0},
  {"attestation_report", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report, __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_attestation_report, PyDoc_STR("~_numpy.uint8: (array of length 8192)."), 0},
  {"cec_attestation_report", __pyx_getprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report, __pyx_setprop_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_cec_attestation_report, PyDoc_STR("~_numpy.uint8: (array of length 4096)."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for ConfComputeGpuAttestationReport, used on the PyType_FromSpec
 * path (CYTHON_USE_TYPE_SPECS); mirrors the static PyTypeObject below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_doc, (void *)PyDoc_STR("ConfComputeGpuAttestationReport()\n\nEmpty-initialize an instance of `nvmlConfComputeGpuAttestationReport_t`.\n\n\n.. seealso:: `nvmlConfComputeGpuAttestationReport_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport},
  {0, 0},  /* sentinel */
};
/* Type spec consumed together with the slot table above to build the
 * ConfComputeGpuAttestationReport heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport_spec = {
  "cuda.bindings._nvml.ConfComputeGpuAttestationReport",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport),
  0,  /* itemsize: not a variable-size object */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport_slots,
};
#else

/* Number protocol for ConfComputeGpuAttestationReport (non-type-specs build):
 * only __int__ is implemented; every other numeric slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_ConfComputeGpuAttestationReport = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (`obj[i] = v`) is supported;
 * length and subscript reads are not implemented. */
static PyMappingMethods __pyx_tp_as_mapping_ConfComputeGpuAttestationReport = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*mp_ass_subscript*/
};

/* Static PyTypeObject for ConfComputeGpuAttestationReport (non-type-specs
 * build). Trailing slots are guarded by version/implementation #ifs because
 * the PyTypeObject layout grew across CPython/PyPy releases. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ConfComputeGpuAttestationReport", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ConfComputeGpuAttestationReport, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ConfComputeGpuAttestationReport, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ConfComputeGpuAttestationReport()\n\nEmpty-initialize an instance of `nvmlConfComputeGpuAttestationReport_t`.\n\n\n.. seealso:: `nvmlConfComputeGpuAttestationReport_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Static vtable storage for the ConfComputeGetKeyRotationThresholdInfo_v1 extension type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;

/* tp_new: allocate a ConfComputeGetKeyRotationThresholdInfo_v1 instance,
 * install the vtable pointer and initialize the _owner field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;
  /* _owner starts as None; a real owner may be assigned later (e.g. from_ptr). */
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize (if any), call the user __dealloc__, drop the
 * _owner reference, then free the instance memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* tp_finalize may resurrect the object; in that case deallocation stops here. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception, and temporarily bump the refcount so the
     * object looks alive while the user-level __dealloc__ runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to themselves via their instances. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report the only owned PyObject reference (_owner) to the GC. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break reference cycles by resetting _owner to None before
 * dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  /* v == NULL signals `del obj[i]`; only item assignment is supported. */
  if (v != NULL) {
    return __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_11__setitem__(o, i, v);
  }
  {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1(PyObject *o1, PyObject *o2, int op) {
  /* Only == and != are defined; != is derived by negating __eq__. */
  PyObject *res;
  if (op != Py_EQ && op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_9__eq__(o1, o2);
  if (op == Py_EQ)
    return res;
  /* Py_NE: invert the truth value unless __eq__ failed or returned NotImplemented. */
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_3ptr_1__get__(o);
  return value;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_attacker_advantage(PyObject *o, CYTHON_UNUSED void *x) {
  /* getset thunk: forward attribute reads to the Cython property __get__. */
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_attacker_advantage(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  /* getset thunk: v == NULL means `del obj.attr`, which is not supported. */
  if (v == NULL) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18attacker_advantage_3__set__(o, v);
}

/* Method table for ConfComputeGetKeyRotationThresholdInfo_v1: exposes the
 * from_data/from_ptr constructors and pickling helpers; NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table: "ptr" is read-only (no setter); "version" and
 * "attacker_advantage" are read/write; NULL-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_version, PyDoc_STR("int: "), 0},
  {"attacker_advantage", __pyx_getprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_attacker_advantage, __pyx_setprop_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_attacker_advantage, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used with PyType_FromSpec when building with type specs
 * (limited API); mirrors the static PyTypeObject in the #else branch. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ConfComputeGetKeyRotationThresholdInfo_v1()\n\nEmpty-initialize an instance of `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`.\n\n\n.. seealso:: `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1},
  {0, 0},
};
/* PyType_Spec consumed by PyType_FromSpec to create the heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1_spec = {
  "cuda.bindings._nvml.ConfComputeGetKeyRotationThresholdInfo_v1", /*name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1), /*basicsize*/
  0, /*itemsize*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*flags*/
  __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1_slots, /*slots*/
};
#else

/* Number protocol table (non-type-specs build): only nb_int is implemented,
 * so int(obj) works; all other numeric operations are unsupported. */
static PyNumberMethods __pyx_tp_as_number_ConfComputeGetKeyRotationThresholdInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table: only item assignment (obj[i] = v) is supported. */
static PyMappingMethods __pyx_tp_as_mapping_ConfComputeGetKeyRotationThresholdInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*mp_ass_subscript*/
};

/* Static PyTypeObject for ConfComputeGetKeyRotationThresholdInfo_v1 (used
 * when CYTHON_USE_TYPE_SPECS is off); positional slot initializers, so the
 * order of fields must match the CPython PyTypeObject layout exactly. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ConfComputeGetKeyRotationThresholdInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ConfComputeGetKeyRotationThresholdInfo_v1()\n\nEmpty-initialize an instance of `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`.\n\n\n.. seealso:: `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the C-level method vtable of NvlinkSupportedBwModes_v1. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;

/* tp_new: allocate a NvlinkSupportedBwModes_v1 instance, install the C-level
 * vtable pointer, and initialize the _owner field to an owned None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  return self;
}

/* Deallocator: runs tp_finalize if present (which may resurrect the object),
 * invokes the user-level __dealloc__, clears the _owner reference, then
 * releases the memory through the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Nonzero return from PyObject_CallFinalizerFromDealloc means the
     finalizer resurrected the object; abort deallocation in that case. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    /* Save any pending exception, then temporarily bump the refcount so the
       object looks alive while user __dealloc__ code runs. */
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types: each instance holds a reference to its type; drop it. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: report the type (via the Cython helper) and the _owner
 * reference to the collector; stop on the first nonzero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear: swap _owner to None first, then drop the old reference, so the
 * field never dangles if the decref triggers reentrant code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: delegate item assignment to __setitem__; a NULL value
 * means item deletion, which is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: == delegates to __eq__; != is derived by negating __eq__
 * (errors and NotImplemented propagate unchanged); other ops unsupported. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  int truth;
  switch (op) {
    case Py_EQ:
      return __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_9__eq__(o1, o2);
    case Py_NE:
      eq_result = __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_9__eq__(o1, o2);
      if (unlikely(!eq_result) || eq_result == Py_NotImplemented)
        return eq_result;
      truth = __Pyx_PyObject_IsTrue(eq_result);
      Py_DECREF(eq_result);
      if (unlikely(truth < 0)) return NULL;
      eq_result = truth ? Py_False : Py_True;
      Py_INCREF(eq_result);
      return eq_result;
    default:
      return __Pyx_NewRef(Py_NotImplemented);
  }
}

/* Getter trampoline for the "ptr" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_3ptr_1__get__(o);
  return value;
}

/* Getter trampoline for the "version" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_1__get__(o);
  return value;
}

/* Setter trampoline for "version". A NULL value means attribute deletion,
 * which this generated property does not support. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7version_3__set__(o, v);
}

/* Getter trampoline for the "bw_modes" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_bw_modes(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_1__get__(o);
  return value;
}

/* Setter trampoline for "bw_modes". A NULL value means attribute deletion,
 * which this generated property does not support. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_bw_modes(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_8bw_modes_3__set__(o, v);
}

/* Getter trampoline for the "total_bw_modes" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_total_bw_modes(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_1__get__(o);
  return value;
}

/* Setter trampoline for "total_bw_modes". A NULL value means attribute
 * deletion, which this generated property does not support. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_total_bw_modes(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14total_bw_modes_3__set__(o, v);
}

/* Method table for NvlinkSupportedBwModes_v1: exposes the from_data/from_ptr
 * constructors and pickling helpers; NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table: "ptr" is read-only (no setter); "version", "bw_modes" and
 * "total_bw_modes" are read/write; NULL-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_version, PyDoc_STR("int: "), 0},
  {"bw_modes", __pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_bw_modes, __pyx_setprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_bw_modes, PyDoc_STR("~_numpy.uint8: (array of length 23)."), 0},
  {"total_bw_modes", __pyx_getprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_total_bw_modes, __pyx_setprop_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_total_bw_modes, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used with PyType_FromSpec when building with type specs
 * (limited API); mirrors the static PyTypeObject in the #else branch. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_doc, (void *)PyDoc_STR("NvlinkSupportedBwModes_v1()\n\nEmpty-initialize an instance of `nvmlNvlinkSupportedBwModes_v1_t`.\n\n\n.. seealso:: `nvmlNvlinkSupportedBwModes_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1},
  {0, 0},
};
/* PyType_Spec consumed by PyType_FromSpec to create the heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1_spec = {
  "cuda.bindings._nvml.NvlinkSupportedBwModes_v1", /*name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1), /*basicsize*/
  0, /*itemsize*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*flags*/
  __pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1_slots, /*slots*/
};
#else

/* Number protocol table (non-type-specs build): only nb_int is implemented,
 * so int(obj) works; all other numeric operations are unsupported. */
static PyNumberMethods __pyx_tp_as_number_NvlinkSupportedBwModes_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table: only item assignment (obj[i] = v) is supported. */
static PyMappingMethods __pyx_tp_as_mapping_NvlinkSupportedBwModes_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*mp_ass_subscript*/
};

/* Static PyTypeObject for NvlinkSupportedBwModes_v1 (used when
 * CYTHON_USE_TYPE_SPECS is off); positional slot initializers, so the
 * order of fields must match the CPython PyTypeObject layout exactly. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""NvlinkSupportedBwModes_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_NvlinkSupportedBwModes_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_NvlinkSupportedBwModes_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("NvlinkSupportedBwModes_v1()\n\nEmpty-initialize an instance of `nvmlNvlinkSupportedBwModes_v1_t`.\n\n\n.. seealso:: `nvmlNvlinkSupportedBwModes_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the C-level method vtable of NvlinkGetBwMode_v1. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;

/* tp_new: allocate a NvlinkGetBwMode_v1 instance, install the C-level
 * vtable pointer, and initialize the _owner field to an owned None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  return self;
}

/* Deallocator: runs tp_finalize if present (which may resurrect the object),
 * invokes the user-level __dealloc__, clears the _owner reference, then
 * releases the memory through the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Nonzero return from PyObject_CallFinalizerFromDealloc means the
     finalizer resurrected the object; abort deallocation in that case. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    /* Save any pending exception, then temporarily bump the refcount so the
       object looks alive while user __dealloc__ code runs. */
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types: each instance holds a reference to its type; drop it. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: report the type (via the Cython helper) and the _owner
 * reference to the collector; stop on the first nonzero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = v(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* GC clear: swap _owner to None first, then drop the old reference, so the
 * field never dangles if the decref triggers reentrant code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: delegate item assignment to __setitem__; a NULL value
 * means item deletion, which is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: == delegates to __eq__; != is derived by negating __eq__
 * (errors and NotImplemented propagate unchanged); other ops unsupported. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  int truth;
  switch (op) {
    case Py_EQ:
      return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9__eq__(o1, o2);
    case Py_NE:
      eq_result = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9__eq__(o1, o2);
      if (unlikely(!eq_result) || eq_result == Py_NotImplemented)
        return eq_result;
      truth = __Pyx_PyObject_IsTrue(eq_result);
      Py_DECREF(eq_result);
      if (unlikely(truth < 0)) return NULL;
      eq_result = truth ? Py_False : Py_True;
      Py_INCREF(eq_result);
      return eq_result;
    default:
      return __Pyx_NewRef(Py_NotImplemented);
  }
}

/* Getter trampoline for the "ptr" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_3ptr_1__get__(o);
  return value;
}

/* Getter trampoline for the "version" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_1__get__(o);
  return value;
}

/* Setter trampoline for "version". A NULL value means attribute deletion,
 * which this generated property does not support. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7version_3__set__(o, v);
}

/* Getter trampoline for the "b_is_best" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_b_is_best(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_1__get__(o);
  return value;
}

/* Setter trampoline for "b_is_best". A NULL value means attribute deletion,
 * which this generated property does not support. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_b_is_best(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_9b_is_best_3__set__(o, v);
}

/* Getter trampoline for the "bw_mode" property: forward to the Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_bw_mode(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value;
  value = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_1__get__(o);
  return value;
}

/* Setter trampoline for "bw_mode". A NULL value means attribute deletion,
 * which this generated property does not support. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_bw_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7bw_mode_3__set__(o, v);
}

/* Method table for NvlinkGetBwMode_v1: exposes the from_data/from_ptr
 * constructors and pickling helpers; NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table: "ptr" is read-only (no setter); "version", "b_is_best"
 * and "bw_mode" are read/write; NULL-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_version, PyDoc_STR("int: "), 0},
  {"b_is_best", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_b_is_best, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_b_is_best, PyDoc_STR("int: "), 0},
  {"bw_mode", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_bw_mode, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_bw_mode, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used with PyType_FromSpec when building with type specs
 * (limited API); mirrors the static PyTypeObject in the #else branch. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_doc, (void *)PyDoc_STR("NvlinkGetBwMode_v1()\n\nEmpty-initialize an instance of `nvmlNvlinkGetBwMode_v1_t`.\n\n\n.. seealso:: `nvmlNvlinkGetBwMode_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1},
  {0, 0},
};
/* PyType_Spec consumed by PyType_FromSpec to create the heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1_spec = {
  "cuda.bindings._nvml.NvlinkGetBwMode_v1", /*name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1), /*basicsize*/
  0, /*itemsize*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*flags*/
  __pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1_slots, /*slots*/
};
#else

/* Number protocol table (non-type-specs build): only nb_int is implemented,
 * so int(obj) works; all other numeric operations are unsupported. */
static PyNumberMethods __pyx_tp_as_number_NvlinkGetBwMode_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_NvlinkGetBwMode_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""NvlinkGetBwMode_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_NvlinkGetBwMode_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_NvlinkGetBwMode_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("NvlinkGetBwMode_v1()\n\nEmpty-initialize an instance of `nvmlNvlinkGetBwMode_v1_t`.\n\n\n.. seealso:: `nvmlNvlinkGetBwMode_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-method vtable storage for NvlinkSetBwMode_v1 (Cython cdef methods are
 * dispatched through this struct rather than the Python method table). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;

/* tp_new: allocate an instance, install the vtable pointer, and initialise
 * the _owner field to Py_None (it tracks the Python object that owns the
 * underlying C buffer, if any). The args/kwargs are ignored here; argument
 * processing happens later in tp_init (__init__). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize (if any), untrack from the GC, invoke the
 * user-level __dealloc__, release _owner, then free the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only call the finalizer from the exact dealloc it belongs to (guards
   * against subclasses with their own tp_dealloc); if the finalizer
   * resurrected the object, stop deallocating. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily bump the refcount from 0 to 1 so __dealloc__ can safely
     * touch the object, and save/restore any pending exception around it. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report the type (for heap types) and the _owner reference
 * to the cyclic garbage collector. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: drop the _owner reference to break reference cycles. It is
 * reset to Py_None (not NULL) so the field always holds a valid object. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: dispatch obj[i] = v to __setitem__; deletion
 * (del obj[i], i.e. v == NULL) is not supported and raises. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only __eq__ is defined by the class; Py_NE is derived by
 * negating the __eq__ result (NotImplemented passes through unchanged), and
 * all other comparisons return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for NvlinkSetBwMode_v1, adapting Python getter/setter
 * signatures to Cython's generated __get__/__set__ wrappers. `ptr` is
 * read-only; the struct-field properties reject deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7version_3__set__(o, v);
  }
  else {
    /* No __del__ generated for this property: deletion is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_b_set_best(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_b_set_best(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_10b_set_best_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_bw_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_bw_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7bw_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-level method table: from_data/from_ptr constructors plus the
 * pickle-guard helpers Cython generates for non-reducible cdef classes. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` has no setter (entry 0 ⇒ read-only attribute). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_version, PyDoc_STR("int: "), 0},
  {"b_set_best", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_b_set_best, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_b_set_best, PyDoc_STR("int: "), 0},
  {"bw_mode", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_bw_mode, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_bw_mode, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table + heap-type specification for NvlinkSetBwMode_v1, used on the
 * CYTHON_USE_TYPE_SPECS build path (PyType_FromSpec-based type creation). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_doc, (void *)PyDoc_STR("NvlinkSetBwMode_v1()\n\nEmpty-initialize an instance of `nvmlNvlinkSetBwMode_v1_t`.\n\n\n.. seealso:: `nvmlNvlinkSetBwMode_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1_spec = {
  "cuda.bindings._nvml.NvlinkSetBwMode_v1", /* fully-qualified type name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1_slots,
};
#else

/* Non-type-specs build path: statically initialised method tables and
 * PyTypeObject for NvlinkSetBwMode_v1. Only nb_int is populated. */
static PyNumberMethods __pyx_tp_as_number_NvlinkSetBwMode_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only; reads and len() unsupported. */
static PyMappingMethods __pyx_tp_as_mapping_NvlinkSetBwMode_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*mp_ass_subscript*/
};

/* Statically defined type object for NvlinkSetBwMode_v1 (used when
 * CYTHON_USE_TYPE_SPECS is off). GC-enabled and subclassable. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""NvlinkSetBwMode_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_NvlinkSetBwMode_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_NvlinkSetBwMode_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("NvlinkSetBwMode_v1()\n\nEmpty-initialize an instance of `nvmlNvlinkSetBwMode_v1_t`.\n\n\n.. seealso:: `nvmlNvlinkSetBwMode_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-method vtable storage for VgpuVersion. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuVersion __pyx_vtable_4cuda_8bindings_5_nvml_VgpuVersion;

/* tp_new: allocate, install the vtable, and set _owner to Py_None;
 * args/kwargs are handled later by tp_init (__init__). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuVersion(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuVersion;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: finalize, untrack from GC, call __dealloc__ (with the object
 * temporarily resurrected and any pending exception saved), drop _owner,
 * and free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuVersion(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuVersion) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (heap types) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuVersion(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break cycles by resetting _owner to Py_None (never NULL). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuVersion(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: obj[i] = v → __setitem__; deletion raises. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuVersion(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: __eq__ only; NE is the negation of EQ (NotImplemented
 * passes through), other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuVersion(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks and method/getset tables for VgpuVersion. `ptr` is
 * read-only; min_version/max_version setters reject deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11VgpuVersion_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11VgpuVersion_min_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_11VgpuVersion_min_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11min_version_3__set__(o, v);
  }
  else {
    /* No __del__ generated for this property: deletion is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_11VgpuVersion_max_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_11VgpuVersion_max_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_11max_version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-level method table: from_data/from_ptr plus pickle guards. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuVersion[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_11VgpuVersion_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` is read-only (no setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuVersion[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_11VgpuVersion_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"min_version", __pyx_getprop_4cuda_8bindings_5_nvml_11VgpuVersion_min_version, __pyx_setprop_4cuda_8bindings_5_nvml_11VgpuVersion_min_version, PyDoc_STR("int: "), 0},
  {"max_version", __pyx_getprop_4cuda_8bindings_5_nvml_11VgpuVersion_max_version, __pyx_setprop_4cuda_8bindings_5_nvml_11VgpuVersion_max_version, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table + heap-type specification for VgpuVersion, used on the
 * CYTHON_USE_TYPE_SPECS build path (PyType_FromSpec-based type creation). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuVersion_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuVersion()\n\nEmpty-initialize an instance of `nvmlVgpuVersion_t`.\n\n\n.. seealso:: `nvmlVgpuVersion_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuVersion},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuVersion},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuVersion_spec = {
  "cuda.bindings._nvml.VgpuVersion", /* fully-qualified type name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuVersion_slots,
};
#else

/* Non-type-specs build path: statically initialised method tables and
 * PyTypeObject for VgpuVersion. Only nb_int is populated. */
static PyNumberMethods __pyx_tp_as_number_VgpuVersion = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: item assignment only; reads and len() unsupported. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuVersion = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuVersion, /*mp_ass_subscript*/
};

/* Statically defined type object for VgpuVersion (used when
 * CYTHON_USE_TYPE_SPECS is off). GC-enabled and subclassable. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuVersion = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuVersion", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuVersion, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuVersion, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuVersion()\n\nEmpty-initialize an instance of `nvmlVgpuVersion_t`.\n\n\n.. seealso:: `nvmlVgpuVersion_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_11VgpuVersion_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuVersion, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-method vtable storage for VgpuMetadata. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuMetadata __pyx_vtable_4cuda_8bindings_5_nvml_VgpuMetadata;

/* tp_new: allocate, install the vtable, and set _owner to Py_None;
 * args/kwargs are handled later by tp_init (__init__). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuMetadata(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuMetadata;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: finalize, untrack from GC, call __dealloc__ (with the object
 * temporarily resurrected and any pending exception saved), drop _owner,
 * and free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuMetadata(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuMetadata) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (heap types) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuMetadata(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break cycles by resetting _owner to Py_None (never NULL). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuMetadata(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: obj[i] = v → __setitem__; deletion raises. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuMetadata(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: __eq__ only; NE is the negation of EQ (NotImplemented
 * passes through), other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuMetadata(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for VgpuMetadata. `ptr` is read-only; all other field
 * setters reject deletion (v == NULL). The setter for
 * vgpu_virtualization_caps continues past this point in the file. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7version_3__set__(o, v);
  }
  else {
    /* No __del__ generated for this property: deletion is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_revision(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_revision(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_8revision_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_info_state(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_info_state(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16guest_info_state_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_driver_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_driver_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_20guest_driver_version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_host_driver_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_host_driver_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19host_driver_version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_vgpu_virtualization_caps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_vgpu_virtualization_caps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_24vgpu_virtualization_caps_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_vgpu_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_vgpu_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_18guest_vgpu_version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_16opaque_data_size_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_11opaque_data_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuMetadata: the alternate constructors from_data /
 * from_ptr plus the pickling helpers, all exposed through Cython's
 * FASTCALL-with-keywords calling convention. The double cast through
 * void(*)(void) silences the function-pointer-type mismatch required
 * by the PyMethodDef ABI. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuMetadata[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12VgpuMetadata_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table for VgpuMetadata: `ptr` is read-only (NULL setter); the
 * nvmlVgpuMetadata_t struct fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuMetadata[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_version, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_version, PyDoc_STR("int: "), 0},
  {"revision", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_revision, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_revision, PyDoc_STR("int: "), 0},
  {"guest_info_state", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_info_state, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_info_state, PyDoc_STR("int: "), 0},
  {"guest_driver_version", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_driver_version, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_driver_version, PyDoc_STR("~_numpy.int8: (array of length 80)."), 0},
  {"host_driver_version", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_host_driver_version, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_host_driver_version, PyDoc_STR("~_numpy.int8: (array of length 80)."), 0},
  {"vgpu_virtualization_caps", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_vgpu_virtualization_caps, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_vgpu_virtualization_caps, PyDoc_STR("int: "), 0},
  {"guest_vgpu_version", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_vgpu_version, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_guest_vgpu_version, PyDoc_STR("int: "), 0},
  {"opaque_data_size", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data_size, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data_size, PyDoc_STR("int: "), 0},
  {"opaque_data", __pyx_getprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data, __pyx_setprop_4cuda_8bindings_5_nvml_12VgpuMetadata_opaque_data, PyDoc_STR("~_numpy.int8: (array of length 4)."), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cuda.bindings._nvml.VgpuMetadata.
 * Two equivalent definitions are provided: a PyType_Spec/PyType_Slot
 * form (Limited-API / heap-type builds, CYTHON_USE_TYPE_SPECS) and a
 * classic static PyTypeObject (all other builds). Both wire up the
 * same dealloc/repr/int/subscript/GC/richcompare/method/getset/init/new
 * entry points defined above. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuMetadata()\n\nEmpty-initialize an instance of `nvmlVgpuMetadata_t`.\n\n\n.. seealso:: `nvmlVgpuMetadata_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuMetadata},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuMetadata},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata_spec = {
  "cuda.bindings._nvml.VgpuMetadata",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata),
  0,
  /* NOTE(review): Py_TPFLAGS_CHECKTYPES / Py_TPFLAGS_HAVE_NEWBUFFER are
   * Python-2-era flags; presumably defined to 0 earlier in this
   * generated file for Python 3 builds. */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata_slots,
};
#else

/* Only nb_int is populated: the type supports int() conversion but no
 * arithmetic. */
static PyNumberMethods __pyx_tp_as_number_VgpuMetadata = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Item assignment is supported (mp_ass_subscript) but not reading via
 * subscript (mp_subscript is NULL) for this type. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuMetadata = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuMetadata, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuMetadata", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuMetadata, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuMetadata, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuMetadata()\n\nEmpty-initialize an instance of `nvmlVgpuMetadata_t`.\n\n\n.. seealso:: `nvmlVgpuMetadata_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_12VgpuMetadata_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuMetadata, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level vtable storage for VgpuPgpuCompatibility (cdef methods). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;

/* tp_new: allocate the object, install the vtable pointer, and
 * initialize the `_owner` object field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: optionally run tp_finalize, untrack from the GC, invoke
 * the user __dealloc__, release `_owner`, then free the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Run tp_finalize first (only when this exact dealloc is installed,
   * i.e. not a subclass); if the finalizer resurrected the object,
   * abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across __dealloc__, and bump
     * the refcount around the call so transient references taken
     * inside __dealloc__ cannot retrigger deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (heap-type GC requirement) and the
 * `_owner` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break reference cycles by resetting `_owner` to None
 * (not NULL, so later attribute access stays safe). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for VgpuPgpuCompatibility: item assignment forwards
 * to the generated __setitem__; deletion (v == NULL) is rejected with
 * NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* Rich comparison: == delegates to __eq__; != negates the __eq__
 * result (passing NULL / NotImplemented through); everything else
 * returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property accessor wrappers for VgpuPgpuCompatibility: `ptr` is
 * read-only; the two struct fields are read/write. Setters reject
 * attribute deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_vgpu_vm_compatibility(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_vgpu_vm_compatibility(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_21vgpu_vm_compatibility_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_compatibility_limit_code(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_compatibility_limit_code(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_24compatibility_limit_code_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuPgpuCompatibility: from_data / from_ptr
 * alternate constructors plus pickling helpers (FASTCALL+keywords). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` read-only; struct fields read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"vgpu_vm_compatibility", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_vgpu_vm_compatibility, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_vgpu_vm_compatibility, PyDoc_STR("int: "), 0},
  {"compatibility_limit_code", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_compatibility_limit_code, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_compatibility_limit_code, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cuda.bindings._nvml.VgpuPgpuCompatibility:
 * PyType_Spec form for CYTHON_USE_TYPE_SPECS builds, classic static
 * PyTypeObject otherwise. Both wire up the same entry points defined
 * above. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuPgpuCompatibility()\n\nEmpty-initialize an instance of `nvmlVgpuPgpuCompatibility_t`.\n\n\n.. seealso:: `nvmlVgpuPgpuCompatibility_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility_spec = {
  "cuda.bindings._nvml.VgpuPgpuCompatibility",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility_slots,
};
#else

/* Only nb_int populated: int() conversion, no arithmetic. */
static PyNumberMethods __pyx_tp_as_number_VgpuPgpuCompatibility = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Item assignment only; no subscript read, no length. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuPgpuCompatibility = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuPgpuCompatibility", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuPgpuCompatibility, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuPgpuCompatibility, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuPgpuCompatibility()\n\nEmpty-initialize an instance of `nvmlVgpuPgpuCompatibility_t`.\n\n\n.. seealso:: `nvmlVgpuPgpuCompatibility_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level vtable storage for GpuInstancePlacement (cdef methods). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstancePlacement __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstancePlacement;

/* tp_new: allocate, install vtable pointer, initialize `_data` to
 * None. Unlike VgpuPgpuCompatibility this type keeps its backing
 * storage in `_data` and has no `_owner` field. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstancePlacement;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: optionally run tp_finalize, untrack from GC, release
 * `_data`, free the memory. No user __dealloc__ is invoked here. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstancePlacement) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type and the `_data` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: break cycles by resetting `_data` to None (not NULL). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item for GpuInstancePlacement: implements integer indexing by
 * boxing the index into a Python int and delegating to the mapping
 * protocol's mp_subscript (i.e. the generated __getitem__). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript: item assignment forwards to __setitem__; deletion
 * (v == NULL) raises NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* Rich comparison: == delegates to __eq__; != negates the __eq__
 * result (passing NULL / NotImplemented through); everything else
 * returns NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstancePlacement(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property accessor wrappers for GpuInstancePlacement. `ptr` and
 * `_data` are read-only (no setters); `start` and `size_` are
 * read/write. The Python attribute is spelled `size_`, presumably to
 * avoid clashing with another name in the generated module — see the
 * getset table below. Setters reject deletion (v == NULL). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_start(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_start(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5start_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_size_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_size_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5size__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `_data` exposes the backing buffer object; getter only. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5_data_1__get__(o);
}

/* Method table for GpuInstancePlacement: from_data / from_ptr
 * alternate constructors plus pickling helpers (FASTCALL+keywords). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuInstancePlacement[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20GpuInstancePlacement_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` and `_data` read-only; `start` and `size_`
 * read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstancePlacement[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"start", __pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_start, __pyx_setprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_start, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"size_", __pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_size_, __pyx_setprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement_size_, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_20GpuInstancePlacement__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table used when GpuInstancePlacement is created as a heap type via
 * PyType_FromSpec (CYTHON_USE_TYPE_SPECS builds). Note __len__ backs both
 * the sequence and mapping length slots. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_doc, (void *)PyDoc_STR("GpuInstancePlacement(size=1)\n\nEmpty-initialize an array of `nvmlGpuInstancePlacement_t`.\n\nThe resulting object is of length `size` and of dtype `gpu_instance_placement_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlGpuInstancePlacement_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstancePlacement},
  {0, 0},  /* sentinel */
};
/* Type spec consumed by PyType_FromSpec at module init. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement_spec = {
  "cuda.bindings._nvml.GpuInstancePlacement",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement),
  0,  /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement_slots,
};
#else

/* Number protocol for GpuInstancePlacement: only nb_int is implemented. */
static PyNumberMethods __pyx_tp_as_number_GpuInstancePlacement = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol: len() and item access only. */
static PySequenceMethods __pyx_tp_as_sequence_GpuInstancePlacement = {
  __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol: shares __len__ with the sequence slot; subscript
 * get/set go through the generated __getitem__/__setitem__ wrappers. */
static PyMappingMethods __pyx_tp_as_mapping_GpuInstancePlacement = {
  __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*mp_ass_subscript*/
};

/* Statically-allocated type object, used when heap type specs are not in
 * use (the #if CYTHON_USE_TYPE_SPECS branch above is the alternative). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuInstancePlacement", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuInstancePlacement, /*tp_as_number*/
  &__pyx_tp_as_sequence_GpuInstancePlacement, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuInstancePlacement, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuInstancePlacement(size=1)\n\nEmpty-initialize an array of `nvmlGpuInstancePlacement_t`.\n\nThe resulting object is of length `size` and of dtype `gpu_instance_placement_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlGpuInstancePlacement_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_20GpuInstancePlacement_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstancePlacement, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level vtable storage for GpuInstanceProfileInfo_v2 (tp_new below wires
 * instances to it through __pyx_vtabptr_*; filled in elsewhere). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;

/* tp_new: allocate a GpuInstanceProfileInfo_v2 instance, attach the
 * C-level vtable pointer, and initialize `_owner` to an owned reference
 * to Py_None. Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: run __dealloc__, release the `_owner` reference, then free
 * the object. The statement order (finalize -> untrack -> user dealloc ->
 * clear fields -> free) is load-bearing; do not reorder. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* PEP 442 finalization: if tp_finalize may resurrect the object,
   * PyObject_CallFinalizerFromDealloc returns nonzero and teardown stops. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* __dealloc__ may execute arbitrary code: preserve any in-flight
     * exception around the call, and temporarily bump the refcount so the
     * object cannot be torn down re-entrantly while it runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the type object and the `_owner` reference,
 * propagating the first nonzero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (self->_owner) {
    err = (*v)(self->_owner, a);
    if (err)
      return err;
  }
  return 0;
}

/* GC clear: swap `_owner` for Py_None and drop the previous reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *)o;
  PyObject *prev = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(prev);
  return 0;
}

/* mp_ass_subscript: forward `obj[i] = v` to the generated __setitem__;
 * subscript deletion (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11__setitem__(o, i, v);
}

/* tp_richcompare: equality is the only comparison implemented; Py_NE is
 * synthesized by negating the __eq__ result, and all other operators
 * return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9__eq__(o1, o2);
      /* NULL (error) and NotImplemented pass through unchanged. */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL; /* truth test raised */
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* ---- GpuInstanceProfileInfo_v2 property accessors (generated) ----
 * Each getprop forwards to the generated `__get__` wrapper for the field;
 * each setprop forwards to `__set__` when a value is supplied and rejects
 * attribute deletion (v == NULL) with NotImplementedError("__del__").
 * `ptr` has no setprop and is therefore read-only. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_2id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_is_p2p_supported(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_is_p2p_supported(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16is_p2p_supported_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_slice_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_slice_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_11slice_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_instance_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_instance_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14instance_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_multiprocessor_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_multiprocessor_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_20multiprocessor_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_copy_engine_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_copy_engine_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17copy_engine_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_decoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_decoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13decoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_encoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_encoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13encoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_jpeg_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_jpeg_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_10jpeg_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_ofa_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_ofa_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_9ofa_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_memory_size_mb(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_memory_size_mb(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14memory_size_mb_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_4name_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of GpuInstanceProfileInfo_v2 (FASTCALL wrappers
 * generated from the .pyx source). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table: `ptr` is read-only; every struct field is read/write
 * through the getprop/setprop pairs defined above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_version, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_version, PyDoc_STR("int: "), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_id, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_id, PyDoc_STR("int: "), 0},
  {"is_p2p_supported", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_is_p2p_supported, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_is_p2p_supported, PyDoc_STR("int: "), 0},
  {"slice_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_slice_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_slice_count, PyDoc_STR("int: "), 0},
  {"instance_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_instance_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_instance_count, PyDoc_STR("int: "), 0},
  {"multiprocessor_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_multiprocessor_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_multiprocessor_count, PyDoc_STR("int: "), 0},
  {"copy_engine_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_copy_engine_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_copy_engine_count, PyDoc_STR("int: "), 0},
  {"decoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_decoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_decoder_count, PyDoc_STR("int: "), 0},
  {"encoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_encoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_encoder_count, PyDoc_STR("int: "), 0},
  {"jpeg_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_jpeg_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_jpeg_count, PyDoc_STR("int: "), 0},
  {"ofa_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_ofa_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_ofa_count, PyDoc_STR("int: "), 0},
  {"memory_size_mb", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_memory_size_mb, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_memory_size_mb, PyDoc_STR("int: "), 0},
  {"name", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_name, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_name, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for heap-type creation of GpuInstanceProfileInfo_v2 via
 * PyType_FromSpec (CYTHON_USE_TYPE_SPECS builds). Unlike
 * GpuInstancePlacement, this type has no sq/mp length or subscript-get
 * slots — only subscript assignment. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_doc, (void *)PyDoc_STR("GpuInstanceProfileInfo_v2()\n\nEmpty-initialize an instance of `nvmlGpuInstanceProfileInfo_v2_t`.\n\n\n.. seealso:: `nvmlGpuInstanceProfileInfo_v2_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2},
  {0, 0},  /* sentinel */
};
/* Type spec consumed by PyType_FromSpec at module init. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2_spec = {
  "cuda.bindings._nvml.GpuInstanceProfileInfo_v2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2),
  0,  /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2_slots,
};
#else

/* Number protocol for GpuInstanceProfileInfo_v2: only nb_int is set. */
static PyNumberMethods __pyx_tp_as_number_GpuInstanceProfileInfo_v2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only subscript assignment is supported (no length,
 * no subscript get). */
static PyMappingMethods __pyx_tp_as_mapping_GpuInstanceProfileInfo_v2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*mp_ass_subscript*/
};

/* Statically-allocated type object, used when heap type specs are not in
 * use (the #if CYTHON_USE_TYPE_SPECS branch above is the alternative). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuInstanceProfileInfo_v2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuInstanceProfileInfo_v2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuInstanceProfileInfo_v2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuInstanceProfileInfo_v2()\n\nEmpty-initialize an instance of `nvmlGpuInstanceProfileInfo_v2_t`.\n\n\n.. seealso:: `nvmlGpuInstanceProfileInfo_v2_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level vtable storage for GpuInstanceProfileInfo_v3 (tp_new below wires
 * instances to it through __pyx_vtabptr_*; filled in elsewhere). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;

/* tp_new: allocate a GpuInstanceProfileInfo_v3 instance, attach the
 * C-level vtable pointer, and initialize `_owner` to an owned reference
 * to Py_None. Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: run __dealloc__, release the `_owner` reference, then free
 * the object. The statement order (finalize -> untrack -> user dealloc ->
 * clear fields -> free) is load-bearing; do not reorder. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* PEP 442 finalization: if tp_finalize may resurrect the object,
   * PyObject_CallFinalizerFromDealloc returns nonzero and teardown stops. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* __dealloc__ may execute arbitrary code: preserve any in-flight
     * exception around the call, and temporarily bump the refcount so the
     * object cannot be torn down re-entrantly while it runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the type object and the `_owner` reference,
 * propagating the first nonzero visitor result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (self->_owner) {
    err = (*v)(self->_owner, a);
    if (err)
      return err;
  }
  return 0;
}

/* GC clear: swap `_owner` for Py_None and drop the previous reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *)o;
  PyObject *prev = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(prev);
  return 0;
}

/* mp_ass_subscript: forward `obj[i] = v` to the generated __setitem__;
 * subscript deletion (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11__setitem__(o, i, v);
}

/* tp_richcompare: equality is the only comparison implemented; Py_NE is
 * synthesized by negating the __eq__ result, and all other operators
 * return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9__eq__(o1, o2);
      /* NULL (error) and NotImplemented pass through unchanged. */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL; /* truth test raised */
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property accessor thunks for GpuInstanceProfileInfo_v3.
 * Each getprop/setprop pair adapts the CPython getset calling convention
 * (extra closure pointer `x`, unused here) to the Cython-generated
 * __get__/__set__ wrappers for one struct field. Setters uniformly reject
 * attribute deletion (v == NULL) with NotImplementedError, since the
 * underlying C struct fields cannot be removed. `ptr` (further below in the
 * getset table) is read-only and therefore has no setprop counterpart. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7version_3__set__(o, v);
  }
  else {
    /* v == NULL means `del obj.version`: not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_2id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_slice_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_slice_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_11slice_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_instance_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_instance_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14instance_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_multiprocessor_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_multiprocessor_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_20multiprocessor_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_copy_engine_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_copy_engine_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17copy_engine_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_decoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_decoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13decoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_encoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_encoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13encoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_jpeg_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_jpeg_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_10jpeg_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_ofa_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_ofa_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_9ofa_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_memory_size_mb(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_memory_size_mb(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14memory_size_mb_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_4name_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_capabilities(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_capabilities(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12capabilities_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for GpuInstanceProfileInfo_v3: factory classmethods
 * (from_data/from_ptr) and pickle support, all using Cython's
 * METH_FASTCALL|METH_KEYWORDS calling convention. Sentinel-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* PyGetSetDef table wiring each property name to its getprop/setprop thunk
 * above. `ptr` has a NULL setter (read-only); all struct fields are
 * read/write. Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_version, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_version, PyDoc_STR("int: "), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_id, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_id, PyDoc_STR("int: "), 0},
  {"slice_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_slice_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_slice_count, PyDoc_STR("int: "), 0},
  {"instance_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_instance_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_instance_count, PyDoc_STR("int: "), 0},
  {"multiprocessor_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_multiprocessor_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_multiprocessor_count, PyDoc_STR("int: "), 0},
  {"copy_engine_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_copy_engine_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_copy_engine_count, PyDoc_STR("int: "), 0},
  {"decoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_decoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_decoder_count, PyDoc_STR("int: "), 0},
  {"encoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_encoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_encoder_count, PyDoc_STR("int: "), 0},
  {"jpeg_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_jpeg_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_jpeg_count, PyDoc_STR("int: "), 0},
  {"ofa_count", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_ofa_count, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_ofa_count, PyDoc_STR("int: "), 0},
  {"memory_size_mb", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_memory_size_mb, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_memory_size_mb, PyDoc_STR("int: "), 0},
  {"name", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_name, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_name, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {"capabilities", __pyx_getprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_capabilities, __pyx_setprop_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_capabilities, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / type-specs path: the type is described as a slot array plus
 * PyType_Spec and created at module init with PyType_FromSpec (instead of
 * the static PyTypeObject in the #else branch below). Slot order is not
 * significant, but the {0, 0} terminator is required. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_doc, (void *)PyDoc_STR("GpuInstanceProfileInfo_v3()\n\nEmpty-initialize an instance of `nvmlGpuInstanceProfileInfo_v3_t`.\n\n\n.. seealso:: `nvmlGpuInstanceProfileInfo_v3_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3_spec = {
  "cuda.bindings._nvml.GpuInstanceProfileInfo_v3",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3_slots,
};
#else
#else

/* Static-type path: positional PyNumberMethods table. Only nb_int is
 * populated (int(obj)); every other numeric slot is disabled. Field order
 * must match the PyNumberMethods layout exactly — do not reorder. */
static PyNumberMethods __pyx_tp_as_number_GpuInstanceProfileInfo_v3 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment is supported for this type;
 * length and subscript lookup are not exposed here. */
static PyMappingMethods __pyx_tp_as_mapping_GpuInstanceProfileInfo_v3 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*mp_ass_subscript*/
};

/* Static PyTypeObject for GpuInstanceProfileInfo_v3 (non-type-specs build).
 * Positional initializer whose trailing fields are guarded by version /
 * implementation #ifs — the field order and conditionals must stay exactly
 * in sync with the Python headers this is compiled against. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuInstanceProfileInfo_v3", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuInstanceProfileInfo_v3, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuInstanceProfileInfo_v3, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuInstanceProfileInfo_v3()\n\nEmpty-initialize an instance of `nvmlGpuInstanceProfileInfo_v3_t`.\n\n\n.. seealso:: `nvmlGpuInstanceProfileInfo_v3_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstancePlacement __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstancePlacement;

/* tp_new slot for ComputeInstancePlacement: allocates the instance (args
 * are unused here; they are handled by tp_init), installs the Cython
 * vtable pointer, and initializes `_data` to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstancePlacement;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc slot for ComputeInstancePlacement.
 * Order matters: run a tp_finalize (if any, and only for exact-slot
 * matches to respect subclass overrides), untrack from the GC, release
 * `_data`, then free via the type's tp_free. Under type specs the heap
 * type itself is decref'd last, since instances own a reference to it. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstancePlacement) {
      /* Finalizer may resurrect the object; bail out if it does. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse slot for ComputeInstancePlacement: visits the type object
 * and the `_data` member (the Python object backing the struct array). */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)o;
  {
    /* Visit Py_TYPE(o) so the collector can account for the heap type. */
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* GC clear slot for ComputeInstancePlacement: swaps `_data` to Py_None
 * before releasing the old reference, keeping the slot always valid. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item slot for ComputeInstancePlacement: adapts the sequence protocol
 * (C integer index) to the mapping protocol by boxing the index as a
 * PyLong and delegating to mp_subscript (__getitem__). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript slot for ComputeInstancePlacement.
 * `obj[i] = v` is forwarded to the Cython-generated __setitem__ wrapper;
 * `del obj[i]` (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* Deletion request: report the fully qualified type name in the error. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_13__setitem__(o, i, v);
}

/* tp_richcompare slot for ComputeInstancePlacement.
 * Only == and != are implemented; != is derived by negating the __eq__
 * result. NULL and Py_NotImplemented results from __eq__ are passed
 * through unchanged, all other operators yield Py_NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstancePlacement(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_9__eq__(o1, o2);
  }
  if (op == Py_NE) {
    PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_9__eq__(o1, o2);
    if (unlikely(!eq_result) || eq_result == Py_NotImplemented) {
      /* Propagate errors and NotImplemented untouched. */
      return eq_result;
    }
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    PyObject *negated = truth ? Py_False : Py_True;
    Py_INCREF(negated);
    return negated;
  }
  return __Pyx_NewRef(Py_NotImplemented);
}

/* Property accessor thunks for ComputeInstancePlacement.
 * Each getprop/setprop pair adapts the CPython getset convention (unused
 * closure pointer `x`) to the Cython-generated __get__/__set__ wrappers.
 * Setters uniformly reject attribute deletion (v == NULL). `ptr` and
 * `_data` are read-only and have no setprop counterpart. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_start(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_start(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5start_3__set__(o, v);
  }
  else {
    /* v == NULL means `del obj.start`: not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Note: trailing underscore in `size_` avoids shadowing; exposed to Python
 * as the "size_" attribute in the getset table below. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_size_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_size_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5size__3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5_data_1__get__(o);
}

/* Method table for ComputeInstancePlacement: factory classmethods
 * (from_data/from_ptr) and pickle support; sentinel-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstancePlacement[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* PyGetSetDef table: `ptr` and `_data` are read-only (NULL setter);
 * `start` and `size_` are read/write. Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstancePlacement[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"start", __pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_start, __pyx_setprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_start, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"size_", __pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_size_, __pyx_setprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_size_, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_24ComputeInstancePlacement__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / type-specs path: slot array + PyType_Spec used with
 * PyType_FromSpec at module init (the #else branch below provides the
 * equivalent static PyTypeObject). Unlike GpuInstanceProfileInfo_v3, this
 * type also implements __len__ and __getitem__ (sequence/mapping access
 * to the underlying struct array). {0, 0} terminator is required. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_doc, (void *)PyDoc_STR("ComputeInstancePlacement(size=1)\n\nEmpty-initialize an array of `nvmlComputeInstancePlacement_t`.\n\nThe resulting object is of length `size` and of dtype `compute_instance_placement_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlComputeInstancePlacement_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstancePlacement},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement_spec = {
  "cuda.bindings._nvml.ComputeInstancePlacement",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement_slots,
};
#else
#else

/* Static-type path: positional PyNumberMethods table — only nb_int is
 * populated. Field order must match PyNumberMethods exactly. */
static PyNumberMethods __pyx_tp_as_number_ComputeInstancePlacement = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol: length via __len__ and item access via the sq_item
 * thunk (which delegates to __getitem__). */
static PySequenceMethods __pyx_tp_as_sequence_ComputeInstancePlacement = {
  __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol: full length / subscript / item-assignment support. */
static PyMappingMethods __pyx_tp_as_mapping_ComputeInstancePlacement = {
  __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*mp_ass_subscript*/
};

/* Static PyTypeObject for ComputeInstancePlacement (non-type-specs build).
 * Positional initializer with version/implementation-guarded trailing
 * fields; must stay exactly in sync with the Python headers in use. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ComputeInstancePlacement", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ComputeInstancePlacement, /*tp_as_number*/
  &__pyx_tp_as_sequence_ComputeInstancePlacement, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ComputeInstancePlacement, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ComputeInstancePlacement(size=1)\n\nEmpty-initialize an array of `nvmlComputeInstancePlacement_t`.\n\nThe resulting object is of length `size` and of dtype `compute_instance_placement_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlComputeInstancePlacement_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstancePlacement, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;

/* tp_new: allocate a ComputeInstanceProfileInfo_v2 extension object.
 * Installs the method vtable and points the _owner slot at a fresh
 * reference to None (no other Python object owns the struct data yet). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *self;
  self = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: tear down a ComputeInstanceProfileInfo_v2 instance.
 * Runs tp_finalize once (if set and not already finalized), untracks the
 * object from the cycle GC, calls the user-level __dealloc__ with the
 * refcount temporarily resurrected, releases _owner, then frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only run the finalizer from this exact dealloc slot (a subclass with its
   * own tp_dealloc is responsible for its own finalization). The finalizer
   * may resurrect the object, in which case we must stop deallocating. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across __dealloc__, and bump the
     * refcount so code reached from __dealloc__ cannot trigger a second,
     * recursive deallocation of this object. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  /* Drop the reference kept in the _owner slot. */
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (PyType_Spec builds) hold a reference from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report references held by the instance to the cycle GC —
 * first the type itself (for heap types), then the _owner object. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyObject *o, visitproc visit, void *arg) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)o;
  int res = __Pyx_call_type_traverse(o, 1, visit, arg);
  if (res != 0)
    return res;
  Py_VISIT(self->_owner);
  return 0;
}

/* tp_clear: break reference cycles by releasing _owner, replacing it with
 * None so the slot never dangles while the GC tears the cycle down. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: item assignment forwards to __setitem__; item deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11__setitem__(o, i, v);
}

/* tp_richcompare: == delegates to the generated __eq__; != is the boolean
 * negation of __eq__; all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_9__eq__(o1, o2);
  /* Propagate errors (NULL) and NotImplemented unchanged. */
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented)
    return eq_result;
  int truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0))
    return NULL;
  PyObject *negated = truth ? Py_False : Py_True;
  Py_INCREF(negated);
  return negated;
}

/* Property accessors for ComputeInstanceProfileInfo_v2.
 * Each getter forwards to the generated Cython property __get__ wrapper;
 * each setter forwards to the matching __set__ wrapper and rejects
 * attribute deletion (v == NULL) with NotImplementedError, since the
 * underlying C struct fields cannot be deleted.
 * `ptr` is read-only: no setter is generated for it. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_2id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_slice_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_slice_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_11slice_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_instance_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_instance_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14instance_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_multiprocessor_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_multiprocessor_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20multiprocessor_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_copy_engine_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_copy_engine_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_24shared_copy_engine_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_decoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_decoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_decoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_encoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_encoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_20shared_encoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_jpeg_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_jpeg_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17shared_jpeg_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_ofa_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_ofa_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16shared_ofa_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_4name_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for ComputeInstanceProfileInfo_v2: the from_data/from_ptr
 * constructors plus the pickling helpers, all using Cython's FASTCALL
 * keyword-capable calling convention. Terminated by a zero sentinel. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table for ComputeInstanceProfileInfo_v2: maps each
 * Python attribute name to its getter/setter pair above. `ptr` has a NULL
 * setter and is therefore read-only. Terminated by a zero sentinel. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_version, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_version, PyDoc_STR("int: "), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_id, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_id, PyDoc_STR("int: "), 0},
  {"slice_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_slice_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_slice_count, PyDoc_STR("int: "), 0},
  {"instance_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_instance_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_instance_count, PyDoc_STR("int: "), 0},
  {"multiprocessor_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_multiprocessor_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_multiprocessor_count, PyDoc_STR("int: "), 0},
  {"shared_copy_engine_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_copy_engine_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_copy_engine_count, PyDoc_STR("int: "), 0},
  {"shared_decoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_decoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_decoder_count, PyDoc_STR("int: "), 0},
  {"shared_encoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_encoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_encoder_count, PyDoc_STR("int: "), 0},
  {"shared_jpeg_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_jpeg_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_jpeg_count, PyDoc_STR("int: "), 0},
  {"shared_ofa_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_ofa_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_shared_ofa_count, PyDoc_STR("int: "), 0},
  {"name", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_name, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_name, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / Py3.9+ heap-type path: describe ComputeInstanceProfileInfo_v2
 * via PyType_Spec + PyType_Slot instead of a static PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_doc, (void *)PyDoc_STR("ComputeInstanceProfileInfo_v2()\n\nEmpty-initialize an instance of `nvmlComputeInstanceProfileInfo_v2_t`.\n\n\n.. seealso:: `nvmlComputeInstanceProfileInfo_v2_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2_spec = {
  "cuda.bindings._nvml.ComputeInstanceProfileInfo_v2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2_slots,
};
#else
/* Static-PyTypeObject path: only nb_int and mp_ass_subscript are populated
 * in the sub-method tables; every other slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_ComputeInstanceProfileInfo_v2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_ComputeInstanceProfileInfo_v2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ComputeInstanceProfileInfo_v2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ComputeInstanceProfileInfo_v2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ComputeInstanceProfileInfo_v2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ComputeInstanceProfileInfo_v2()\n\nEmpty-initialize an instance of `nvmlComputeInstanceProfileInfo_v2_t`.\n\n\n.. seealso:: `nvmlComputeInstanceProfileInfo_v2_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;

/* tp_new: allocate a ComputeInstanceProfileInfo_v3 extension object.
 * Installs the method vtable and points the _owner slot at a fresh
 * reference to None (no other Python object owns the struct data yet). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj))
    return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *self;
  self = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: tear down a ComputeInstanceProfileInfo_v3 instance.
 * Runs tp_finalize once (if set and not already finalized), untracks the
 * object from the cycle GC, calls the user-level __dealloc__ with the
 * refcount temporarily resurrected, releases _owner, then frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only run the finalizer from this exact dealloc slot (a subclass with its
   * own tp_dealloc is responsible for its own finalization). The finalizer
   * may resurrect the object, in which case we must stop deallocating. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across __dealloc__, and bump the
     * refcount so code reached from __dealloc__ cannot trigger a second,
     * recursive deallocation of this object. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  /* Drop the reference kept in the _owner slot. */
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (PyType_Spec builds) hold a reference from each instance. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report references held by the instance to the cycle GC —
 * first the type itself (for heap types), then the _owner object. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyObject *o, visitproc visit, void *arg) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)o;
  int res = __Pyx_call_type_traverse(o, 1, visit, arg);
  if (res != 0)
    return res;
  Py_VISIT(self->_owner);
  return 0;
}

/* tp_clear: break reference cycles by releasing _owner, replacing it with
 * None so the slot never dangles while the GC tears the cycle down. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: item assignment forwards to __setitem__; item deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11__setitem__(o, i, v);
}

/* tp_richcompare: == delegates to the generated __eq__; != is the boolean
 * negation of __eq__; all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_9__eq__(o1, o2);
  /* Propagate errors (NULL) and NotImplemented unchanged. */
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented)
    return eq_result;
  int truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0))
    return NULL;
  PyObject *negated = truth ? Py_False : Py_True;
  Py_INCREF(negated);
  return negated;
}

/* Property accessors for ComputeInstanceProfileInfo_v3.
 * Each getter forwards to the generated Cython property __get__ wrapper;
 * each setter forwards to the matching __set__ wrapper and rejects
 * attribute deletion (v == NULL) with NotImplementedError, since the
 * underlying C struct fields cannot be deleted.
 * `ptr` is read-only: no setter is generated for it. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_2id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_slice_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_slice_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_11slice_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_instance_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_instance_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14instance_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_multiprocessor_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_multiprocessor_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20multiprocessor_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_copy_engine_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_copy_engine_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_24shared_copy_engine_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_decoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_decoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_decoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_encoder_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_encoder_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_20shared_encoder_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_jpeg_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_jpeg_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17shared_jpeg_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_ofa_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_ofa_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16shared_ofa_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_4name_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_capabilities(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_capabilities(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12capabilities_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible method table for ComputeInstanceProfileInfo_v3 (fastcall
 * wrappers generated by Cython); terminated by the all-zero sentinel entry. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for ComputeInstanceProfileInfo_v3. "ptr" is read-only
 * (no setter); every other entry wires the getter/setter thunks defined
 * above. Terminated by the all-zero sentinel entry. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_version, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_version, PyDoc_STR("int: "), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_id, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_id, PyDoc_STR("int: "), 0},
  {"slice_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_slice_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_slice_count, PyDoc_STR("int: "), 0},
  {"instance_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_instance_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_instance_count, PyDoc_STR("int: "), 0},
  {"multiprocessor_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_multiprocessor_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_multiprocessor_count, PyDoc_STR("int: "), 0},
  {"shared_copy_engine_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_copy_engine_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_copy_engine_count, PyDoc_STR("int: "), 0},
  {"shared_decoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_decoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_decoder_count, PyDoc_STR("int: "), 0},
  {"shared_encoder_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_encoder_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_encoder_count, PyDoc_STR("int: "), 0},
  {"shared_jpeg_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_jpeg_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_jpeg_count, PyDoc_STR("int: "), 0},
  {"shared_ofa_count", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_ofa_count, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_shared_ofa_count, PyDoc_STR("int: "), 0},
  {"name", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_name, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_name, PyDoc_STR("~_numpy.int8: (array of length 96)."), 0},
  {"capabilities", __pyx_getprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_capabilities, __pyx_setprop_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_capabilities, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for ComputeInstanceProfileInfo_v3. Two mutually-exclusive
 * forms: a PyType_Spec/PyType_Slot pair when compiled against type specs
 * (Limited API / heap types), otherwise a statically-initialized
 * PyTypeObject with the classic positional slot layout. Both wire the same
 * dealloc/repr/int/setitem/traverse/clear/richcompare/init/new handlers. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_doc, (void *)PyDoc_STR("ComputeInstanceProfileInfo_v3()\n\nEmpty-initialize an instance of `nvmlComputeInstanceProfileInfo_v3_t`.\n\n\n.. seealso:: `nvmlComputeInstanceProfileInfo_v3_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3},
  {0, 0},
};
/* Spec consumed by PyType_FromSpec at module init to create the heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3_spec = {
  "cuda.bindings._nvml.ComputeInstanceProfileInfo_v3",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3_slots,
};
#else

/* Only nb_int is populated: instances convert to int via __int__. */
static PyNumberMethods __pyx_tp_as_number_ComputeInstanceProfileInfo_v3 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment is supported (no len(), no subscript read). */
static PyMappingMethods __pyx_tp_as_mapping_ComputeInstanceProfileInfo_v3 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ComputeInstanceProfileInfo_v3", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ComputeInstanceProfileInfo_v3, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ComputeInstanceProfileInfo_v3, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ComputeInstanceProfileInfo_v3()\n\nEmpty-initialize an instance of `nvmlComputeInstanceProfileInfo_v3_t`.\n\n\n.. seealso:: `nvmlComputeInstanceProfileInfo_v3_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpmSupport __pyx_vtable_4cuda_8bindings_5_nvml_GpmSupport;

/* tp_new for GpmSupport: allocates the extension-type instance, installs
 * the vtable pointer, and initializes the _owner field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpmSupport(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpmSupport;
  /* _owner starts as an owned reference to None. */
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc for GpmSupport: runs tp_finalize first (if set and not yet
 * run), untracks the object from the GC, calls the user __dealloc__ with
 * the refcount temporarily bumped (so the dying object looks alive while
 * Python code runs), releases _owner, then frees via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpmSupport(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    /* Only invoke the finalizer from this exact dealloc (not a subclass
     * override); it may resurrect the object, in which case we stop. */
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpmSupport) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object; drop it last. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for GpmSupport: visits the type (for heap types) and the
 * _owner reference so the cycle GC can see them. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpmSupport(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = (*v)(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* tp_clear for GpmSupport: breaks reference cycles by resetting _owner
 * back to None and dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpmSupport(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for GpmSupport: dispatches `obj[i] = v` to __setitem__;
 * `del obj[i]` (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpmSupport(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_11__setitem__(o, i, v);
}

/* tp_richcompare for GpmSupport: == delegates to __eq__; != is derived by
 * negating __eq__'s result; all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpmSupport(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    /* Invert the equality result to produce !=. */
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Getter thunk for GpmSupport.ptr (read-only property). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10GpmSupport_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_3ptr_1__get__(o);
  return value;
}

/* Getter thunk for GpmSupport.version. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10GpmSupport_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7version_1__get__(o);
  return value;
}

/* Setter thunk for GpmSupport.version; deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_10GpmSupport_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7version_3__set__(o, v);
}

/* Getter thunk for GpmSupport.is_supported_device. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10GpmSupport_is_supported_device(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_1__get__(o);
  return value;
}

/* Setter thunk for GpmSupport.is_supported_device; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_10GpmSupport_is_supported_device(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19is_supported_device_3__set__(o, v);
}

/* Python-visible method table for GpmSupport (fastcall wrappers generated
 * by Cython); terminated by the all-zero sentinel entry. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpmSupport[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10GpmSupport_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for GpmSupport. "ptr" is read-only (no setter); the other
 * entries wire the getter/setter thunks above. Zero-sentinel terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpmSupport[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_10GpmSupport_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_10GpmSupport_version, __pyx_setprop_4cuda_8bindings_5_nvml_10GpmSupport_version, PyDoc_STR("int: IN: Set to NVML_GPM_SUPPORT_VERSION."), 0},
  {"is_supported_device", __pyx_getprop_4cuda_8bindings_5_nvml_10GpmSupport_is_supported_device, __pyx_setprop_4cuda_8bindings_5_nvml_10GpmSupport_is_supported_device, PyDoc_STR("int: OUT: Indicates device support."), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for GpmSupport. Two mutually-exclusive forms: a
 * PyType_Spec/PyType_Slot pair when compiled against type specs (Limited
 * API / heap types), otherwise a statically-initialized PyTypeObject with
 * the classic positional slot layout. Both wire the same handlers. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpmSupport_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_doc, (void *)PyDoc_STR("GpmSupport()\n\nEmpty-initialize an instance of `nvmlGpmSupport_t`.\n\n\n.. seealso:: `nvmlGpmSupport_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpmSupport},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpmSupport},
  {0, 0},
};
/* Spec consumed by PyType_FromSpec at module init to create the heap type. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpmSupport_spec = {
  "cuda.bindings._nvml.GpmSupport",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpmSupport_slots,
};
#else

/* Only nb_int is populated: instances convert to int via __int__. */
static PyNumberMethods __pyx_tp_as_number_GpmSupport = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment is supported (no len(), no subscript read). */
static PyMappingMethods __pyx_tp_as_mapping_GpmSupport = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpmSupport, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpmSupport = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpmSupport", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpmSupport, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpmSupport, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpmSupport, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpmSupport()\n\nEmpty-initialize an instance of `nvmlGpmSupport_t`.\n\n\n.. seealso:: `nvmlGpmSupport_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpmSupport, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpmSupport, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpmSupport, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpmSupport, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpmSupport, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_10GpmSupport_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpmSupport, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 __pyx_vtable_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;

/* tp_new for DeviceCapabilities_v1: allocates the extension-type instance,
 * installs the vtable pointer, and initializes the _owner field to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;
  /* _owner starts as an owned reference to None. */
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc for DeviceCapabilities_v1: runs tp_finalize first (if set and
 * not yet run), untracks the object from the GC, calls the user __dealloc__
 * with the refcount temporarily bumped (so the dying object looks alive
 * while Python code runs), releases _owner, then frees via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    /* Only invoke the finalizer from this exact dealloc (not a subclass
     * override); it may resurrect the object, in which case we stop. */
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object; drop it last. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for DeviceCapabilities_v1: visits the type (for heap types)
 * and the _owner reference so the cycle GC can see them. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) return err;
  if (self->_owner) {
    err = (*v)(self->_owner, a);
    if (err) return err;
  }
  return 0;
}

/* tp_clear for DeviceCapabilities_v1: breaks reference cycles by resetting
 * _owner back to None and dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for DeviceCapabilities_v1: dispatches `obj[i] = v` to
 * __setitem__; `del obj[i]` (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for DeviceCapabilities_v1: == delegates to __eq__; != is
 * derived by negating __eq__'s result; other operators -> NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceCapabilities_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    /* Invert the equality result to produce !=. */
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Getter thunk for DeviceCapabilities_v1.ptr (read-only property). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_3ptr_1__get__(o);
  return value;
}

/* Getter thunk for DeviceCapabilities_v1.version. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_1__get__(o);
  return value;
}

/* Setter thunk for DeviceCapabilities_v1.version; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7version_3__set__(o, v);
}

/* Getter thunk for DeviceCapabilities_v1.cap_mask. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_cap_mask(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_1__get__(o);
  return value;
}

/* Setter thunk for DeviceCapabilities_v1.cap_mask; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_cap_mask(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_8cap_mask_3__set__(o, v);
}

/* Python-visible method table for DeviceCapabilities_v1 (fastcall wrappers
 * generated by Cython); terminated by the all-zero sentinel entry. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_DeviceCapabilities_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for DeviceCapabilities_v1. "ptr" is read-only (no setter);
 * the other entries wire the getter/setter thunks above. Zero-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_DeviceCapabilities_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"cap_mask", __pyx_getprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_cap_mask, __pyx_setprop_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_cap_mask, PyDoc_STR("int: OUT: Bit mask of capabilities."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for DeviceCapabilities_v1 used with PyType_FromSpec (type-spec
 * builds); mirrors the static PyTypeObject in the #else branch below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_doc, (void *)PyDoc_STR("DeviceCapabilities_v1()\n\nEmpty-initialize an instance of `nvmlDeviceCapabilities_v1_t`.\n\n\n.. seealso:: `nvmlDeviceCapabilities_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCapabilities_v1},
  {0, 0},
};
/* Spec consumed by PyType_FromSpec at module init to create the
 * DeviceCapabilities_v1 heap type from the slot table above. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1_spec = {
  "cuda.bindings._nvml.DeviceCapabilities_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1_slots,
};
#else

/* Number protocol for DeviceCapabilities_v1: only nb_int is populated, so
 * instances convert to int via __int__; all other slots are empty. */
static PyNumberMethods __pyx_tp_as_number_DeviceCapabilities_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol for DeviceCapabilities_v1: only item assignment is
 * supported (no len(), no subscript read). */
static PyMappingMethods __pyx_tp_as_mapping_DeviceCapabilities_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*mp_ass_subscript*/
};

/* Static PyTypeObject for cuda.bindings._nvml.DeviceCapabilities_v1, used
 * when CYTHON_USE_TYPE_SPECS is disabled; mirrors the PyType_Spec variant.
 * Field order follows the CPython struct layout, so it must not be changed. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""DeviceCapabilities_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_DeviceCapabilities_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_DeviceCapabilities_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("DeviceCapabilities_v1()\n\nEmpty-initialize an instance of `nvmlDeviceCapabilities_v1_t`.\n\n\n.. seealso:: `nvmlDeviceCapabilities_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 __pyx_vtable_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;

/* tp_new: allocate a DeviceAddressingMode_v1 instance, install its C-level
 * vtable pointer and initialize the _owner reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(obj == NULL)) {
    return NULL;
  }
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: run tp_finalize if present (may resurrect the object), call the
 * user-level __dealloc__ with the object temporarily kept alive, release the
 * _owner reference, then free the instance memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer from the most-derived dealloc; if it
     resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across __dealloc__, and bump the
       refcount so the dying object cannot be collected re-entrantly while
       the destructor body runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type object and the _owner reference for the GC. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) {
    return err;
  }
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err) {
      return err;
    }
  }
  return 0;
}

/* tp_clear: break a potential reference cycle by resetting _owner to None. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; item deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: implement == via __eq__ and != as its boolean inverse;
 * every other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_9__eq__(o1, o2);
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented) {
    /* Propagate errors and NotImplemented unchanged. */
    return eq_result;
  }
  int truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0)) {
    return NULL;
  }
  PyObject *inverted = truth ? Py_False : Py_True;
  Py_INCREF(inverted);
  return inverted;
}

/* Getter for the read-only "ptr" property (address of the wrapped data). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_3ptr_1__get__(o);
}

/* Getter for the "version" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_1__get__(o);
}

/* Setter for the "version" property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the "value" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_value(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_1__get__(o);
}

/* Setter for the "value" property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_value(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5value_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: from_data/from_ptr alternative constructors plus the
 * pickle-protocol helpers generated by Cython. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table: read-only "ptr" plus the read/write struct fields. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_version, PyDoc_STR("int: API version."), 0},
  {"value", __pyx_getprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_value, __pyx_setprop_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_value, PyDoc_STR("int: One of `nvmlDeviceAddressingModeType_t`."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for the heap-type (PyType_Spec) creation path. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_doc, (void *)PyDoc_STR("DeviceAddressingMode_v1()\n\nEmpty-initialize an instance of `nvmlDeviceAddressingMode_v1_t`.\n\n\n.. seealso:: `nvmlDeviceAddressingMode_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1},
  {0, 0},
};
/* PyType_Spec used to create DeviceAddressingMode_v1 as a heap type when
 * CYTHON_USE_TYPE_SPECS is enabled; itemsize 0 (fixed-size instances). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1_spec = {
  "cuda.bindings._nvml.DeviceAddressingMode_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1_slots,
};
#else

/* Number-protocol table for DeviceAddressingMode_v1: only nb_int (__int__)
 * is implemented; every other numeric slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_DeviceAddressingMode_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping-protocol table for DeviceAddressingMode_v1: only item assignment
 * (__setitem__) is implemented; subscript read and len() are not provided. */
static PyMappingMethods __pyx_tp_as_mapping_DeviceAddressingMode_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*mp_ass_subscript*/
};

/* Static PyTypeObject for cuda.bindings._nvml.DeviceAddressingMode_v1, used
 * when CYTHON_USE_TYPE_SPECS is disabled; mirrors the PyType_Spec variant.
 * Field order follows the CPython struct layout, so it must not be changed. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""DeviceAddressingMode_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_DeviceAddressingMode_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_DeviceAddressingMode_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("DeviceAddressingMode_v1()\n\nEmpty-initialize an instance of `nvmlDeviceAddressingMode_v1_t`.\n\n\n.. seealso:: `nvmlDeviceAddressingMode_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_RepairStatus_v1 __pyx_vtable_4cuda_8bindings_5_nvml_RepairStatus_v1;

/* tp_new: allocate a RepairStatus_v1 instance, install its C-level vtable
 * pointer and initialize the _owner reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_RepairStatus_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(obj == NULL)) {
    return NULL;
  }
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_RepairStatus_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: run tp_finalize if present (may resurrect the object), call the
 * user-level __dealloc__ with the object temporarily kept alive, release the
 * _owner reference, then free the instance memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_RepairStatus_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer from the most-derived dealloc; if it
     resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_RepairStatus_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across __dealloc__, and bump the
       refcount so the dying object cannot be collected re-entrantly while
       the destructor body runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type object and the _owner reference for the GC. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_RepairStatus_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) {
    return err;
  }
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err) {
      return err;
    }
  }
  return 0;
}

/* tp_clear: break a potential reference cycle by resetting _owner to None. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_RepairStatus_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; item deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_RepairStatus_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: implement == via __eq__ and != as its boolean inverse;
 * every other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_RepairStatus_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_9__eq__(o1, o2);
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented) {
    /* Propagate errors and NotImplemented unchanged. */
    return eq_result;
  }
  int truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0)) {
    return NULL;
  }
  PyObject *inverted = truth ? Py_False : Py_True;
  Py_INCREF(inverted);
  return inverted;
}

/* Getter for the read-only "ptr" property (address of the wrapped data). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_3ptr_1__get__(o);
}

/* Getter for the "version" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_1__get__(o);
}

/* Setter for the "version" property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the "b_channel_repair_pending" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_channel_repair_pending(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_1__get__(o);
}

/* Setter for the "b_channel_repair_pending" property; attribute deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_channel_repair_pending(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_24b_channel_repair_pending_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the "b_tpc_repair_pending" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_tpc_repair_pending(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_1__get__(o);
}

/* Setter for the "b_tpc_repair_pending" property; attribute deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_tpc_repair_pending(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_20b_tpc_repair_pending_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: from_data/from_ptr alternative constructors plus the
 * pickle-protocol helpers generated by Cython. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_RepairStatus_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15RepairStatus_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table: read-only "ptr" plus the read/write struct fields. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_RepairStatus_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_version, PyDoc_STR("int: API version number."), 0},
  {"b_channel_repair_pending", __pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_channel_repair_pending, __pyx_setprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_channel_repair_pending, PyDoc_STR("int: Reference to `unsigned` int."), 0},
  {"b_tpc_repair_pending", __pyx_getprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_tpc_repair_pending, __pyx_setprop_4cuda_8bindings_5_nvml_15RepairStatus_v1_b_tpc_repair_pending, PyDoc_STR("int: Reference to `unsigned` int."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for the heap-type (PyType_Spec) creation path. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_doc, (void *)PyDoc_STR("RepairStatus_v1()\n\nEmpty-initialize an instance of `nvmlRepairStatus_v1_t`.\n\n\n.. seealso:: `nvmlRepairStatus_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_RepairStatus_v1},
  {0, 0},
};
/* PyType_Spec used to create RepairStatus_v1 as a heap type when
 * CYTHON_USE_TYPE_SPECS is enabled; itemsize 0 (fixed-size instances). */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1_spec = {
  "cuda.bindings._nvml.RepairStatus_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1_slots,
};
#else

/* Number-protocol table for RepairStatus_v1: only nb_int (__int__)
 * is implemented; every other numeric slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_RepairStatus_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping-protocol table for RepairStatus_v1: only item assignment
 * (__setitem__) is implemented; subscript read and len() are not provided. */
static PyMappingMethods __pyx_tp_as_mapping_RepairStatus_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_RepairStatus_v1, /*mp_ass_subscript*/
};

/* Static PyTypeObject for cuda.bindings._nvml.RepairStatus_v1, used
 * when CYTHON_USE_TYPE_SPECS is disabled; mirrors the PyType_Spec variant.
 * Field order follows the CPython struct layout, so it must not be changed. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""RepairStatus_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_RepairStatus_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_RepairStatus_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("RepairStatus_v1()\n\nEmpty-initialize an instance of `nvmlRepairStatus_v1_t`.\n\n\n.. seealso:: `nvmlRepairStatus_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15RepairStatus_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_RepairStatus_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Pdi_v1 __pyx_vtable_4cuda_8bindings_5_nvml_Pdi_v1;

/* tp_new: allocate a Pdi_v1 instance, install its C-level vtable pointer
 * and initialize the _owner reference to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Pdi_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(obj == NULL)) {
    return NULL;
  }
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_Pdi_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc: run tp_finalize if present (may resurrect the object), call the
 * user-level __dealloc__ with the object temporarily kept alive, release the
 * _owner reference, then free the instance memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Pdi_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only invoke the finalizer from the most-derived dealloc; if it
     resurrects the object, abort deallocation. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Pdi_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across __dealloc__, and bump the
       refcount so the dying object cannot be collected re-entrantly while
       the destructor body runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type object and the _owner reference for the GC. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_Pdi_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err) {
    return err;
  }
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err) {
      return err;
    }
  }
  return 0;
}

/* tp_clear: break a potential reference cycle by resetting _owner to None. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_Pdi_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; item deletion
 * (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Pdi_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: implement == via __eq__ and != as its boolean inverse;
 * every other comparison yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Pdi_v1(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_9__eq__(o1, o2);
  if (unlikely(!eq_result) || eq_result == Py_NotImplemented) {
    /* Propagate errors and NotImplemented unchanged. */
    return eq_result;
  }
  int truth = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (unlikely(truth < 0)) {
    return NULL;
  }
  PyObject *inverted = truth ? Py_False : Py_True;
  Py_INCREF(inverted);
  return inverted;
}

/* Getter for the read-only "ptr" property (address of the wrapped data). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Pdi_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_3ptr_1__get__(o);
}

/* Getter for the "version" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Pdi_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7version_1__get__(o);
}

/* Setter for the "version" property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_6Pdi_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the "value" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Pdi_v1_value(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5value_1__get__(o);
}

/* Setter for the "value" property; attribute deletion (v == NULL) is
 * rejected with NotImplementedError. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_6Pdi_v1_value(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5value_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_Pdi_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Pdi_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_Pdi_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_6Pdi_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_6Pdi_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_6Pdi_v1_version, PyDoc_STR("int: API version number."), 0},
  {"value", __pyx_getprop_4cuda_8bindings_5_nvml_6Pdi_v1_value, __pyx_setprop_4cuda_8bindings_5_nvml_6Pdi_v1_value, PyDoc_STR("int: 64-bit PDI value"), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build (Limited API / heap types): Pdi_v1 is described as a
 * slot array plus a PyType_Spec; the same callbacks as the static
 * PyTypeObject branch below are wired in via slot IDs. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_Pdi_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_doc, (void *)PyDoc_STR("Pdi_v1()\n\nEmpty-initialize an instance of `nvmlPdi_v1_t`.\n\n\n.. seealso:: `nvmlPdi_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_Pdi_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_Pdi_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_Pdi_v1_spec = {
  "cuda.bindings._nvml.Pdi_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_Pdi_v1_slots,
};
#else

/* Classic static-PyTypeObject build. Number protocol: only nb_int
 * (__int__) is provided; every other numeric slot is empty. */
static PyNumberMethods __pyx_tp_as_number_Pdi_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (mp_ass_subscript) is wired;
 * length and item lookup are left empty here. */
static PyMappingMethods __pyx_tp_as_mapping_Pdi_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Pdi_v1, /*mp_ass_subscript*/
};

/* Static type object for cuda.bindings._nvml.Pdi_v1 (GC-enabled,
 * subclassable via Py_TPFLAGS_BASETYPE). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_Pdi_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Pdi_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_Pdi_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_Pdi_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("Pdi_v1()\n\nEmpty-initialize an instance of `nvmlPdi_v1_t`.\n\n\n.. seealso:: `nvmlPdi_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_6Pdi_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_Pdi_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the DevicePowerMizerModes_v1 C-method vtable (filled in
 * elsewhere in the generated module). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 __pyx_vtable_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;

/* tp_new: allocates the extension object, installs the vtable pointer,
 * and initializes the `_owner` field to Py_None so it is never NULL. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: runs tp_finalize first when available, untracks from the GC,
 * invokes the user __dealloc__ with the refcount temporarily bumped (so a
 * stray reference inside __dealloc__ cannot re-enter dealloc) while
 * saving/restoring any pending exception, drops `_owner`, then frees via
 * the type's tp_free (and decrefs the heap type in type-spec builds). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC tp_traverse slot: visits the type (via the helper) and the single
 * Python reference held by the object, `_owner`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC tp_clear slot: replaces `_owner` with Py_None (keeps the field
 * non-NULL) and releases the previous reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript slot: item assignment forwards to __setitem__;
 * item deletion (v == NULL) raises NotImplementedError naming the type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare slot: __eq__ handles Py_EQ; Py_NE negates the __eq__
 * result (NotImplemented/error pass through); other ops return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Read-only `ptr` property trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_3ptr_1__get__(o);
}

/* `current_mode` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_current_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_1__get__(o);
}

/* `current_mode` setter; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_current_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12current_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `mode` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_1__get__(o);
}

/* `mode` setter; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_4mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `supported_power_mizer_modes` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_supported_power_mizer_modes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_1__get__(o);
}

/* `supported_power_mizer_modes` setter; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_supported_power_mizer_modes(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_27supported_power_mizer_modes_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table: alternate constructors plus Cython pickle hooks. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table: `ptr` is read-only; the three mode fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"current_mode", __pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_current_mode, __pyx_setprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_current_mode, PyDoc_STR("int: OUT: the current powermizer mode."), 0},
  {"mode", __pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_mode, __pyx_setprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_mode, PyDoc_STR("int: IN: the powermizer mode to set."), 0},
  {"supported_power_mizer_modes", __pyx_getprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_supported_power_mizer_modes, __pyx_setprop_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_supported_power_mizer_modes, PyDoc_STR("int: OUT: Bitmask of supported powermizer modes."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build: DevicePowerMizerModes_v1 described as slots + spec,
 * mirroring the static PyTypeObject branch below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_doc, (void *)PyDoc_STR("DevicePowerMizerModes_v1()\n\nEmpty-initialize an instance of `nvmlDevicePowerMizerModes_v1_t`.\n\n\n.. seealso:: `nvmlDevicePowerMizerModes_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1_spec = {
  "cuda.bindings._nvml.DevicePowerMizerModes_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1_slots,
};
#else

/* Classic build. Number protocol: only nb_int (__int__) is populated. */
static PyNumberMethods __pyx_tp_as_number_DevicePowerMizerModes_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment is wired. */
static PyMappingMethods __pyx_tp_as_mapping_DevicePowerMizerModes_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*mp_ass_subscript*/
};

/* Static type object for cuda.bindings._nvml.DevicePowerMizerModes_v1
 * (GC-enabled, subclassable). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""DevicePowerMizerModes_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_DevicePowerMizerModes_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_DevicePowerMizerModes_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("DevicePowerMizerModes_v1()\n\nEmpty-initialize an instance of `nvmlDevicePowerMizerModes_v1_t`.\n\n\n.. seealso:: `nvmlDevicePowerMizerModes_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the EccSramUniqueUncorrectedErrorEntry_v1 C-method vtable. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 __pyx_vtable_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;

/* tp_new: allocates the object, installs the vtable pointer, and
 * initializes the `_data` field to Py_None (this type holds `_data`,
 * unlike the `_owner`-based types above). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: optional tp_finalize, GC untrack, drop `_data`, free via the
 * type's tp_free (decref the heap type under type-spec builds). Note: no
 * user __dealloc__ call here, unlike DevicePowerMizerModes_v1. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC tp_traverse slot: visits the type and the `_data` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* GC tp_clear slot: replaces `_data` with Py_None and drops the old ref. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item slot: boxes the C index into a PyLong and forwards to the
 * type's mp_subscript, so integer indexing shares the mapping __getitem__
 * implementation. Slot lookup differs per build mode. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript slot: assignment forwards to __setitem__; deletion
 * (v == NULL) raises NotImplementedError naming the type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare slot: __eq__ handles Py_EQ; Py_NE negates the __eq__
 * result (NotImplemented/error pass through); other ops return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Read-only `ptr` property trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3ptr_1__get__(o);
}

/* `unit` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_unit(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_1__get__(o);
}

/* `unit` setter; attribute deletion (v == NULL) is rejected with
 * NotImplementedError("__del__") — same pattern for all setters below. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_unit(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_4unit_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `location` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_location(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_1__get__(o);
}

/* `location` setter; deletion rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_location(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_8location_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `sublocation` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_sublocation(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_1__get__(o);
}

/* `sublocation` setter; deletion rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_sublocation(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11sublocation_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `extlocation` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_extlocation(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_1__get__(o);
}

/* `extlocation` setter; deletion rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_extlocation(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11extlocation_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `address` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_address(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_1__get__(o);
}

/* `address` setter; deletion rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_address(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7address_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `is_parity` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_is_parity(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_1__get__(o);
}

/* `is_parity` setter; deletion rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_is_parity(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_9is_parity_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* `count` property getter trampoline. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_1__get__(o);
}

/* `count` setter; deletion rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Read-only `_data` property trampoline (no setter is generated). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5_data_1__get__(o);
}

/* Method table: alternate constructors plus Cython pickle hooks. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* tp_getset table for EccSramUniqueUncorrectedErrorEntry_v1.
 * `ptr` and `_data` are read-only (no setter); the struct fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"unit", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_unit, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_unit, PyDoc_STR("Union[~_numpy.uint32, int]: the SRAM unit index"), 0},
  {"location", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_location, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_location, PyDoc_STR("Union[~_numpy.uint32, int]: the error location within the SRAM unit"), 0},
  {"sublocation", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_sublocation, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_sublocation, PyDoc_STR("Union[~_numpy.uint32, int]: the error sublocation within the SRAM unit"), 0},
  {"extlocation", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_extlocation, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_extlocation, PyDoc_STR("Union[~_numpy.uint32, int]: the error extlocation within the SRAM unit"), 0},
  {"address", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_address, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_address, PyDoc_STR("Union[~_numpy.uint32, int]: the error address within the SRAM unit"), 0},
  {"is_parity", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_is_parity, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_is_parity, PyDoc_STR("Union[~_numpy.uint32, int]: if the SRAM error is parity or not"), 0},
  {"count", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_count, __pyx_setprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_count, PyDoc_STR("Union[~_numpy.uint32, int]: the error count at the same SRAM address"), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1__data, 0, 0, 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot table used when the type is created via PyType_FromSpec
 * (Limited API / type-specs build) instead of a static PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_doc, (void *)PyDoc_STR("EccSramUniqueUncorrectedErrorEntry_v1(size=1)\n\nEmpty-initialize an array of `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`.\n\nThe resulting object is of length `size` and of dtype `ecc_sram_unique_uncorrected_error_entry_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1},
  {0, 0},  /* sentinel */
};
/* PyType_Spec pairing the slot table above with the type's name/size/flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1_spec = {
  "cuda.bindings._nvml.EccSramUniqueUncorrectedErrorEntry_v1", /* name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /* flags */
  __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1_slots,
};
#else

/* PyNumberMethods for the static-PyTypeObject build: only nb_int (__int__)
 * is implemented; every other numeric slot is left empty. */
static PyNumberMethods __pyx_tp_as_number_EccSramUniqueUncorrectedErrorEntry_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PySequenceMethods: the type supports len() and integer indexing only. */
static PySequenceMethods __pyx_tp_as_sequence_EccSramUniqueUncorrectedErrorEntry_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* PyMappingMethods: len(), obj[key] and obj[key] = value are supported. */
static PyMappingMethods __pyx_tp_as_mapping_EccSramUniqueUncorrectedErrorEntry_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*mp_ass_subscript*/
};

/* Static PyTypeObject for EccSramUniqueUncorrectedErrorEntry_v1 (used when
 * CYTHON_USE_TYPE_SPECS is disabled). Mirrors the slot table in the #if branch. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""EccSramUniqueUncorrectedErrorEntry_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_as_number*/
  &__pyx_tp_as_sequence_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("EccSramUniqueUncorrectedErrorEntry_v1(size=1)\n\nEmpty-initialize an array of `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`.\n\nThe resulting object is of length `size` and of dtype `ecc_sram_unique_uncorrected_error_entry_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlEccSramUniqueUncorrectedErrorEntry_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 __pyx_vtable_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;

/* tp_new for GpuFabricInfo_v3: allocate the instance, install the vtable
 * pointer, and initialize `_owner` to None (owned reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  return self;
}

/* tp_dealloc for GpuFabricInfo_v3. Runs the user __dealloc__ with the
 * refcount temporarily bumped (so the object is not re-entered by the GC),
 * preserving any pending exception around the call, then drops `_owner`
 * and releases the memory via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a tp_finalize slot exists and has not run yet, invoke it first; a
   * nonzero return means the finalizer resurrected the object, so abort. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types hold a reference to their type object; release it last. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type (heap-type bookkeeping) and the `_owner`
 * reference held by the instance. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = (*v)(self->_owner, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear: replace `_owner` with None (not NULL) so later attribute access
 * on a cleared instance stays safe, then drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: `obj[i] = v` delegates to __setitem__; `del obj[i]`
 * (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11__setitem__(o, i, v);
}

/* tp_richcompare: only __eq__ is defined in Cython code, so Py_NE is derived
 * by negating the __eq__ result; all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuFabricInfo_v3(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    /* Invert: NE is the logical negation of EQ. */
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_cluster_uuid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_cluster_uuid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12cluster_uuid_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_status(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_status(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_6status_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_clique_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_clique_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_9clique_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_state(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_state(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5state_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_mask(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_mask(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_11health_mask_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_summary(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_summary(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14health_summary_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* tp_methods table for GpuFabricInfo_v3 (FASTCALL-with-keywords convention). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuFabricInfo_v3[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* tp_getset table for GpuFabricInfo_v3; `ptr` is read-only. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuFabricInfo_v3[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_version, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_version, PyDoc_STR("int: Structure version identifier (set to nvmlGpuFabricInfo_v2)"), 0},
  {"cluster_uuid", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_cluster_uuid, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_cluster_uuid, PyDoc_STR("~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs."), 0},
  {"status", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_status, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_status, PyDoc_STR("int: Probe Error status, if any. Must be checked only if Probe state returns \"complete\"."), 0},
  {"clique_id", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_clique_id, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_clique_id, PyDoc_STR("int: ID of the fabric clique to which this GPU belongs."), 0},
  {"state", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_state, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_state, PyDoc_STR("int: Current Probe State of GPU registration process. See NVML_GPU_FABRIC_STATE_*."), 0},
  {"health_mask", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_mask, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_mask, PyDoc_STR("int: GPU Fabric health Status Mask. See NVML_GPU_FABRIC_HEALTH_MASK_*."), 0},
  {"health_summary", __pyx_getprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_summary, __pyx_setprop_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_health_summary, PyDoc_STR("int: GPU Fabric health summary. See NVML_GPU_FABRIC_HEALTH_SUMMARY_*."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Slot table for GpuFabricInfo_v3 (type-specs build). Unlike the
 * array-like types above, this type exposes no sequence/length slots. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_doc, (void *)PyDoc_STR("GpuFabricInfo_v3()\n\nEmpty-initialize an instance of `nvmlGpuFabricInfo_v3_t`.\n\n\n.. seealso:: `nvmlGpuFabricInfo_v3_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuFabricInfo_v3},
  {0, 0},  /* sentinel */
};
/* PyType_Spec pairing the slot table above with the type's name/size/flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3_spec = {
  "cuda.bindings._nvml.GpuFabricInfo_v3", /* name */
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3), /* basicsize */
  0, /* itemsize */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /* flags */
  __pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3_slots,
};
#else

/* PyNumberMethods for the static-PyTypeObject build: only nb_int (__int__). */
static PyNumberMethods __pyx_tp_as_number_GpuFabricInfo_v3 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* PyMappingMethods: only item assignment is wired (which then rejects
 * deletion); no length or subscript read support for this type. */
static PyMappingMethods __pyx_tp_as_mapping_GpuFabricInfo_v3 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*mp_ass_subscript*/
};

/* Static PyTypeObject for GpuFabricInfo_v3 (used when CYTHON_USE_TYPE_SPECS
 * is disabled). Mirrors the slot table in the #if branch above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuFabricInfo_v3", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuFabricInfo_v3, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuFabricInfo_v3, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuFabricInfo_v3()\n\nEmpty-initialize an instance of `nvmlGpuFabricInfo_v3_t`.\n\n\n.. seealso:: `nvmlGpuFabricInfo_v3_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;

/* tp_new for NvlinkFirmwareVersion: allocate the instance, install the
 * vtable pointer, and initialize `_owner` to None (owned reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!self)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *inst =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)self;
  inst->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;
  Py_INCREF(Py_None);
  inst->_owner = Py_None;
  return self;
}

/* tp_dealloc for NvlinkFirmwareVersion. Runs the user __dealloc__ with the
 * refcount temporarily bumped, preserving any pending exception around the
 * call, then drops `_owner` and releases the memory via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a tp_finalize slot exists and has not run yet, invoke it first; a
   * nonzero return means the finalizer resurrected the object, so abort. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types hold a reference to their type object; release it last. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type (heap-type bookkeeping) and the `_owner`
 * reference held by the instance. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = (*v)(self->_owner, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear: replace `_owner` with None (not NULL) so later attribute access
 * on a cleared instance stays safe, then drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *)o;
  PyObject *old_owner = ((PyObject*)self->_owner);
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: `obj[i] = v` delegates to __setitem__; `del obj[i]`
 * (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_11__setitem__(o, i, v);
}

/* tp_richcompare: only __eq__ is defined in Cython code, so Py_NE is derived
 * by negating the __eq__ result; all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0)) return NULL;
    /* Invert: NE is the logical negation of EQ. */
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Property getters/setters for NvlinkFirmwareVersion. Each getter forwards to
 * the Cython-generated __get__ wrapper; each setter forwards to __set__ and
 * rejects deletion (v == NULL) with NotImplementedError. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_3ptr_1__get__(o);
  return value;
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_ucode_type(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_ucode_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_10ucode_type_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_major(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_major(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5major_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_minor(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_minor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5minor_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_sub_minor(PyObject *o, CYTHON_UNUSED void *x) {
  PyObject *value = __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_1__get__(o);
  return value;
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_sub_minor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_9sub_minor_3__set__(o, v);
}

/* Method table for NvlinkFirmwareVersion: the generated from_data/from_ptr
 * alternate constructors plus the pickle helpers Cython always emits, all
 * registered with the FASTCALL calling convention plus keyword support. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for NvlinkFirmwareVersion: "ptr" is read-only (NULL setter);
 * the struct fields ucode_type/major/minor/sub_minor are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"ucode_type", __pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_ucode_type, __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_ucode_type, PyDoc_STR("int: "), 0},
  {"major", __pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_major, __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_major, PyDoc_STR("int: "), 0},
  {"minor", __pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_minor, __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_minor, PyDoc_STR("int: "), 0},
  {"sub_minor", __pyx_getprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_sub_minor, __pyx_setprop_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_sub_minor, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build (Limited API / heap types): NvlinkFirmwareVersion is
 * created at runtime from this slot list via PyType_FromSpec rather than
 * from a statically initialized PyTypeObject. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_doc, (void *)PyDoc_STR("NvlinkFirmwareVersion()\n\nEmpty-initialize an instance of `nvmlNvlinkFirmwareVersion_t`.\n\n\n.. seealso:: `nvmlNvlinkFirmwareVersion_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion},
  {0, 0},  /* sentinel */
};
/* Spec: fully-qualified name, instance size, itemsize 0, GC-enabled flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion_spec = {
  "cuda.bindings._nvml.NvlinkFirmwareVersion",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion_slots,
};
#else

/* Classic build: statically initialized slot tables and PyTypeObject for
 * NvlinkFirmwareVersion.  Only nb_int is populated (supports int(obj)). */
static PyNumberMethods __pyx_tp_as_number_NvlinkFirmwareVersion = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment (obj[i] = v) is supported; no length or lookup. */
static PyMappingMethods __pyx_tp_as_mapping_NvlinkFirmwareVersion = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*mp_ass_subscript*/
};

/* Static type object; field order follows the CPython PyTypeObject layout
 * and must not be rearranged. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""NvlinkFirmwareVersion", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_NvlinkFirmwareVersion, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_NvlinkFirmwareVersion, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("NvlinkFirmwareVersion()\n\nEmpty-initialize an instance of `nvmlNvlinkFirmwareVersion_t`.\n\n\n.. seealso:: `nvmlNvlinkFirmwareVersion_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ExcludedDeviceInfo __pyx_vtable_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;

/* tp_new: allocate an ExcludedDeviceInfo instance, attach the C-level
 * vtable, and initialise its _owner field to None (no owning object yet).
 * ctor args are ignored here; __init__ consumes them later. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *self;
  if (unlikely(!obj)) return 0;
  self = (struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  return obj;
}

/* tp_dealloc for ExcludedDeviceInfo.  Order matters: run tp_finalize (may
 * resurrect the object), untrack from the GC, call the Cython __dealloc__
 * with the refcount temporarily bumped and any pending exception saved,
 * drop owned references, then release the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Call tp_finalize (if defined and not already run); only from the
   * exact dealloc for this type, so subclasses handle it themselves.
   * If the finalizer resurrected the object, stop deallocating. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily raise the refcount to keep the object alive while the
     * user-visible __dealloc__ runs; preserve any in-flight exception. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type object (via __Pyx_call_type_traverse) and
 * the single PyObject reference this instance holds, _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: break the reference cycle through _owner by replacing it with
 * a fresh None reference (never NULL, so the field stays always-valid),
 * then dropping the old value. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: obj[i] = v forwards to the generated __setitem__;
 * `del obj[i]` (v == NULL) is unsupported and raises NotImplementedError
 * naming the concrete (sub)type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyObject *o, PyObject *i, PyObject *v) {
  __Pyx_TypeName tname;
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_11__setitem__(o, i, v);
  tname = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
  PyErr_Format(PyExc_NotImplementedError,
    "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tname);
  __Pyx_DECREF_TypeName(tname);
  return -1;
}

/* tp_richcompare: only == and != are implemented.  Both route through the
 * generated __eq__; != is computed as the boolean negation of its result
 * (unless __eq__ returned NotImplemented, which propagates unchanged).
 * Any other operator yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ExcludedDeviceInfo(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property thunks for ExcludedDeviceInfo: forward the getset slots to the
 * Cython-generated __get__/__set__ wrappers.  A setter called with
 * v == NULL means attribute deletion, which raises NotImplementedError. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  /* read-only property (no matching setter is registered) */
  return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_pci_info(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_pci_info(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_8pci_info_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_uuid(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_uuid(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_4uuid_3__set__(o, v);
}

/* Method table for ExcludedDeviceInfo: from_data/from_ptr alternate
 * constructors plus Cython's pickle helpers, FASTCALL with keywords. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ExcludedDeviceInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for ExcludedDeviceInfo: "ptr" is read-only (NULL setter);
 * pci_info and uuid are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ExcludedDeviceInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"pci_info", __pyx_getprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_pci_info, __pyx_setprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_pci_info, PyDoc_STR("PciInfo: "), 0},
  {"uuid", __pyx_getprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_uuid, __pyx_setprop_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_uuid, PyDoc_STR("~_numpy.int8: (array of length 80)."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build (Limited API / heap types): ExcludedDeviceInfo is created
 * at runtime from this slot list via PyType_FromSpec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_doc, (void *)PyDoc_STR("ExcludedDeviceInfo()\n\nEmpty-initialize an instance of `nvmlExcludedDeviceInfo_t`.\n\n\n.. seealso:: `nvmlExcludedDeviceInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ExcludedDeviceInfo},
  {0, 0},  /* sentinel */
};
/* Spec: fully-qualified name, instance size, itemsize 0, GC-enabled flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo_spec = {
  "cuda.bindings._nvml.ExcludedDeviceInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo_slots,
};
#else

/* Classic build: statically initialized slot tables and PyTypeObject for
 * ExcludedDeviceInfo.  Only nb_int is populated (supports int(obj)). */
static PyNumberMethods __pyx_tp_as_number_ExcludedDeviceInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment (obj[i] = v) is supported; no length or lookup. */
static PyMappingMethods __pyx_tp_as_mapping_ExcludedDeviceInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*mp_ass_subscript*/
};

/* Static type object; field order follows the CPython PyTypeObject layout
 * and must not be rearranged. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ExcludedDeviceInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ExcludedDeviceInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ExcludedDeviceInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ExcludedDeviceInfo()\n\nEmpty-initialize an instance of `nvmlExcludedDeviceInfo_t`.\n\n\n.. seealso:: `nvmlExcludedDeviceInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessDetailList_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ProcessDetailList_v1;

/* tp_new: allocate a ProcessDetailList_v1 instance, attach the C-level
 * vtable, and initialise both object fields (_owner and _refs) to None.
 * ctor args are ignored here; __init__ consumes them later. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *self;
  if (unlikely(!obj)) return 0;
  self = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetailList_v1;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_INCREF(Py_None);
  self->_refs = (PyObject*)Py_None;
  return obj;
}

/* tp_dealloc for ProcessDetailList_v1.  Order matters: run tp_finalize
 * (may resurrect the object), untrack from the GC, call the Cython
 * __dealloc__ with the refcount temporarily bumped and any pending
 * exception saved, drop owned references, then release the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Call tp_finalize (if defined and not already run); if it resurrected
   * the object, stop deallocating. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetailList_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Temporarily raise the refcount so the user-visible __dealloc__ cannot
     * trigger a recursive dealloc; preserve any in-flight exception. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traversal: visit the type object (via __Pyx_call_type_traverse) and
 * the two PyObject references this instance holds, _owner and _refs. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  if (p->_refs) {
    e = (*v)(p->_refs, a); if (e) return e;
  }
  return 0;
}

/* GC clear: break reference cycles through _owner and _refs by replacing
 * each with a fresh None reference (never NULL), then dropping the old
 * values. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_refs);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: obj[i] = v forwards to the generated __setitem__;
 * `del obj[i]` (v == NULL) is unsupported and raises NotImplementedError
 * naming the concrete (sub)type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyObject *o, PyObject *i, PyObject *v) {
  __Pyx_TypeName tname;
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_11__setitem__(o, i, v);
  tname = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
  PyErr_Format(PyExc_NotImplementedError,
    "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tname);
  __Pyx_DECREF_TypeName(tname);
  return -1;
}

/* tp_richcompare: only == and != are implemented.  Both route through the
 * generated __eq__; != is computed as the boolean negation of its result
 * (unless __eq__ returned NotImplemented, which propagates unchanged).
 * Any other operator yields NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessDetailList_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property thunks for ProcessDetailList_v1: forward the getset slots to the
 * Cython-generated __get__/__set__ wrappers.  A setter called with
 * v == NULL means attribute deletion, which raises NotImplementedError. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  /* read-only property (no matching setter is registered) */
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_4mode_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_proc_array(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_proc_array(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_10proc_array_3__set__(o, v);
}

/* Method table for ProcessDetailList_v1: from_data/from_ptr alternate
 * constructors plus Cython's pickle helpers, FASTCALL with keywords. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ProcessDetailList_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_18__setstate_cython__},
  {0, 0, 0, 0}  /* sentinel */
};

/* Property table for ProcessDetailList_v1: "ptr" is read-only (NULL
 * setter); version, mode and proc_array are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ProcessDetailList_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_version, PyDoc_STR("int: Struct version, MUST be nvmlProcessDetailList_v1."), 0},
  {"mode", __pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_mode, __pyx_setprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_mode, PyDoc_STR("int: Process mode(Compute/Graphics/MPSCompute)"), 0},
  {"proc_array", __pyx_getprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_proc_array, __pyx_setprop_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_proc_array, PyDoc_STR("int: Process array."), 0},
  {0, 0, 0, 0, 0}  /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec build (Limited API / heap types): ProcessDetailList_v1 is
 * created at runtime from this slot list via PyType_FromSpec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ProcessDetailList_v1()\n\nEmpty-initialize an instance of `nvmlProcessDetailList_v1_t`.\n\n\n.. seealso:: `nvmlProcessDetailList_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetailList_v1},
  {0, 0},  /* sentinel */
};
/* Spec: fully-qualified name, instance size, itemsize 0, GC-enabled flags. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1_spec = {
  "cuda.bindings._nvml.ProcessDetailList_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1_slots,
};
#else

/* Classic build: statically initialized slot tables for
 * ProcessDetailList_v1.  Only nb_int is populated (supports int(obj)). */
static PyNumberMethods __pyx_tp_as_number_ProcessDetailList_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment (obj[i] = v) is supported; no length or lookup. */
static PyMappingMethods __pyx_tp_as_mapping_ProcessDetailList_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ProcessDetailList_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ProcessDetailList_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ProcessDetailList_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ProcessDetailList_v1()\n\nEmpty-initialize an instance of `nvmlProcessDetailList_v1_t`.\n\n\n.. seealso:: `nvmlProcessDetailList_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ProcessDetailList_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Shared vtable storage for BridgeChipHierarchy; a pointer to it is
 * installed on every new instance in tp_new below. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_BridgeChipHierarchy __pyx_vtable_4cuda_8bindings_5_nvml_BridgeChipHierarchy;

/* tp_new: allocate a BridgeChipHierarchy instance, wire up its vtable and
 * initialize _owner to None (never NULL — tp_clear/tp_dealloc rely on the
 * field always holding a valid object).  args/kwargs are handled later by
 * tp_init, hence unused here. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipHierarchy;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for BridgeChipHierarchy.
 * Order: run tp_finalize once (if present), untrack from the GC, call the
 * user-level __dealloc__ with the refcount temporarily raised so temporary
 * references created inside it cannot re-trigger deallocation, drop the
 * _owner reference, then release the memory via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipHierarchy) {
      /* The finalizer may resurrect the object; if so, abort deallocation. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the user __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the (heap) type object via the Cython helper, then the
 * _owner reference.  _owner presumably keeps the provider of the underlying
 * nvml struct memory alive — confirm in the generating .pyx. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: break the potential reference cycle through _owner by resetting
 * it to None rather than NULL, preserving the invariant (set in tp_new) that
 * the field always holds a valid object.  The old value is dropped last so
 * the field is never observed in a half-cleared state. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: v != NULL means `obj[i] = v` (delegated to __setitem__);
 * v == NULL means `del obj[i]`, which this type does not support and reports
 * as NotImplementedError with the fully qualified type name. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only __eq__ is user-defined.  Py_EQ delegates directly;
 * Py_NE calls __eq__ and logically negates the result unless it returned
 * NotImplemented (or failed), in which case it is passed through unchanged.
 * All other comparison ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_BridgeChipHierarchy(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property accessors for BridgeChipHierarchy: thin C-slot adapters that
 * forward to the generated __get__/__set__ wrappers.  Setter adapters treat
 * v == NULL (attribute deletion) as unsupported and raise
 * NotImplementedError("__del__"). */

/* getter for the read-only `ptr` property (pointer address as int). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_3ptr_1__get__(o);
}

/* getter for `bridge_chip_info`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_chip_info(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_1__get__(o);
}

/* setter for `bridge_chip_info`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_chip_info(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16bridge_chip_info_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getter for `bridge_count`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_1__get__(o);
}

/* setter for `bridge_count`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12bridge_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for BridgeChipHierarchy: the public from_data/from_ptr
 * constructors plus the pickling hooks, all using the FASTCALL|KEYWORDS
 * calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_BridgeChipHierarchy[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table: `ptr` is read-only (no setter); the struct fields are
 * read-write through the adapters above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_BridgeChipHierarchy[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"bridge_chip_info", __pyx_getprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_chip_info, __pyx_setprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_chip_info, PyDoc_STR("BridgeChipInfo: "), 0},
  {"bridge_count", __pyx_getprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_count, __pyx_setprop_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_bridge_count, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot array + spec for BridgeChipHierarchy on the CYTHON_USE_TYPE_SPECS
 * build path.  Must describe the same behaviour as the static PyTypeObject
 * in the #else branch below; both are generated — regenerate, don't edit. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_doc, (void *)PyDoc_STR("BridgeChipHierarchy()\n\nEmpty-initialize an instance of `nvmlBridgeChipHierarchy_t`.\n\n\n.. seealso:: `nvmlBridgeChipHierarchy_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipHierarchy},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy_spec = {
  "cuda.bindings._nvml.BridgeChipHierarchy",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy_slots,
};
#else

/* Number-protocol table for BridgeChipHierarchy (static-PyTypeObject build
 * path): only nb_int is populated, delegating to the generated __int__. */
static PyNumberMethods __pyx_tp_as_number_BridgeChipHierarchy = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping-protocol table: item assignment only; no reads, and deletion
 * raises inside the ass_subscript wrapper. */
static PyMappingMethods __pyx_tp_as_mapping_BridgeChipHierarchy = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject for BridgeChipHierarchy, used when
 * CYTHON_USE_TYPE_SPECS is off.  Mirrors the slot-array definition in the
 * #if branch above; regenerate rather than hand-edit. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""BridgeChipHierarchy", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_BridgeChipHierarchy, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_BridgeChipHierarchy, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("BridgeChipHierarchy()\n\nEmpty-initialize an instance of `nvmlBridgeChipHierarchy_t`.\n\n\n.. seealso:: `nvmlBridgeChipHierarchy_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_BridgeChipHierarchy, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Shared vtable storage for Sample; installed on each instance in tp_new. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_Sample __pyx_vtable_4cuda_8bindings_5_nvml_Sample;

/* tp_new: allocate a Sample instance, wire up its vtable and initialize the
 * _data field to None (never NULL; tp_clear preserves that invariant).
 * args/kwargs are handled by tp_init, hence unused here. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_Sample(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_Sample;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for Sample: run tp_finalize once (if present), untrack from the
 * GC, drop the _data reference, then release the memory via tp_free.  Unlike
 * BridgeChipHierarchy, Sample has no user __dealloc__, so no refcount
 * resurrection dance is needed. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Sample(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Sample) {
      /* The finalizer may resurrect the object; if so, abort deallocation. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for Sample: visit the (heap) type object, then the _data
 * reference (presumably the backing buffer/array object — confirm in the
 * generating .pyx). */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_Sample(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* GC clear for Sample: reset _data to None (not NULL) so the field always
 * holds a valid object; the old value is released last. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_Sample(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item adapter: implements integer sequence indexing by boxing the C
 * index into a Python int and delegating to the type's own mp_subscript.
 * The slot is fetched directly on builds with type slots, or via
 * PyType_GetSlot on Limited-API builds. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_Sample(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for Sample: v != NULL means `obj[i] = v` (delegated to
 * __setitem__); v == NULL means `del obj[i]`, which is unsupported and
 * reported as NotImplementedError with the fully qualified type name. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Sample(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for Sample: Py_EQ delegates to __eq__; Py_NE negates the
 * __eq__ result unless it returned NotImplemented (or failed), which is
 * passed through unchanged.  All other ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Sample(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_6Sample_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property accessors for Sample: thin C-slot adapters forwarding to the
 * generated __get__/__set__ wrappers.  Setters reject attribute deletion
 * (v == NULL) with NotImplementedError("__del__"). */

/* getter for the read-only `ptr` property (pointer address as int). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Sample_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_3ptr_1__get__(o);
}

/* getter for `time_stamp`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Sample_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_10time_stamp_1__get__(o);
}

/* setter for `time_stamp`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_6Sample_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_10time_stamp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getter for `sample_value`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Sample_sample_value(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_12sample_value_1__get__(o);
}

/* setter for `sample_value`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_6Sample_sample_value(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_12sample_value_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getter for the internal, read-only `_data` attribute. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_6Sample__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_6Sample_5_data_1__get__(o);
}

/* Method table for Sample: from_data/from_ptr constructors plus pickling
 * hooks, all using the FASTCALL|KEYWORDS calling convention. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_Sample[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_6Sample_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set table: `ptr` and `_data` are read-only (no setter); the struct
 * fields are read-write through the adapters above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_Sample[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_6Sample_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_6Sample_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_6Sample_time_stamp, PyDoc_STR("Union[~_numpy.uint64, int]: "), 0},
  {"sample_value", __pyx_getprop_4cuda_8bindings_5_nvml_6Sample_sample_value, __pyx_setprop_4cuda_8bindings_5_nvml_6Sample_sample_value, PyDoc_STR("value_dtype: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_6Sample__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot array + spec for Sample on the CYTHON_USE_TYPE_SPECS build path.
 * Sample is array-like, so it additionally wires up sq_length/sq_item and
 * mp_length/mp_subscript.  Must match the static PyTypeObject in the #else
 * branch below; regenerate rather than hand-edit. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_Sample_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_Sample},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_doc, (void *)PyDoc_STR("Sample(size=1)\n\nEmpty-initialize an array of `nvmlSample_t`.\n\nThe resulting object is of length `size` and of dtype `sample_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlSample_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_Sample},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_6Sample_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_Sample},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_Sample_spec = {
  "cuda.bindings._nvml.Sample",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_Sample_slots,
};
#else

/* Number-protocol table for Sample (static-PyTypeObject build path): only
 * nb_int is populated, delegating to the generated __int__. */
static PyNumberMethods __pyx_tp_as_number_Sample = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Sample_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence-protocol table: len() and integer indexing (sq_item boxes the
 * index and forwards to mp_subscript). */
static PySequenceMethods __pyx_tp_as_sequence_Sample = {
  __pyx_pw_4cuda_8bindings_5_nvml_6Sample_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_Sample, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping-protocol table: full read/write subscripting for the array-like
 * Sample type; deletion raises inside the ass_subscript wrapper. */
static PyMappingMethods __pyx_tp_as_mapping_Sample = {
  __pyx_pw_4cuda_8bindings_5_nvml_6Sample_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Sample_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_Sample, /*mp_ass_subscript*/
};

/* Statically allocated PyTypeObject for Sample, used when
 * CYTHON_USE_TYPE_SPECS is off.  Mirrors the slot-array definition in the
 * #if branch above; regenerate rather than hand-edit. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_Sample = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Sample", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_Sample, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_6Sample_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_Sample, /*tp_as_number*/
  &__pyx_tp_as_sequence_Sample, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_Sample, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("Sample(size=1)\n\nEmpty-initialize an array of `nvmlSample_t`.\n\nThe resulting object is of length `size` and of dtype `sample_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlSample_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_Sample, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_Sample, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_Sample, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_Sample, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_Sample, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_6Sample_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_Sample, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Shared vtable storage for VgpuInstanceUtilizationInfo_v1; installed on
 * each new instance in tp_new. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;

/* tp_new: allocate a VgpuInstanceUtilizationInfo_v1 instance, wire up its
 * vtable and initialize _data to None (never NULL; tp_clear preserves that
 * invariant).  args/kwargs are handled by tp_init, hence unused here. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuInstanceUtilizationInfo_v1: run tp_finalize once (if
 * present), untrack from the GC, drop the _data reference, then release the
 * memory via tp_free.  No user __dealloc__ for this type. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) {
      /* The finalizer may resurrect the object; if so, abort deallocation. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for VgpuInstanceUtilizationInfo_v1: visit the (heap) type
 * object, then the _data reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* GC clear: reset _data to None (not NULL) so the field always holds a
 * valid object; the old value is released last. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item adapter: integer sequence indexing via boxing the C index into a
 * Python int and delegating to the type's mp_subscript slot (fetched
 * directly, or via PyType_GetSlot on Limited-API builds). */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript: v != NULL delegates `obj[i] = v` to __setitem__;
 * v == NULL (`del obj[i]`) is unsupported and raises NotImplementedError
 * with the fully qualified type name. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ delegates to __eq__; Py_NE negates the __eq__
 * result unless it returned NotImplemented (or failed), which is passed
 * through unchanged.  All other comparison ops return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property accessors for VgpuInstanceUtilizationInfo_v1: thin C-slot
 * adapters forwarding to the generated __get__/__set__ wrappers.  Setters
 * reject attribute deletion (v == NULL) with NotImplementedError("__del__"). */

/* getter for the read-only `ptr` property (pointer address as int). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3ptr_1__get__(o);
}

/* getter for `time_stamp`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_1__get__(o);
}

/* setter for `time_stamp`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_10time_stamp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getter for `vgpu_instance`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_vgpu_instance(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_1__get__(o);
}

/* setter for `vgpu_instance`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_vgpu_instance(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_13vgpu_instance_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* getter for `sm_util`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_sm_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_1__get__(o);
}

/* setter for `sm_util`; deletion is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_sm_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7sm_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_mem_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_mem_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8mem_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_enc_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_enc_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8enc_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_dec_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_dec_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8dec_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_jpg_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_jpg_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8jpg_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_ofa_util(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_ofa_util(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_8ofa_util_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5_data_1__get__(o);
}

/* Method table for VgpuInstanceUtilizationInfo_v1: the public class/static
 * helpers plus pickle support, all dispatched through Cython's FASTCALL
 * wrappers.  Terminated by the all-zero sentinel entry. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table for VgpuInstanceUtilizationInfo_v1, wiring the
 * property thunks above to attribute names.  Entries with a NULL setter
 * (`ptr`, `_data`) are read-only.  Terminated by the zero sentinel. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_time_stamp, PyDoc_STR("Union[~_numpy.uint64, int]: CPU Timestamp in microseconds."), 0},
  {"vgpu_instance", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_vgpu_instance, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_vgpu_instance, PyDoc_STR("Union[~_numpy.uint32, int]: vGPU Instance"), 0},
  {"sm_util", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_sm_util, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_sm_util, PyDoc_STR("value_dtype: SM (3D/Compute) Util Value."), 0},
  {"mem_util", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_mem_util, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_mem_util, PyDoc_STR("value_dtype: Frame Buffer Memory Util Value."), 0},
  {"enc_util", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_enc_util, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_enc_util, PyDoc_STR("value_dtype: Encoder Util Value."), 0},
  {"dec_util", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_dec_util, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_dec_util, PyDoc_STR("value_dtype: Decoder Util Value."), 0},
  {"jpg_util", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_jpg_util, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_jpg_util, PyDoc_STR("value_dtype: Jpeg Util Value."), 0},
  {"ofa_util", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_ofa_util, __pyx_setprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_ofa_util, PyDoc_STR("value_dtype: Ofa Util Value."), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1.
 * Two build flavors: with CYTHON_USE_TYPE_SPECS the type is created at
 * runtime from PyType_Slot/PyType_Spec tables (limited-API friendly);
 * otherwise a classic PyTypeObject (plus its number/sequence/mapping
 * sub-tables) is statically initialized.  Both flavors expose the same
 * slots: repr, int conversion, len, item get/set, GC hooks, rich compare,
 * methods, getsets, init and new. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuInstanceUtilizationInfo_v1(size=1)\n\nEmpty-initialize an array of `nvmlVgpuInstanceUtilizationInfo_v1_t`.\n\nThe resulting object is of length `size` and of dtype `vgpu_instance_utilization_info_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlVgpuInstanceUtilizationInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuInstanceUtilizationInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1_slots,
};
#else

/* Classic (non-type-spec) build: statically initialized slot sub-tables. */
static PyNumberMethods __pyx_tp_as_number_VgpuInstanceUtilizationInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PySequenceMethods __pyx_tp_as_sequence_VgpuInstanceUtilizationInfo_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuInstanceUtilizationInfo_v1 = {
  __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuInstanceUtilizationInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuInstanceUtilizationInfo_v1, /*tp_as_number*/
  &__pyx_tp_as_sequence_VgpuInstanceUtilizationInfo_v1, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuInstanceUtilizationInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuInstanceUtilizationInfo_v1(size=1)\n\nEmpty-initialize an array of `nvmlVgpuInstanceUtilizationInfo_v1_t`.\n\nThe resulting object is of length `size` and of dtype `vgpu_instance_utilization_info_v1_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlVgpuInstanceUtilizationInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_FieldValue __pyx_vtable_4cuda_8bindings_5_nvml_FieldValue;

/* tp_new: allocate a FieldValue instance, wire up its vtable pointer and
 * initialize the `_data` attribute to None.  The args/kwargs are consumed
 * later by tp_init, so they are ignored here. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_FieldValue(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *obj = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!obj)) return 0;
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)obj;
  self->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_FieldValue;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  return obj;
}

/* tp_dealloc for FieldValue: optionally run tp_finalize (resurrection-
 * aware), untrack from GC, drop the owned `_data` reference, then free the
 * object via the type's tp_free; type-spec builds also release the
 * reference held on the heap type itself. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FieldValue(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only call the finalizer from the dealloc of this exact type; a nonzero
   * return from PyObject_CallFinalizerFromDealloc means the object was
   * resurrected and must not be freed. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FieldValue) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference from each instance; release it last. */
  Py_DECREF(tp);
  #endif
}

static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_FieldValue(PyObject *o, visitproc v, void *a) {
  /* GC traversal: visit the (heap) type when required, then the single
   * owned PyObject* field, `_data`. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err)
    return err;
  if (self->_data) {
    err = (*v)(self->_data, a);
    if (err)
      return err;
  }
  return 0;
}

static int __pyx_tp_clear_4cuda_8bindings_5_nvml_FieldValue(PyObject *o) {
  /* GC clear: reset `_data` to None *before* releasing the old reference,
   * so the slot never holds a dangling pointer while the DECREF runs. */
  struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *self = (struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *)o;
  PyObject *old = (PyObject *)self->_data;
  Py_INCREF(Py_None);
  self->_data = Py_None;
  Py_XDECREF(old);
  return 0;
}

static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_FieldValue(PyObject *o, Py_ssize_t i) {
  /* Sequence-style item access (sq_item): box the C index into a Python
   * int and delegate to the type's mapping subscript slot. */
  PyObject *result;
  PyObject *index = PyLong_FromSsize_t(i);
  if (!index)
    return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
  #else
  result = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, index);
  #endif
  Py_DECREF(index);
  return result;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FieldValue(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript serves both `obj[i] = v` (v != NULL) and
   * `del obj[i]` (v == NULL); deletion is rejected. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_13__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FieldValue(PyObject *o1, PyObject *o2, int op) {
  /* Only == and != are defined; != is synthesized by negating the __eq__
   * result.  All other comparisons answer NotImplemented. */
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  {
    PyObject *eq = __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9__eq__(o1, o2);
    if (likely(eq && eq != Py_NotImplemented)) {
      int truth = __Pyx_PyObject_IsTrue(eq);
      Py_DECREF(eq);
      if (unlikely(truth < 0)) return NULL;
      eq = truth ? Py_False : Py_True;
      Py_INCREF(eq);
    }
    return eq;
  }
}

/* -- Property thunks for FieldValue ---------------------------------------
 * Each getter/setter forwards to the corresponding Cython-generated
 * __get__/__set__ wrapper.  Setters reject attribute deletion (v == NULL)
 * with NotImplementedError.  `ptr` and `_data` are read-only. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_field_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8field_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_field_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8field_id_3__set__(o, v);
  }
  else {
    /* v == NULL means `del obj.field_id`, which is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_scope_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_scope_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_8scope_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_timestamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_timestamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_9timestamp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_latency_usec(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_latency_usec(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_12latency_usec_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_value_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_10value_type_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_value_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_10value_type_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_nvml_return(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_nvml_return(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11nvml_return_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_value(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5value_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_value(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5value_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Read-only accessor for the backing `_data` object. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5_data_1__get__(o);
}

/* Method table for FieldValue: public constructors-from-memory plus pickle
 * support, dispatched through Cython's FASTCALL wrappers.  Terminated by
 * the all-zero sentinel entry. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_FieldValue[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_10FieldValue_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Get/set descriptor table for FieldValue, wiring the property thunks
 * above to attribute names.  Entries with a NULL setter (`ptr`, `_data`)
 * are read-only.  Terminated by the zero sentinel. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_FieldValue[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"field_id", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_field_id, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_field_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"scope_id", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_scope_id, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_scope_id, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"timestamp", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_timestamp, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_timestamp, PyDoc_STR("Union[~_numpy.int64, int]: "), 0},
  {"latency_usec", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_latency_usec, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_latency_usec, PyDoc_STR("Union[~_numpy.int64, int]: "), 0},
  {"value_type", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_value_type, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_value_type, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"nvml_return", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_nvml_return, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_nvml_return, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"value", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue_value, __pyx_setprop_4cuda_8bindings_5_nvml_10FieldValue_value, PyDoc_STR("value_dtype: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_10FieldValue__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cuda.bindings._nvml.FieldValue.  Two build flavors:
 * with CYTHON_USE_TYPE_SPECS the type is created at runtime from
 * PyType_Slot/PyType_Spec tables (limited-API friendly); otherwise a
 * classic PyTypeObject (plus its number/sequence/mapping sub-tables) is
 * statically initialized.  Both expose the same slots: repr, int
 * conversion, len, item get/set, GC hooks, rich compare, methods, getsets,
 * init and new. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_FieldValue_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_FieldValue},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_doc, (void *)PyDoc_STR("FieldValue(size=1)\n\nEmpty-initialize an array of `nvmlFieldValue_t`.\n\nThe resulting object is of length `size` and of dtype `field_value_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlFieldValue_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_FieldValue},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_FieldValue},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_FieldValue_spec = {
  "cuda.bindings._nvml.FieldValue",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_FieldValue_slots,
};
#else

/* Classic (non-type-spec) build: statically initialized slot sub-tables. */
static PyNumberMethods __pyx_tp_as_number_FieldValue = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PySequenceMethods __pyx_tp_as_sequence_FieldValue = {
  __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_FieldValue, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_FieldValue = {
  __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_FieldValue, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_FieldValue = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""FieldValue", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_FieldValue, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_FieldValue, /*tp_as_number*/
  &__pyx_tp_as_sequence_FieldValue, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_FieldValue, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("FieldValue(size=1)\n\nEmpty-initialize an array of `nvmlFieldValue_t`.\n\nThe resulting object is of length `size` and of dtype `field_value_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlFieldValue_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_FieldValue, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_FieldValue, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_FieldValue, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_FieldValue, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_FieldValue, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_10FieldValue_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_FieldValue, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level method table (vtable) storage for the GpuThermalSettings extension type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuThermalSettings __pyx_vtable_4cuda_8bindings_5_nvml_GpuThermalSettings;

/* tp_new for GpuThermalSettings: allocate the instance, install the vtable
 * pointer, and initialize the `_owner` slot to None (with a reference).
 * The args/kwargs tuples are ignored here; they are handled by __init__. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuThermalSettings(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuThermalSettings;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for GpuThermalSettings.
 * Order matters: run tp_finalize (if any) first, untrack from the GC, then
 * call the user-level __dealloc__ with the refcount temporarily bumped so
 * that temporary references taken inside __dealloc__ cannot re-trigger
 * deallocation. A pending exception (if any) is saved and restored around
 * that call. Finally the `_owner` reference is dropped and the memory is
 * released via tp_free (plus a DECREF of the heap type under type specs). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuThermalSettings(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuThermalSettings) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* finalizer resurrected the object */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    /* temporary refcount bump guards against re-entrant dealloc */
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse for GpuThermalSettings: visit the type (heap-type support)
 * and then the `_owner` reference, propagating the first non-zero result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuThermalSettings(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err != 0)
    return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err != 0)
      return err;
  }
  return 0;
}

/* GC clear for GpuThermalSettings: drop the `_owner` reference, resetting
 * the slot to None so the object stays in a valid state. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuThermalSettings(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *)o;
  PyObject *old_owner = self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for GpuThermalSettings: assignment is routed to the
 * generated __setitem__; subscript deletion is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuThermalSettings(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* `del obj[i]` path: raise NotImplementedError with the type name. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_11__setitem__(o, i, v);
}

/* tp_richcompare for GpuThermalSettings: == delegates to __eq__, != is the
 * boolean negation of __eq__, and all other operators answer NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuThermalSettings(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_9__eq__(o1, o2);
  if (unlikely(!res) || res == Py_NotImplemented)
    return res; /* propagate error or NotImplemented unchanged */
  {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0))
      return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
    return res;
  }
}

/* Property accessors for GpuThermalSettings. `ptr` is read-only; `sensor`
 * and `count` are read/write. Deleting an attribute is not supported. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_sensor(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_sensor(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_6sensor_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5count_3__set__(o, v);
}

/* Python-visible method table for GpuThermalSettings. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuThermalSettings[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18GpuThermalSettings_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property (getset) table for GpuThermalSettings; `ptr` has no setter. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuThermalSettings[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"sensor", __pyx_getprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_sensor, __pyx_setprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_sensor, PyDoc_STR("_py_anon_pod0: "), 0},
  {"count", __pyx_getprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_count, __pyx_setprop_4cuda_8bindings_5_nvml_18GpuThermalSettings_count, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / type-spec build: slot table + spec consumed by PyType_FromSpec(). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_doc, (void *)PyDoc_STR("GpuThermalSettings()\n\nEmpty-initialize an instance of `nvmlGpuThermalSettings_t`.\n\n\n.. seealso:: `nvmlGpuThermalSettings_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuThermalSettings},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings_spec = {
  "cuda.bindings._nvml.GpuThermalSettings",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings_slots,
};
#else
/* Regular build: statically initialized PyTypeObject and its sub-tables. */

static PyNumberMethods __pyx_tp_as_number_GpuThermalSettings = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_GpuThermalSettings = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuThermalSettings, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuThermalSettings", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuThermalSettings, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuThermalSettings, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuThermalSettings()\n\nEmpty-initialize an instance of `nvmlGpuThermalSettings_t`.\n\n\n.. seealso:: `nvmlGpuThermalSettings_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18GpuThermalSettings_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuThermalSettings, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level method table (vtable) storage for the ClkMonStatus extension type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ClkMonStatus __pyx_vtable_4cuda_8bindings_5_nvml_ClkMonStatus;

/* tp_new for ClkMonStatus: allocate the instance, install the vtable
 * pointer, and initialize the `_owner` slot to None (with a reference).
 * The args/kwargs tuples are ignored here; they are handled by __init__. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonStatus(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonStatus;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for ClkMonStatus.
 * Runs tp_finalize (if any) first, untracks from the GC, then calls the
 * user-level __dealloc__ with the refcount temporarily bumped so temporary
 * references taken inside it cannot re-trigger deallocation; any pending
 * exception is preserved across that call. Finally drops `_owner` and frees
 * the memory via tp_free (DECREF'ing the heap type under type specs). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonStatus(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonStatus) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* finalizer resurrected the object */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    /* temporary refcount bump guards against re-entrant dealloc */
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse for ClkMonStatus: visit the type (heap-type support) and
 * then the `_owner` reference, propagating the first non-zero result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ClkMonStatus(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err != 0)
    return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err != 0)
      return err;
  }
  return 0;
}

/* GC clear for ClkMonStatus: drop the `_owner` reference, resetting the
 * slot to None so the object stays in a valid state. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ClkMonStatus(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *)o;
  PyObject *old_owner = self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript for ClkMonStatus: assignment is routed to the generated
 * __setitem__; subscript deletion is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClkMonStatus(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* `del obj[i]` path: raise NotImplementedError with the type name. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_11__setitem__(o, i, v);
}

/* tp_richcompare for ClkMonStatus: == delegates to __eq__, != is the
 * boolean negation of __eq__, and all other operators answer NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClkMonStatus(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_9__eq__(o1, o2);
  if (unlikely(!res) || res == Py_NotImplemented)
    return res; /* propagate error or NotImplemented unchanged */
  {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0))
      return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
    return res;
  }
}

/* Property accessors for ClkMonStatus. `ptr` is read-only; `clk_mon_list`,
 * `b_global_status` and `clk_mon_list_size` are read/write. Attribute
 * deletion is not supported. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_12clk_mon_list_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_b_global_status(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12ClkMonStatus_b_global_status(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15b_global_status_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17clk_mon_list_size_3__set__(o, v);
}

/* Python-visible method table for ClkMonStatus. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ClkMonStatus[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_12ClkMonStatus_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property (getset) table for ClkMonStatus; `ptr` has no setter. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ClkMonStatus[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"clk_mon_list", __pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list, __pyx_setprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list, PyDoc_STR("ClkMonFaultInfo: "), 0},
  {"b_global_status", __pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_b_global_status, __pyx_setprop_4cuda_8bindings_5_nvml_12ClkMonStatus_b_global_status, PyDoc_STR("int: "), 0},
  {"clk_mon_list_size", __pyx_getprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list_size, __pyx_setprop_4cuda_8bindings_5_nvml_12ClkMonStatus_clk_mon_list_size, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Limited-API / type-spec build: slot table + spec consumed by PyType_FromSpec(). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_doc, (void *)PyDoc_STR("ClkMonStatus()\n\nEmpty-initialize an instance of `nvmlClkMonStatus_t`.\n\n\n.. seealso:: `nvmlClkMonStatus_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ClkMonStatus},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonStatus},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus_spec = {
  "cuda.bindings._nvml.ClkMonStatus",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus_slots,
};
#else
/* Regular build: statically initialized PyTypeObject and its sub-tables. */

static PyNumberMethods __pyx_tp_as_number_ClkMonStatus = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_ClkMonStatus = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ClkMonStatus, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ClkMonStatus", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ClkMonStatus, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ClkMonStatus, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ClkMonStatus()\n\nEmpty-initialize an instance of `nvmlClkMonStatus_t`.\n\n\n.. seealso:: `nvmlClkMonStatus_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_12ClkMonStatus_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ClkMonStatus, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* C-level method table (vtable) storage for the ProcessesUtilizationInfo_v1 extension type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;

/* tp_new for ProcessesUtilizationInfo_v1: allocate the instance, install
 * the vtable pointer, and initialize both object slots (`_owner`, `_refs`)
 * to None, each with its own reference. The args/kwargs tuples are ignored
 * here; they are handled by __init__. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for ProcessesUtilizationInfo_v1.
 * Runs tp_finalize (if any) first, untracks from the GC, then calls the
 * user-level __dealloc__ with the refcount temporarily bumped so temporary
 * references taken inside it cannot re-trigger deallocation; any pending
 * exception is preserved across that call. Finally drops `_owner` and
 * `_refs` and frees the memory via tp_free (DECREF'ing the heap type under
 * type specs). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return; /* finalizer resurrected the object */
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    /* temporary refcount bump guards against re-entrant dealloc */
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse for ProcessesUtilizationInfo_v1: visit the type (heap-type
 * support), then the `_owner` and `_refs` references, propagating the first
 * non-zero result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)o;
  int err = __Pyx_call_type_traverse(o, 1, v, a);
  if (err != 0)
    return err;
  if (self->_owner != NULL) {
    err = v(self->_owner, a);
    if (err != 0)
      return err;
  }
  if (self->_refs != NULL) {
    err = v(self->_refs, a);
    if (err != 0)
      return err;
  }
  return 0;
}

/* GC clear for ProcessesUtilizationInfo_v1: drop the `_owner` and `_refs`
 * references, resetting both slots to None so the object stays valid. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *)o;
  PyObject *old;
  old = self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old);
  old = self->_refs;
  Py_INCREF(Py_None);
  self->_refs = ((PyObject*)Py_None);
  Py_XDECREF(old);
  return 0;
}

/* mp_ass_subscript for ProcessesUtilizationInfo_v1: assignment is routed to
 * the generated __setitem__; subscript deletion is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* `del obj[i]` path: raise NotImplementedError with the type name. */
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_11__setitem__(o, i, v);
}

/* tp_richcompare for ProcessesUtilizationInfo_v1: == delegates to __eq__,
 * != is the boolean negation of __eq__, and all other operators answer
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *res;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  res = __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_9__eq__(o1, o2);
  if (unlikely(!res) || res == Py_NotImplemented)
    return res; /* propagate error or NotImplemented unchanged */
  {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0))
      return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
    return res;
  }
}

/* Property accessors for ProcessesUtilizationInfo_v1. `ptr` is read-only;
 * `version`, `last_seen_time_stamp` and `proc_util_array` are read/write.
 * Attribute deletion is not supported. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_last_seen_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_last_seen_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_proc_util_array(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_proc_util_array(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v == NULL) { /* attribute deletion */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15proc_util_array_3__set__(o, v);
}

/* Python-visible method table for ProcessesUtilizationInfo_v1. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property (getset) table for ProcessesUtilizationInfo_v1; `ptr` has no setter. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"last_seen_time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_last_seen_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_last_seen_time_stamp, PyDoc_STR("int: Return only samples with timestamp greater than lastSeenTimeStamp."), 0},
  {"proc_util_array", __pyx_getprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_proc_util_array, __pyx_setprop_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_proc_util_array, PyDoc_STR("int: The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA."), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cuda.bindings._nvml.ProcessesUtilizationInfo_v1.
 * Two build variants: a PyType_Spec/PyType_Slot pair (used with
 * PyType_FromSpec, e.g. for the limited API) when CYTHON_USE_TYPE_SPECS is
 * set, otherwise a statically initialized PyTypeObject.  Both describe the
 * same type: GC-enabled, subclassable, with repr/int/item-assignment/
 * richcompare hooks plus the method and getset tables above. */
#if CYTHON_USE_TYPE_SPECS
/* Slot table consumed via the PyType_Spec below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("ProcessesUtilizationInfo_v1()\n\nEmpty-initialize an instance of `nvmlProcessesUtilizationInfo_v1_t`.\n\n\n.. seealso:: `nvmlProcessesUtilizationInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1_spec = {
  "cuda.bindings._nvml.ProcessesUtilizationInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1_slots,
};
#else

/* Only nb_int is populated: the type supports int() conversion and nothing
 * else numeric. */
static PyNumberMethods __pyx_tp_as_number_ProcessesUtilizationInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only subscript assignment is supported (no len(), no item read via []). */
static PyMappingMethods __pyx_tp_as_mapping_ProcessesUtilizationInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*mp_ass_subscript*/
};

/* Statically initialized type object (non-type-specs builds). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ProcessesUtilizationInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ProcessesUtilizationInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ProcessesUtilizationInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ProcessesUtilizationInfo_v1()\n\nEmpty-initialize an instance of `nvmlProcessesUtilizationInfo_v1_t`.\n\n\n.. seealso:: `nvmlProcessesUtilizationInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the GpuDynamicPstatesInfo vtable (Cython cdef-method dispatch). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo __pyx_vtable_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;

/* tp_new for GpuDynamicPstatesInfo: allocates the object, installs the
 * vtable pointer, and initializes the _owner field to None (never NULL,
 * matching the tp_clear convention below). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for GpuDynamicPstatesInfo.  Runs tp_finalize first when
 * available (may resurrect the object, in which case we bail out), untracks
 * the object from the GC, invokes the user __dealloc__ with any in-flight
 * exception saved/restored around it and the refcount temporarily bumped so
 * decrefs inside __dealloc__ cannot retrigger deallocation, then releases
 * the _owner reference and frees the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) {
      /* Nonzero return means the finalizer resurrected the object. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for GpuDynamicPstatesInfo: visits the type (for heap-type GC
 * support) and the single owned reference _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear for GpuDynamicPstatesInfo: replaces _owner with None (not NULL,
 * so the field remains a valid object reference) and drops the old value. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for GpuDynamicPstatesInfo: v != NULL dispatches to
 * __setitem__; v == NULL means `del obj[i]`, which is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for GpuDynamicPstatesInfo: only __eq__ is defined in the
 * .pyx source, so Py_NE is synthesized by negating the __eq__ result (unless
 * it returned NotImplemented); all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property get/set thunks for GpuDynamicPstatesInfo.  Getters forward to the
 * Cython-generated __get__ wrappers; setters forward to __set__ and reject
 * deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_utilization(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_utilization(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_11utilization_3__set__(o, v);
  }
  else {
    /* `del obj.utilization` is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_flags_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_flags_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_6flags__3__set__(o, v);
  }
  else {
    /* `del obj.flags_` is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for GpuDynamicPstatesInfo; all-zero sentinel terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for GpuDynamicPstatesInfo.  `ptr` is read-only (setter slot
 * is 0).  All-zero sentinel terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"utilization", __pyx_getprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_utilization, __pyx_setprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_utilization, PyDoc_STR("_py_anon_pod1: "), 0},
  {"flags_", __pyx_getprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_flags_, __pyx_setprop_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_flags_, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cuda.bindings._nvml.GpuDynamicPstatesInfo: a
 * PyType_Spec/PyType_Slot pair when CYTHON_USE_TYPE_SPECS is set, otherwise
 * a statically initialized PyTypeObject.  Same GC/subclassable flags and
 * slot wiring pattern as the other generated wrapper types in this file. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_doc, (void *)PyDoc_STR("GpuDynamicPstatesInfo()\n\nEmpty-initialize an instance of `nvmlGpuDynamicPstatesInfo_t`.\n\n\n.. seealso:: `nvmlGpuDynamicPstatesInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo_spec = {
  "cuda.bindings._nvml.GpuDynamicPstatesInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo_slots,
};
#else

/* Only nb_int is populated: int() conversion is the sole numeric behavior. */
static PyNumberMethods __pyx_tp_as_number_GpuDynamicPstatesInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only subscript assignment is supported (no len(), no item read via []). */
static PyMappingMethods __pyx_tp_as_mapping_GpuDynamicPstatesInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*mp_ass_subscript*/
};

/* Statically initialized type object (non-type-specs builds). */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuDynamicPstatesInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuDynamicPstatesInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuDynamicPstatesInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuDynamicPstatesInfo()\n\nEmpty-initialize an instance of `nvmlGpuDynamicPstatesInfo_t`.\n\n\n.. seealso:: `nvmlGpuDynamicPstatesInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the VgpuProcessesUtilizationInfo_v1 vtable (cdef-method dispatch). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;

/* tp_new for VgpuProcessesUtilizationInfo_v1: allocates the object, installs
 * the vtable pointer, and initializes both owned fields (_owner, _refs) to
 * None — never NULL, matching the tp_clear convention below. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuProcessesUtilizationInfo_v1.  Same pattern as the other
 * generated deallocators: optional tp_finalize (may resurrect), GC untrack,
 * user __dealloc__ invoked with any pending exception saved/restored and the
 * refcount temporarily bumped to prevent recursive deallocation, then both
 * owned references (_owner, _refs) are released and memory is freed. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) {
      /* Nonzero return means the finalizer resurrected the object. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for VgpuProcessesUtilizationInfo_v1: visits the type and the
 * two owned references _owner and _refs. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  if (p->_refs) {
    e = (*v)(p->_refs, a); if (e) return e;
  }
  return 0;
}

/* tp_clear for VgpuProcessesUtilizationInfo_v1: resets _owner and _refs to
 * None (not NULL, so both fields remain valid object references) and drops
 * the old values. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_refs);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for VgpuProcessesUtilizationInfo_v1: v != NULL dispatches
 * to __setitem__; v == NULL (`del obj[i]`) is not supported. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for VgpuProcessesUtilizationInfo_v1: Py_NE is synthesized
 * by negating the __eq__ result (unless NotImplemented); all operators other
 * than EQ/NE return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property get/set thunks for VgpuProcessesUtilizationInfo_v1.  Getters
 * forward to the Cython-generated __get__ wrappers; setters forward to
 * __set__ and reject deletion (v == NULL) with NotImplementedError. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7version_3__set__(o, v);
  }
  else {
    /* `del obj.version` is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_last_seen_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_last_seen_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(o, v);
  }
  else {
    /* `del obj.last_seen_time_stamp` is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_vgpu_proc_util_array(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_vgpu_proc_util_array(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_20vgpu_proc_util_array_3__set__(o, v);
  }
  else {
    /* `del obj.vgpu_proc_util_array` is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuProcessesUtilizationInfo_v1; all-zero sentinel
 * terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuProcessesUtilizationInfo_v1.  `ptr` is read-only
 * (setter slot is 0).  All-zero sentinel terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"last_seen_time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_last_seen_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_last_seen_time_stamp, PyDoc_STR("int: Return only samples with timestamp greater than lastSeenTimeStamp."), 0},
  {"vgpu_proc_util_array", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_vgpu_proc_util_array, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_vgpu_proc_util_array, PyDoc_STR("int: The array (allocated by caller) in which utilization of processes running on vGPU instances are returned."), 0},
  {0, 0, 0, 0, 0}
};
/* Type-specs variant of the VgpuProcessesUtilizationInfo_v1 type definition
 * (used with PyType_FromSpec when CYTHON_USE_TYPE_SPECS is set); the #else
 * branch below provides the statically initialized PyTypeObject. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuProcessesUtilizationInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuProcessesUtilizationInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuProcessesUtilizationInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuProcessesUtilizationInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1_slots,
};
#else

/* Only nb_int is populated: int() conversion is the sole numeric behavior. */
static PyNumberMethods __pyx_tp_as_number_VgpuProcessesUtilizationInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only subscript assignment is supported (no len(), no item read via []). */
static PyMappingMethods __pyx_tp_as_mapping_VgpuProcessesUtilizationInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuProcessesUtilizationInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuProcessesUtilizationInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuProcessesUtilizationInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuProcessesUtilizationInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuProcessesUtilizationInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuProcessesUtilizationInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerParams __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerParams;

/* tp_new: allocate a VgpuSchedulerParams instance, install its vtable
   pointer, and initialize the _owner field to None (a new reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(self == NULL))
    return NULL;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerParams;
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc: run the user-level __dealloc__, release _owner, then free the
   object through the type's tp_free. Statement order here is significant. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a finalizer exists and has not run yet, invoke it first; it may
     resurrect the object, in which case deallocation must be aborted.
     The inner check restricts this to exact (non-subclassed) instances. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerParams) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Call __dealloc__ with the refcount temporarily bumped so the object
       appears alive while user code runs; preserve any pending exception
       across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    /* No direct slot access (e.g. Limited API): resolve tp_free dynamically. */
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Spec-created heap types: instances own a reference to their type. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report Python-object references held by this instance to the
   cycle GC. __Pyx_call_type_traverse is a generated helper (presumably
   visits the heap type itself — see Cython runtime); _owner is the only
   object-valued field of this struct. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)o;
  int status = __Pyx_call_type_traverse(o, 1, v, a);
  if (status != 0)
    return status;
  if (self->_owner != NULL)
    status = v(self->_owner, a);
  return status;
}

/* tp_clear: break reference cycles by resetting _owner to None. The
   replacement reference is installed before the old one is dropped so the
   field is never observed dangling if the DECREF runs reentrant code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: dispatch obj[i] = v to __setitem__. A NULL v signals
   deletion (del obj[i]), which this type does not support. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are supported; != is derived by negating
   the __eq__ result. All other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerParams(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0))
      return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) or NotImplemented propagate unchanged. */
  return eq_result;
}

/* Getter glue for the read-only VgpuSchedulerParams.ptr property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_3ptr_1__get__(o);
}

/* Getter glue for VgpuSchedulerParams.vgpu_sched_data_with_arr. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data_with_arr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_1__get__(o);
}

/* Setter glue for VgpuSchedulerParams.vgpu_sched_data_with_arr.
   A NULL v means attribute deletion, which is not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data_with_arr(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_24vgpu_sched_data_with_arr_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter glue for VgpuSchedulerParams.vgpu_sched_data. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_1__get__(o);
}

/* Setter glue for VgpuSchedulerParams.vgpu_sched_data; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15vgpu_sched_data_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of VgpuSchedulerParams. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerParams[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Properties exposed on VgpuSchedulerParams instances (ptr is read-only). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerParams[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"vgpu_sched_data_with_arr", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data_with_arr, __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data_with_arr, PyDoc_STR("_py_anon_pod2: "), 0},
  {"vgpu_sched_data", __pyx_getprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data, __pyx_setprop_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_vgpu_sched_data, PyDoc_STR("_py_anon_pod3: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for PyType_FromSpec-based type creation. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerParams()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerParams_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerParams_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerParams},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams_spec = {
  "cuda.bindings._nvml.VgpuSchedulerParams",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams_slots,
};
#else

/* Number protocol table: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerParams = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table: subscript assignment only. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerParams = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*mp_ass_subscript*/
};

/* Statically initialized type object, used when CYTHON_USE_TYPE_SPECS is off. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerParams", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerParams, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerParams, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerParams()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerParams_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerParams_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerParams, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;

/* tp_new: allocate a VgpuSchedulerSetParams instance, install its vtable
   pointer, and initialize the _owner field to None (a new reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(self == NULL))
    return NULL;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc: run the user-level __dealloc__, release _owner, then free the
   object through the type's tp_free. Statement order here is significant. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a finalizer exists and has not run yet, invoke it first; it may
     resurrect the object, in which case deallocation must be aborted.
     The inner check restricts this to exact (non-subclassed) instances. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Call __dealloc__ with the refcount temporarily bumped so the object
       appears alive while user code runs; preserve any pending exception
       across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    /* No direct slot access (e.g. Limited API): resolve tp_free dynamically. */
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Spec-created heap types: instances own a reference to their type. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report Python-object references held by this instance to the
   cycle GC. __Pyx_call_type_traverse is a generated helper (presumably
   visits the heap type itself — see Cython runtime); _owner is the only
   object-valued field of this struct. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)o;
  int status = __Pyx_call_type_traverse(o, 1, v, a);
  if (status != 0)
    return status;
  if (self->_owner != NULL)
    status = v(self->_owner, a);
  return status;
}

/* tp_clear: break reference cycles by resetting _owner to None. The
   replacement reference is installed before the old one is dropped so the
   field is never observed dangling if the DECREF runs reentrant code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: dispatch obj[i] = v to __setitem__. A NULL v signals
   deletion (del obj[i]), which this type does not support. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are supported; != is derived by negating
   the __eq__ result. All other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0))
      return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) or NotImplemented propagate unchanged. */
  return eq_result;
}

/* Getter glue for the read-only VgpuSchedulerSetParams.ptr property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_3ptr_1__get__(o);
}

/* Getter glue for VgpuSchedulerSetParams.vgpu_sched_data_with_arr. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data_with_arr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_1__get__(o);
}

/* Setter glue for VgpuSchedulerSetParams.vgpu_sched_data_with_arr.
   A NULL v means attribute deletion, which is not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data_with_arr(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_24vgpu_sched_data_with_arr_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter glue for VgpuSchedulerSetParams.vgpu_sched_data. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_1__get__(o);
}

/* Setter glue for VgpuSchedulerSetParams.vgpu_sched_data; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15vgpu_sched_data_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Python-visible methods of VgpuSchedulerSetParams. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Properties exposed on VgpuSchedulerSetParams instances (ptr is read-only). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"vgpu_sched_data_with_arr", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data_with_arr, __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data_with_arr, PyDoc_STR("_py_anon_pod4: "), 0},
  {"vgpu_sched_data", __pyx_getprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data, __pyx_setprop_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_vgpu_sched_data, PyDoc_STR("_py_anon_pod5: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table for PyType_FromSpec-based type creation. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerSetParams()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerSetParams_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerSetParams_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams_spec = {
  "cuda.bindings._nvml.VgpuSchedulerSetParams",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams_slots,
};
#else

/* Number protocol table: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerSetParams = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table: subscript assignment only. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerSetParams = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*mp_ass_subscript*/
};

/* Statically initialized type object, used when CYTHON_USE_TYPE_SPECS is off. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerSetParams", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerSetParams, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerSetParams, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerSetParams()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerSetParams_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerSetParams_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuLicenseInfo __pyx_vtable_4cuda_8bindings_5_nvml_VgpuLicenseInfo;

/* tp_new: allocate a VgpuLicenseInfo instance, install its vtable pointer,
   and initialize the _owner field to None (a new reference). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *self = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(self == NULL))
    return NULL;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *obj =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)self;
  obj->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseInfo;
  Py_INCREF(Py_None);
  obj->_owner = Py_None;
  return self;
}

/* tp_dealloc: run the user-level __dealloc__, release _owner, then free the
   object through the type's tp_free. Statement order here is significant. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If a finalizer exists and has not run yet, invoke it first; it may
     resurrect the object, in which case deallocation must be aborted.
     The inner check restricts this to exact (non-subclassed) instances. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Call __dealloc__ with the refcount temporarily bumped so the object
       appears alive while user code runs; preserve any pending exception
       across the call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    /* No direct slot access (e.g. Limited API): resolve tp_free dynamically. */
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Spec-created heap types: instances own a reference to their type. */
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: report Python-object references held by this instance to the
   cycle GC. __Pyx_call_type_traverse is a generated helper (presumably
   visits the heap type itself — see Cython runtime); _owner is the only
   object-valued field of this struct. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)o;
  int status = __Pyx_call_type_traverse(o, 1, v, a);
  if (status != 0)
    return status;
  if (self->_owner != NULL)
    status = v(self->_owner, a);
  return status;
}

/* tp_clear: break reference cycles by resetting _owner to None. The
   replacement reference is installed before the old one is dropped so the
   field is never observed dangling if the DECREF runs reentrant code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: dispatch obj[i] = v to __setitem__. A NULL v signals
   deletion (del obj[i]), which this type does not support. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11__setitem__(o, i, v);
}

/* tp_richcompare: only == and != are supported; != is derived by negating
   the __eq__ result. All other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuLicenseInfo(PyObject *o1, PyObject *o2, int op) {
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  PyObject *eq_result = __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(truth < 0))
      return NULL;
    eq_result = truth ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  /* NULL (error) or NotImplemented propagate unchanged. */
  return eq_result;
}

/* Getter glue for the read-only VgpuLicenseInfo.ptr property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_3ptr_1__get__(o);
}

/* Getter glue for VgpuLicenseInfo.license_expiry. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_license_expiry(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_1__get__(o);
}

/* Setter glue for VgpuLicenseInfo.license_expiry.
   A NULL v means attribute deletion, which is not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_license_expiry(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14license_expiry_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter glue for VgpuLicenseInfo.is_licensed. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_is_licensed(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_1__get__(o);
}

/* Setter glue for VgpuLicenseInfo.is_licensed; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_is_licensed(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_11is_licensed_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter glue for VgpuLicenseInfo.current_state. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_current_state(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_1__get__(o);
}

/* Setter glue for VgpuLicenseInfo.current_state; deletion unsupported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_current_state(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13current_state_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuLicenseInfo: the public from_data/from_ptr
 * constructors plus the pickle-support dunders; all use Cython's
 * FASTCALL+keywords calling convention.  NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuLicenseInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuLicenseInfo.  `ptr` is read-only (no setter);
 * the struct-field properties are read/write.  NULL-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuLicenseInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"license_expiry", __pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_license_expiry, __pyx_setprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_license_expiry, PyDoc_STR("VgpuLicenseExpiry: "), 0},
  {"is_licensed", __pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_is_licensed, __pyx_setprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_is_licensed, PyDoc_STR("int: "), 0},
  {"current_state", __pyx_getprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_current_state, __pyx_setprop_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_current_state, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for VgpuLicenseInfo.  Two equivalent variants are
 * compiled depending on CYTHON_USE_TYPE_SPECS: a PyType_Spec/slot array
 * (heap type, limited-API friendly) or a statically initialized
 * PyTypeObject.  Both wire up the same dealloc/repr/__int__/subscript/
 * GC/richcompare/methods/getset/init/new entry points. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuLicenseInfo()\n\nEmpty-initialize an instance of `nvmlVgpuLicenseInfo_t`.\n\n\n.. seealso:: `nvmlVgpuLicenseInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo_spec = {
  "cuda.bindings._nvml.VgpuLicenseInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo_slots,
};
#else

/* Only nb_int is populated: the type supports int() conversion but no
 * arithmetic. */
static PyNumberMethods __pyx_tp_as_number_VgpuLicenseInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment is supported at the mapping level (no __getitem__
 * or __len__ for this type). */
static PyMappingMethods __pyx_tp_as_mapping_VgpuLicenseInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuLicenseInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuLicenseInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuLicenseInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuLicenseInfo()\n\nEmpty-initialize an instance of `nvmlVgpuLicenseInfo_t`.\n\n\n.. seealso:: `nvmlVgpuLicenseInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuLicenseInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for GridLicensableFeature's cdef methods. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeature __pyx_vtable_4cuda_8bindings_5_nvml_GridLicensableFeature;

/* tp_new for GridLicensableFeature: allocate the instance, install the
 * vtable pointer, and initialize the _data attribute to None (args/kwargs
 * are handled later by tp_init).  Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeature(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeature;
  p->_data = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for GridLicensableFeature.  Order is significant:
 * 1. give a tp_finalize (if any, and not already run) a chance to
 *    resurrect the object — only when the dealloc slot is ours, i.e. the
 *    type has not been subclassed with a different dealloc;
 * 2. untrack from the GC before touching attributes;
 * 3. drop the _data reference;
 * 4. free via the type's tp_free, and for heap types (type specs) drop
 *    the reference the instance held on its type. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeature(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeature) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->_data);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for GridLicensableFeature: report the type reference (for
 * heap types) and the _data attribute to the GC visitor.  A non-zero
 * visitor result aborts the traversal and is propagated. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicensableFeature(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_data) {
    e = (*v)(p->_data, a); if (e) return e;
  }
  return 0;
}

/* tp_clear for GridLicensableFeature: break the GC cycle through _data,
 * replacing it with None (not NULL) and dropping the old reference only
 * after the attribute is back in a consistent state. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicensableFeature(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *)o;
  tmp = ((PyObject*)p->_data);
  p->_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* sq_item shim for GridLicensableFeature: implements integer indexing by
 * boxing the index and delegating to the type's mp_subscript, looked up
 * either through the static slot table or via PyType_GetSlot under the
 * limited API. */
static PyObject *__pyx_sq_item_4cuda_8bindings_5_nvml_GridLicensableFeature(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for GridLicensableFeature: item assignment is routed to
 * the Cython-level __setitem__; item deletion (v == NULL) is rejected with
 * a NotImplementedError naming the fully qualified type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicensableFeature(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* `del obj[i]` path: report the concrete type in the error message. */
    __Pyx_TypeName tname = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, tname);
    __Pyx_DECREF_TypeName(tname);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13__setitem__(o, i, v);
}

/* tp_richcompare for GridLicensableFeature: Py_EQ delegates to the
 * Cython __eq__; Py_NE negates the __eq__ truth value (propagating NULL
 * and NotImplemented unchanged); all other operators return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicensableFeature(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* getset thin wrappers for GridLicensableFeature: each forwards to the
 * Cython-generated __get__/__set__ of the matching property.  Setters
 * reject attribute deletion (v == NULL) with NotImplementedError("__del__").
 * `ptr` and `_data` are read-only (getter only). */

/* Getter for the read-only `ptr` property (pointer address as int). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3ptr_1__get__(o);
}

/* Getter for `feature_code`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_code(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_1__get__(o);
}

/* Setter for `feature_code`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_code(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12feature_code_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for `feature_state`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_state(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_1__get__(o);
}

/* Setter for `feature_state`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_state(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_13feature_state_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for `license_info`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_info(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_1__get__(o);
}

/* Setter for `license_info`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_info(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12license_info_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for `product_name`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_product_name(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_1__get__(o);
}

/* Setter for `product_name`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_product_name(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_12product_name_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for `feature_enabled`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_enabled(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_1__get__(o);
}

/* Setter for `feature_enabled`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_enabled(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15feature_enabled_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for `license_expiry`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_expiry(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_1__get__(o);
}

/* Setter for `license_expiry`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_expiry(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_14license_expiry_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for the internal read-only `_data` attribute. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature__data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5_data_1__get__(o);
}

/* Method table for GridLicensableFeature: from_data/from_ptr constructors
 * plus pickle-support dunders, all using the FASTCALL+keywords convention.
 * NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GridLicensableFeature[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_15from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_14from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_17from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_16from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_19__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_18__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_21__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21GridLicensableFeature_20__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for GridLicensableFeature.  `ptr` and `_data` are
 * read-only (no setter); the struct-field properties are read/write.
 * NULL-terminated. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GridLicensableFeature[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"feature_code", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_code, __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_code, PyDoc_STR("Union[~_numpy.int32, int]: "), 0},
  {"feature_state", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_state, __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_state, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"license_info", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_info, __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_info, PyDoc_STR("~_numpy.int8: (array of length 128)."), 0},
  {"product_name", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_product_name, __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_product_name, PyDoc_STR("~_numpy.int8: (array of length 128)."), 0},
  {"feature_enabled", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_enabled, __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_feature_enabled, PyDoc_STR("Union[~_numpy.uint32, int]: "), 0},
  {"license_expiry", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_expiry, __pyx_setprop_4cuda_8bindings_5_nvml_21GridLicensableFeature_license_expiry, PyDoc_STR("grid_license_expiry_dtype: "), 0},
  {"_data", __pyx_getprop_4cuda_8bindings_5_nvml_21GridLicensableFeature__data, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for GridLicensableFeature.  Two equivalent variants are
 * compiled depending on CYTHON_USE_TYPE_SPECS: a PyType_Spec/slot array
 * (heap type, limited-API friendly) or a statically initialized
 * PyTypeObject.  Unlike VgpuLicenseInfo, this type is sequence-like: it
 * additionally exposes __len__, integer indexing and __getitem__. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5__int__},
  {Py_sq_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_7__len__},
  {Py_sq_item, (void *)__pyx_sq_item_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_mp_length, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_7__len__},
  {Py_mp_subscript, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_11__getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_doc, (void *)PyDoc_STR("GridLicensableFeature(size=1)\n\nEmpty-initialize an array of `nvmlGridLicensableFeature_t`.\n\nThe resulting object is of length `size` and of dtype `grid_licensable_feature_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlGridLicensableFeature_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeature},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature_spec = {
  "cuda.bindings._nvml.GridLicensableFeature",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature_slots,
};
#else

/* Only nb_int is populated: int() conversion but no arithmetic. */
static PyNumberMethods __pyx_tp_as_number_GridLicensableFeature = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_5__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Sequence protocol: len() and integer indexing (via the sq_item shim). */
static PySequenceMethods __pyx_tp_as_sequence_GridLicensableFeature = {
  __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_7__len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_4cuda_8bindings_5_nvml_GridLicensableFeature, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol: len(), __getitem__ and __setitem__. */
static PyMappingMethods __pyx_tp_as_mapping_GridLicensableFeature = {
  __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_7__len__, /*mp_length*/
  __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_11__getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicensableFeature, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GridLicensableFeature", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_3__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GridLicensableFeature, /*tp_as_number*/
  &__pyx_tp_as_sequence_GridLicensableFeature, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GridLicensableFeature, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GridLicensableFeature(size=1)\n\nEmpty-initialize an array of `nvmlGridLicensableFeature_t`.\n\nThe resulting object is of length `size` and of dtype `grid_licensable_feature_dtype`.\nIf default-constructed, the instance represents a single struct.\n\nArgs:\n    size (int): number of structs, default=1.\n\n\n.. seealso:: `nvmlGridLicensableFeature_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21GridLicensableFeature_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeature, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for UnitFanSpeeds' cdef methods. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_UnitFanSpeeds __pyx_vtable_4cuda_8bindings_5_nvml_UnitFanSpeeds;

/* tp_new for UnitFanSpeeds: allocate the instance, install the vtable
 * pointer, and initialize _owner to None (args/kwargs are handled later by
 * tp_init).  Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanSpeeds;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for UnitFanSpeeds.  Unlike the other types here it has a
 * Cython __dealloc__; the ordering below is significant:
 * 1. run tp_finalize (if present and not already run) with a chance to
 *    resurrect the object;
 * 2. untrack from the GC;
 * 3. call __dealloc__ with any pending exception saved/restored around it,
 *    temporarily bumping the refcount so that code inside __dealloc__ that
 *    takes a new reference does not re-enter deallocation;
 * 4. drop _owner and free via tp_free (and, for heap types, drop the
 *    instance's reference to its type). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanSpeeds) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse for UnitFanSpeeds: report the type reference (for heap
 * types) and the _owner attribute to the GC visitor.  A non-zero visitor
 * result aborts the traversal and is propagated. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear for UnitFanSpeeds: break the GC cycle through _owner, replacing
 * it with None (not NULL) and dropping the old reference only once the
 * attribute is back in a consistent state. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript for UnitFanSpeeds: item assignment (v != NULL) is
 * routed to the Cython-level __setitem__; item deletion (v == NULL) is
 * rejected with NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare for UnitFanSpeeds: Py_EQ delegates to the Cython __eq__;
 * Py_NE negates the __eq__ truth value (propagating NULL and
 * NotImplemented unchanged); all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitFanSpeeds(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* getset thin wrappers for UnitFanSpeeds: each forwards to the
 * Cython-generated __get__/__set__ of the matching property.  Setters
 * reject attribute deletion (v == NULL) with
 * NotImplementedError("__del__").  `ptr` is read-only. */

/* Getter for the read-only `ptr` property (pointer address as int). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_3ptr_1__get__(o);
}

/* Getter for `fans`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_fans(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_1__get__(o);
}

/* Setter for `fans`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_fans(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_4fans_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Getter for `count`. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_1__get__(o);
}

/* Setter for `count`; deletion not supported. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for UnitFanSpeeds: from_data/from_ptr constructors plus
 * pickle-support dunders, all using the FASTCALL+keywords convention.
 * NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_UnitFanSpeeds[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13UnitFanSpeeds_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for UnitFanSpeeds. `ptr` is read-only (no setter slot);
 * `fans` and `count` are read/write via the bridge functions above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_UnitFanSpeeds[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"fans", __pyx_getprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_fans, __pyx_setprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_fans, PyDoc_STR("UnitFanInfo: "), 0},
  {"count", __pyx_getprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_count, __pyx_setprop_4cuda_8bindings_5_nvml_13UnitFanSpeeds_count, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table + PyType_Spec for UnitFanSpeeds, used when the type is created
 * via PyType_FromSpec (CYTHON_USE_TYPE_SPECS branch, e.g. Limited API). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_doc, (void *)PyDoc_STR("UnitFanSpeeds()\n\nEmpty-initialize an instance of `nvmlUnitFanSpeeds_t`.\n\n\n.. seealso:: `nvmlUnitFanSpeeds_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanSpeeds},
  {0, 0},
};
/* Spec: fully-qualified name, basicsize, itemsize (0: fixed size), flags
 * (GC-enabled, subclassable), and the slot table above. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds_spec = {
  "cuda.bindings._nvml.UnitFanSpeeds",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds_slots,
};
#else

/* Number protocol for UnitFanSpeeds: only nb_int (__int__) is implemented;
 * every other slot is intentionally empty. Positional initializers — order
 * must match PyNumberMethods exactly. */
static PyNumberMethods __pyx_tp_as_number_UnitFanSpeeds = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (__setitem__) is supported;
 * length and subscript lookup are not implemented. */
static PyMappingMethods __pyx_tp_as_mapping_UnitFanSpeeds = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for UnitFanSpeeds, used when
 * CYTHON_USE_TYPE_SPECS is disabled (classic static type definition).
 * Mirrors the slot table in the spec branch above; trailing fields are
 * guarded by version/implementation #ifs because PyTypeObject's layout
 * differs across CPython versions and PyPy. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""UnitFanSpeeds", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_UnitFanSpeeds, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_UnitFanSpeeds, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("UnitFanSpeeds()\n\nEmpty-initialize an instance of `nvmlUnitFanSpeeds_t`.\n\n\n.. seealso:: `nvmlUnitFanSpeeds_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13UnitFanSpeeds_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_UnitFanSpeeds, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Per-type vtable storage for VgpuPgpuMetadata (populated at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuPgpuMetadata __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;

/* tp_new: allocate the instance, install the vtable pointer, and initialize
 * the _owner field to None (owned reference). Returns NULL on allocation
 * failure. __init__ runs separately via tp_init. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuPgpuMetadata.
 * Order is load-bearing:
 *  1. Run tp_finalize (if present and not already run) — only when this
 *     exact dealloc is installed, so subclasses behave correctly; the
 *     finalizer may resurrect the object, in which case we bail out.
 *  2. Untrack from the GC before touching fields.
 *  3. Call the user __dealloc__ with the refcount temporarily bumped so the
 *     object cannot re-enter deallocation, preserving any pending exception
 *     across the call via PyErr_Fetch/Restore.
 *  4. Drop _owner, then free through the type's tp_free (and, for
 *     spec-created heap types, drop the reference the instance holds on
 *     its type). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the object's type (required for heap types) and the
 * single PyObject* field, _owner. Propagates the first non-zero visit
 * result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: reset _owner to None *before* releasing the old reference so the
 * field never dangles if the decref triggers arbitrary code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript bridge: dispatch item assignment to the Cython-generated
 * __setitem__; a NULL value means `del obj[i]`, which is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11__setitem__(o, i, v);
  }
  {
    /* Subscript deletion: report a NotImplementedError naming the type. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only == and != are defined. != is derived by calling the
 * Cython-generated __eq__ and negating a true/false result; NULL and
 * NotImplemented results pass through unchanged. Other operators return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPgpuMetadata(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    /* <, <=, >, >= are not supported by this type. */
    return __Pyx_NewRef(Py_NotImplemented);
  }
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int is_equal = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(is_equal < 0)) return NULL;
    eq_result = is_equal ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Property accessor bridges for VgpuPgpuMetadata.
 * Getters forward to the Cython-generated __get__ wrappers; setters forward
 * to the __set__ wrappers, rejecting deletion (v == NULL) up front. `ptr`
 * is read-only and therefore has no setter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_supported_vgpu_range(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_supported_vgpu_range(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_25host_supported_vgpu_range_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_revision(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_revision(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_8revision_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_driver_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_driver_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19host_driver_version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_pgpu_virtualization_caps(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_pgpu_virtualization_caps(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_24pgpu_virtualization_caps_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16opaque_data_size_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_11opaque_data_3__set__(o, v);
}

/* Method table for VgpuPgpuMetadata: alternative constructors (from_data,
 * from_ptr) plus Cython's pickle helpers; NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuPgpuMetadata[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuPgpuMetadata. `ptr` is read-only; all struct
 * fields of nvmlVgpuPgpuMetadata_t are exposed read/write via the bridge
 * functions above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPgpuMetadata[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"host_supported_vgpu_range", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_supported_vgpu_range, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_supported_vgpu_range, PyDoc_STR("VgpuVersion: "), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_version, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_version, PyDoc_STR("int: "), 0},
  {"revision", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_revision, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_revision, PyDoc_STR("int: "), 0},
  {"host_driver_version", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_driver_version, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_host_driver_version, PyDoc_STR("~_numpy.int8: (array of length 80)."), 0},
  {"pgpu_virtualization_caps", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_pgpu_virtualization_caps, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_pgpu_virtualization_caps, PyDoc_STR("int: "), 0},
  {"opaque_data_size", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data_size, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data_size, PyDoc_STR("int: "), 0},
  {"opaque_data", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_opaque_data, PyDoc_STR("~_numpy.int8: (array of length 4)."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table + PyType_Spec for VgpuPgpuMetadata, used when the type is
 * created via PyType_FromSpec (CYTHON_USE_TYPE_SPECS branch). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuPgpuMetadata()\n\nEmpty-initialize an instance of `nvmlVgpuPgpuMetadata_t`.\n\n\n.. seealso:: `nvmlVgpuPgpuMetadata_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuMetadata},
  {0, 0},
};
/* Spec: name, basicsize, itemsize (0: fixed size), flags (GC-enabled,
 * subclassable), slot table. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata_spec = {
  "cuda.bindings._nvml.VgpuPgpuMetadata",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata_slots,
};
#else

/* Number protocol for VgpuPgpuMetadata: only nb_int (__int__) is
 * implemented. Positional initializers — order must match PyNumberMethods
 * exactly. */
static PyNumberMethods __pyx_tp_as_number_VgpuPgpuMetadata = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment (__setitem__) is supported. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuPgpuMetadata = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*mp_ass_subscript*/
};

/* Statically-initialized PyTypeObject for VgpuPgpuMetadata, used when
 * CYTHON_USE_TYPE_SPECS is disabled. Mirrors the slot table in the spec
 * branch above; trailing fields are guarded by version/implementation #ifs
 * because PyTypeObject's layout differs across CPython versions and PyPy. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuPgpuMetadata", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuPgpuMetadata, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuPgpuMetadata, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuPgpuMetadata()\n\nEmpty-initialize an instance of `nvmlVgpuPgpuMetadata_t`.\n\n\n.. seealso:: `nvmlVgpuPgpuMetadata_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Per-type vtable storage for GpuInstanceInfo (populated at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GpuInstanceInfo __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceInfo;

/* tp_new: allocate the instance, install the vtable pointer, and initialize
 * the _owner field to None (owned reference). Returns NULL on allocation
 * failure. __init__ runs separately via tp_init. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceInfo;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for GpuInstanceInfo. Same order-sensitive pattern as the other
 * types in this file: optional tp_finalize (may resurrect the object), GC
 * untrack, user __dealloc__ with a temporarily raised refcount and the
 * pending exception saved/restored, then drop _owner and free via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the object's type (required for heap types) and the
 * single PyObject* field, _owner. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: reset _owner to None *before* releasing the old reference so the
 * field never dangles if the decref triggers arbitrary code. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript bridge: dispatch item assignment to the Cython-generated
 * __setitem__; a NULL value means `del obj[i]`, which is rejected. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_11__setitem__(o, i, v);
  }
  {
    /* Subscript deletion: report a NotImplementedError naming the type. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only == and != are defined. != is derived by calling the
 * Cython-generated __eq__ and negating a true/false result; NULL and
 * NotImplemented results pass through unchanged. Other operators return
 * NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceInfo(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    /* <, <=, >, >= are not supported by this type. */
    return __Pyx_NewRef(Py_NotImplemented);
  }
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9__eq__(o1, o2);
  if (likely(eq_result && eq_result != Py_NotImplemented)) {
    int is_equal = __Pyx_PyObject_IsTrue(eq_result);
    Py_DECREF(eq_result);
    if (unlikely(is_equal < 0)) return NULL;
    eq_result = is_equal ? Py_False : Py_True;
    Py_INCREF(eq_result);
  }
  return eq_result;
}

/* Property accessor bridges for GpuInstanceInfo.
 * Getters forward to the Cython-generated __get__ wrappers; setters forward
 * to the __set__ wrappers, rejecting deletion (v == NULL) up front. `ptr`
 * is read-only and therefore has no setter. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_placement(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_placement(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_9placement_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_device_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_device_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7device__3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_2id_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_profile_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_profile_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_10profile_id_3__set__(o, v);
}

/* Method table for GpuInstanceInfo: alternative constructors (from_data,
 * from_ptr) plus Cython's pickle helpers; NULL-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_15GpuInstanceInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for GpuInstanceInfo. `ptr` is read-only; the
 * nvmlGpuInstanceInfo_t fields are exposed read/write via the bridge
 * functions above. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"placement", __pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_placement, __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_placement, PyDoc_STR("GpuInstancePlacement: "), 0},
  {"device_", __pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_device_, __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_device_, PyDoc_STR("int: "), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_id, __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_id, PyDoc_STR("int: "), 0},
  {"profile_id", __pyx_getprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_profile_id, __pyx_setprop_4cuda_8bindings_5_nvml_15GpuInstanceInfo_profile_id, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot table + PyType_Spec for GpuInstanceInfo, used when the type is
 * created via PyType_FromSpec (CYTHON_USE_TYPE_SPECS branch). */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_doc, (void *)PyDoc_STR("GpuInstanceInfo()\n\nEmpty-initialize an instance of `nvmlGpuInstanceInfo_t`.\n\n\n.. seealso:: `nvmlGpuInstanceInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceInfo},
  {0, 0},
};
/* Spec: name, basicsize, itemsize (0: fixed size), flags (GC-enabled,
 * subclassable), slot table. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo_spec = {
  "cuda.bindings._nvml.GpuInstanceInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo_slots,
};
#else

/* Number protocol for GpuInstanceInfo: only nb_int (__int__) is
 * implemented. Positional initializers — order must match PyNumberMethods
 * exactly. */
static PyNumberMethods __pyx_tp_as_number_GpuInstanceInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_GpuInstanceInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GpuInstanceInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GpuInstanceInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GpuInstanceInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GpuInstanceInfo()\n\nEmpty-initialize an instance of `nvmlGpuInstanceInfo_t`.\n\n\n.. seealso:: `nvmlGpuInstanceInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_15GpuInstanceInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GpuInstanceInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the ComputeInstanceInfo vtable (the vtab pointer assigned
 * in tp_new below is initialized elsewhere in this generated module). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_ComputeInstanceInfo __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceInfo;

/* tp_new for ComputeInstanceInfo: allocate the instance, attach the
 * Cython vtable, and park the _owner slot on Py_None. The args/kwargs
 * are unused here; argument handling happens later in tp_init. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceInfo;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for ComputeInstanceInfo: run the finalizer (PEP 442) if one
 * exists, call the user-level __dealloc__, release _owner, then free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If tp_finalize resurrects the object, CallFinalizerFromDealloc
   * returns nonzero and deallocation must be abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Invoke __dealloc__ with any pending exception saved and the
     * refcount temporarily bumped so the object looks alive inside it. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types (type-specs builds) own a reference to their type. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse hook for ComputeInstanceInfo: lets the collector visit the
 * type (via the Cython helper) and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = (*v)(self->_owner, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear hook: drop the _owner reference, parking the slot on Py_None
 * so later access never observes a NULL pointer. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *)o;
  PyObject *old_owner = self->_owner;
  self->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; subscript
 * deletion (v == NULL) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_11__setitem__(o, i, v);
  {
    __Pyx_TypeName deleting_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, deleting_type_name);
    __Pyx_DECREF_TypeName(deleting_type_name);
  }
  return -1;
}

/* tp_richcompare: only ==/!= are supported; != is derived by negating
 * the result of __eq__. Other operators yield NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceInfo(PyObject *o1, PyObject *o2, int op) {
  PyObject *result;
  int truth;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  result = __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9__eq__(o1, o2);
  /* Propagate errors (NULL) and NotImplemented unchanged. */
  if (unlikely(!result) || result == Py_NotImplemented)
    return result;
  truth = __Pyx_PyObject_IsTrue(result);
  Py_DECREF(result);
  if (unlikely(truth < 0)) return NULL;
  result = truth ? Py_False : Py_True;
  Py_INCREF(result);
  return result;
}

/* Property thunks for ComputeInstanceInfo: adapt the Cython-generated
 * __get__/__set__ wrappers to the PyGetSetDef getter/setter signature.
 * Attribute deletion (v == NULL) is rejected for every writable field. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_placement(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_placement(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_9placement_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_device_(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_device_(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7device__3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_gpu_instance(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_gpu_instance(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12gpu_instance_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_2id_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_profile_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_profile_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_10profile_id_3__set__(o, v);
}

/* Method table for ComputeInstanceInfo: from_data / from_ptr factories
 * plus the pickling helpers generated by Cython. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` is read-only; the struct fields are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"placement", __pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_placement, __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_placement, PyDoc_STR("ComputeInstancePlacement: "), 0},
  {"device_", __pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_device_, __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_device_, PyDoc_STR("int: "), 0},
  {"gpu_instance", __pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_gpu_instance, __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_gpu_instance, PyDoc_STR("int: "), 0},
  {"id", __pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_id, __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_id, PyDoc_STR("int: "), 0},
  {"profile_id", __pyx_getprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_profile_id, __pyx_setprop_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_profile_id, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot array for the heap-type (type-specs) build of ComputeInstanceInfo. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_doc, (void *)PyDoc_STR("ComputeInstanceInfo()\n\nEmpty-initialize an instance of `nvmlComputeInstanceInfo_t`.\n\n\n.. seealso:: `nvmlComputeInstanceInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceInfo},
  {0, 0},
};
/* Heap-type specification consumed by PyType_FromSpec*. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo_spec = {
  "cuda.bindings._nvml.ComputeInstanceInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo_slots,
};
#else

/* Number protocol table: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_ComputeInstanceInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table: only item assignment is wired up. */
static PyMappingMethods __pyx_tp_as_mapping_ComputeInstanceInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*mp_ass_subscript*/
};

/* Statically allocated type object for ComputeInstanceInfo (non-spec
 * builds); mirrors the PyType_Slot array above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""ComputeInstanceInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_ComputeInstanceInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_ComputeInstanceInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("ComputeInstanceInfo()\n\nEmpty-initialize an instance of `nvmlComputeInstanceInfo_t`.\n\n\n.. seealso:: `nvmlComputeInstanceInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_ComputeInstanceInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the EccSramUniqueUncorrectedErrorCounts_v1 vtable. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 __pyx_vtable_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;

/* tp_new: allocate the instance, attach the vtable, and park both the
 * _owner and _refs slots on Py_None (this type keeps an extra _refs
 * object, unlike its siblings above). Args are unused; tp_init handles
 * them later. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for EccSramUniqueUncorrectedErrorCounts_v1: run the
 * finalizer if present, call __dealloc__, release _owner/_refs, free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Abandon deallocation if the finalizer resurrected the object. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* __dealloc__ runs with exception state saved and the refcount
     * temporarily bumped so the object appears alive inside it. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse hook: visit the type (Cython helper) plus both object
 * slots (_owner and _refs) held by this instance. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc) return rc;
  if (self->_owner) {
    rc = (*v)(self->_owner, a);
    if (rc) return rc;
  }
  if (self->_refs) {
    rc = (*v)(self->_refs, a);
    if (rc) return rc;
  }
  return 0;
}

/* GC clear hook: drop both references, parking each slot on Py_None so
 * later access never observes a NULL pointer. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *)o;
  PyObject *old_owner = self->_owner;
  self->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(old_owner);
  PyObject *old_refs = self->_refs;
  self->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(old_refs);
  return 0;
}

/* mp_ass_subscript: forward item assignment to __setitem__; subscript
 * deletion (v == NULL) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v != NULL)
    return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_11__setitem__(o, i, v);
  {
    __Pyx_TypeName deleting_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, deleting_type_name);
    __Pyx_DECREF_TypeName(deleting_type_name);
  }
  return -1;
}

/* tp_richcompare: only ==/!= are supported; != is derived by negating
 * the result of __eq__. Other operators yield NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1(PyObject *o1, PyObject *o2, int op) {
  PyObject *result;
  int truth;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  result = __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_9__eq__(o1, o2);
  /* Propagate errors (NULL) and NotImplemented unchanged. */
  if (unlikely(!result) || result == Py_NotImplemented)
    return result;
  truth = __Pyx_PyObject_IsTrue(result);
  Py_DECREF(result);
  if (unlikely(truth < 0)) return NULL;
  result = truth ? Py_False : Py_True;
  Py_INCREF(result);
  return result;
}

/* Property thunks for EccSramUniqueUncorrectedErrorCounts_v1: adapt the
 * Cython-generated __get__/__set__ wrappers to the PyGetSetDef
 * getter/setter signature. Deletion (v == NULL) is rejected. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7version_3__set__(o, v);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_entries(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_entries(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7entries_3__set__(o, v);
}

/* Method table: from_data / from_ptr factories plus pickling helpers. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table: `ptr` is read-only; `version` and `entries` read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_version, PyDoc_STR("int: the API version number"), 0},
  {"entries", __pyx_getprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_entries, __pyx_setprop_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_entries, PyDoc_STR("int: pointer to caller-supplied buffer to return the SRAM unique uncorrected ECC error count entries"), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* Slot array for the heap-type (type-specs) build. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_doc, (void *)PyDoc_STR("EccSramUniqueUncorrectedErrorCounts_v1()\n\nEmpty-initialize an instance of `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`.\n\n\n.. seealso:: `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1},
  {0, 0},
};
/* Heap-type specification consumed by PyType_FromSpec*. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1_spec = {
  "cuda.bindings._nvml.EccSramUniqueUncorrectedErrorCounts_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1_slots,
};
#else

/* Number protocol table: only nb_int (__int__) is implemented. */
static PyNumberMethods __pyx_tp_as_number_EccSramUniqueUncorrectedErrorCounts_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol table: only item assignment is wired up. */
static PyMappingMethods __pyx_tp_as_mapping_EccSramUniqueUncorrectedErrorCounts_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*mp_ass_subscript*/
};

/* Statically allocated type object (non-spec builds); mirrors the
 * PyType_Slot array above. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""EccSramUniqueUncorrectedErrorCounts_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("EccSramUniqueUncorrectedErrorCounts_v1()\n\nEmpty-initialize an instance of `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`.\n\n\n.. seealso:: `nvmlEccSramUniqueUncorrectedErrorCounts_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Storage for the NvlinkFirmwareInfo vtable. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;

/* tp_new for NvlinkFirmwareInfo: allocate the instance, attach the
 * vtable, and park the _owner slot on Py_None. Args are unused here;
 * tp_init handles them later. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for NvlinkFirmwareInfo: optionally runs tp_finalize, untracks
 * from the GC, calls the user __dealloc__ with the refcount temporarily
 * raised (so the object is not reentrantly freed) while preserving any
 * pending exception, releases _owner, then frees via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the finalizer resurrects the object, CallFinalizerFromDealloc returns
     nonzero and deallocation must be abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception around the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types hold a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visits the type object (for heap types) and the _owner
 * reference so the cycle collector can see them. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: drops the old _owner reference but resets the slot to Py_None
 * rather than NULL, preserving the "always non-NULL" invariant set by tp_new.
 * The old value is decref'd only after the slot is rewritten. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript thunk: forwards obj[i] = v to the generated __setitem__
 * wrapper; subscript deletion (v == NULL) is rejected as unimplemented. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* del obj[i] — report the fully qualified type name in the error. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_11__setitem__(o, i, v);
}

/* tp_richcompare: Py_EQ forwards to the generated __eq__; Py_NE is derived by
 * negating __eq__'s result (unless it returned NULL or NotImplemented, which
 * are passed through); all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Getter thunk for the read-only "ptr" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_3ptr_1__get__(o);
}

/* Getter thunk for the "firmware_version" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_firmware_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_1__get__(o);
}

/* Setter thunk for the "firmware_version" property: forwards to the generated
 * __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_firmware_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16firmware_version_3__set__(o, v);
}

/* Getter thunk for the "num_valid_entries" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_num_valid_entries(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_1__get__(o);
}

/* Setter thunk for the "num_valid_entries" property: forwards to the generated
 * __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_num_valid_entries(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17num_valid_entries_3__set__(o, v);
}

/* Method table exposed on the NvlinkFirmwareInfo Python type (fastcall
 * calling convention with keyword support). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for NvlinkFirmwareInfo; "ptr" is read-only (NULL setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"firmware_version", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_firmware_version, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_firmware_version, PyDoc_STR("NvlinkFirmwareVersion: OUT - NVLINK firmware version."), 0},
  {"num_valid_entries", __pyx_getprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_num_valid_entries, __pyx_setprop_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_num_valid_entries, PyDoc_STR("int: OUT - Number of valid firmware entries."), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for NvlinkFirmwareInfo. Under CYTHON_USE_TYPE_SPECS the
 * type is created from a PyType_Spec/PyType_Slot array (Limited-API style);
 * otherwise a statically initialized PyTypeObject is used. Both branches wire
 * up the same dealloc/repr/int/setitem/traverse/clear/richcompare/init/new
 * implementations defined above. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_doc, (void *)PyDoc_STR("NvlinkFirmwareInfo()\n\nEmpty-initialize an instance of `nvmlNvlinkFirmwareInfo_t`.\n\n\n.. seealso:: `nvmlNvlinkFirmwareInfo_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo_spec = {
  "cuda.bindings._nvml.NvlinkFirmwareInfo",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo_slots,
};
#else
/* Static PyTypeObject branch (non-Limited-API builds). */
static PyNumberMethods __pyx_tp_as_number_NvlinkFirmwareInfo = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_NvlinkFirmwareInfo = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""NvlinkFirmwareInfo", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_NvlinkFirmwareInfo, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_NvlinkFirmwareInfo, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("NvlinkFirmwareInfo()\n\nEmpty-initialize an instance of `nvmlNvlinkFirmwareInfo_t`.\n\n\n.. seealso:: `nvmlNvlinkFirmwareInfo_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;

/* tp_new for VgpuInstancesUtilizationInfo_v1: allocates the extension object,
 * installs the vtable pointer, and initializes the _owner and _refs slots to
 * Py_None (kept always non-NULL; see tp_clear below). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuInstancesUtilizationInfo_v1: optionally runs tp_finalize,
 * untracks from the GC, calls the user __dealloc__ with the refcount
 * temporarily raised while preserving any pending exception, releases the
 * _owner and _refs references, then frees via the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the finalizer resurrects the object, deallocation is abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception around the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  Py_CLEAR(p->_refs);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types hold a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visits the type object (for heap types) and the _owner and
 * _refs references so the cycle collector can see them. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  if (p->_refs) {
    e = (*v)(p->_refs, a); if (e) return e;
  }
  return 0;
}

/* GC clear: drops the old _owner and _refs references, resetting each slot to
 * Py_None rather than NULL to preserve the "always non-NULL" invariant from
 * tp_new; old values are decref'd only after the slots are rewritten. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_refs);
  p->_refs = ((PyObject*)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript thunk: forwards obj[i] = v to the generated __setitem__
 * wrapper; subscript deletion (v == NULL) is rejected as unimplemented. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* del obj[i] — report the fully qualified type name in the error. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_11__setitem__(o, i, v);
}

/* tp_richcompare: Py_EQ forwards to the generated __eq__; Py_NE is derived by
 * negating __eq__'s result (NULL / NotImplemented pass through unchanged);
 * all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Getter thunk for the read-only "ptr" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_3ptr_1__get__(o);
}

/* Getter thunk for the "version" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_1__get__(o);
}

/* Setter thunk for the "version" property: forwards to the generated
 * __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7version_3__set__(o, v);
}

/* Getter thunk for the "sample_val_type" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_sample_val_type(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_1__get__(o);
}

/* Setter thunk for the "sample_val_type" property: forwards to the generated
 * __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_sample_val_type(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15sample_val_type_3__set__(o, v);
}

/* Getter thunk for the "last_seen_time_stamp" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_last_seen_time_stamp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_1__get__(o);
}

/* Setter thunk for the "last_seen_time_stamp" property: forwards to the
 * generated __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_last_seen_time_stamp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_20last_seen_time_stamp_3__set__(o, v);
}

/* Getter thunk for the "vgpu_util_array" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_vgpu_util_array(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_1__get__(o);
}

/* Setter thunk for the "vgpu_util_array" property: forwards to the generated
 * __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_vgpu_util_array(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15vgpu_util_array_3__set__(o, v);
}

/* Method table exposed on the VgpuInstancesUtilizationInfo_v1 Python type
 * (fastcall calling convention with keyword support). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuInstancesUtilizationInfo_v1; "ptr" is read-only
 * (NULL setter). */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_version, PyDoc_STR("int: The version number of this struct."), 0},
  {"sample_val_type", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_sample_val_type, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_sample_val_type, PyDoc_STR("int: Hold the type of returned sample values."), 0},
  {"last_seen_time_stamp", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_last_seen_time_stamp, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_last_seen_time_stamp, PyDoc_STR("int: Return only samples with timestamp greater than lastSeenTimeStamp."), 0},
  {"vgpu_util_array", __pyx_getprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_vgpu_util_array, __pyx_setprop_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_vgpu_util_array, PyDoc_STR("int: The array (allocated by caller) in which vGPU utilization are returned."), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for VgpuInstancesUtilizationInfo_v1. Under
 * CYTHON_USE_TYPE_SPECS the type is created from a PyType_Spec/PyType_Slot
 * array (Limited-API style); otherwise a statically initialized PyTypeObject
 * is used. Both branches wire up the same slot implementations above. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuInstancesUtilizationInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuInstancesUtilizationInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuInstancesUtilizationInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuInstancesUtilizationInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1_slots,
};
#else
/* Static PyTypeObject branch (non-Limited-API builds). */
static PyNumberMethods __pyx_tp_as_number_VgpuInstancesUtilizationInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuInstancesUtilizationInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuInstancesUtilizationInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuInstancesUtilizationInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuInstancesUtilizationInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuInstancesUtilizationInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuInstancesUtilizationInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuInstancesUtilizationInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLog __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLog;

/* tp_new for VgpuSchedulerLog: allocates the extension object, installs the
 * vtable pointer, and initializes the _owner slot to Py_None (kept always
 * non-NULL; see tp_clear below). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLog;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuSchedulerLog: optionally runs tp_finalize, untracks from
 * the GC, calls the user __dealloc__ with the refcount temporarily raised
 * while preserving any pending exception, releases _owner, then frees via
 * the type's tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* If the finalizer resurrects the object, deallocation is abandoned. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLog) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save/restore any in-flight exception around the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types hold a strong reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visits the type object (for heap types) and the _owner
 * reference so the cycle collector can see them. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: drops the old _owner reference but resets the slot to Py_None
 * rather than NULL, preserving the "always non-NULL" invariant from tp_new. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript thunk: forwards obj[i] = v to the generated __setitem__
 * wrapper; subscript deletion (v == NULL) is rejected as unimplemented. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* del obj[i] — report the fully qualified type name in the error. */
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11__setitem__(o, i, v);
}

/* tp_richcompare: Py_EQ forwards to the generated __eq__; Py_NE is derived by
 * negating __eq__'s result (NULL / NotImplemented pass through unchanged);
 * all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLog(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Getter thunk for the read-only "ptr" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_3ptr_1__get__(o);
}

/* Getter thunk for the "scheduler_params" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_params(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_1__get__(o);
}

/* Setter thunk for the "scheduler_params" property: forwards to the generated
 * __set__ wrapper; attribute deletion (v == NULL) is rejected. */
static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_params(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (!v) {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_params_3__set__(o, v);
}

/* Getter thunk for the "log_entries" property. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_log_entries(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_log_entries(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_11log_entries_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_engine_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_engine_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_9engine_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_policy(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_policy(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16scheduler_policy_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_arr_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_arr_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_8arr_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_entries_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_entries_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13entries_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuSchedulerLog: fastcall wrappers for from_data /
 * from_ptr and the pickle-support dunders. Sentinel-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLog[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table wiring the thunks above to Python property names.
 * `ptr` has no setter (read-only); the rest are read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLog[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"scheduler_params", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_params, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_params, PyDoc_STR("VgpuSchedulerParams: "), 0},
  {"log_entries", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_log_entries, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_log_entries, PyDoc_STR("VgpuSchedulerLogEntry: "), 0},
  {"engine_id", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_engine_id, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_engine_id, PyDoc_STR("int: "), 0},
  {"scheduler_policy", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_policy, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_scheduler_policy, PyDoc_STR("int: "), 0},
  {"arr_mode", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_arr_mode, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_arr_mode, PyDoc_STR("int: "), 0},
  {"entries_count", __pyx_getprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_entries_count, __pyx_setprop_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_entries_count, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for VgpuSchedulerLog, emitted twice:
 *  - CYTHON_USE_TYPE_SPECS path: PyType_Slot/PyType_Spec for heap-type
 *    creation (limited API / stable ABI builds);
 *  - otherwise: a classic static PyTypeObject.
 * Both variants must stay in sync (same slots, same flags, same doc). */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerLog()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerLog_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerLog_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLog},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog_spec = {
  "cuda.bindings._nvml.VgpuSchedulerLog",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerLog = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerLog = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerLog", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerLog, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerLog, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerLog()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerLog_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerLog_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLog, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Object lifecycle (new / dealloc / GC traverse / GC clear) for
 * VgpuSchedulerGetState (Cython-generated). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerGetState __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;

/* tp_new: allocate the extension object, install the vtable pointer and
 * initialize the `_owner` field to None (holds a reference that keeps the
 * underlying buffer alive). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: run tp_finalize once (may resurrect), then untrack from GC,
 * call the generated __dealloc__ with the refcount temporarily bumped so
 * the object cannot be re-entered mid-teardown, release `_owner`, and
 * finally free via the type's tp_free (DECREF'ing the heap type when
 * built with type specs). */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception across __dealloc__. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (for heap types) and the `_owner` reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: reset `_owner` to None (not NULL) so later attribute access
 * during collection stays safe, then drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: item assignment delegates to the generated __setitem__;
 * item deletion (v == NULL) is rejected with NotImplementedError naming the
 * concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ via __eq__, Py_NE by inverting __eq__'s truth value
 * (NotImplemented propagated), other ops -> NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerGetState(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks and method/getset tables for VgpuSchedulerGetState
 * (Cython-generated). Setters reject deletion with NotImplementedError. */

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_params(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_params(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_params_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_policy(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_policy(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16scheduler_policy_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_arr_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_arr_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_8arr_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Sentinel-terminated method table (fastcall wrappers). */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerGetState[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Getset table; `ptr` is read-only, others read/write. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerGetState[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"scheduler_params", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_params, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_params, PyDoc_STR("VgpuSchedulerParams: "), 0},
  {"scheduler_policy", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_policy, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_scheduler_policy, PyDoc_STR("int: "), 0},
  {"arr_mode", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_arr_mode, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_arr_mode, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for VgpuSchedulerGetState: PyType_Spec variant for
 * type-spec builds, static PyTypeObject otherwise. Keep both in sync. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerGetState()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerGetState_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerGetState_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerGetState},
  {0, 0},
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState_spec = {
  "cuda.bindings._nvml.VgpuSchedulerGetState",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState_slots,
};
#else

static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerGetState = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerGetState = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerGetState", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerGetState, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerGetState, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerGetState()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerGetState_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerGetState_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Object lifecycle (new / dealloc / GC traverse / GC clear) for
 * VgpuSchedulerStateInfo_v1 (Cython-generated); same protocol as the
 * other wrapper types in this module. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;

/* tp_new: allocate, install vtable, set `_owner` to None. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc: finalize (may resurrect), untrack, call __dealloc__ with a
 * temporary refcount bump and the exception state preserved, release
 * `_owner`, then free via tp_free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* tp_traverse: visit the type (heap types) and `_owner`. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* tp_clear: reset `_owner` to None, drop the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript: assignment delegates to __setitem__; deletion is
 * rejected with NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: Py_EQ via __eq__, Py_NE by inverting __eq__'s truth value
 * (NotImplemented propagated), other ops -> NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9__eq__(o1, o2);
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_params(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_params(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_params_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_engine_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_engine_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_9engine_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_policy(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_policy(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16scheduler_policy_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_arr_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_arr_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_8arr_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* PyGetSetDef table for VgpuSchedulerStateInfo_v1.  "ptr" is read-only
 * (setter slot is 0); the remaining struct fields have get/set trampolines. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"scheduler_params", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_params, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_params, PyDoc_STR("VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"engine_id", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_engine_id, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_engine_id, PyDoc_STR("int: IN: Engine whose software scheduler state info is fetched. One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."), 0},
  {"scheduler_policy", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_policy, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_scheduler_policy, PyDoc_STR("int: OUT: Scheduler policy."), 0},
  {"arr_mode", __pyx_getprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_arr_mode, __pyx_setprop_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_arr_mode, PyDoc_STR("int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Spec path (limited API / heap types): slot list mirrors the static
 * PyTypeObject defined in the #else branch below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerStateInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerStateInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerStateInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1},
  {0, 0},
};
/* Type spec consumed by PyType_FromSpec* during module init. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuSchedulerStateInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1_slots,
};
#else

/* Static-PyTypeObject path: only nb_int is populated (int() conversion). */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerStateInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment (obj[i] = v) is supported; no len() or obj[i]. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerStateInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*mp_ass_subscript*/
};

/* Full static type object; must stay in sync with the PyType_Spec branch. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerStateInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerStateInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerStateInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerStateInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerStateInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerStateInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for VgpuSchedulerLogInfo_v1 (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;

/* tp_new for VgpuSchedulerLogInfo_v1: allocate, wire up the vtable pointer,
 * and initialize the _owner field to None (it holds a reference to whatever
 * Python object keeps the underlying C data alive). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuSchedulerLogInfo_v1.  Runs tp_finalize (if set) first,
 * untracks from the GC, then calls the user __dealloc__ with the refcount
 * temporarily bumped so code inside __dealloc__ cannot trigger a recursive
 * free, and finally drops the _owner reference and releases the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) {
      /* Nonzero return means the finalizer resurrected the object. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Instances of heap types own a strong reference to their type. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the type object (heap-type support) and the single
 * PyObject* field (_owner) this instance holds. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: replace _owner with None (not NULL, so later reads stay safe)
 * before dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyObject *o, PyObject *i, PyObject *v) {
  /* mp_ass_subscript slot: CPython passes v == NULL for `del obj[i]`,
   * which this extension type does not implement; assignment is forwarded
   * to the generated __setitem__ wrapper. */
  if (!v) {
    __Pyx_TypeName o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11__setitem__(o, i, v);
}

static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1(PyObject *o1, PyObject *o2, int op) {
  /* tp_richcompare: the class only defines __eq__; `!=` is synthesized by
   * negating the __eq__ result, and every other operator returns
   * NotImplemented so CPython can try the reflected operation. */
  PyObject *res;
  if (op == Py_EQ) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9__eq__(o1, o2);
  }
  if (op != Py_NE) {
    return __Pyx_NewRef(Py_NotImplemented);
  }
  res = __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9__eq__(o1, o2);
  if (likely(res && res != Py_NotImplemented)) {
    int truth = __Pyx_PyObject_IsTrue(res);
    Py_DECREF(res);
    if (unlikely(truth < 0)) return NULL;
    res = truth ? Py_False : Py_True;
    Py_INCREF(res);
  }
  return res;
}

/* Property trampolines for VgpuSchedulerLogInfo_v1.  Each getter forwards
 * to the generated __get__ wrapper; each setter forwards to __set__ when a
 * value is supplied and rejects deletion (v == NULL) with
 * NotImplementedError("__del__"). */

/* ptr (read-only: no matching setter trampoline exists). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_3ptr_1__get__(o);
}

/* scheduler_params */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_params(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_params(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_params_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* log_entries */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_log_entries(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_log_entries(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_11log_entries_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* version */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* engine_id */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_engine_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_engine_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_9engine_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* scheduler_policy */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_policy(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_policy(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16scheduler_policy_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* arr_mode */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_arr_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_arr_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_8arr_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* entries_count */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_entries_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_entries_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13entries_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuSchedulerLogInfo_v1: from_data/from_ptr constructors
 * plus the Cython pickling hooks, all via the FASTCALL protocol. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* PyGetSetDef table for VgpuSchedulerLogInfo_v1.  "ptr" is read-only
 * (setter slot is 0); the remaining fields have get/set trampolines. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"scheduler_params", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_params, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_params, PyDoc_STR("VgpuSchedulerParams: OUT: vGPU Scheduler Parameters."), 0},
  {"log_entries", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_log_entries, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_log_entries, PyDoc_STR("VgpuSchedulerLogEntry: OUT: Structure to store the state and logs of a software runlist."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"engine_id", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_engine_id, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_engine_id, PyDoc_STR("int: IN: Engine whose software runlist log entries are fetched. One of One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."), 0},
  {"scheduler_policy", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_policy, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_scheduler_policy, PyDoc_STR("int: OUT: Scheduler policy."), 0},
  {"arr_mode", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_arr_mode, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_arr_mode, PyDoc_STR("int: OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*."), 0},
  {"entries_count", __pyx_getprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_entries_count, __pyx_setprop_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_entries_count, PyDoc_STR("int: OUT: Count of log entries fetched."), 0},
  {0, 0, 0, 0, 0}
};
#if CYTHON_USE_TYPE_SPECS
/* PyType_Spec path (limited API / heap types): slot list mirrors the static
 * PyTypeObject defined in the #else branch below. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerLogInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerLogInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerLogInfo_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1},
  {0, 0},
};
/* Type spec consumed by PyType_FromSpec* during module init. */
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1_spec = {
  "cuda.bindings._nvml.VgpuSchedulerLogInfo_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1_slots,
};
#else

/* Static-PyTypeObject path: only nb_int is populated (int() conversion). */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerLogInfo_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Only item assignment (obj[i] = v) is supported; no len() or obj[i]. */
static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerLogInfo_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*mp_ass_subscript*/
};

/* Full static type object; must stay in sync with the PyType_Spec branch. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerLogInfo_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerLogInfo_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerLogInfo_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerLogInfo_v1()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerLogInfo_v1_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerLogInfo_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* vtable storage for VgpuSchedulerState_v1 (filled in at module init). */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;

/* tp_new for VgpuSchedulerState_v1: allocate, wire up the vtable pointer,
 * and initialize _owner to None (holds the object keeping the C data alive). */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for VgpuSchedulerState_v1.  Same pattern as the sibling types:
 * run tp_finalize first, untrack from GC, call __dealloc__ with the refcount
 * temporarily bumped to guard against recursive free, then drop _owner and
 * release the memory. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) {
      /* Nonzero return means the finalizer resurrected the object. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across the __dealloc__ call. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Instances of heap types own a strong reference to their type. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse: visit the type object (heap-type support) and the single
 * PyObject* field (_owner) this instance holds. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear: replace _owner with None (not NULL, so later reads stay safe)
 * before dropping the old reference. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript slot: v == NULL means `del obj[i]`, which is rejected;
 * assignment is forwarded to the generated __setitem__ wrapper. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_richcompare: only __eq__ is defined; != is synthesized by negating the
 * __eq__ result, and all other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9__eq__(o1, o2);
      /* Invert the truth value of __eq__ unless it errored or returned
         NotImplemented (which is passed through unchanged). */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property trampolines for VgpuSchedulerState_v1.  Each getter forwards to
 * the generated __get__ wrapper; each setter forwards to __set__ when a
 * value is supplied and rejects deletion (v == NULL) with
 * NotImplementedError("__del__"). */

/* ptr (read-only: no matching setter trampoline exists). */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_3ptr_1__get__(o);
}

/* scheduler_params */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_params(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_params(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_params_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* version */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* engine_id */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_engine_id(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_engine_id(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_9engine_id_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* scheduler_policy */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_policy(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_policy(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16scheduler_policy_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* enable_arr_mode */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_enable_arr_mode(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_enable_arr_mode(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15enable_arr_mode_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for VgpuSchedulerState_v1: from_data/from_ptr constructors
 * plus the Cython pickling hooks, all via the FASTCALL protocol. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_18__setstate_cython__},
  {0, 0, 0, 0}
};

/* Property table for VgpuSchedulerState_v1. `ptr` is read-only (no setter);
 * the remaining entries mirror the fields of the underlying
 * nvmlVgpuSchedulerState_v1_t struct. Terminated by the zero sentinel. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"scheduler_params", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_params, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_params, PyDoc_STR("VgpuSchedulerSetParams: IN: vGPU Scheduler Parameters."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_version, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_version, PyDoc_STR("int: IN: The version number of this struct."), 0},
  {"engine_id", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_engine_id, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_engine_id, PyDoc_STR("int: IN: One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*."), 0},
  {"scheduler_policy", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_policy, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_scheduler_policy, PyDoc_STR("int: IN: Scheduler policy."), 0},
  {"enable_arr_mode", __pyx_getprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_enable_arr_mode, __pyx_setprop_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_enable_arr_mode, PyDoc_STR("int: IN: Adaptive Round Robin scheduler."), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API / heap types): VgpuSchedulerState_v1 is built
 * at runtime from this slot array + PyType_Spec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_doc, (void *)PyDoc_STR("VgpuSchedulerState_v1()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerState_v1_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerState_v1_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1_spec = {
  "cuda.bindings._nvml.VgpuSchedulerState_v1",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1_slots,
};
#else
/* Static-type path: the same slots expressed as statically-initialized
 * PyNumberMethods / PyMappingMethods / PyTypeObject structures. */
static PyNumberMethods __pyx_tp_as_number_VgpuSchedulerState_v1 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_VgpuSchedulerState_v1 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""VgpuSchedulerState_v1", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_VgpuSchedulerState_v1, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_VgpuSchedulerState_v1, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("VgpuSchedulerState_v1()\n\nEmpty-initialize an instance of `nvmlVgpuSchedulerState_v1_t`.\n\n\n.. seealso:: `nvmlVgpuSchedulerState_v1_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Single shared vtable instance for the GridLicensableFeatures type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_GridLicensableFeatures __pyx_vtable_4cuda_8bindings_5_nvml_GridLicensableFeatures;

/* tp_new: allocate a GridLicensableFeatures instance, install its vtable
 * pointer, and initialize the _owner slot to None (args are ignored here;
 * they are handled by tp_init). Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeatures;
  /* _owner presumably keeps the object owning the underlying NVML buffer
   * alive — None until from_ptr/from_data assigns one (TODO confirm). */
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for GridLicensableFeatures: run tp_finalize if present, untrack
 * from the GC, invoke the user-level __dealloc__, drop _owner, and free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only call the finalizer from the exact dealloc it was generated for;
   * PyObject_CallFinalizerFromDealloc returns nonzero if o was resurrected. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeatures) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception and temporarily bump the refcount so
     * reference juggling inside __dealloc__ cannot re-enter deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for GridLicensableFeatures: visit the type itself (heap-type
 * support) and the _owner reference. Returns the first nonzero visit result. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyObject *o, visitproc v, void *a) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)o;
  int rc = __Pyx_call_type_traverse(o, 1, v, a);
  if (rc != 0)
    return rc;
  if (self->_owner != NULL) {
    rc = v(self->_owner, a);
    if (rc != 0)
      return rc;
  }
  return 0;
}

/* GC clear for GridLicensableFeatures: reset _owner to None, releasing the
 * previously held reference afterwards so the slot is never left dangling. */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *self =
      (struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *)o;
  PyObject *old_owner = (PyObject *)self->_owner;
  Py_INCREF(Py_None);
  self->_owner = Py_None;
  Py_XDECREF(old_owner);
  return 0;
}

/* mp_ass_subscript thunk: obj[i] = v forwards to __setitem__; deletion
 * (v == NULL) raises NotImplementedError naming the concrete type. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, type_name);
    __Pyx_DECREF_TypeName(type_name);
    return -1;
  }
  return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_11__setitem__(o, i, v);
}

/* Rich comparison for GridLicensableFeatures. Only __eq__ is generated;
 * != is derived by negating it, everything else is NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicensableFeatures(PyObject *o1, PyObject *o2, int op) {
  PyObject *eq_result;
  int is_true;
  if (op == Py_EQ)
    return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_9__eq__(o1, o2);
  if (op != Py_NE)
    return __Pyx_NewRef(Py_NotImplemented);
  /* Py_NE: invert the truth value of __eq__, propagating errors and
   * NotImplemented unchanged. */
  eq_result = __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_9__eq__(o1, o2);
  if (eq_result == NULL || eq_result == Py_NotImplemented)
    return eq_result;
  is_true = __Pyx_PyObject_IsTrue(eq_result);
  Py_DECREF(eq_result);
  if (is_true < 0)
    return NULL;
  eq_result = is_true ? Py_False : Py_True;
  Py_INCREF(eq_result);
  return eq_result;
}

/* Property thunks for GridLicensableFeatures. Each getter forwards to the
 * Cython-generated __get__; each setter forwards to __set__ and rejects
 * attribute deletion (v == NULL) with NotImplementedError. */

/* Read-only: the raw pointer address of the wrapped struct. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_grid_licensable_features(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_grid_licensable_features(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_24grid_licensable_features_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_is_grid_license_supported(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_is_grid_license_supported(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25is_grid_license_supported_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_licensable_features_count(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_licensable_features_count(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_25licensable_features_count_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for GridLicensableFeatures: from_data / from_ptr alternate
 * constructors plus pickling hooks. Zero-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_GridLicensableFeatures[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_22GridLicensableFeatures_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Property table for GridLicensableFeatures; `ptr` is read-only. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_GridLicensableFeatures[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"grid_licensable_features", __pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_grid_licensable_features, __pyx_setprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_grid_licensable_features, PyDoc_STR("GridLicensableFeature: "), 0},
  {"is_grid_license_supported", __pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_is_grid_license_supported, __pyx_setprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_is_grid_license_supported, PyDoc_STR("int: "), 0},
  {"licensable_features_count", __pyx_getprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_licensable_features_count, __pyx_setprop_4cuda_8bindings_5_nvml_22GridLicensableFeatures_licensable_features_count, PyDoc_STR("int: "), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API / heap types): GridLicensableFeatures is built
 * at runtime from this slot array + PyType_Spec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_doc, (void *)PyDoc_STR("GridLicensableFeatures()\n\nEmpty-initialize an instance of `nvmlGridLicensableFeatures_t`.\n\n\n.. seealso:: `nvmlGridLicensableFeatures_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeatures},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures_spec = {
  "cuda.bindings._nvml.GridLicensableFeatures",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures_slots,
};
#else
/* Static-type path: the same slots as statically-initialized structures. */
static PyNumberMethods __pyx_tp_as_number_GridLicensableFeatures = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

static PyMappingMethods __pyx_tp_as_mapping_GridLicensableFeatures = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*mp_ass_subscript*/
};

static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""GridLicensableFeatures", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_GridLicensableFeatures, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_GridLicensableFeatures, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("GridLicensableFeatures()\n\nEmpty-initialize an instance of `nvmlGridLicensableFeatures_t`.\n\n\n.. seealso:: `nvmlGridLicensableFeatures_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_22GridLicensableFeatures_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_GridLicensableFeatures, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
/* Single shared vtable instance for the NvLinkInfo_v2 type. */
static struct __pyx_vtabstruct_4cuda_8bindings_5_nvml_NvLinkInfo_v2 __pyx_vtable_4cuda_8bindings_5_nvml_NvLinkInfo_v2;

/* tp_new: allocate an NvLinkInfo_v2 instance, install its vtable pointer,
 * and initialize the _owner slot to None. Returns NULL on failure. */
static PyObject *__pyx_tp_new_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)o);
  p->__pyx_vtab = __pyx_vtabptr_4cuda_8bindings_5_nvml_NvLinkInfo_v2;
  /* _owner presumably keeps the object owning the underlying NVML buffer
   * alive — None until from_ptr/from_data assigns one (TODO confirm). */
  p->_owner = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for NvLinkInfo_v2: run tp_finalize if present, untrack from
 * the GC, invoke the user-level __dealloc__, drop _owner, and free. */
static void __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyObject *o) {
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)o;
  #if CYTHON_USE_TP_FINALIZE
  /* Only call the finalizer from the exact dealloc it was generated for;
   * PyObject_CallFinalizerFromDealloc returns nonzero if o was resurrected. */
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvLinkInfo_v2) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any pending exception and temporarily bump the refcount so
     * reference juggling inside __dealloc__ cannot re-enter deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3__dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->_owner);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for NvLinkInfo_v2: visit the type itself (heap-type support)
 * and the _owner reference. */
static int __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->_owner) {
    e = (*v)(p->_owner, a); if (e) return e;
  }
  return 0;
}

/* GC clear for NvLinkInfo_v2: replace _owner with None, then release the
 * old reference (swap-before-decref keeps the slot valid throughout). */
static int __pyx_tp_clear_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *p = (struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *)o;
  tmp = ((PyObject*)p->_owner);
  p->_owner = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* mp_ass_subscript thunk for NvLinkInfo_v2: obj[i] = v forwards to
 * __setitem__; deletion (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_11__setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* Rich comparison for NvLinkInfo_v2: only __eq__ is generated; != is
 * derived by negating __eq__; other operators return NotImplemented. */
static PyObject *__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvLinkInfo_v2(PyObject *o1, PyObject *o2, int op) {
  switch (op) {
    case Py_EQ: {
      return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_9__eq__(o1, o2);
    }
    case Py_NE: {
      PyObject *ret;
      ret = __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_9__eq__(o1, o2);
      /* Propagate NULL (error) and NotImplemented unchanged. */
      if (likely(ret && ret != Py_NotImplemented)) {
        int b = __Pyx_PyObject_IsTrue(ret);
        Py_DECREF(ret);
        if (unlikely(b < 0)) return NULL;
        ret = (b) ? Py_False : Py_True;
        Py_INCREF(ret);
      }
      return ret;
    }
    default: {
      return __Pyx_NewRef(Py_NotImplemented);
    }
  }
}

/* Property thunks for NvLinkInfo_v2. Each getter forwards to the
 * Cython-generated __get__; each setter forwards to __set__ and rejects
 * attribute deletion (v == NULL) with NotImplementedError. */

/* Read-only: the raw pointer address of the wrapped struct. */
static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_ptr(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_3ptr_1__get__(o);
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_firmware_info(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_firmware_info(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13firmware_info_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_version(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_version(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7version_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

static PyObject *__pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_is_nvle_enabled(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_1__get__(o);
}

static int __pyx_setprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_is_nvle_enabled(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15is_nvle_enabled_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}

/* Method table for NvLinkInfo_v2: from_data / from_ptr alternate
 * constructors plus pickling hooks. Zero-terminated. */
static PyMethodDef __pyx_methods_4cuda_8bindings_5_nvml_NvLinkInfo_v2[] = {
  {"from_data", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13from_data, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_12from_data},
  {"from_ptr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15from_ptr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_14from_ptr},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_17__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_16__reduce_cython__},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_19__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_18__setstate_cython__},
  {0, 0, 0, 0} /* sentinel */
};

/* Property table for NvLinkInfo_v2; `ptr` is read-only. */
static struct PyGetSetDef __pyx_getsets_4cuda_8bindings_5_nvml_NvLinkInfo_v2[] = {
  {"ptr", __pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_ptr, 0, PyDoc_STR("Get the pointer address to the data as Python :class:`int`."), 0},
  {"firmware_info", __pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_firmware_info, __pyx_setprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_firmware_info, PyDoc_STR("NvlinkFirmwareInfo: OUT - NVLINK Firmware info."), 0},
  {"version", __pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_version, __pyx_setprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_version, PyDoc_STR("int: IN - the API version number."), 0},
  {"is_nvle_enabled", __pyx_getprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_is_nvle_enabled, __pyx_setprop_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_is_nvle_enabled, PyDoc_STR("int: OUT - NVLINK encryption enablement."), 0},
  {0, 0, 0, 0, 0} /* sentinel */
};
#if CYTHON_USE_TYPE_SPECS
/* Type-spec path (Limited API / heap types): NvLinkInfo_v2 is built at
 * runtime from this slot array + PyType_Spec. */
static PyType_Slot __pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_repr, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_5__repr__},
  {Py_nb_int, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7__int__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_doc, (void *)PyDoc_STR("NvLinkInfo_v2()\n\nEmpty-initialize an instance of `nvmlNvLinkInfo_v2_t`.\n\n\n.. seealso:: `nvmlNvLinkInfo_v2_t`")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_clear, (void *)__pyx_tp_clear_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_richcompare, (void *)__pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_methods, (void *)__pyx_methods_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_getset, (void *)__pyx_getsets_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {Py_tp_init, (void *)__pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_1__init__},
  {Py_tp_new, (void *)__pyx_tp_new_4cuda_8bindings_5_nvml_NvLinkInfo_v2},
  {0, 0}, /* sentinel */
};
static PyType_Spec __pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2_spec = {
  "cuda.bindings._nvml.NvLinkInfo_v2",
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2_slots,
};
#else

/* Static-type path: number protocol for NvLinkInfo_v2 — only nb_int is
 * populated (int(obj) returns the pointer address). */
static PyNumberMethods __pyx_tp_as_number_NvLinkInfo_v2 = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_bool*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_7__int__, /*nb_int*/
  0, /*nb_long (reserved)*/
  0, /*nb_float*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
  0, /*nb_index*/
  0, /*nb_matrix_multiply*/
  0, /*nb_inplace_matrix_multiply*/
};

/* Mapping protocol: only item assignment is supported (no len / getitem
 * at the C slot level). */
static PyMappingMethods __pyx_tp_as_mapping_NvLinkInfo_v2 = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  __pyx_mp_ass_subscript_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*mp_ass_subscript*/
};

/* Statically-allocated PyTypeObject for NvLinkInfo_v2 (non-type-specs build).
 * Mirrors the PyType_Spec branch above: same flags, methods, getsets, and
 * lifecycle slots; the number/mapping sub-tables are the statics defined
 * immediately above. Trailing slots are guarded by version/implementation
 * preprocessor checks because PyTypeObject's layout grew across CPython
 * releases. */
static PyTypeObject __pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2 = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""NvLinkInfo_v2", /*tp_name*/
  sizeof(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_5__repr__, /*tp_repr*/
  &__pyx_tp_as_number_NvLinkInfo_v2, /*tp_as_number*/
  0, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_NvLinkInfo_v2, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  PyDoc_STR("NvLinkInfo_v2()\n\nEmpty-initialize an instance of `nvmlNvLinkInfo_v2_t`.\n\n\n.. seealso:: `nvmlNvLinkInfo_v2_t`"), /*tp_doc*/
  __pyx_tp_traverse_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_traverse*/
  __pyx_tp_clear_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_clear*/
  __pyx_tp_richcompare_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_pw_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_1__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_4cuda_8bindings_5_nvml_NvLinkInfo_v2, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_array __pyx_vtable_array;

/* tp_new for the internal cython.view.array type: allocates the instance,
 * installs the vtable, None-initializes the object fields, then runs
 * __cinit__ with the caller's args.  On __cinit__ failure the half-built
 * object is released and NULL is returned with the exception set. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_array_obj *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_array_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_array;
  /* Object fields start as None so dealloc/clear are always safe. */
  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
  return o;
  bad:
  Py_DECREF(o); o = 0;
  return NULL;
}

static void __pyx_tp_dealloc_array(PyObject *o) {
  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && (!PyType_IS_GC(Py_TYPE(o)) || !__Pyx_PyObject_GC_IsFinalized(o))) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_array) {
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_array___dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->mode);
  Py_CLEAR(p->_format);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  Py_DECREF(tp);
  #endif
}

/* sq_item for cython.view.array: boxes the index and delegates to the
 * mapping-protocol mp_subscript so indexing has a single implementation. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for cython.view.array: v != NULL means item assignment
 * (delegated to __setitem__); v == NULL means "del o[i]", which this type
 * does not support and reports as NotImplementedError. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_array___setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* tp_getattro for cython.view.array: try normal attribute lookup first and
 * fall back to the class's __getattr__ only on AttributeError, mirroring
 * Python-level __getattr__ semantics. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
  PyObject *v = PyObject_GenericGetAttr(o, n);
  if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear();
    v = __pyx_array___getattr__(o, n);
  }
  return v;
}

/* Property getter trampoline for array.memview (read-only property). */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}

/* Method table for cython.view.array.  __getattr__ is exposed with
 * METH_COEXIST so it stays callable alongside the tp_getattro slot. */
static PyMethodDef __pyx_methods_array[] = {
  {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_array_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_array_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {0, 0, 0, 0}
};

/* Getset table: exposes the single read-only "memview" property. */
static struct PyGetSetDef __pyx_getsets_array[] = {
  {"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for cython.view.array, in two build variants:
 *   - CYTHON_USE_TYPE_SPECS: PyType_Slot array + PyType_Spec (heap type);
 *   - otherwise: a statically-allocated PyTypeObject.
 * Both variants expose the same behavior: sequence/mapping indexing, buffer
 * protocol (getbuffer only), custom getattro, methods and the "memview"
 * property. */
#if CYTHON_USE_TYPE_SPECS
#if !CYTHON_COMPILING_IN_LIMITED_API

static PyBufferProcs __pyx_tp_as_buffer_array = {
  __pyx_array_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
#endif
static PyType_Slot __pyx_type___pyx_array_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_array},
  {Py_sq_length, (void *)__pyx_array___len__},
  {Py_sq_item, (void *)__pyx_sq_item_array},
  {Py_mp_length, (void *)__pyx_array___len__},
  {Py_mp_subscript, (void *)__pyx_array___getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_array},
  {Py_tp_getattro, (void *)__pyx_tp_getattro_array},
  #if defined(Py_bf_getbuffer)
  {Py_bf_getbuffer, (void *)__pyx_array_getbuffer},
  #endif
  {Py_tp_methods, (void *)__pyx_methods_array},
  {Py_tp_getset, (void *)__pyx_getsets_array},
  {Py_tp_new, (void *)__pyx_tp_new_array},
  {0, 0},
};
static PyType_Spec __pyx_type___pyx_array_spec = {
  "cuda.bindings._nvml.array",
  sizeof(struct __pyx_array_obj),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_SEQUENCE,
  __pyx_type___pyx_array_slots,
};
#else

static PySequenceMethods __pyx_tp_as_sequence_array = {
  __pyx_array___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_array, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_array = {
  __pyx_array___len__, /*mp_length*/
  __pyx_array___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};

static PyBufferProcs __pyx_tp_as_buffer_array = {
  __pyx_array_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};

static PyTypeObject __pyx_type___pyx_array = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""array", /*tp_name*/
  sizeof(struct __pyx_array_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_array, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  0, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_array, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_array, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  __pyx_tp_getattro_array, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_array, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_SEQUENCE, /*tp_flags*/
  0, /*tp_doc*/
  0, /*tp_traverse*/
  0, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_array, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_array, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_array, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif

/* tp_new for the internal memoryview Enum sentinel type (generic/strided/
 * indirect/contiguous markers): allocate and None-initialize the name field.
 * Args/kwargs are ignored; __init__ fills in the name. */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_MemviewEnum_obj *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_MemviewEnum_obj *)o);
  p->name = Py_None; Py_INCREF(Py_None);
  return o;
}

/* tp_dealloc for the Enum sentinel type: optional finalize, GC-untrack,
 * release the name reference, then free via the type's tp_free. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_Enum) {
      /* Finalizer may resurrect the object; if so, abandon deallocation. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->name);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for the Enum sentinel: visit the type (heap types participate
 * in GC) and the single owned reference, p->name. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->name) {
    e = (*v)(p->name, a); if (e) return e;
  }
  return 0;
}

/* GC clear for the Enum sentinel: reset name to None (never NULL, so other
 * code can keep assuming name is a valid object). */
static int __pyx_tp_clear_Enum(PyObject *o) {
  PyObject* tmp;
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  tmp = ((PyObject*)p->name);
  p->name = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}

/* Method table for the Enum sentinel: only pickling-support stubs. */
static PyMethodDef __pyx_methods_Enum[] = {
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {0, 0, 0, 0}
};
/* Type definition for the internal Enum sentinel, in the usual two build
 * variants (PyType_Spec vs. static PyTypeObject).  The type is GC-enabled
 * and defines __repr__ and __init__ plus the pickling method stubs. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type___pyx_MemviewEnum_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_Enum},
  {Py_tp_repr, (void *)__pyx_MemviewEnum___repr__},
  {Py_tp_traverse, (void *)__pyx_tp_traverse_Enum},
  {Py_tp_clear, (void *)__pyx_tp_clear_Enum},
  {Py_tp_methods, (void *)__pyx_methods_Enum},
  {Py_tp_init, (void *)__pyx_MemviewEnum___init__},
  {Py_tp_new, (void *)__pyx_tp_new_Enum},
  {0, 0},
};
static PyType_Spec __pyx_type___pyx_MemviewEnum_spec = {
  "cuda.bindings._nvml.Enum",
  sizeof(struct __pyx_MemviewEnum_obj),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type___pyx_MemviewEnum_slots,
};
#else

static PyTypeObject __pyx_type___pyx_MemviewEnum = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""Enum", /*tp_name*/
  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_MemviewEnum___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_Enum, /*tp_traverse*/
  __pyx_tp_clear_Enum, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_Enum, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  __pyx_MemviewEnum___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_Enum, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;

/* tp_new for the internal memoryview type: allocate, install the vtable,
 * None-initialize object fields, NULL the Py_buffer owner, then run
 * __cinit__.  On __cinit__ failure the half-built object is released. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryview_obj *p;
  PyObject *o;
  o = __Pyx_AllocateExtensionType(t, 0);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryview_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_memoryview;
  p->obj = Py_None; Py_INCREF(Py_None);
  p->_size = Py_None; Py_INCREF(Py_None);
  /* view.obj is the Py_buffer's exporter; starts unset. */
  p->view.obj = NULL;
  if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
  return o;
  bad:
  Py_DECREF(o); o = 0;
  return NULL;
}

/* tp_dealloc for the internal memoryview: optional finalize, GC-untrack,
 * run __dealloc__ (exception-preserving, with a temporary refcount bump to
 * prevent recursive deallocation), drop object fields, free. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_memoryview) {
      /* Finalizer may resurrect the object; if so, abandon deallocation. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_memoryview___dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->obj);
  Py_CLEAR(p->_size);
  PyTypeObject *tp = Py_TYPE(o);
  #if CYTHON_USE_TYPE_SLOTS
  (*tp->tp_free)(o);
  #else
  {
    freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);
    if (tp_free) tp_free(o);
  }
  #endif
  #if CYTHON_USE_TYPE_SPECS
  /* Heap types own a reference to their type object. */
  Py_DECREF(tp);
  #endif
}

/* GC traverse for the internal memoryview: visit the type, the exporting
 * object, the cached size, and the Py_buffer's owner reference. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  {
    e = __Pyx_call_type_traverse(o, 1, v, a);
    if (e) return e;
  }
  if (p->obj) {
    e = (*v)(p->obj, a); if (e) return e;
  }
  if (p->_size) {
    e = (*v)(p->_size, a); if (e) return e;
  }
  if (p->view.obj) {
    e = (*v)(p->view.obj, a); if (e) return e;
  }
  return 0;
}

/* GC clear for the internal memoryview: reset obj/_size to None (kept
 * non-NULL by convention) and drop the Py_buffer owner. */
static int __pyx_tp_clear_memoryview(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  tmp = ((PyObject*)p->obj);
  p->obj = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_size);
  p->_size = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  Py_CLEAR(p->view.obj);
  return 0;
}

/* sq_item for the internal memoryview: boxes the index and delegates to
 * mp_subscript so indexing has a single implementation. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;
  #if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  #else
  r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);
  #endif
  Py_DECREF(x);
  return r;
}

/* mp_ass_subscript for the internal memoryview: v != NULL is item
 * assignment (__setitem__); v == NULL ("del o[i]") is unsupported and
 * raises NotImplementedError. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_memoryview___setitem__(o, i, v);
  }
  else {
    __Pyx_TypeName o_type_name;
    o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);
    __Pyx_DECREF_TypeName(o_type_name);
    return -1;
  }
}

/* Property getter trampolines for the internal memoryview's read-only
 * properties; each forwards to the corresponding Cython-generated __get__. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}

/* Method table for the internal memoryview. */
static PyMethodDef __pyx_methods_memoryview[] = {
  {"is_c_contig", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_is_c_contig, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"is_f_contig", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_is_f_contig, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"copy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_copy, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"copy_fortran", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_copy_fortran, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryview_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryview_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {0, 0, 0, 0}
};

/* Getset table exposing the read-only memoryview properties above. */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0},
  {"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0},
  {"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0},
  {"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0},
  {"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0},
  {"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0},
  {"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0},
  {"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0},
  {"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type definition for the internal memoryview type, in two build variants
 * (PyType_Slot/PyType_Spec vs. static PyTypeObject).  Both expose repr/str,
 * sequence and mapping indexing, the buffer protocol (getbuffer only), GC
 * support, methods, and the read-only properties. */
#if CYTHON_USE_TYPE_SPECS
#if !CYTHON_COMPILING_IN_LIMITED_API

static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
#endif
static PyType_Slot __pyx_type___pyx_memoryview_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc_memoryview},
  {Py_tp_repr, (void *)__pyx_memoryview___repr__},
  {Py_sq_length, (void *)__pyx_memoryview___len__},
  {Py_sq_item, (void *)__pyx_sq_item_memoryview},
  {Py_mp_length, (void *)__pyx_memoryview___len__},
  {Py_mp_subscript, (void *)__pyx_memoryview___getitem__},
  {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_memoryview},
  {Py_tp_str, (void *)__pyx_memoryview___str__},
  #if defined(Py_bf_getbuffer)
  {Py_bf_getbuffer, (void *)__pyx_memoryview_getbuffer},
  #endif
  {Py_tp_traverse, (void *)__pyx_tp_traverse_memoryview},
  {Py_tp_clear, (void *)__pyx_tp_clear_memoryview},
  {Py_tp_methods, (void *)__pyx_methods_memoryview},
  {Py_tp_getset, (void *)__pyx_getsets_memoryview},
  {Py_tp_new, (void *)__pyx_tp_new_memoryview},
  {0, 0},
};
static PyType_Spec __pyx_type___pyx_memoryview_spec = {
  "cuda.bindings._nvml.memoryview",
  sizeof(struct __pyx_memoryview_obj),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC,
  __pyx_type___pyx_memoryview_slots,
};
#else

static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};

static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};

static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""memoryview", /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;

/* tp_new for _memoryviewslice (subclass of the internal memoryview):
 * delegate base construction, then override the vtable with the subclass's
 * and initialize the slice-specific fields. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryviewslice_obj *p;
  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryviewslice_obj *)o);
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
  /* C++ build: placement-construct the C struct member before use. */
  __Pyx_default_placement_construct(&(p->from_slice));
  p->from_object = Py_None; Py_INCREF(Py_None);
  p->from_slice.memview = NULL;
  return o;
}

/* tp_dealloc for _memoryviewslice: optional finalize, untrack, run
 * __dealloc__ (exception-preserving, with temporary refcount bump), destroy
 * the subclass fields, then re-track and chain to the base dealloc (which
 * untracks again and frees). */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) {
    if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc__memoryviewslice) {
      /* Finalizer may resurrect the object; if so, abandon deallocation. */
      if (PyObject_CallFinalizerFromDealloc(o)) return;
    }
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_memoryviewslice___dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  /* C++ build: run the member's destructor symmetric to placement-new. */
  __Pyx_call_destructor(p->from_slice);
  Py_CLEAR(p->from_object);
  PyObject_GC_Track(o);
  __pyx_tp_dealloc_memoryview(o);
}

/* GC traverse for _memoryviewslice: chain to the base traverse, then visit
 * the subclass's from_object reference. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
  if (p->from_object) {
    e = (*v)(p->from_object, a); if (e) return e;
  }
  return 0;
}

/* GC clear for _memoryviewslice: chain to the base clear, reset from_object
 * to None, and release the slice's memview reference. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XCLEAR_MEMVIEW(&p->from_slice, 1);
  return 0;
}

/* Method table for _memoryviewslice: only pickling-support stubs; everything
 * else is inherited from the memoryview base type. */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0},
  {0, 0, 0, 0}
};
/* Type definition for _memoryviewslice, in the two build variants.  The
 * type inherits from the internal memoryview (tp_base is wired up at module
 * init); repr/str slots are only filled on PyPy, where slot inheritance
 * differs. */
#if CYTHON_USE_TYPE_SPECS
static PyType_Slot __pyx_type___pyx_memoryviewslice_slots[] = {
  {Py_tp_dealloc, (void *)__pyx_tp_dealloc__memoryviewslice},
  {Py_tp_doc, (void *)PyDoc_STR("Internal class for passing memoryview slices to Python")},
  {Py_tp_traverse, (void *)__pyx_tp_traverse__memoryviewslice},
  {Py_tp_clear, (void *)__pyx_tp_clear__memoryviewslice},
  {Py_tp_methods, (void *)__pyx_methods__memoryviewslice},
  {Py_tp_new, (void *)__pyx_tp_new__memoryviewslice},
  {0, 0},
};
static PyType_Spec __pyx_type___pyx_memoryviewslice_spec = {
  "cuda.bindings._nvml._memoryviewslice",
  sizeof(struct __pyx_memoryviewslice_obj),
  0,
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE,
  __pyx_type___pyx_memoryviewslice_slots,
};
#else

static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "cuda.bindings._nvml.""_memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  0, /*tp_vectorcall_offset*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_as_async*/
  #if CYTHON_COMPILING_IN_PYPY || 0
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY || 0
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE, /*tp_flags*/
  PyDoc_STR("Internal class for passing memoryview slices to Python"), /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  #if !CYTHON_USE_TYPE_SPECS
  0, /*tp_dictoffset*/
  #endif
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if CYTHON_USE_TP_FINALIZE
  0, /*tp_finalize*/
  #else
  NULL, /*tp_finalize*/
  #endif
  #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800
  0, /*tp_vectorcall*/
  #endif
  #if __PYX_NEED_TP_PRINT_SLOT == 1
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030C0000
  0, /*tp_watched*/
  #endif
  #if PY_VERSION_HEX >= 0x030d00A4
  0, /*tp_versions_used*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000
  0, /*tp_pypy_flags*/
  #endif
};
#endif

/* Module-level method table: empty — all public API is exposed via classes
 * and module attributes set during init. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
/* #### Code section: initfunc_declarations ### */
/* Forward declarations of the module-initialization phases; each operates on
 * the per-module state and returns 0 on success, -1 on error. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_InitConstants(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_CreateCodeObjects(__pyx_mstatetype *__pyx_mstate); /*proto*/
/* #### Code section: init_module ### */

/* Initialise the module's PyObject* globals to Py_None, taking one owned
 * reference for each — presumably so later init code can unconditionally
 * replace/DECREF them without NULL checks. Always succeeds (returns 0). */
static int __Pyx_modinit_global_init_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
  /*--- Global init code ---*/
  Py_INCREF(Py_None); __pyx_collections_abc_Sequence = Py_None;
  Py_INCREF(Py_None); generic = Py_None;
  Py_INCREF(Py_None); strided = Py_None;
  Py_INCREF(Py_None); indirect = Py_None;
  Py_INCREF(Py_None); contiguous = Py_None;
  Py_INCREF(Py_None); indirect_contiguous = Py_None;
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Variable-export hook of the module-init sequence. This module exports no
 * C-level variables, so the body is a no-op that always succeeds; the
 * function is kept so the init sequence has a uniform shape. */
static int __Pyx_modinit_variable_export_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
  /*--- Variable export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Export this module's C-level functions into its C-API dict (obtained via
 * __Pyx_ApiExport_GetApiDict) so that other Cython modules can call them
 * directly at C speed. Returns 0 on success, -1 with a Python exception set
 * on failure. */
static int __Pyx_modinit_function_export_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
  /*--- Function export code ---*/
  {
    /* Owned reference to the export dict; released at the end of the block. */
    __pyx_t_1 = __Pyx_ApiExport_GetApiDict(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* The C signatures and names of every exported function are packed into a
     * single interned bytes constant as NUL-separated strings: signatures
     * first, then the names starting at fixed byte offset 2771. */
    const char * __pyx_export_signature = __Pyx_PyBytes_AsString(__pyx_mstate_global->__pyx_kp_b_PyObject_int___pyx_skip_dispatch);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (unlikely(!__pyx_export_signature)) __PYX_ERR(0, 1, __pyx_L1_error)
    #endif
    const char * __pyx_export_name = __pyx_export_signature + 2771;
    /* NULL-terminated table of function pointers to export. Its order must
     * stay in lock-step with the packed name/signature strings above — do not
     * edit by hand; this file is generated by Cython. */
    void (*const __pyx_export_pointers[])(void) = {(void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_init_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_shutdown, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_capabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_key_rotation_threshold_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_settings, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_driver_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_hic_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_nvml_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_gpu_instance, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_error_string, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_compute_instance_destroy, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_compute_instance_get_info_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_clear_accounting_pids, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_clear_cpu_affinity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_discover_gpus, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_pids, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_addressing_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_attributes_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_auto_boosted_clocks_enabled, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_bar1_memory_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_board_part_number, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_bridge_chip_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_c2c_mode_info_v, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_capabilities, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_clk_mon_status, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_clock_offsets, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_compute_running_processes_v3, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_attestation_report, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_gpu_certificate, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_mem_size_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_conf_compute_protected_memory_usage, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_cooler_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_cuda_compute_capability, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_current_clock_freqs, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_decoder_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_dram_encryption_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_driver_model_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_dynamic_pstates_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_ecc_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_sessions, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_stats, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_rpm, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_sessions, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_fbc_stats, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_min_max_vf_offset, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_fabric_info_v, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_operation_mode, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_grid_licensable_features_v4, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_image_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_jpg_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_margin_temperature, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_min_max_vf_offset, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_info_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_mig_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_fan_speed, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_mps_compute_running_processes_v3, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_name, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_bw_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_supported_bw_modes, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_ofa_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_ext, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pci_info_v3, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pdi, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_performance_modes, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pgpu_metadata_string, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_platform_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit_constraints, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_mizer_mode_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_remapped_rows, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_repair_status, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_row_remapper_histogram, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_serial, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_sram_ecc_error_status, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_memory_clocks, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_utilization_rates, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_uuid, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vbios_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_heterogeneous_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_capabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_log, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_scheduler_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_reset_gpu_locked_clocks, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_reset_memory_locked_clocks, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_cpu_affinity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_validate_inforom, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_event_set_free, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpm_query_device_support, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_destroy, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_heterogeneous_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_log, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_vgpu_scheduler_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_set_vgpu_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_event_set_create, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_event_set_free, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_event_set_wait, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_register_events, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_key_rotation_threshold_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_fan_speed_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_led_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_psu_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_unit_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_clear_ecc_error_counts, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_modify_drain_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_accounting_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_auto_boosted_clocks_enabled, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_compute_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_ecc_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_operation_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_persistence_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_virtualization_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_set_led_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_min_max_clock_of_p_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_remove_gpu_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_api_restriction, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_capabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_temperature_threshold, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_default_auto_boosted_clocks_enabled, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_driver_model, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_activate_preset_profile, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_set_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_power_smoothing_update_preset_profile_param, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_read_write_prm_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_clock_offsets, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_dram_encryption_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_bw_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_nvlink_device_low_power_threshold, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_power_mizer_mode_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_heterogeneous_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_vgpu_scheduler_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_workload_power_profile_clear_requested_profiles, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpm_sample_get, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_heterogeneous_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_set_vgpu_scheduler_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_process_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_process_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_utilization, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_conf_compute_unprotected_mem_size, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_register_events, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_stats, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_possible_placements_v2, 
(void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_by_id_v, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_profile_info_v, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_pci_info_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_graphics_clocks, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_thermal_settings, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_type_supported_placements, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_reset_nvlink_error_counters, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_default_fan_speed_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_power_management_limit, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_event_set_wait_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpm_set_streaming_enabled, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_possible_placements, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpm_mig_sample_get, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_cpu_affinity_within_scope, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_affinity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_fan_control_policy, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_fan_speed_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_gpu_locked_clocks, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_memory_locked_clocks, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_profile_info_v, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_info_by_index, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_init_with_flags, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_process_name, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_set_conf_compute_gpus_ready_state, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_set_nvlink_bw_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_clear_accounting_pids, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_pids, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_sessions, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_stats, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_sessions, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fbc_stats, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_pci_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_info_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_mdev_uuid, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_placement_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_runtime_state_size, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_uuid, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_vm_driver_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_bar1_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_class, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_device_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_name, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_license, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_stats, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_set_encoder_capacity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_resolution, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gsp_firmware_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_cuda_driver_version_v2, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_brand, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_compute_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_default_ecc_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_display_active, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_display_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpc_clk_vf_offset, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_host_vgpu_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_mem_clk_vf_offset, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_performance_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_persistence_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_retired_pages_pending_status, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_virtualization_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_query_drain_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_api_restriction, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_topology_common_ancestor, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_on_same_board, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_p2p_status, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_remote_device_type, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_set_mig_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_accounting_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_ecc_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_pci_bus_id_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_serial, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuid, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_event_set_create, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_device_handle_from_mig_device_handle, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_uuidv, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_by_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_mig_device_handle_by_index, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_by_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_create_gpu_instance_with_placement, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_create_compute_instance_with_placement, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_handle_by_index_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_handle_by_index, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_current_clocks_event_reasons, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_clocks_event_reasons, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_supported_event_types, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_total_energy_consumption, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_total_ecc_errors, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_error_counter, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_error_counter, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_fb_usage, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_fb_reservation, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_framebuffer_size, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gsp_heap_size, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_count_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_get_excluded_device_count, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_conf_compute_gpus_ready_state, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_system_get_nvlink_bw_mode, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_count, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_get_vgpu_driver_capabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_accounting_buffer_size, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_adaptive_clock_info_status, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_architecture, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_board_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_bus_type, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_compute_instance_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_generation, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_curr_pcie_link_width, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_enforced_power_limit, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_max_pcie_link_generation, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_index, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_inforom_configuration_checksum, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_irq_num, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_max_mig_device_count, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_generation, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_max_pcie_link_width, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_memory_bus_width, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_minor_number, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_module_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_multi_gpu_board, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_num_fans, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_num_gpu_cores, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_numa_node_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_link_max_speed, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_replay_counter, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_speed, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_default_limit, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_management_limit, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_source, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_power_usage, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_is_mig_device_handle, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpm_query_if_streaming_enabled, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_clock_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_encoder_capacity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_max_clock_info, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_max_customer_boost_clock, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_pcie_throughput, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_temperature_threshold, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_vgpu_capabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_clock, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_fan_control_policy_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_fan_speed_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_gpu_instance_remaining_capacity, (void 
(*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_version, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_target_fan_speed, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_gpu_instance_get_compute_instance_remaining_capacity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_unit_get_temperature, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_nvlink_capability, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_encoder_capacity, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_frame_rate_limit, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_gpu_instance_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_license_status, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_instance_get_type, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_frame_rate_limit, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_gpu_instance_profile_id, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_max_instances_per_vm, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_num_display_heads, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_vgpu_type_get_capabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_5_nvml_device_get_last_bbx_flush_time, (void (*)(void)) NULL};
    void (*const *__pyx_export_pointer)(void) = __pyx_export_pointers;
    const char *__pyx_export_current_signature = __pyx_export_signature;
    /* Walk the parallel pointer/name/signature sequences, registering each
     * function under its name in the export dict. */
    while (*__pyx_export_pointer) {
      if (__Pyx_ExportFunction(__pyx_t_1, __pyx_export_name, *__pyx_export_pointer, __pyx_export_current_signature) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
      ++__pyx_export_pointer;
      /* Step past the terminating NUL to the next packed string. */
      __pyx_export_name = strchr(__pyx_export_name, '\0') + 1;
      __pyx_export_signature = strchr(__pyx_export_signature, '\0') + 1;
      /* An empty signature entry means "same signature as the previous
       * function" (deduplication); only switch on a non-empty entry. */
      if (*__pyx_export_signature != '\0') __pyx_export_current_signature = __pyx_export_signature;
    }
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_RefNannyFinishContext();
  return -1;
}

static int __Pyx_modinit_type_init_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
  /*--- Type init code ---*/
  __pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfoExt_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_PciInfoExt_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_PciInfoExt_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfoExt_v1 *))__pyx_f_4cuda_8bindings_5_nvml_13PciInfoExt_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1)) __PYX_ERR(0, 1354, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1) < (0)) __PYX_ERR(0, 1354, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1 = &__pyx_type_4cuda_8bindings_5_nvml_PciInfoExt_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1) < (0)) __PYX_ERR(0, 1354, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfoExt_v1) < (0)) __PYX_ERR(0, 1354, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1) < (0)) __PYX_ERR(0, 1354, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_PciInfoExt_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1) < (0)) __PYX_ERR(0, 1354, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1) < (0)) __PYX_ERR(0, 1354, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_PciInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_PciInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_PciInfo *))__pyx_f_4cuda_8bindings_5_nvml_7PciInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_PciInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo)) __PYX_ERR(0, 1572, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_PciInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo) < (0)) __PYX_ERR(0, 1572, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo = &__pyx_type_4cuda_8bindings_5_nvml_PciInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo) < (0)) __PYX_ERR(0, 1572, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_PciInfo) < (0)) __PYX_ERR(0, 1572, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo) < (0)) __PYX_ERR(0, 1572, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_PciInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo) < (0)) __PYX_ERR(0, 1572, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo) < (0)) __PYX_ERR(0, 1572, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_Utilization = &__pyx_vtable_4cuda_8bindings_5_nvml_Utilization;
  __pyx_vtable_4cuda_8bindings_5_nvml_Utilization._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_Utilization *))__pyx_f_4cuda_8bindings_5_nvml_11Utilization__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_Utilization_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization)) __PYX_ERR(0, 1767, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_Utilization_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization) < (0)) __PYX_ERR(0, 1767, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization = &__pyx_type_4cuda_8bindings_5_nvml_Utilization;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization) < (0)) __PYX_ERR(0, 1767, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_vtabptr_4cuda_8bindings_5_nvml_Utilization) < (0)) __PYX_ERR(0, 1767, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization) < (0)) __PYX_ERR(0, 1767, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_Utilization, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization) < (0)) __PYX_ERR(0, 1767, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization) < (0)) __PYX_ERR(0, 1767, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_Memory = &__pyx_vtable_4cuda_8bindings_5_nvml_Memory;
  __pyx_vtable_4cuda_8bindings_5_nvml_Memory._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory *))__pyx_f_4cuda_8bindings_5_nvml_6Memory__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_Memory_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory)) __PYX_ERR(0, 1900, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_Memory_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory) < (0)) __PYX_ERR(0, 1900, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory = &__pyx_type_4cuda_8bindings_5_nvml_Memory;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory) < (0)) __PYX_ERR(0, 1900, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_vtabptr_4cuda_8bindings_5_nvml_Memory) < (0)) __PYX_ERR(0, 1900, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory) < (0)) __PYX_ERR(0, 1900, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_Memory, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory) < (0)) __PYX_ERR(0, 1900, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory) < (0)) __PYX_ERR(0, 1900, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_Memory_v2 = &__pyx_vtable_4cuda_8bindings_5_nvml_Memory_v2;
  __pyx_vtable_4cuda_8bindings_5_nvml_Memory_v2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_Memory_v2 *))__pyx_f_4cuda_8bindings_5_nvml_9Memory_v2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_Memory_v2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2)) __PYX_ERR(0, 2046, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_Memory_v2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2) < (0)) __PYX_ERR(0, 2046, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2 = &__pyx_type_4cuda_8bindings_5_nvml_Memory_v2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2) < (0)) __PYX_ERR(0, 2046, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_vtabptr_4cuda_8bindings_5_nvml_Memory_v2) < (0)) __PYX_ERR(0, 2046, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2) < (0)) __PYX_ERR(0, 2046, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_Memory_v2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2) < (0)) __PYX_ERR(0, 2046, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2) < (0)) __PYX_ERR(0, 2046, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_BAR1Memory = &__pyx_vtable_4cuda_8bindings_5_nvml_BAR1Memory;
  __pyx_vtable_4cuda_8bindings_5_nvml_BAR1Memory._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_BAR1Memory *))__pyx_f_4cuda_8bindings_5_nvml_10BAR1Memory__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_BAR1Memory_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory)) __PYX_ERR(0, 2201, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_BAR1Memory_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory) < (0)) __PYX_ERR(0, 2201, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory = &__pyx_type_4cuda_8bindings_5_nvml_BAR1Memory;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory) < (0)) __PYX_ERR(0, 2201, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_vtabptr_4cuda_8bindings_5_nvml_BAR1Memory) < (0)) __PYX_ERR(0, 2201, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory) < (0)) __PYX_ERR(0, 2201, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_BAR1Memory, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory) < (0)) __PYX_ERR(0, 2201, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory) < (0)) __PYX_ERR(0, 2201, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_ProcessInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_ProcessInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessInfo *))__pyx_f_4cuda_8bindings_5_nvml_11ProcessInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ProcessInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo)) __PYX_ERR(0, 2346, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ProcessInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo) < (0)) __PYX_ERR(0, 2346, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo = &__pyx_type_4cuda_8bindings_5_nvml_ProcessInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo) < (0)) __PYX_ERR(0, 2346, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessInfo) < (0)) __PYX_ERR(0, 2346, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo) < (0)) __PYX_ERR(0, 2346, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ProcessInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo) < (0)) __PYX_ERR(0, 2346, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo) < (0)) __PYX_ERR(0, 2346, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetail_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ProcessDetail_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ProcessDetail_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetail_v1 *))__pyx_f_4cuda_8bindings_5_nvml_16ProcessDetail_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1)) __PYX_ERR(0, 2517, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1) < (0)) __PYX_ERR(0, 2517, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ProcessDetail_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1) < (0)) __PYX_ERR(0, 2517, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetail_v1) < (0)) __PYX_ERR(0, 2517, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1) < (0)) __PYX_ERR(0, 2517, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ProcessDetail_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1) < (0)) __PYX_ERR(0, 2517, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1) < (0)) __PYX_ERR(0, 2517, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAttributes = &__pyx_vtable_4cuda_8bindings_5_nvml_DeviceAttributes;
  __pyx_vtable_4cuda_8bindings_5_nvml_DeviceAttributes._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAttributes *))__pyx_f_4cuda_8bindings_5_nvml_16DeviceAttributes__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes)) __PYX_ERR(0, 2703, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes) < (0)) __PYX_ERR(0, 2703, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes = &__pyx_type_4cuda_8bindings_5_nvml_DeviceAttributes;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes) < (0)) __PYX_ERR(0, 2703, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAttributes) < (0)) __PYX_ERR(0, 2703, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes) < (0)) __PYX_ERR(0, 2703, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_DeviceAttributes, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes) < (0)) __PYX_ERR(0, 2703, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes) < (0)) __PYX_ERR(0, 2703, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_C2cModeInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_C2cModeInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_C2cModeInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_C2cModeInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_14C2cModeInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1)) __PYX_ERR(0, 2911, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1) < (0)) __PYX_ERR(0, 2911, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_C2cModeInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1) < (0)) __PYX_ERR(0, 2911, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_C2cModeInfo_v1) < (0)) __PYX_ERR(0, 2911, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1) < (0)) __PYX_ERR(0, 2911, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_C2cModeInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1) < (0)) __PYX_ERR(0, 2911, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1) < (0)) __PYX_ERR(0, 2911, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_RowRemapperHistogramValues = &__pyx_vtable_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;
  __pyx_vtable_4cuda_8bindings_5_nvml_RowRemapperHistogramValues._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_RowRemapperHistogramValues *))__pyx_f_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues)) __PYX_ERR(0, 3035, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) < (0)) __PYX_ERR(0, 3035, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues = &__pyx_type_4cuda_8bindings_5_nvml_RowRemapperHistogramValues;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) < (0)) __PYX_ERR(0, 3035, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_vtabptr_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) < (0)) __PYX_ERR(0, 3035, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) < (0)) __PYX_ERR(0, 3035, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_RowRemapperHistogramValues, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) < (0)) __PYX_ERR(0, 3035, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues) < (0)) __PYX_ERR(0, 3035, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_BridgeChipInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_BridgeChipInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipInfo *))__pyx_f_4cuda_8bindings_5_nvml_14BridgeChipInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo)) __PYX_ERR(0, 3200, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo) < (0)) __PYX_ERR(0, 3200, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo = &__pyx_type_4cuda_8bindings_5_nvml_BridgeChipInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo) < (0)) __PYX_ERR(0, 3200, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipInfo) < (0)) __PYX_ERR(0, 3200, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo) < (0)) __PYX_ERR(0, 3200, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_BridgeChipInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo) < (0)) __PYX_ERR(0, 3200, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo) < (0)) __PYX_ERR(0, 3200, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_Value = &__pyx_vtable_4cuda_8bindings_5_nvml_Value;
  __pyx_vtable_4cuda_8bindings_5_nvml_Value._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_Value *))__pyx_f_4cuda_8bindings_5_nvml_5Value__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_Value_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value)) __PYX_ERR(0, 3346, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_Value_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value) < (0)) __PYX_ERR(0, 3346, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value = &__pyx_type_4cuda_8bindings_5_nvml_Value;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value) < (0)) __PYX_ERR(0, 3346, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_vtabptr_4cuda_8bindings_5_nvml_Value) < (0)) __PYX_ERR(0, 3346, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value) < (0)) __PYX_ERR(0, 3346, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_Value, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value) < (0)) __PYX_ERR(0, 3346, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Value) < (0)) __PYX_ERR(0, 3346, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod0 = &__pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod0;
  __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod0._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod0 *))__pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod0__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0) < (0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0 = &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod0;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0) < (0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod0) < (0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0) < (0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_py_anon_pod0, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0) < (0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0) < (0)) __PYX_ERR(0, 3536, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_CoolerInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_CoolerInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_CoolerInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_CoolerInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_13CoolerInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1)) __PYX_ERR(0, 3703, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1) < (0)) __PYX_ERR(0, 3703, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_CoolerInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1) < (0)) __PYX_ERR(0, 3703, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_CoolerInfo_v1) < (0)) __PYX_ERR(0, 3703, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1) < (0)) __PYX_ERR(0, 3703, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_CoolerInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1) < (0)) __PYX_ERR(0, 3703, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1) < (0)) __PYX_ERR(0, 3703, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_MarginTemperature_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_MarginTemperature_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_MarginTemperature_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_MarginTemperature_v1 *))__pyx_f_4cuda_8bindings_5_nvml_20MarginTemperature_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1)) __PYX_ERR(0, 3857, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1) < (0)) __PYX_ERR(0, 3857, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1 = &__pyx_type_4cuda_8bindings_5_nvml_MarginTemperature_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1) < (0)) __PYX_ERR(0, 3857, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_MarginTemperature_v1) < (0)) __PYX_ERR(0, 3857, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1) < (0)) __PYX_ERR(0, 3857, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_MarginTemperature_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1) < (0)) __PYX_ERR(0, 3857, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1) < (0)) __PYX_ERR(0, 3857, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonFaultInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_ClkMonFaultInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_ClkMonFaultInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonFaultInfo *))__pyx_f_4cuda_8bindings_5_nvml_15ClkMonFaultInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo)) __PYX_ERR(0, 3989, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo) < (0)) __PYX_ERR(0, 3989, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo = &__pyx_type_4cuda_8bindings_5_nvml_ClkMonFaultInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo) < (0)) __PYX_ERR(0, 3989, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonFaultInfo) < (0)) __PYX_ERR(0, 3989, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo) < (0)) __PYX_ERR(0, 3989, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ClkMonFaultInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo) < (0)) __PYX_ERR(0, 3989, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo) < (0)) __PYX_ERR(0, 3989, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ClockOffset_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ClockOffset_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ClockOffset_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ClockOffset_v1 *))__pyx_f_4cuda_8bindings_5_nvml_14ClockOffset_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1)) __PYX_ERR(0, 4139, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1) < (0)) __PYX_ERR(0, 4139, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ClockOffset_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1) < (0)) __PYX_ERR(0, 4139, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ClockOffset_v1) < (0)) __PYX_ERR(0, 4139, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1) < (0)) __PYX_ERR(0, 4139, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ClockOffset_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1) < (0)) __PYX_ERR(0, 4139, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1) < (0)) __PYX_ERR(0, 4139, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_FanSpeedInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1)) __PYX_ERR(0, 4316, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) < (0)) __PYX_ERR(0, 4316, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_FanSpeedInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) < (0)) __PYX_ERR(0, 4316, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) < (0)) __PYX_ERR(0, 4316, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) < (0)) __PYX_ERR(0, 4316, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_FanSpeedInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) < (0)) __PYX_ERR(0, 4316, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1) < (0)) __PYX_ERR(0, 4316, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePerfModes_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_DevicePerfModes_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_DevicePerfModes_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePerfModes_v1 *))__pyx_f_4cuda_8bindings_5_nvml_18DevicePerfModes_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1)) __PYX_ERR(0, 4459, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1) < (0)) __PYX_ERR(0, 4459, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1 = &__pyx_type_4cuda_8bindings_5_nvml_DevicePerfModes_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1) < (0)) __PYX_ERR(0, 4459, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePerfModes_v1) < (0)) __PYX_ERR(0, 4459, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1) < (0)) __PYX_ERR(0, 4459, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_DevicePerfModes_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1) < (0)) __PYX_ERR(0, 4459, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1) < (0)) __PYX_ERR(0, 4459, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 *))__pyx_f_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1)) __PYX_ERR(0, 4595, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) < (0)) __PYX_ERR(0, 4595, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1 = &__pyx_type_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) < (0)) __PYX_ERR(0, 4595, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) < (0)) __PYX_ERR(0, 4595, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) < (0)) __PYX_ERR(0, 4595, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_DeviceCurrentClockFreqs_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) < (0)) __PYX_ERR(0, 4595, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1) < (0)) __PYX_ERR(0, 4595, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationSample = &__pyx_vtable_4cuda_8bindings_5_nvml_ProcessUtilizationSample;
  __pyx_vtable_4cuda_8bindings_5_nvml_ProcessUtilizationSample._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationSample *))__pyx_f_4cuda_8bindings_5_nvml_24ProcessUtilizationSample__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample)) __PYX_ERR(0, 4735, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample) < (0)) __PYX_ERR(0, 4735, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample = &__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationSample;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample) < (0)) __PYX_ERR(0, 4735, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationSample) < (0)) __PYX_ERR(0, 4735, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample) < (0)) __PYX_ERR(0, 4735, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationSample, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample) < (0)) __PYX_ERR(0, 4735, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample) < (0)) __PYX_ERR(0, 4735, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1)) __PYX_ERR(0, 4931, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 4931, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 4931, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 4931, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 4931, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 4931, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 4931, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 *))__pyx_f_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1)) __PYX_ERR(0, 5154, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) < (0)) __PYX_ERR(0, 5154, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1 = &__pyx_type_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) < (0)) __PYX_ERR(0, 5154, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) < (0)) __PYX_ERR(0, 5154, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) < (0)) __PYX_ERR(0, 5154, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_EccSramErrorStatus_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) < (0)) __PYX_ERR(0, 5154, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1) < (0)) __PYX_ERR(0, 5154, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_PlatformInfo_v2 = &__pyx_vtable_4cuda_8bindings_5_nvml_PlatformInfo_v2;
  __pyx_vtable_4cuda_8bindings_5_nvml_PlatformInfo_v2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_PlatformInfo_v2 *))__pyx_f_4cuda_8bindings_5_nvml_15PlatformInfo_v2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2)) __PYX_ERR(0, 5413, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2) < (0)) __PYX_ERR(0, 5413, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2 = &__pyx_type_4cuda_8bindings_5_nvml_PlatformInfo_v2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2) < (0)) __PYX_ERR(0, 5413, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_vtabptr_4cuda_8bindings_5_nvml_PlatformInfo_v2) < (0)) __PYX_ERR(0, 5413, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2) < (0)) __PYX_ERR(0, 5413, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_PlatformInfo_v2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2) < (0)) __PYX_ERR(0, 5413, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2) < (0)) __PYX_ERR(0, 5413, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod1 = &__pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod1;
  __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod1 *))__pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1)) __PYX_ERR(0, 5621, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1) < (0)) __PYX_ERR(0, 5621, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1 = &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1) < (0)) __PYX_ERR(0, 5621, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod1) < (0)) __PYX_ERR(0, 5621, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1) < (0)) __PYX_ERR(0, 5621, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_py_anon_pod1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1) < (0)) __PYX_ERR(0, 5621, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1) < (0)) __PYX_ERR(0, 5621, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 *))__pyx_f_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1)) __PYX_ERR(0, 5775, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) < (0)) __PYX_ERR(0, 5775, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) < (0)) __PYX_ERR(0, 5775, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) < (0)) __PYX_ERR(0, 5775, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) < (0)) __PYX_ERR(0, 5775, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuHeterogeneousMode_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) < (0)) __PYX_ERR(0, 5775, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1) < (0)) __PYX_ERR(0, 5775, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPlacementId_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 *))__pyx_f_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1)) __PYX_ERR(0, 5907, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) < (0)) __PYX_ERR(0, 5907, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementId_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) < (0)) __PYX_ERR(0, 5907, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) < (0)) __PYX_ERR(0, 5907, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) < (0)) __PYX_ERR(0, 5907, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuPlacementId_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) < (0)) __PYX_ERR(0, 5907, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1) < (0)) __PYX_ERR(0, 5907, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPlacementList_v2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 *))__pyx_f_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2)) __PYX_ERR(0, 6042, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) < (0)) __PYX_ERR(0, 6042, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuPlacementList_v2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) < (0)) __PYX_ERR(0, 6042, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) < (0)) __PYX_ERR(0, 6042, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) < (0)) __PYX_ERR(0, 6042, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuPlacementList_v2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) < (0)) __PYX_ERR(0, 6042, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2) < (0)) __PYX_ERR(0, 6042, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 *))__pyx_f_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1)) __PYX_ERR(0, 6207, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) < (0)) __PYX_ERR(0, 6207, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) < (0)) __PYX_ERR(0, 6207, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) < (0)) __PYX_ERR(0, 6207, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) < (0)) __PYX_ERR(0, 6207, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuTypeBar1Info_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) < (0)) __PYX_ERR(0, 6207, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1) < (0)) __PYX_ERR(0, 6207, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1)) __PYX_ERR(0, 6347, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 6347, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 6347, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 6347, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 6347, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuProcessUtilizationInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 6347, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1) < (0)) __PYX_ERR(0, 6347, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 *))__pyx_f_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1)) __PYX_ERR(0, 6579, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) < (0)) __PYX_ERR(0, 6579, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) < (0)) __PYX_ERR(0, 6579, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) < (0)) __PYX_ERR(0, 6579, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) < (0)) __PYX_ERR(0, 6579, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuRuntimeState_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) < (0)) __PYX_ERR(0, 6579, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1) < (0)) __PYX_ERR(0, 6579, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod2 = &__pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod2;
  __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod2 *))__pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2)) __PYX_ERR(0, 6711, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2) < (0)) __PYX_ERR(0, 6711, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2 = &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2) < (0)) __PYX_ERR(0, 6711, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod2) < (0)) __PYX_ERR(0, 6711, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2) < (0)) __PYX_ERR(0, 6711, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_py_anon_pod2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2) < (0)) __PYX_ERR(0, 6711, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2) < (0)) __PYX_ERR(0, 6711, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod3 = &__pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod3;
  __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod3._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod3 *))__pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod3__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3)) __PYX_ERR(0, 6842, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3) < (0)) __PYX_ERR(0, 6842, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3 = &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod3;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3) < (0)) __PYX_ERR(0, 6842, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod3) < (0)) __PYX_ERR(0, 6842, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3) < (0)) __PYX_ERR(0, 6842, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_py_anon_pod3, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3) < (0)) __PYX_ERR(0, 6842, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3) < (0)) __PYX_ERR(0, 6842, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry *))__pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry)) __PYX_ERR(0, 6967, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) < (0)) __PYX_ERR(0, 6967, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) < (0)) __PYX_ERR(0, 6967, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) < (0)) __PYX_ERR(0, 6967, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) < (0)) __PYX_ERR(0, 6967, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogEntry, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) < (0)) __PYX_ERR(0, 6967, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry) < (0)) __PYX_ERR(0, 6967, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod4 = &__pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod4;
  __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod4._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod4 *))__pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod4__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4)) __PYX_ERR(0, 7157, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4) < (0)) __PYX_ERR(0, 7157, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4 = &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod4;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4) < (0)) __PYX_ERR(0, 7157, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod4) < (0)) __PYX_ERR(0, 7157, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4) < (0)) __PYX_ERR(0, 7157, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_py_anon_pod4, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4) < (0)) __PYX_ERR(0, 7157, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4) < (0)) __PYX_ERR(0, 7157, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod5 = &__pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod5;
  __pyx_vtable_4cuda_8bindings_5_nvml__py_anon_pod5._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml__py_anon_pod5 *))__pyx_f_4cuda_8bindings_5_nvml_13_py_anon_pod5__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5)) __PYX_ERR(0, 7288, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5) < (0)) __PYX_ERR(0, 7288, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5 = &__pyx_type_4cuda_8bindings_5_nvml__py_anon_pod5;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5) < (0)) __PYX_ERR(0, 7288, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_vtabptr_4cuda_8bindings_5_nvml__py_anon_pod5) < (0)) __PYX_ERR(0, 7288, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5) < (0)) __PYX_ERR(0, 7288, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_py_anon_pod5, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5) < (0)) __PYX_ERR(0, 7288, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5) < (0)) __PYX_ERR(0, 7288, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities *))__pyx_f_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities)) __PYX_ERR(0, 7415, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) < (0)) __PYX_ERR(0, 7415, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) < (0)) __PYX_ERR(0, 7415, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) < (0)) __PYX_ERR(0, 7415, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) < (0)) __PYX_ERR(0, 7415, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerCapabilities, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) < (0)) __PYX_ERR(0, 7415, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities) < (0)) __PYX_ERR(0, 7415, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseExpiry = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuLicenseExpiry._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseExpiry *))__pyx_f_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry)) __PYX_ERR(0, 7622, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) < (0)) __PYX_ERR(0, 7622, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry = &__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseExpiry;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) < (0)) __PYX_ERR(0, 7622, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) < (0)) __PYX_ERR(0, 7622, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) < (0)) __PYX_ERR(0, 7622, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuLicenseExpiry, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) < (0)) __PYX_ERR(0, 7622, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry) < (0)) __PYX_ERR(0, 7622, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicenseExpiry = &__pyx_vtable_4cuda_8bindings_5_nvml_GridLicenseExpiry;
  __pyx_vtable_4cuda_8bindings_5_nvml_GridLicenseExpiry._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicenseExpiry *))__pyx_f_4cuda_8bindings_5_nvml_17GridLicenseExpiry__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry)) __PYX_ERR(0, 7814, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry) < (0)) __PYX_ERR(0, 7814, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry = &__pyx_type_4cuda_8bindings_5_nvml_GridLicenseExpiry;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry) < (0)) __PYX_ERR(0, 7814, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicenseExpiry) < (0)) __PYX_ERR(0, 7814, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry) < (0)) __PYX_ERR(0, 7814, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GridLicenseExpiry, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry) < (0)) __PYX_ERR(0, 7814, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry) < (0)) __PYX_ERR(0, 7814, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1)) __PYX_ERR(0, 8002, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) < (0)) __PYX_ERR(0, 8002, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) < (0)) __PYX_ERR(0, 8002, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) < (0)) __PYX_ERR(0, 8002, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) < (0)) __PYX_ERR(0, 8002, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuTypeIdInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) < (0)) __PYX_ERR(0, 8002, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1) < (0)) __PYX_ERR(0, 8002, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 *))__pyx_f_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1)) __PYX_ERR(0, 8135, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) < (0)) __PYX_ERR(0, 8135, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) < (0)) __PYX_ERR(0, 8135, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) < (0)) __PYX_ERR(0, 8135, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) < (0)) __PYX_ERR(0, 8135, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuTypeMaxInstance_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) < (0)) __PYX_ERR(0, 8135, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1) < (0)) __PYX_ERR(0, 8135, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1)) __PYX_ERR(0, 8279, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) < (0)) __PYX_ERR(0, 8279, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) < (0)) __PYX_ERR(0, 8279, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) < (0)) __PYX_ERR(0, 8279, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) < (0)) __PYX_ERR(0, 8279, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ActiveVgpuInstanceInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) < (0)) __PYX_ERR(0, 8279, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1) < (0)) __PYX_ERR(0, 8279, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1)) __PYX_ERR(0, 8414, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) < (0)) __PYX_ERR(0, 8414, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) < (0)) __PYX_ERR(0, 8414, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) < (0)) __PYX_ERR(0, 8414, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) < (0)) __PYX_ERR(0, 8414, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuCreatablePlacementInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) < (0)) __PYX_ERR(0, 8414, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1) < (0)) __PYX_ERR(0, 8414, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_HwbcEntry = &__pyx_vtable_4cuda_8bindings_5_nvml_HwbcEntry;
  __pyx_vtable_4cuda_8bindings_5_nvml_HwbcEntry._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_HwbcEntry *))__pyx_f_4cuda_8bindings_5_nvml_9HwbcEntry__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_HwbcEntry_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry)) __PYX_ERR(0, 8579, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_HwbcEntry_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry) < (0)) __PYX_ERR(0, 8579, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry = &__pyx_type_4cuda_8bindings_5_nvml_HwbcEntry;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry) < (0)) __PYX_ERR(0, 8579, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_vtabptr_4cuda_8bindings_5_nvml_HwbcEntry) < (0)) __PYX_ERR(0, 8579, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry) < (0)) __PYX_ERR(0, 8579, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_HwbcEntry, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry) < (0)) __PYX_ERR(0, 8579, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry) < (0)) __PYX_ERR(0, 8579, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_LedState = &__pyx_vtable_4cuda_8bindings_5_nvml_LedState;
  __pyx_vtable_4cuda_8bindings_5_nvml_LedState._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_LedState *))__pyx_f_4cuda_8bindings_5_nvml_8LedState__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_LedState_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState)) __PYX_ERR(0, 8723, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_LedState_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState) < (0)) __PYX_ERR(0, 8723, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState = &__pyx_type_4cuda_8bindings_5_nvml_LedState;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState) < (0)) __PYX_ERR(0, 8723, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_vtabptr_4cuda_8bindings_5_nvml_LedState) < (0)) __PYX_ERR(0, 8723, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState) < (0)) __PYX_ERR(0, 8723, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_LedState, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState) < (0)) __PYX_ERR(0, 8723, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_LedState) < (0)) __PYX_ERR(0, 8723, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_UnitInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_UnitInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitInfo *))__pyx_f_4cuda_8bindings_5_nvml_8UnitInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_UnitInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo)) __PYX_ERR(0, 8861, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_UnitInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo) < (0)) __PYX_ERR(0, 8861, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo = &__pyx_type_4cuda_8bindings_5_nvml_UnitInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo) < (0)) __PYX_ERR(0, 8861, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitInfo) < (0)) __PYX_ERR(0, 8861, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo) < (0)) __PYX_ERR(0, 8861, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_UnitInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo) < (0)) __PYX_ERR(0, 8861, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo) < (0)) __PYX_ERR(0, 8861, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_PSUInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_PSUInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_PSUInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_PSUInfo *))__pyx_f_4cuda_8bindings_5_nvml_7PSUInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_PSUInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo)) __PYX_ERR(0, 9033, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_PSUInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo) < (0)) __PYX_ERR(0, 9033, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo = &__pyx_type_4cuda_8bindings_5_nvml_PSUInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo) < (0)) __PYX_ERR(0, 9033, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_PSUInfo) < (0)) __PYX_ERR(0, 9033, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo) < (0)) __PYX_ERR(0, 9033, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_PSUInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo) < (0)) __PYX_ERR(0, 9033, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo) < (0)) __PYX_ERR(0, 9033, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_UnitFanInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_UnitFanInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanInfo *))__pyx_f_4cuda_8bindings_5_nvml_11UnitFanInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo)) __PYX_ERR(0, 9191, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo) < (0)) __PYX_ERR(0, 9191, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo = &__pyx_type_4cuda_8bindings_5_nvml_UnitFanInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo) < (0)) __PYX_ERR(0, 9191, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanInfo) < (0)) __PYX_ERR(0, 9191, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo) < (0)) __PYX_ERR(0, 9191, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_UnitFanInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo) < (0)) __PYX_ERR(0, 9191, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo) < (0)) __PYX_ERR(0, 9191, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_EventData = &__pyx_vtable_4cuda_8bindings_5_nvml_EventData;
  __pyx_vtable_4cuda_8bindings_5_nvml_EventData._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_EventData *))__pyx_f_4cuda_8bindings_5_nvml_9EventData__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_EventData_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData)) __PYX_ERR(0, 9340, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_EventData_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData) < (0)) __PYX_ERR(0, 9340, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData = &__pyx_type_4cuda_8bindings_5_nvml_EventData;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData) < (0)) __PYX_ERR(0, 9340, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_vtabptr_4cuda_8bindings_5_nvml_EventData) < (0)) __PYX_ERR(0, 9340, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData) < (0)) __PYX_ERR(0, 9340, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_EventData, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData) < (0)) __PYX_ERR(0, 9340, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EventData) < (0)) __PYX_ERR(0, 9340, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_AccountingStats = &__pyx_vtable_4cuda_8bindings_5_nvml_AccountingStats;
  __pyx_vtable_4cuda_8bindings_5_nvml_AccountingStats._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_AccountingStats *))__pyx_f_4cuda_8bindings_5_nvml_15AccountingStats__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_AccountingStats_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats)) __PYX_ERR(0, 9510, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_AccountingStats_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats) < (0)) __PYX_ERR(0, 9510, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats = &__pyx_type_4cuda_8bindings_5_nvml_AccountingStats;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats) < (0)) __PYX_ERR(0, 9510, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_vtabptr_4cuda_8bindings_5_nvml_AccountingStats) < (0)) __PYX_ERR(0, 9510, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats) < (0)) __PYX_ERR(0, 9510, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_AccountingStats, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats) < (0)) __PYX_ERR(0, 9510, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats) < (0)) __PYX_ERR(0, 9510, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_EncoderSessionInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_EncoderSessionInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_EncoderSessionInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_EncoderSessionInfo *))__pyx_f_4cuda_8bindings_5_nvml_18EncoderSessionInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo)) __PYX_ERR(0, 9692, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo) < (0)) __PYX_ERR(0, 9692, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo = &__pyx_type_4cuda_8bindings_5_nvml_EncoderSessionInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo) < (0)) __PYX_ERR(0, 9692, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_EncoderSessionInfo) < (0)) __PYX_ERR(0, 9692, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo) < (0)) __PYX_ERR(0, 9692, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_EncoderSessionInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo) < (0)) __PYX_ERR(0, 9692, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo) < (0)) __PYX_ERR(0, 9692, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_FBCStats = &__pyx_vtable_4cuda_8bindings_5_nvml_FBCStats;
  __pyx_vtable_4cuda_8bindings_5_nvml_FBCStats._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCStats *))__pyx_f_4cuda_8bindings_5_nvml_8FBCStats__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_FBCStats_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats)) __PYX_ERR(0, 9905, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_FBCStats_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats) < (0)) __PYX_ERR(0, 9905, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats = &__pyx_type_4cuda_8bindings_5_nvml_FBCStats;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats) < (0)) __PYX_ERR(0, 9905, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_vtabptr_4cuda_8bindings_5_nvml_FBCStats) < (0)) __PYX_ERR(0, 9905, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats) < (0)) __PYX_ERR(0, 9905, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_FBCStats, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats) < (0)) __PYX_ERR(0, 9905, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats) < (0)) __PYX_ERR(0, 9905, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_FBCSessionInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_FBCSessionInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_FBCSessionInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_FBCSessionInfo *))__pyx_f_4cuda_8bindings_5_nvml_14FBCSessionInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo)) __PYX_ERR(0, 10058, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo) < (0)) __PYX_ERR(0, 10058, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo = &__pyx_type_4cuda_8bindings_5_nvml_FBCSessionInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo) < (0)) __PYX_ERR(0, 10058, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_FBCSessionInfo) < (0)) __PYX_ERR(0, 10058, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo) < (0)) __PYX_ERR(0, 10058, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_FBCSessionInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo) < (0)) __PYX_ERR(0, 10058, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo) < (0)) __PYX_ERR(0, 10058, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemCaps = &__pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;
  __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeSystemCaps._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemCaps *))__pyx_f_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps)) __PYX_ERR(0, 10314, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) < (0)) __PYX_ERR(0, 10314, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps = &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemCaps;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) < (0)) __PYX_ERR(0, 10314, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) < (0)) __PYX_ERR(0, 10314, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) < (0)) __PYX_ERR(0, 10314, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemCaps, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) < (0)) __PYX_ERR(0, 10314, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps) < (0)) __PYX_ERR(0, 10314, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemState = &__pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeSystemState;
  __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeSystemState._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeSystemState *))__pyx_f_4cuda_8bindings_5_nvml_22ConfComputeSystemState__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState)) __PYX_ERR(0, 10447, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState) < (0)) __PYX_ERR(0, 10447, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState = &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeSystemState;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState) < (0)) __PYX_ERR(0, 10447, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeSystemState) < (0)) __PYX_ERR(0, 10447, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState) < (0)) __PYX_ERR(0, 10447, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemState, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState) < (0)) __PYX_ERR(0, 10447, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState) < (0)) __PYX_ERR(0, 10447, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 *))__pyx_f_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1)) __PYX_ERR(0, 10593, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) < (0)) __PYX_ERR(0, 10593, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1 = &__pyx_type_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) < (0)) __PYX_ERR(0, 10593, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) < (0)) __PYX_ERR(0, 10593, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) < (0)) __PYX_ERR(0, 10593, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_SystemConfComputeSettings_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) < (0)) __PYX_ERR(0, 10593, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1) < (0)) __PYX_ERR(0, 10593, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo *))__pyx_f_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo)) __PYX_ERR(0, 10758, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) < (0)) __PYX_ERR(0, 10758, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo = &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) < (0)) __PYX_ERR(0, 10758, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) < (0)) __PYX_ERR(0, 10758, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) < (0)) __PYX_ERR(0, 10758, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ConfComputeMemSizeInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) < (0)) __PYX_ERR(0, 10758, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo) < (0)) __PYX_ERR(0, 10758, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate = &__pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;
  __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate *))__pyx_f_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate)) __PYX_ERR(0, 10892, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) < (0)) __PYX_ERR(0, 10892, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate = &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) < (0)) __PYX_ERR(0, 10892, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) < (0)) __PYX_ERR(0, 10892, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) < (0)) __PYX_ERR(0, 10892, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuCertificate, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) < (0)) __PYX_ERR(0, 10892, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate) < (0)) __PYX_ERR(0, 10892, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport = &__pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;
  __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport *))__pyx_f_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport)) __PYX_ERR(0, 11058, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) < (0)) __PYX_ERR(0, 11058, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport = &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) < (0)) __PYX_ERR(0, 11058, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) < (0)) __PYX_ERR(0, 11058, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) < (0)) __PYX_ERR(0, 11058, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuAttestationReport_2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) < (0)) __PYX_ERR(0, 11058, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport) < (0)) __PYX_ERR(0, 11058, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1)) __PYX_ERR(0, 11246, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) < (0)) __PYX_ERR(0, 11246, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) < (0)) __PYX_ERR(0, 11246, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) < (0)) __PYX_ERR(0, 11246, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) < (0)) __PYX_ERR(0, 11246, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ConfComputeGetKeyRotationThresho, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) < (0)) __PYX_ERR(0, 11246, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1) < (0)) __PYX_ERR(0, 11246, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 *))__pyx_f_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1)) __PYX_ERR(0, 11379, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) < (0)) __PYX_ERR(0, 11379, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1 = &__pyx_type_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) < (0)) __PYX_ERR(0, 11379, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) < (0)) __PYX_ERR(0, 11379, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) < (0)) __PYX_ERR(0, 11379, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_NvlinkSupportedBwModes_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) < (0)) __PYX_ERR(0, 11379, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1) < (0)) __PYX_ERR(0, 11379, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 *))__pyx_f_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1)) __PYX_ERR(0, 11527, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) < (0)) __PYX_ERR(0, 11527, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1 = &__pyx_type_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) < (0)) __PYX_ERR(0, 11527, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) < (0)) __PYX_ERR(0, 11527, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) < (0)) __PYX_ERR(0, 11527, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_NvlinkGetBwMode_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) < (0)) __PYX_ERR(0, 11527, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1) < (0)) __PYX_ERR(0, 11527, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 *))__pyx_f_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1)) __PYX_ERR(0, 11671, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) < (0)) __PYX_ERR(0, 11671, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1 = &__pyx_type_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) < (0)) __PYX_ERR(0, 11671, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) < (0)) __PYX_ERR(0, 11671, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) < (0)) __PYX_ERR(0, 11671, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_NvlinkSetBwMode_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) < (0)) __PYX_ERR(0, 11671, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1) < (0)) __PYX_ERR(0, 11671, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuVersion = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuVersion;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuVersion._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuVersion *))__pyx_f_4cuda_8bindings_5_nvml_11VgpuVersion__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuVersion_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion)) __PYX_ERR(0, 11814, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuVersion_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion) < (0)) __PYX_ERR(0, 11814, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion = &__pyx_type_4cuda_8bindings_5_nvml_VgpuVersion;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion) < (0)) __PYX_ERR(0, 11814, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuVersion) < (0)) __PYX_ERR(0, 11814, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion) < (0)) __PYX_ERR(0, 11814, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuVersion, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion) < (0)) __PYX_ERR(0, 11814, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion) < (0)) __PYX_ERR(0, 11814, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuMetadata = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuMetadata;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuMetadata._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuMetadata *))__pyx_f_4cuda_8bindings_5_nvml_12VgpuMetadata__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata)) __PYX_ERR(0, 11954, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata) < (0)) __PYX_ERR(0, 11954, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata = &__pyx_type_4cuda_8bindings_5_nvml_VgpuMetadata;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata) < (0)) __PYX_ERR(0, 11954, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuMetadata) < (0)) __PYX_ERR(0, 11954, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata) < (0)) __PYX_ERR(0, 11954, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuMetadata, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata) < (0)) __PYX_ERR(0, 11954, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata) < (0)) __PYX_ERR(0, 11954, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility *))__pyx_f_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility)) __PYX_ERR(0, 12175, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) < (0)) __PYX_ERR(0, 12175, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility = &__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) < (0)) __PYX_ERR(0, 12175, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) < (0)) __PYX_ERR(0, 12175, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) < (0)) __PYX_ERR(0, 12175, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibility, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) < (0)) __PYX_ERR(0, 12175, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility) < (0)) __PYX_ERR(0, 12175, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstancePlacement = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuInstancePlacement;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstancePlacement._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstancePlacement *))__pyx_f_4cuda_8bindings_5_nvml_20GpuInstancePlacement__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement)) __PYX_ERR(0, 12307, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement) < (0)) __PYX_ERR(0, 12307, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement = &__pyx_type_4cuda_8bindings_5_nvml_GpuInstancePlacement;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement) < (0)) __PYX_ERR(0, 12307, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstancePlacement) < (0)) __PYX_ERR(0, 12307, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement) < (0)) __PYX_ERR(0, 12307, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuInstancePlacement, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement) < (0)) __PYX_ERR(0, 12307, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement) < (0)) __PYX_ERR(0, 12307, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 *))__pyx_f_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2)) __PYX_ERR(0, 12464, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 12464, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2 = &__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 12464, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 12464, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 12464, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 12464, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 12464, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 *))__pyx_f_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3)) __PYX_ERR(0, 12732, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 12732, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3 = &__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 12732, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 12732, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 12732, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v3, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 12732, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 12732, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstancePlacement = &__pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstancePlacement;
  __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstancePlacement._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstancePlacement *))__pyx_f_4cuda_8bindings_5_nvml_24ComputeInstancePlacement__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement)) __PYX_ERR(0, 12989, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement) < (0)) __PYX_ERR(0, 12989, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement = &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstancePlacement;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement) < (0)) __PYX_ERR(0, 12989, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstancePlacement) < (0)) __PYX_ERR(0, 12989, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement) < (0)) __PYX_ERR(0, 12989, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ComputeInstancePlacement, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement) < (0)) __PYX_ERR(0, 12989, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement) < (0)) __PYX_ERR(0, 12989, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 = &__pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;
  __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 *))__pyx_f_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2)) __PYX_ERR(0, 13144, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 13144, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2 = &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 13144, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 13144, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 13144, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 13144, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2) < (0)) __PYX_ERR(0, 13144, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 = &__pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;
  __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 *))__pyx_f_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3)) __PYX_ERR(0, 13389, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 13389, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3 = &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 13389, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 13389, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 13389, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v3, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 13389, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3) < (0)) __PYX_ERR(0, 13389, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpmSupport = &__pyx_vtable_4cuda_8bindings_5_nvml_GpmSupport;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpmSupport._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpmSupport *))__pyx_f_4cuda_8bindings_5_nvml_10GpmSupport__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpmSupport_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport)) __PYX_ERR(0, 13635, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpmSupport_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport) < (0)) __PYX_ERR(0, 13635, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport = &__pyx_type_4cuda_8bindings_5_nvml_GpmSupport;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport) < (0)) __PYX_ERR(0, 13635, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpmSupport) < (0)) __PYX_ERR(0, 13635, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport) < (0)) __PYX_ERR(0, 13635, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpmSupport, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport) < (0)) __PYX_ERR(0, 13635, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport) < (0)) __PYX_ERR(0, 13635, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_DeviceCapabilities_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 *))__pyx_f_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1)) __PYX_ERR(0, 13767, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) < (0)) __PYX_ERR(0, 13767, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1 = &__pyx_type_4cuda_8bindings_5_nvml_DeviceCapabilities_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) < (0)) __PYX_ERR(0, 13767, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) < (0)) __PYX_ERR(0, 13767, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) < (0)) __PYX_ERR(0, 13767, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_DeviceCapabilities_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) < (0)) __PYX_ERR(0, 13767, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1) < (0)) __PYX_ERR(0, 13767, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 *))__pyx_f_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1)) __PYX_ERR(0, 13899, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) < (0)) __PYX_ERR(0, 13899, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1 = &__pyx_type_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) < (0)) __PYX_ERR(0, 13899, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) < (0)) __PYX_ERR(0, 13899, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) < (0)) __PYX_ERR(0, 13899, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_DeviceAddressingMode_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) < (0)) __PYX_ERR(0, 13899, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1) < (0)) __PYX_ERR(0, 13899, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_RepairStatus_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_RepairStatus_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_RepairStatus_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_RepairStatus_v1 *))__pyx_f_4cuda_8bindings_5_nvml_15RepairStatus_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1)) __PYX_ERR(0, 14032, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1) < (0)) __PYX_ERR(0, 14032, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1 = &__pyx_type_4cuda_8bindings_5_nvml_RepairStatus_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1) < (0)) __PYX_ERR(0, 14032, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_RepairStatus_v1) < (0)) __PYX_ERR(0, 14032, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1) < (0)) __PYX_ERR(0, 14032, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_RepairStatus_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1) < (0)) __PYX_ERR(0, 14032, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1) < (0)) __PYX_ERR(0, 14032, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_Pdi_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_Pdi_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_Pdi_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_Pdi_v1 *))__pyx_f_4cuda_8bindings_5_nvml_6Pdi_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_Pdi_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1)) __PYX_ERR(0, 14175, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_Pdi_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1) < (0)) __PYX_ERR(0, 14175, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1 = &__pyx_type_4cuda_8bindings_5_nvml_Pdi_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1) < (0)) __PYX_ERR(0, 14175, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_Pdi_v1) < (0)) __PYX_ERR(0, 14175, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1) < (0)) __PYX_ERR(0, 14175, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_Pdi_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1) < (0)) __PYX_ERR(0, 14175, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1) < (0)) __PYX_ERR(0, 14175, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 *))__pyx_f_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1)) __PYX_ERR(0, 14308, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) < (0)) __PYX_ERR(0, 14308, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1 = &__pyx_type_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) < (0)) __PYX_ERR(0, 14308, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) < (0)) __PYX_ERR(0, 14308, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) < (0)) __PYX_ERR(0, 14308, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_DevicePowerMizerModes_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) < (0)) __PYX_ERR(0, 14308, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1) < (0)) __PYX_ERR(0, 14308, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 *))__pyx_f_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1)) __PYX_ERR(0, 14456, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) < (0)) __PYX_ERR(0, 14456, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1 = &__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) < (0)) __PYX_ERR(0, 14456, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) < (0)) __PYX_ERR(0, 14456, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) < (0)) __PYX_ERR(0, 14456, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorEnt, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) < (0)) __PYX_ERR(0, 14456, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1) < (0)) __PYX_ERR(0, 14456, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuFabricInfo_v3._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 *))__pyx_f_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3)) __PYX_ERR(0, 14662, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) < (0)) __PYX_ERR(0, 14662, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3 = &__pyx_type_4cuda_8bindings_5_nvml_GpuFabricInfo_v3;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) < (0)) __PYX_ERR(0, 14662, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) < (0)) __PYX_ERR(0, 14662, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) < (0)) __PYX_ERR(0, 14662, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuFabricInfo_v3, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) < (0)) __PYX_ERR(0, 14662, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3) < (0)) __PYX_ERR(0, 14662, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion = &__pyx_vtable_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;
  __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion *))__pyx_f_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion)) __PYX_ERR(0, 14855, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) < (0)) __PYX_ERR(0, 14855, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion = &__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) < (0)) __PYX_ERR(0, 14855, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) < (0)) __PYX_ERR(0, 14855, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) < (0)) __PYX_ERR(0, 14855, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareVersion, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) < (0)) __PYX_ERR(0, 14855, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion) < (0)) __PYX_ERR(0, 14855, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ExcludedDeviceInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_ExcludedDeviceInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ExcludedDeviceInfo *))__pyx_f_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo)) __PYX_ERR(0, 15009, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) < (0)) __PYX_ERR(0, 15009, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo = &__pyx_type_4cuda_8bindings_5_nvml_ExcludedDeviceInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) < (0)) __PYX_ERR(0, 15009, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) < (0)) __PYX_ERR(0, 15009, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) < (0)) __PYX_ERR(0, 15009, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ExcludedDeviceInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) < (0)) __PYX_ERR(0, 15009, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo) < (0)) __PYX_ERR(0, 15009, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetailList_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ProcessDetailList_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ProcessDetailList_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessDetailList_v1 *))__pyx_f_4cuda_8bindings_5_nvml_20ProcessDetailList_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1)) __PYX_ERR(0, 15148, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1) < (0)) __PYX_ERR(0, 15148, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ProcessDetailList_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1) < (0)) __PYX_ERR(0, 15148, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessDetailList_v1) < (0)) __PYX_ERR(0, 15148, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1) < (0)) __PYX_ERR(0, 15148, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ProcessDetailList_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1) < (0)) __PYX_ERR(0, 15148, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1) < (0)) __PYX_ERR(0, 15148, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipHierarchy = &__pyx_vtable_4cuda_8bindings_5_nvml_BridgeChipHierarchy;
  __pyx_vtable_4cuda_8bindings_5_nvml_BridgeChipHierarchy._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_BridgeChipHierarchy *))__pyx_f_4cuda_8bindings_5_nvml_19BridgeChipHierarchy__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy)) __PYX_ERR(0, 15299, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy) < (0)) __PYX_ERR(0, 15299, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy = &__pyx_type_4cuda_8bindings_5_nvml_BridgeChipHierarchy;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy) < (0)) __PYX_ERR(0, 15299, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_vtabptr_4cuda_8bindings_5_nvml_BridgeChipHierarchy) < (0)) __PYX_ERR(0, 15299, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy) < (0)) __PYX_ERR(0, 15299, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_BridgeChipHierarchy, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy) < (0)) __PYX_ERR(0, 15299, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy) < (0)) __PYX_ERR(0, 15299, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_Sample = &__pyx_vtable_4cuda_8bindings_5_nvml_Sample;
  __pyx_vtable_4cuda_8bindings_5_nvml_Sample._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_Sample *))__pyx_f_4cuda_8bindings_5_nvml_6Sample__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_Sample_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample)) __PYX_ERR(0, 15434, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_Sample_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample) < (0)) __PYX_ERR(0, 15434, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample = &__pyx_type_4cuda_8bindings_5_nvml_Sample;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample) < (0)) __PYX_ERR(0, 15434, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_vtabptr_4cuda_8bindings_5_nvml_Sample) < (0)) __PYX_ERR(0, 15434, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample) < (0)) __PYX_ERR(0, 15434, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_Sample, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample) < (0)) __PYX_ERR(0, 15434, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_Sample) < (0)) __PYX_ERR(0, 15434, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1)) __PYX_ERR(0, 15584, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) < (0)) __PYX_ERR(0, 15584, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) < (0)) __PYX_ERR(0, 15584, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) < (0)) __PYX_ERR(0, 15584, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) < (0)) __PYX_ERR(0, 15584, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuInstanceUtilizationInfo_v1_3, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) < (0)) __PYX_ERR(0, 15584, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1) < (0)) __PYX_ERR(0, 15584, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_FieldValue = &__pyx_vtable_4cuda_8bindings_5_nvml_FieldValue;
  __pyx_vtable_4cuda_8bindings_5_nvml_FieldValue._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_FieldValue *))__pyx_f_4cuda_8bindings_5_nvml_10FieldValue__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_FieldValue_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue)) __PYX_ERR(0, 15789, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_FieldValue_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue) < (0)) __PYX_ERR(0, 15789, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue = &__pyx_type_4cuda_8bindings_5_nvml_FieldValue;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue) < (0)) __PYX_ERR(0, 15789, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_vtabptr_4cuda_8bindings_5_nvml_FieldValue) < (0)) __PYX_ERR(0, 15789, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue) < (0)) __PYX_ERR(0, 15789, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_FieldValue, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue) < (0)) __PYX_ERR(0, 15789, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue) < (0)) __PYX_ERR(0, 15789, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuThermalSettings = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuThermalSettings;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuThermalSettings._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuThermalSettings *))__pyx_f_4cuda_8bindings_5_nvml_18GpuThermalSettings__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings)) __PYX_ERR(0, 15988, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings) < (0)) __PYX_ERR(0, 15988, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings = &__pyx_type_4cuda_8bindings_5_nvml_GpuThermalSettings;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings) < (0)) __PYX_ERR(0, 15988, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuThermalSettings) < (0)) __PYX_ERR(0, 15988, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings) < (0)) __PYX_ERR(0, 15988, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuThermalSettings, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings) < (0)) __PYX_ERR(0, 15988, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings) < (0)) __PYX_ERR(0, 15988, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonStatus = &__pyx_vtable_4cuda_8bindings_5_nvml_ClkMonStatus;
  __pyx_vtable_4cuda_8bindings_5_nvml_ClkMonStatus._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ClkMonStatus *))__pyx_f_4cuda_8bindings_5_nvml_12ClkMonStatus__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus)) __PYX_ERR(0, 16124, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus) < (0)) __PYX_ERR(0, 16124, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus = &__pyx_type_4cuda_8bindings_5_nvml_ClkMonStatus;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus) < (0)) __PYX_ERR(0, 16124, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_vtabptr_4cuda_8bindings_5_nvml_ClkMonStatus) < (0)) __PYX_ERR(0, 16124, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus) < (0)) __PYX_ERR(0, 16124, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ClkMonStatus, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus) < (0)) __PYX_ERR(0, 16124, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus) < (0)) __PYX_ERR(0, 16124, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1)) __PYX_ERR(0, 16272, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16272, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16272, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16272, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16272, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ProcessesUtilizationInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16272, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16272, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo *))__pyx_f_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo)) __PYX_ERR(0, 16423, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) < (0)) __PYX_ERR(0, 16423, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo = &__pyx_type_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) < (0)) __PYX_ERR(0, 16423, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) < (0)) __PYX_ERR(0, 16423, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) < (0)) __PYX_ERR(0, 16423, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuDynamicPstatesInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) < (0)) __PYX_ERR(0, 16423, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo) < (0)) __PYX_ERR(0, 16423, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1)) __PYX_ERR(0, 16560, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16560, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16560, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16560, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16560, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuProcessesUtilizationInfo_v1_2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16560, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 16560, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerParams = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerParams;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerParams._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerParams *))__pyx_f_4cuda_8bindings_5_nvml_19VgpuSchedulerParams__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams)) __PYX_ERR(0, 16706, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams) < (0)) __PYX_ERR(0, 16706, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerParams;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams) < (0)) __PYX_ERR(0, 16706, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerParams) < (0)) __PYX_ERR(0, 16706, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams) < (0)) __PYX_ERR(0, 16706, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerParams, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams) < (0)) __PYX_ERR(0, 16706, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams) < (0)) __PYX_ERR(0, 16706, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams *))__pyx_f_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams)) __PYX_ERR(0, 16835, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) < (0)) __PYX_ERR(0, 16835, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) < (0)) __PYX_ERR(0, 16835, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) < (0)) __PYX_ERR(0, 16835, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) < (0)) __PYX_ERR(0, 16835, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerSetParams, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) < (0)) __PYX_ERR(0, 16835, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams) < (0)) __PYX_ERR(0, 16835, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuLicenseInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuLicenseInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuLicenseInfo *))__pyx_f_4cuda_8bindings_5_nvml_15VgpuLicenseInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo)) __PYX_ERR(0, 16970, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo) < (0)) __PYX_ERR(0, 16970, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo = &__pyx_type_4cuda_8bindings_5_nvml_VgpuLicenseInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo) < (0)) __PYX_ERR(0, 16970, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuLicenseInfo) < (0)) __PYX_ERR(0, 16970, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo) < (0)) __PYX_ERR(0, 16970, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuLicenseInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo) < (0)) __PYX_ERR(0, 16970, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo) < (0)) __PYX_ERR(0, 16970, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeature = &__pyx_vtable_4cuda_8bindings_5_nvml_GridLicensableFeature;
  __pyx_vtable_4cuda_8bindings_5_nvml_GridLicensableFeature._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeature *))__pyx_f_4cuda_8bindings_5_nvml_21GridLicensableFeature__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature)) __PYX_ERR(0, 17118, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature) < (0)) __PYX_ERR(0, 17118, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature = &__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeature;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature) < (0)) __PYX_ERR(0, 17118, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeature) < (0)) __PYX_ERR(0, 17118, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature) < (0)) __PYX_ERR(0, 17118, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GridLicensableFeature, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature) < (0)) __PYX_ERR(0, 17118, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature) < (0)) __PYX_ERR(0, 17118, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanSpeeds = &__pyx_vtable_4cuda_8bindings_5_nvml_UnitFanSpeeds;
  __pyx_vtable_4cuda_8bindings_5_nvml_UnitFanSpeeds._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_UnitFanSpeeds *))__pyx_f_4cuda_8bindings_5_nvml_13UnitFanSpeeds__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds)) __PYX_ERR(0, 17302, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds) < (0)) __PYX_ERR(0, 17302, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds = &__pyx_type_4cuda_8bindings_5_nvml_UnitFanSpeeds;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds) < (0)) __PYX_ERR(0, 17302, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_vtabptr_4cuda_8bindings_5_nvml_UnitFanSpeeds) < (0)) __PYX_ERR(0, 17302, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds) < (0)) __PYX_ERR(0, 17302, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_UnitFanSpeeds, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds) < (0)) __PYX_ERR(0, 17302, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds) < (0)) __PYX_ERR(0, 17302, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuMetadata = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuPgpuMetadata._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuPgpuMetadata *))__pyx_f_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata)) __PYX_ERR(0, 17443, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) < (0)) __PYX_ERR(0, 17443, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata = &__pyx_type_4cuda_8bindings_5_nvml_VgpuPgpuMetadata;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) < (0)) __PYX_ERR(0, 17443, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) < (0)) __PYX_ERR(0, 17443, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) < (0)) __PYX_ERR(0, 17443, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuPgpuMetadata, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) < (0)) __PYX_ERR(0, 17443, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata) < (0)) __PYX_ERR(0, 17443, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_GpuInstanceInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GpuInstanceInfo *))__pyx_f_4cuda_8bindings_5_nvml_15GpuInstanceInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo)) __PYX_ERR(0, 17641, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo) < (0)) __PYX_ERR(0, 17641, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo = &__pyx_type_4cuda_8bindings_5_nvml_GpuInstanceInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo) < (0)) __PYX_ERR(0, 17641, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_GpuInstanceInfo) < (0)) __PYX_ERR(0, 17641, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo) < (0)) __PYX_ERR(0, 17641, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GpuInstanceInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo) < (0)) __PYX_ERR(0, 17641, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo) < (0)) __PYX_ERR(0, 17641, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_ComputeInstanceInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_ComputeInstanceInfo *))__pyx_f_4cuda_8bindings_5_nvml_19ComputeInstanceInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo)) __PYX_ERR(0, 17799, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo) < (0)) __PYX_ERR(0, 17799, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo = &__pyx_type_4cuda_8bindings_5_nvml_ComputeInstanceInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo) < (0)) __PYX_ERR(0, 17799, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_ComputeInstanceInfo) < (0)) __PYX_ERR(0, 17799, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo) < (0)) __PYX_ERR(0, 17799, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_ComputeInstanceInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo) < (0)) __PYX_ERR(0, 17799, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo) < (0)) __PYX_ERR(0, 17799, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 *))__pyx_f_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1)) __PYX_ERR(0, 17966, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) < (0)) __PYX_ERR(0, 17966, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1 = &__pyx_type_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) < (0)) __PYX_ERR(0, 17966, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) < (0)) __PYX_ERR(0, 17966, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) < (0)) __PYX_ERR(0, 17966, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorCou, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) < (0)) __PYX_ERR(0, 17966, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1) < (0)) __PYX_ERR(0, 17966, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo = &__pyx_vtable_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;
  __pyx_vtable_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo *))__pyx_f_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo)) __PYX_ERR(0, 18106, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) < (0)) __PYX_ERR(0, 18106, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo = &__pyx_type_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) < (0)) __PYX_ERR(0, 18106, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_vtabptr_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) < (0)) __PYX_ERR(0, 18106, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) < (0)) __PYX_ERR(0, 18106, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareInfo, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) < (0)) __PYX_ERR(0, 18106, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo) < (0)) __PYX_ERR(0, 18106, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1)) __PYX_ERR(0, 18244, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 18244, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 18244, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 18244, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 18244, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuInstancesUtilizationInfo_v1_2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 18244, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1) < (0)) __PYX_ERR(0, 18244, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLog = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLog;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLog._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLog *))__pyx_f_4cuda_8bindings_5_nvml_16VgpuSchedulerLog__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog)) __PYX_ERR(0, 18410, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog) < (0)) __PYX_ERR(0, 18410, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLog;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog) < (0)) __PYX_ERR(0, 18410, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLog) < (0)) __PYX_ERR(0, 18410, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog) < (0)) __PYX_ERR(0, 18410, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLog, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog) < (0)) __PYX_ERR(0, 18410, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog) < (0)) __PYX_ERR(0, 18410, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerGetState = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerGetState._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerGetState *))__pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState)) __PYX_ERR(0, 18591, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) < (0)) __PYX_ERR(0, 18591, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerGetState;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) < (0)) __PYX_ERR(0, 18591, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) < (0)) __PYX_ERR(0, 18591, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) < (0)) __PYX_ERR(0, 18591, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerGetState, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) < (0)) __PYX_ERR(0, 18591, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState) < (0)) __PYX_ERR(0, 18591, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1)) __PYX_ERR(0, 18738, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) < (0)) __PYX_ERR(0, 18738, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) < (0)) __PYX_ERR(0, 18738, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) < (0)) __PYX_ERR(0, 18738, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) < (0)) __PYX_ERR(0, 18738, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerStateInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) < (0)) __PYX_ERR(0, 18738, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1) < (0)) __PYX_ERR(0, 18738, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 *))__pyx_f_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1)) __PYX_ERR(0, 18909, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) < (0)) __PYX_ERR(0, 18909, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) < (0)) __PYX_ERR(0, 18909, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) < (0)) __PYX_ERR(0, 18909, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) < (0)) __PYX_ERR(0, 18909, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogInfo_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) < (0)) __PYX_ERR(0, 18909, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1) < (0)) __PYX_ERR(0, 18909, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 = &__pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;
  __pyx_vtable_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 *))__pyx_f_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1)) __PYX_ERR(0, 19103, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) < (0)) __PYX_ERR(0, 19103, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1 = &__pyx_type_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) < (0)) __PYX_ERR(0, 19103, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_vtabptr_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) < (0)) __PYX_ERR(0, 19103, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) < (0)) __PYX_ERR(0, 19103, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerState_v1, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) < (0)) __PYX_ERR(0, 19103, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1) < (0)) __PYX_ERR(0, 19103, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeatures = &__pyx_vtable_4cuda_8bindings_5_nvml_GridLicensableFeatures;
  __pyx_vtable_4cuda_8bindings_5_nvml_GridLicensableFeatures._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_GridLicensableFeatures *))__pyx_f_4cuda_8bindings_5_nvml_22GridLicensableFeatures__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures)) __PYX_ERR(0, 19270, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures) < (0)) __PYX_ERR(0, 19270, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures = &__pyx_type_4cuda_8bindings_5_nvml_GridLicensableFeatures;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures) < (0)) __PYX_ERR(0, 19270, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_vtabptr_4cuda_8bindings_5_nvml_GridLicensableFeatures) < (0)) __PYX_ERR(0, 19270, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures) < (0)) __PYX_ERR(0, 19270, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_GridLicensableFeatures, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures) < (0)) __PYX_ERR(0, 19270, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures) < (0)) __PYX_ERR(0, 19270, __pyx_L1_error)
  __pyx_vtabptr_4cuda_8bindings_5_nvml_NvLinkInfo_v2 = &__pyx_vtable_4cuda_8bindings_5_nvml_NvLinkInfo_v2;
  __pyx_vtable_4cuda_8bindings_5_nvml_NvLinkInfo_v2._get_ptr = (intptr_t (*)(struct __pyx_obj_4cuda_8bindings_5_nvml_NvLinkInfo_v2 *))__pyx_f_4cuda_8bindings_5_nvml_13NvLinkInfo_v2__get_ptr;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2 = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2)) __PYX_ERR(0, 19417, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2_spec, __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2) < (0)) __PYX_ERR(0, 19417, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2 = &__pyx_type_4cuda_8bindings_5_nvml_NvLinkInfo_v2;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2) < (0)) __PYX_ERR(0, 19417, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2->tp_dictoffset && __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_vtabptr_4cuda_8bindings_5_nvml_NvLinkInfo_v2) < (0)) __PYX_ERR(0, 19417, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2) < (0)) __PYX_ERR(0, 19417, __pyx_L1_error)
  if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_NvLinkInfo_v2, (PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2) < (0)) __PYX_ERR(0, 19417, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2) < (0)) __PYX_ERR(0, 19417, __pyx_L1_error)
  __pyx_vtabptr_array = &__pyx_vtable_array;
  __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_array_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_array_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_array_type)) __PYX_ERR(1, 110, __pyx_L1_error)
  #if !CYTHON_COMPILING_IN_LIMITED_API
  __pyx_mstate->__pyx_array_type->tp_as_buffer = &__pyx_tp_as_buffer_array;
  if (!__pyx_mstate->__pyx_array_type->tp_as_buffer->bf_releasebuffer && __pyx_mstate->__pyx_array_type->tp_base->tp_as_buffer && __pyx_mstate->__pyx_array_type->tp_base->tp_as_buffer->bf_releasebuffer) {
    __pyx_mstate->__pyx_array_type->tp_as_buffer->bf_releasebuffer = __pyx_mstate->__pyx_array_type->tp_base->tp_as_buffer->bf_releasebuffer;
  }
  #elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer)
  /* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */
  #elif defined(_MSC_VER)
  #pragma message ("The buffer protocol is not supported in the Limited C-API < 3.11.")
  #else
  #warning "The buffer protocol is not supported in the Limited C-API < 3.11."
  #endif
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_array_spec, __pyx_mstate->__pyx_array_type) < (0)) __PYX_ERR(1, 110, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_array_type = &__pyx_type___pyx_array;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_array_type) < (0)) __PYX_ERR(1, 110, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_array_type);
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_array_type, __pyx_vtabptr_array) < (0)) __PYX_ERR(1, 110, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_array_type) < (0)) __PYX_ERR(1, 110, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_array_type) < (0)) __PYX_ERR(1, 110, __pyx_L1_error)
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_MemviewEnum_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_MemviewEnum_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_MemviewEnum_type)) __PYX_ERR(1, 299, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_MemviewEnum_spec, __pyx_mstate->__pyx_MemviewEnum_type) < (0)) __PYX_ERR(1, 299, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_MemviewEnum_type) < (0)) __PYX_ERR(1, 299, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_MemviewEnum_type);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_MemviewEnum_type->tp_dictoffset && __pyx_mstate->__pyx_MemviewEnum_type->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_MemviewEnum_type->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_MemviewEnum_type) < (0)) __PYX_ERR(1, 299, __pyx_L1_error)
  __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
  __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
  __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
  __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
  __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
  __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
  __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
  __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
  __pyx_vtable_memoryview._get_base = (PyObject *(*)(struct __pyx_memoryview_obj *))__pyx_memoryview__get_base;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_mstate->__pyx_memoryview_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_memoryview_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_memoryview_type)) __PYX_ERR(1, 334, __pyx_L1_error)
  #if !CYTHON_COMPILING_IN_LIMITED_API
  __pyx_mstate->__pyx_memoryview_type->tp_as_buffer = &__pyx_tp_as_buffer_memoryview;
  if (!__pyx_mstate->__pyx_memoryview_type->tp_as_buffer->bf_releasebuffer && __pyx_mstate->__pyx_memoryview_type->tp_base->tp_as_buffer && __pyx_mstate->__pyx_memoryview_type->tp_base->tp_as_buffer->bf_releasebuffer) {
    __pyx_mstate->__pyx_memoryview_type->tp_as_buffer->bf_releasebuffer = __pyx_mstate->__pyx_memoryview_type->tp_base->tp_as_buffer->bf_releasebuffer;
  }
  #elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer)
  /* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */
  #elif defined(_MSC_VER)
  #pragma message ("The buffer protocol is not supported in the Limited C-API < 3.11.")
  #else
  #warning "The buffer protocol is not supported in the Limited C-API < 3.11."
  #endif
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_memoryview_spec, __pyx_mstate->__pyx_memoryview_type) < (0)) __PYX_ERR(1, 334, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_memoryview_type) < (0)) __PYX_ERR(1, 334, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_memoryview_type);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_memoryview_type->tp_dictoffset && __pyx_mstate->__pyx_memoryview_type->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_memoryview_type->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_memoryview_type, __pyx_vtabptr_memoryview) < (0)) __PYX_ERR(1, 334, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_memoryview_type) < (0)) __PYX_ERR(1, 334, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_memoryview_type) < (0)) __PYX_ERR(1, 334, __pyx_L1_error)
  __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
  __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
  __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
  __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
  __pyx_vtable__memoryviewslice.__pyx_base._get_base = (PyObject *(*)(struct __pyx_memoryview_obj *))__pyx_memoryviewslice__get_base;
  #if CYTHON_USE_TYPE_SPECS
  __pyx_t_1 = PyTuple_Pack(1, (PyObject *)__pyx_mstate_global->__pyx_memoryview_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_mstate->__pyx_memoryviewslice_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_memoryviewslice_spec, __pyx_t_1);
  __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (unlikely(!__pyx_mstate->__pyx_memoryviewslice_type)) __PYX_ERR(1, 951, __pyx_L1_error)
  if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_memoryviewslice_spec, __pyx_mstate->__pyx_memoryviewslice_type) < (0)) __PYX_ERR(1, 951, __pyx_L1_error)
  #else
  __pyx_mstate->__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  __pyx_mstate_global->__pyx_memoryviewslice_type->tp_base = __pyx_mstate_global->__pyx_memoryview_type;
  #endif
  #if !CYTHON_USE_TYPE_SPECS
  if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_memoryviewslice_type) < (0)) __PYX_ERR(1, 951, __pyx_L1_error)
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_memoryviewslice_type);
  #endif
  #if !CYTHON_COMPILING_IN_LIMITED_API
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_memoryviewslice_type->tp_dictoffset && __pyx_mstate->__pyx_memoryviewslice_type->tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_mstate->__pyx_memoryviewslice_type->tp_getattro = PyObject_GenericGetAttr;
  }
  #endif
  if (__Pyx_SetVtable(__pyx_mstate->__pyx_memoryviewslice_type, __pyx_vtabptr__memoryviewslice) < (0)) __PYX_ERR(1, 951, __pyx_L1_error)
  if (__Pyx_MergeVtables(__pyx_mstate->__pyx_memoryviewslice_type) < (0)) __PYX_ERR(1, 951, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject *) __pyx_mstate->__pyx_memoryviewslice_type) < (0)) __PYX_ERR(1, 951, __pyx_L1_error)
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_RefNannyFinishContext();
  return -1;
}

/* Import the builtin extension types this module's C code references
 * (`type`, `bool`, `complex`) and cache their PyTypeObject pointers in the
 * module state. Generated by Cython; returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __Pyx_modinit_type_import_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  PyObject *__pyx_t_1 = NULL;
  /* These three locals are captured by name inside the __PYX_ERR macro to
   * record the failing Cython source location — do not rename or remove. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
  /*--- Type import code ---*/
  /* Import the builtins module and look up `type`. The expected struct size
   * passed to __Pyx_ImportType varies by runtime: PyPy and the Limited API
   * cannot (or need not) size-check, while CPython full API checks against
   * PyHeapTypeObject. A size mismatch produces a warning, not an error
   * (__Pyx_ImportType_CheckSize_Warn). */
  __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_mstate->__pyx_ptype_7cpython_4type_type = __Pyx_ImportType_3_2_2(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
  #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
  sizeof(PyTypeObject), __PYX_GET_STRUCT_ALIGNMENT_3_2_2(PyTypeObject),
  #elif CYTHON_COMPILING_IN_LIMITED_API
  0, 0,
  #else
  sizeof(PyHeapTypeObject), __PYX_GET_STRUCT_ALIGNMENT_3_2_2(PyHeapTypeObject),
  #endif
  __Pyx_ImportType_CheckSize_Warn_3_2_2); if (!__pyx_mstate->__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Same pattern for builtin `bool` (a PyLongObject subtype at the C level). */
  __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 8, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_mstate->__pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType_3_2_2(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool",
  #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
  sizeof(PyLongObject), __PYX_GET_STRUCT_ALIGNMENT_3_2_2(PyLongObject),
  #elif CYTHON_COMPILING_IN_LIMITED_API
  0, 0,
  #else
  sizeof(PyLongObject), __PYX_GET_STRUCT_ALIGNMENT_3_2_2(PyLongObject),
  #endif
  __Pyx_ImportType_CheckSize_Warn_3_2_2); if (!__pyx_mstate->__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(4, 8, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* Same pattern for builtin `complex` (PyComplexObject). */
  __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_mstate->__pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType_3_2_2(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex",
  #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
  sizeof(PyComplexObject), __PYX_GET_STRUCT_ALIGNMENT_3_2_2(PyComplexObject),
  #elif CYTHON_COMPILING_IN_LIMITED_API
  0, 0,
  #else
  sizeof(PyComplexObject), __PYX_GET_STRUCT_ALIGNMENT_3_2_2(PyComplexObject),
  #endif
  __Pyx_ImportType_CheckSize_Warn_3_2_2); if (!__pyx_mstate->__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(5, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_RefNannyFinishContext();
  return 0;
  /* Error path: release the (possibly NULL) temporary and propagate failure. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_RefNannyFinishContext();
  return -1;
}

/* Placeholder for cross-module C variable imports. This module imports no C
 * variables, so the generated body is empty aside from the RefNanny
 * bookkeeping. Always succeeds (returns 0). */
static int __Pyx_modinit_variable_import_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
  /*--- Variable import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Import C-level (cdef) functions exported by sibling Cython modules.
 *
 * Each `{ ... }` scope below imports one module's C API.  The generated
 * function-pointer table is walked in lock-step with a packed,
 * '\0'-separated blob (a bytes object in module state) that stores all
 * signatures first, then all function names at a fixed byte offset.
 * __Pyx_ImportFunction_3_2_2 resolves each name against the capsule
 * exported by the source module and stores the result through the
 * corresponding pointer-to-function-pointer.
 *
 * Returns 0 on success; on failure returns -1 with a Python exception set
 * (via the __PYX_ERR goto into __pyx_L1_error). */
static int __Pyx_modinit_function_import_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  PyObject *__pyx_t_1 = NULL;  /* owned reference to the imported module */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
  /*--- Function import code ---*/
  {
    /* Import the cdef API of cuda.bindings.cy_nvml (the NVML wrappers). */
    __pyx_t_1 = PyImport_ImportModule("cuda.bindings.cy_nvml"); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* Signatures start at offset 0 of the packed blob ... */
    const char * __pyx_import_signature = __Pyx_PyBytes_AsString(__pyx_mstate_global->__pyx_kp_b_char_const_nvmlReturn_t_nvmlRetu);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (unlikely(!__pyx_import_signature)) __PYX_ERR(0, 1, __pyx_L1_error)
    #endif
    /* ... and the function names start at this generated byte offset. */
    const char * __pyx_import_name = __pyx_import_signature + 14397;
    void (**const __pyx_import_pointers[])(void) = {(void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlErrorString, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNVMLVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByPciBusId_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleBySerial, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceDestroy, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceGetInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeKeyRotationThresholdInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeKeyRotationThresholdInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearCpuAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetGpuLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetMemoryLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetCpuAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceValidateInforom, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPgpuMetadataString, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardPartNumber, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomImageVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetName, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSerial, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVbiosVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCudaComputeCapability, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkMinMaxVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkMinMaxVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearFieldValues, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFieldValues, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBAR1MemoryInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBrand, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBridgeChipInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBusType, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetC2cModeInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClkMonStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockOffsets, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetClockOffsets, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClock, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxClockOfPState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxClockInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxCustomerBoostClock, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetComputeMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuAttestationReport, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuCertificate, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeMemSizeInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCoolerInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAddressingMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetArchitecture, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAttributes_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClockFreqs, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceModes, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerMizerMode_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerMizerMode_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDeviceHandleFromMigDeviceHandle, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceOnSameBoard, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetP2PStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyCommonAncestor, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDramEncryptionMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDramEncryptionMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDriverModel_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDriverModel, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearEccErrorCounts, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramEccErrorStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDefaultEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayActive, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPersistenceMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPagesPendingStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAutoBoostedClocksEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAccountingMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAutoBoostedClocksEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPersistenceMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultAutoBoostedClocksEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCStats, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeedRPM, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSampleGet, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryDeviceSupport, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDynamicPstatesInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuFabricInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuOperationMode, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuOperationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyNearestGpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVirtualizationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVirtualizationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGridLicensableFeatures_v4, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostVgpuMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostname_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetHostname_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMarginTemperature, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryErrorCounter, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEccErrors, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeProtectedMemoryUsage, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvLinkDeviceLowPowerThreshold, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkSupportedBwModes, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceReadWritePRM_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfoExt, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfo_v3, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieThroughput, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPdi, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPlatformInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingActivatePresetProfile, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingUpdatePresetProfileParam, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingSetState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerSource, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRunningProcessDetailList, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessesUtilizationInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedPerformanceStates, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRepairStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAPIRestriction, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAPIRestriction, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRowRemapperHistogram, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSamples, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetTemperatureThreshold, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureThreshold, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUtilizationRates, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuInstancesUtilizationInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuMetadata, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessesUtilizationInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerLog, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeCreatablePlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeSupportedPlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstances, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceWorkloadPowerProfileClearRequestedProfiles, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClocksEventReasons, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedClocksEventReasons, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedEventTypes, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEnergyConsumption, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetLastBBXFlushTime, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetConfComputeUnprotectedMemSize, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRegisterEvents, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessUtilization, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingBufferSize, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAdaptiveClockInfoStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeInstanceId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkGeneration, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkWidth, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEnforcedPowerLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuMaxPcieLinkGeneration, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomConfigurationChecksum, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIrqNum, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxMigDeviceCount, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkGeneration, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkWidth, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryBusWidth, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinorNumber, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetModuleId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMultiGpuBoard, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumFans, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumGpuCores, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumaNodeId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieLinkMaxSpeed, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieReplayCounter, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementDefaultLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerUsage, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceIsMigDeviceHandle, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryIfStreamingEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeRunningProcesses_v3, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMPSComputeRunningProcesses_v3, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetActiveVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCreatableVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDecoderUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetJpgUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxFanSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetOfaUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimitConstraints, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedMemoryClocks, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderStats, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRemappedRows, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetNvLinkErrorCounters, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultFanSpeed_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSetStreamingEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingStats, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigDeviceHandleByIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanControlPolicy_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanControlPolicy, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmMigSampleGet, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstancePossiblePlacements_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstanceWithPlacement, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoByIdV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstance, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceById, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstances, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetThermalSettings, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemoteDeviceType, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkCapability, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkErrorCounter, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemotePciInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMigMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceRemainingCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTargetFanSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedGraphicsClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanSpeed_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMemoryLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinityWithinScope, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetCreate, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetFree, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetWait_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceDestroy, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetActiveVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuTypeCreatablePlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerLog, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerState, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetCreatableVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstancePossiblePlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstanceWithPlacement, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstance, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceById, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstances, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceRemainingCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceProfileInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceDiscoverGpus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRemoveGpu_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceQueryDrainState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceModifyDrainState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeSettings, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverBranch, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetCreate, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetFree, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetWait, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemRegisterEvents, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUIDV, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitSetLedState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetLedState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetPsuInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetFanSpeedInfo, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetUnitInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetDevices, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetTemperature, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuDriverCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceClearAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuPciId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMdevUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmDriverVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmID, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCStats, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMetadata, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetPlacementId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetRuntimeStateSize, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetType, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFbUsage, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFrameRateLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuInstanceId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderSessions, (void 
(**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderStats, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceSetEncoderCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingStats, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuCompatibility, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetClass, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetName, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetLicense, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetBAR1Info, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFbReservation, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFramebufferSize, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGspHeapSize, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetDeviceID, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFrameRateLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGpuInstanceProfileId, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerVm, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetNumDisplayHeads, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetResolution, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerGpuInstance, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSetVgpuVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCount_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceCount, 
(void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeGpusReadyState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetCount, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetHicVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlInitWithFlags, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeGpusReadyState, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetProcessName, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByIndex_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceInfoByIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetHandleByIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetTopologyGpuSet, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlInit_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlShutdown, (void (**)(void)) NULL};
    void (**const *__pyx_import_pointer)(void) = __pyx_import_pointers;
    const char *__pyx_import_current_signature = __pyx_import_signature;
    /* Walk pointers, names and signatures in lock-step until the NULL
     * sentinel; an empty signature entry means "reuse the previous
     * function's signature". */
    while (*__pyx_import_pointer) {
      if (__Pyx_ImportFunction_3_2_2(__pyx_t_1, __pyx_import_name, *__pyx_import_pointer, __pyx_import_current_signature) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
      ++__pyx_import_pointer;
      __pyx_import_name = strchr(__pyx_import_name, '\0') + 1;
      __pyx_import_signature = strchr(__pyx_import_signature, '\0') + 1;
      if (*__pyx_import_signature != '\0') __pyx_import_current_signature = __pyx_import_signature;
    }
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  {
    /* Import internal helper functions (buffer/nested-resource access),
     * including all fused-type specializations of get_nested_resource_ptr. */
    __pyx_t_1 = PyImport_ImportModule("cuda.bindings._internal.utils"); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    const char * __pyx_import_signature = __Pyx_PyBytes_AsString(__pyx_mstate_global->__pyx_kp_b_int___pyx_t_4cuda_8bindings_9_in);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (unlikely(!__pyx_import_signature)) __PYX_ERR(0, 1, __pyx_L1_error)
    #endif
    /* Names for this module's blob start at this generated byte offset. */
    const char * __pyx_import_name = __pyx_import_signature + 702;
    void (**const __pyx_import_pointers[])(void) = {(void (**)(void))&__pyx_fuse_3__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr, (void (**)(void))&__pyx_fuse_5__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr, (void (**)(void))&__pyx_fuse_4__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr, (void (**)(void))&__pyx_fuse_1__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr, (void (**)(void))&__pyx_fuse_2__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr, (void (**)(void))&__pyx_fuse_0__pyx_f_4cuda_8bindings_9_internal_5utils_get_nested_resource_ptr, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5utils_get_buffer_pointer, (void (**)(void)) NULL};
    void (**const *__pyx_import_pointer)(void) = __pyx_import_pointers;
    const char *__pyx_import_current_signature = __pyx_import_signature;
    /* Same lock-step walk as above, for the utils module. */
    while (*__pyx_import_pointer) {
      if (__Pyx_ImportFunction_3_2_2(__pyx_t_1, __pyx_import_name, *__pyx_import_pointer, __pyx_import_current_signature) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
      ++__pyx_import_pointer;
      __pyx_import_name = strchr(__pyx_import_name, '\0') + 1;
      __pyx_import_signature = strchr(__pyx_import_signature, '\0') + 1;
      if (*__pyx_import_signature != '\0') __pyx_import_current_signature = __pyx_import_signature;
    }
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  __Pyx_RefNannyFinishContext();
  return 0;  /* all C-API functions resolved */
  __pyx_L1_error:;
  /* Error path: release the partially-imported module (if any) and signal
   * failure; the Python exception is already set by the failing call. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_RefNannyFinishContext();
  return -1;
}

#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec__nvml(PyObject* module); /*proto*/
/* PEP 489 multi-phase initialization slots: Py_mod_create builds the module
 * object from its spec, Py_mod_exec then runs the generated module body. */
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
  {Py_mod_create, (void*)__pyx_pymod_create},
  {Py_mod_exec, (void*)__pyx_pymod_exec__nvml},
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
  /* On free-threaded CPython, declare the module safe to run without the GIL. */
  {Py_mod_gil, Py_MOD_GIL_NOT_USED},
  #endif
  #if PY_VERSION_HEX >= 0x030C0000 && CYTHON_USE_MODULE_STATE
  /* 3.12+: explicitly opt out of per-interpreter (subinterpreter) loading. */
  {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED},
  #endif
  {0, NULL}  /* sentinel */
};
#endif

/* Module definition for "_nvml".  Under C++ the definition lives in an
 * anonymous namespace to give it internal linkage (the C branch uses
 * `static` for the same effect). */
#ifdef __cplusplus
namespace {
  struct PyModuleDef __pyx_moduledef =
  #else
  static struct PyModuleDef __pyx_moduledef =
  #endif
  {
      PyModuleDef_HEAD_INIT,
      "_nvml",
      0, /* m_doc */
    #if CYTHON_USE_MODULE_STATE
      sizeof(__pyx_mstatetype), /* m_size */
    #else
      /* Multi-phase init uses 0; single-phase uses -1 (no per-module state). */
      (CYTHON_PEP489_MULTI_PHASE_INIT) ? 0 : -1, /* m_size */
    #endif
      __pyx_methods /* m_methods */,
    #if CYTHON_PEP489_MULTI_PHASE_INIT
      __pyx_moduledef_slots, /* m_slots */
    #else
      NULL, /* m_reload */
    #endif
    #if CYTHON_USE_MODULE_STATE
      /* Module-state builds supply GC hooks so per-module globals are traversed. */
      __pyx_m_traverse, /* m_traverse */
      __pyx_m_clear, /* m_clear */
      NULL /* m_free */
    #else
      NULL, /* m_traverse */
      NULL, /* m_clear */
      NULL /* m_free */
    #endif
  };
  #ifdef __cplusplus
} /* anonymous namespace */
#endif

/* PyModInitFuncType */
/* Select the declaration for the module init entry point: normally the
 * exporting PyMODINIT_FUNC; with CYTHON_NO_PYINIT_EXPORT the symbol is not
 * exported, but C++ still needs `extern "C"` linkage. */
#ifndef CYTHON_NO_PYINIT_EXPORT
  #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#else
  #ifdef __cplusplus
  #define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
  #else
  #define __Pyx_PyMODINIT_FUNC PyObject *
  #endif
#endif

/* Module entry point called by the interpreter on import.  Under PEP 489
 * multi-phase init this only registers the module definition; the module
 * body itself runs later via the Py_mod_exec slot (__pyx_pymod_exec__nvml).
 * NOTE: when CYTHON_PEP489_MULTI_PHASE_INIT is 0, the function body is
 * instead the exec code that follows the matching #endif below. */
__Pyx_PyMODINIT_FUNC PyInit__nvml(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit__nvml(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
  return PyModuleDef_Init(&__pyx_moduledef);
}
/* ModuleCreationPEP489 */
#if CYTHON_COMPILING_IN_LIMITED_API && (__PYX_LIMITED_VERSION_HEX < 0x03090000\
      || ((defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)) && __PYX_LIMITED_VERSION_HEX < 0x030A0000))
/* Limited-API fallback: obtain the current interpreter id by calling the
 * stdlib helper modules instead of PyInterpreterState_GetID (which is not
 * available at this Limited API level).  Returns the id, or -1 after
 * writing a diagnostic to stderr on failure. */
static PY_INT64_T __Pyx_GetCurrentInterpreterId(void) {
    {
        /* "_interpreters" is the 3.13+ module name; earlier versions
         * (3.8-3.12) ship it as "_xxsubinterpreters". */
        PyObject *helper = PyImport_ImportModule("_interpreters");
        if (!helper) {
            PyErr_Clear();
            helper = PyImport_ImportModule("_xxsubinterpreters");
            if (!helper) goto bad;
        }
        PyObject *interp_id = PyObject_CallMethod(helper, "get_current", NULL);
        Py_DECREF(helper);
        if (!interp_id) goto bad;
        /* Some versions return a tuple; the id is its first element. */
        if (PyTuple_Check(interp_id)) {
            PyObject *first = PySequence_GetItem(interp_id, 0);
            Py_DECREF(interp_id);
            if (!first) goto bad;
            interp_id = first;
        }
        long long numeric_id = PyLong_AsLongLong(interp_id);
        Py_DECREF(interp_id);
        return numeric_id;
    }
  bad:
    PySys_WriteStderr("__Pyx_GetCurrentInterpreterId failed. Try setting the C define CYTHON_PEP489_MULTI_PHASE_INIT=0\n");
    return -1;
}
#endif
#if !CYTHON_USE_MODULE_STATE
/* Without per-module state this extension can only live in one interpreter
 * per process.  Record the interpreter id on first import and raise
 * ImportError if a different interpreter tries to load the module later.
 * Returns 0 on success, -1 on error (with an exception set, except when the
 * id query itself reports -1). */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
    /* -1 marks "not yet recorded". */
    static PY_INT64_T main_interpreter_id = -1;
/* Pick the interpreter-id query appropriate for the runtime/API level. */
#if CYTHON_COMPILING_IN_GRAAL && defined(GRAALPY_VERSION_NUM) && GRAALPY_VERSION_NUM > 0x19000000
    PY_INT64_T current_id = GraalPyInterpreterState_GetIDFromThreadState(PyThreadState_Get());
#elif CYTHON_COMPILING_IN_GRAAL
    PY_INT64_T current_id = PyInterpreterState_GetIDFromThreadState(PyThreadState_Get());
#elif CYTHON_COMPILING_IN_LIMITED_API && (__PYX_LIMITED_VERSION_HEX < 0x03090000\
      || ((defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)) && __PYX_LIMITED_VERSION_HEX < 0x030A0000))
    /* Old Limited API: fall back to the Python-level helper defined above. */
    PY_INT64_T current_id = __Pyx_GetCurrentInterpreterId();
#elif CYTHON_COMPILING_IN_LIMITED_API
    PY_INT64_T current_id = PyInterpreterState_GetID(PyInterpreterState_Get());
#else
    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
#endif
    if (unlikely(current_id == -1)) {
        return -1;
    }
    if (main_interpreter_id == -1) {
        /* First import: remember which interpreter owns this module. */
        main_interpreter_id = current_id;
        return 0;
    } else if (unlikely(main_interpreter_id != current_id)) {
        PyErr_SetString(
            PyExc_ImportError,
            "Interpreter change detected - this module can only be loaded into one interpreter per process.");
        return -1;
    }
    return 0;
}
#endif
/* Copy attribute `from_name` from the module spec object into the module
 * dict under `to_name`.  A missing attribute is silently tolerated; a value
 * of None is skipped unless `allow_none` is non-zero.  Returns 0 on success,
 * -1 on error (exception set). */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none)
{
    PyObject *attr = PyObject_GetAttrString(spec, from_name);
    if (unlikely(!attr)) {
        /* Only AttributeError is acceptable; any other error propagates. */
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            return -1;
        PyErr_Clear();
        return 0;
    }
    int rc = 0;
    if (allow_none || attr != Py_None)
        rc = PyDict_SetItemString(moddict, to_name, attr);
    Py_DECREF(attr);
    return rc;
}
/* Py_mod_create slot: build the module object from its import spec and copy
 * the standard loader attributes (__loader__, __file__, __package__,
 * __path__) from the spec into the module dict.  Returns a new reference,
 * or NULL with an exception set. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) {
    PyObject *module = NULL, *moddict, *modname;
    CYTHON_UNUSED_VAR(def);
    #if !CYTHON_USE_MODULE_STATE
    /* Non-module-state builds refuse to load into a second interpreter. */
    if (__Pyx_check_single_interpreter())
        return NULL;
    #endif
    /* Re-import after the module already exists: hand back the same object. */
    if (__pyx_m)
        return __Pyx_NewRef(__pyx_m);
    modname = PyObject_GetAttrString(spec, "name");
    if (unlikely(!modname)) goto bad;
    module = PyModule_NewObject(modname);
    Py_DECREF(modname);
    if (unlikely(!module)) goto bad;
    /* moddict is a borrowed reference; the module owns it. */
    moddict = PyModule_GetDict(module);
    if (unlikely(!moddict)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
    /* __path__ must not be set to None, hence allow_none = 0 here. */
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
    return module;
bad:
    Py_XDECREF(module);
    return NULL;
}


static CYTHON_SMALL_CODE int __pyx_pymod_exec__nvml(PyObject *__pyx_pyinit_module)
#endif
{
  int stringtab_initialized = 0;
  #if CYTHON_USE_MODULE_STATE
  int pystate_addmodule_run = 0;
  #endif
  __pyx_mstatetype *__pyx_mstate = NULL;
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  size_t __pyx_t_6;
  static PyThread_type_lock __pyx_t_7[8];
  int __pyx_t_8;
  Py_ssize_t __pyx_t_9;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  if (__pyx_m) {
    if (__pyx_m == __pyx_pyinit_module) return 0;
    PyErr_SetString(PyExc_RuntimeError, "Module '_nvml' has already been imported. Re-initialisation is not supported.");
    return -1;
  }
  #else
  if (__pyx_m) return __Pyx_NewRef(__pyx_m);
  #endif
  /*--- Module creation code ---*/
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  __pyx_t_1 = __pyx_pyinit_module;
  Py_INCREF(__pyx_t_1);
  #else
  __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #if CYTHON_USE_MODULE_STATE
  {
    int add_module_result = __Pyx_State_AddModule(__pyx_t_1, &__pyx_moduledef);
    __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to "_nvml" pseudovariable */
    if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
    pystate_addmodule_run = 1;
  }
  #else
  __pyx_m = __pyx_t_1;
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
  PyUnstable_Module_SetGIL(__pyx_m, Py_MOD_GIL_NOT_USED);
  #endif
  __pyx_mstate = __pyx_mstate_global;
  CYTHON_UNUSED_VAR(__pyx_t_1);
  __pyx_mstate->__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_mstate->__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_mstate->__pyx_d);
  __pyx_mstate->__pyx_b = __Pyx_PyImport_AddModuleRef(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_mstate->__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_cython_runtime = __Pyx_PyImport_AddModuleRef("cython_runtime"); if (unlikely(!__pyx_mstate->__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_mstate->__pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  /* ImportRefnannyAPI */
  #if CYTHON_REFNANNY
  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
  if (!__Pyx_RefNanny) {
    PyErr_Clear();
    __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
    if (!__Pyx_RefNanny)
        Py_FatalError("failed to import 'refnanny' module");
  }
  #endif
  
__Pyx_RefNannySetupContext("PyInit__nvml", 0);
  __Pyx_init_runtime_version();
  if (__Pyx_check_binary_version(__PYX_LIMITED_VERSION_HEX, __Pyx_get_runtime_version(), CYTHON_COMPILING_IN_LIMITED_API) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_mstate->__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_mstate->__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_mstate->__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Library function declarations ---*/
  /*--- Initialize various global constants etc. ---*/
  if (__Pyx_InitConstants(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  stringtab_initialized = 1;
  if (__Pyx_InitGlobals() < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__pyx_module_is_main_cuda__bindings___nvml) {
    if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_name_2, __pyx_mstate_global->__pyx_n_u_main) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  }
  {
    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
    if (!PyDict_GetItemString(modules, "cuda.bindings._nvml")) {
      if (unlikely((PyDict_SetItemString(modules, "cuda.bindings._nvml", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
    }
  }
  /*--- Builtin init code ---*/
  if (__Pyx_InitCachedBuiltins(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Constants init code ---*/
  if (__Pyx_InitCachedConstants(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_CreateCodeObjects(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Global type/function init code ---*/
  (void)__Pyx_modinit_global_init_code(__pyx_mstate);
  (void)__Pyx_modinit_variable_export_code(__pyx_mstate);
  if (unlikely((__Pyx_modinit_function_export_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
  if (unlikely((__Pyx_modinit_type_init_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
  if (unlikely((__Pyx_modinit_type_import_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
  (void)__Pyx_modinit_variable_import_code(__pyx_mstate);
  if (unlikely((__Pyx_modinit_function_import_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Execution code ---*/

  /* "View.MemoryView":100
 * 
 * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence"
 * try:             # <<<<<<<<<<<<<<
 *     __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence
 * except:
*/
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* "View.MemoryView":101
 * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence"
 * try:
 *     __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence             # <<<<<<<<<<<<<<
 * except:
 * 
*/
      __pyx_t_5 = NULL;
      __pyx_t_6 = 1;
      {
        PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_collections_abc};
        __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin___import__, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L2_error)
        __Pyx_GOTREF(__pyx_t_4);
      }
      __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_abc); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 101, __pyx_L2_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_Sequence); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L2_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_XGOTREF(__pyx_collections_abc_Sequence);
      __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, __pyx_t_4);
      __Pyx_GIVEREF(__pyx_t_4);
      __pyx_t_4 = 0;

      /* "View.MemoryView":100
 * 
 * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence"
 * try:             # <<<<<<<<<<<<<<
 *     __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence
 * except:
*/
    }
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    goto __pyx_L7_try_end;
    __pyx_L2_error:;
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;

    /* "View.MemoryView":102
 * try:
 *     __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence
 * except:             # <<<<<<<<<<<<<<
 * 
 *     __pyx_collections_abc_Sequence = None
*/
    /*except:*/ {
      __Pyx_ErrRestore(0,0,0);

      /* "View.MemoryView":104
 * except:
 * 
 *     __pyx_collections_abc_Sequence = None             # <<<<<<<<<<<<<<
 * 
 * 
*/
      __Pyx_INCREF(Py_None);
      __Pyx_XGOTREF(__pyx_collections_abc_Sequence);
      __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, Py_None);
      __Pyx_GIVEREF(Py_None);
      goto __pyx_L3_exception_handled;
    }
    __pyx_L3_exception_handled:;
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
    __pyx_L7_try_end:;
  }

  /* "View.MemoryView":239
 * 
 * 
 *     try:             # <<<<<<<<<<<<<<
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index
*/
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_1);
    /*try:*/ {

      /* "View.MemoryView":240
 * 
 *     try:
 *         count = __pyx_collections_abc_Sequence.count             # <<<<<<<<<<<<<<
 *         index = __pyx_collections_abc_Sequence.index
 *     except:
*/
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_mstate_global->__pyx_n_u_count); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 240, __pyx_L10_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_array_type, __pyx_mstate_global->__pyx_n_u_count, __pyx_t_4) < (0)) __PYX_ERR(1, 240, __pyx_L10_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

      /* "View.MemoryView":241
 *     try:
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index             # <<<<<<<<<<<<<<
 *     except:
 *         pass
*/
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_mstate_global->__pyx_n_u_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 241, __pyx_L10_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_array_type, __pyx_mstate_global->__pyx_n_u_index, __pyx_t_4) < (0)) __PYX_ERR(1, 241, __pyx_L10_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

      /* "View.MemoryView":239
 * 
 * 
 *     try:             # <<<<<<<<<<<<<<
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index
*/
    }
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    goto __pyx_L15_try_end;
    __pyx_L10_error:;
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;

    /* "View.MemoryView":242
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index
 *     except:             # <<<<<<<<<<<<<<
 *         pass
 * 
*/
    /*except:*/ {
      __Pyx_ErrRestore(0,0,0);
      goto __pyx_L11_exception_handled;
    }
    __pyx_L11_exception_handled:;
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1);
    __pyx_L15_try_end:;
  }

  /* "View.MemoryView":307
 *         return self.name
 * 
 * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
*/
  __pyx_t_5 = NULL;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_strided_and_direct_or_indirect};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_MemviewEnum_type, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 307, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __Pyx_XGOTREF(generic);
  __Pyx_DECREF_SET(generic, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF((PyObject *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "View.MemoryView":308
 * 
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 * 
*/
  __pyx_t_5 = NULL;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_strided_and_direct};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_MemviewEnum_type, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 308, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __Pyx_XGOTREF(strided);
  __Pyx_DECREF_SET(strided, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF((PyObject *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "View.MemoryView":309
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_5 = NULL;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_strided_and_indirect};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_MemviewEnum_type, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 309, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __Pyx_XGOTREF(indirect);
  __Pyx_DECREF_SET(indirect, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF((PyObject *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "View.MemoryView":312
 * 
 * 
 * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 * 
*/
  __pyx_t_5 = NULL;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_contiguous_and_direct};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_MemviewEnum_type, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 312, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __Pyx_XGOTREF(contiguous);
  __Pyx_DECREF_SET(contiguous, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF((PyObject *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "View.MemoryView":313
 * 
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_5 = NULL;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_contiguous_and_indirect};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_mstate_global->__pyx_MemviewEnum_type, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 313, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_4);
  }
  __Pyx_XGOTREF(indirect_contiguous);
  __Pyx_DECREF_SET(indirect_contiguous, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF((PyObject *)__pyx_t_4);
  __pyx_t_4 = 0;

  /* "View.MemoryView":321
 * 
 * 
 * cdef int __pyx_memoryview_thread_locks_used = 0             # <<<<<<<<<<<<<<
 * cdef PyThread_type_lock[8] __pyx_memoryview_thread_locks = [
 *     PyThread_allocate_lock(),
*/
  __pyx_memoryview_thread_locks_used = 0;

  /* "View.MemoryView":322
 * 
 * cdef int __pyx_memoryview_thread_locks_used = 0
 * cdef PyThread_type_lock[8] __pyx_memoryview_thread_locks = [             # <<<<<<<<<<<<<<
 *     PyThread_allocate_lock(),
 *     PyThread_allocate_lock(),
*/
  __pyx_t_7[0] = PyThread_allocate_lock();
  __pyx_t_7[1] = PyThread_allocate_lock();
  __pyx_t_7[2] = PyThread_allocate_lock();
  __pyx_t_7[3] = PyThread_allocate_lock();
  __pyx_t_7[4] = PyThread_allocate_lock();
  __pyx_t_7[5] = PyThread_allocate_lock();
  __pyx_t_7[6] = PyThread_allocate_lock();
  __pyx_t_7[7] = PyThread_allocate_lock();
  memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_7, sizeof(__pyx_memoryview_thread_locks[0]) * (8));

  /* "View.MemoryView":983
 * 
 * 
 *     try:             # <<<<<<<<<<<<<<
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index
*/
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* "View.MemoryView":984
 * 
 *     try:
 *         count = __pyx_collections_abc_Sequence.count             # <<<<<<<<<<<<<<
 *         index = __pyx_collections_abc_Sequence.index
 *     except:
*/
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_mstate_global->__pyx_n_u_count); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 984, __pyx_L18_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_memoryviewslice_type, __pyx_mstate_global->__pyx_n_u_count, __pyx_t_4) < (0)) __PYX_ERR(1, 984, __pyx_L18_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

      /* "View.MemoryView":985
 *     try:
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index             # <<<<<<<<<<<<<<
 *     except:
 *         pass
*/
      __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_mstate_global->__pyx_n_u_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 985, __pyx_L18_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_memoryviewslice_type, __pyx_mstate_global->__pyx_n_u_index, __pyx_t_4) < (0)) __PYX_ERR(1, 985, __pyx_L18_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

      /* "View.MemoryView":983
 * 
 * 
 *     try:             # <<<<<<<<<<<<<<
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index
*/
    }
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    goto __pyx_L23_try_end;
    __pyx_L18_error:;
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;

    /* "View.MemoryView":986
 *         count = __pyx_collections_abc_Sequence.count
 *         index = __pyx_collections_abc_Sequence.index
 *     except:             # <<<<<<<<<<<<<<
 *         pass
 * 
*/
    /*except:*/ {
      __Pyx_ErrRestore(0,0,0);
      goto __pyx_L19_exception_handled;
    }
    __pyx_L19_exception_handled:;
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
    __pyx_L23_try_end:;
  }

  /* "View.MemoryView":989
 *         pass
 * 
 * try:             # <<<<<<<<<<<<<<
 *     if __pyx_collections_abc_Sequence:
 * 
*/
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_1);
    /*try:*/ {

      /* "View.MemoryView":990
 * 
 * try:
 *     if __pyx_collections_abc_Sequence:             # <<<<<<<<<<<<<<
 * 
 * 
*/
      __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_collections_abc_Sequence); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(1, 990, __pyx_L26_error)
      if (__pyx_t_8) {

        /* "View.MemoryView":994
 * 
 * 
 *         __pyx_collections_abc_Sequence.register(_memoryviewslice)             # <<<<<<<<<<<<<<
 *         __pyx_collections_abc_Sequence.register(array)
 * except:
*/
        __pyx_t_5 = __pyx_collections_abc_Sequence;
        __Pyx_INCREF(__pyx_t_5);
        __pyx_t_6 = 0;
        {
          PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_mstate_global->__pyx_memoryviewslice_type)};
          __pyx_t_4 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_register, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
          __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
          if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 994, __pyx_L26_error)
          __Pyx_GOTREF(__pyx_t_4);
        }
        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

        /* "View.MemoryView":995
 * 
 *         __pyx_collections_abc_Sequence.register(_memoryviewslice)
 *         __pyx_collections_abc_Sequence.register(array)             # <<<<<<<<<<<<<<
 * except:
 *     pass  # ignore failure, it's a minor issue
*/
        __pyx_t_5 = __pyx_collections_abc_Sequence;
        __Pyx_INCREF(__pyx_t_5);
        __pyx_t_6 = 0;
        {
          PyObject *__pyx_callargs[2] = {__pyx_t_5, ((PyObject *)__pyx_mstate_global->__pyx_array_type)};
          __pyx_t_4 = __Pyx_PyObject_FastCallMethod((PyObject*)__pyx_mstate_global->__pyx_n_u_register, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
          __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
          if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 995, __pyx_L26_error)
          __Pyx_GOTREF(__pyx_t_4);
        }
        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

        /* "View.MemoryView":990
 * 
 * try:
 *     if __pyx_collections_abc_Sequence:             # <<<<<<<<<<<<<<
 * 
 * 
*/
      }

      /* "View.MemoryView":989
 *         pass
 * 
 * try:             # <<<<<<<<<<<<<<
 *     if __pyx_collections_abc_Sequence:
 * 
*/
    }
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    goto __pyx_L31_try_end;
    __pyx_L26_error:;
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;

    /* "View.MemoryView":996
 *         __pyx_collections_abc_Sequence.register(_memoryviewslice)
 *         __pyx_collections_abc_Sequence.register(array)
 * except:             # <<<<<<<<<<<<<<
 *     pass  # ignore failure, it's a minor issue
 * 
*/
    /*except:*/ {
      __Pyx_ErrRestore(0,0,0);
      goto __pyx_L27_exception_handled;
    }
    __pyx_L27_exception_handled:;
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1);
    __pyx_L31_try_end:;
  }

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0x82a3537, 0x6ae9995, 0xb068931, b'name')
*/
  __pyx_t_4 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_mstate_global->__pyx_n_u_View_MemoryView); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Enum, __pyx_t_4) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":14
 *                                nested_resource)
 * 
 * from enum import IntEnum as _IntEnum             # <<<<<<<<<<<<<<
 * 
 * 
*/
  {
    PyObject* const __pyx_imported_names[] = {__pyx_mstate_global->__pyx_n_u_IntEnum};
    __pyx_t_1 = __Pyx_Import(__pyx_mstate_global->__pyx_n_u_enum, __pyx_imported_names, 1, NULL, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error)
  }
  __pyx_t_4 = __pyx_t_1;
  __Pyx_GOTREF(__pyx_t_4);
  {
    PyObject* const __pyx_imported_names[] = {__pyx_mstate_global->__pyx_n_u_IntEnum};
    __pyx_t_9 = 0; {
      __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_imported_names[__pyx_t_9]); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      switch (__pyx_t_9) {
        case 0:
        if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_IntEnum_2, __pyx_t_5) < (0)) __PYX_ERR(0, 14, __pyx_L1_error)
        break;
        default:;
      }
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    }
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":22
 * cimport cpython.memoryview
 * from libc.string cimport memcmp, memcpy
 * import numpy as _numpy             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_1 = __Pyx_Import(__pyx_mstate_global->__pyx_n_u_numpy_2, 0, 0, NULL, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
  __pyx_t_4 = __pyx_t_1;
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_numpy, __pyx_t_4) < (0)) __PYX_ERR(0, 22, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":42
 * ###############################################################################
 * 
 * class BridgeChipType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlBridgeChipType_t`."""
 *     BRIDGE_CHIP_PLX = NVML_BRIDGE_CHIP_PLX
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_BridgeChipType, __pyx_mstate_global->__pyx_n_u_BridgeChipType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlBridgeChipType_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 42, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":44
 * class BridgeChipType(_IntEnum):
 *     """See `nvmlBridgeChipType_t`."""
 *     BRIDGE_CHIP_PLX = NVML_BRIDGE_CHIP_PLX             # <<<<<<<<<<<<<<
 *     BRIDGE_CHIP_BRO4 = NVML_BRIDGE_CHIP_BRO4
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlBridgeChipType_t(NVML_BRIDGE_CHIP_PLX); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 44, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_BRIDGE_CHIP_PLX, __pyx_t_5) < (0)) __PYX_ERR(0, 44, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":45
 *     """See `nvmlBridgeChipType_t`."""
 *     BRIDGE_CHIP_PLX = NVML_BRIDGE_CHIP_PLX
 *     BRIDGE_CHIP_BRO4 = NVML_BRIDGE_CHIP_BRO4             # <<<<<<<<<<<<<<
 * 
 * class NvLinkUtilizationCountUnits(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlBridgeChipType_t(NVML_BRIDGE_CHIP_BRO4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 45, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_BRIDGE_CHIP_BRO4, __pyx_t_5) < (0)) __PYX_ERR(0, 45, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":42
 * ###############################################################################
 * 
 * class BridgeChipType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlBridgeChipType_t`."""
 *     BRIDGE_CHIP_PLX = NVML_BRIDGE_CHIP_PLX
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_BridgeChipType, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_BridgeChipType, __pyx_t_5) < (0)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":47
 *     BRIDGE_CHIP_BRO4 = NVML_BRIDGE_CHIP_BRO4
 * 
 * class NvLinkUtilizationCountUnits(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkUtilizationCountUnits_t`."""
 *     NVLINK_COUNTER_UNIT_CYCLES = NVML_NVLINK_COUNTER_UNIT_CYCLES
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountUnits, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountUnits, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlNvLinkUtilizationCountUn); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 47, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":49
 * class NvLinkUtilizationCountUnits(_IntEnum):
 *     """See `nvmlNvLinkUtilizationCountUnits_t`."""
 *     NVLINK_COUNTER_UNIT_CYCLES = NVML_NVLINK_COUNTER_UNIT_CYCLES             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_UNIT_PACKETS = NVML_NVLINK_COUNTER_UNIT_PACKETS
 *     NVLINK_COUNTER_UNIT_BYTES = NVML_NVLINK_COUNTER_UNIT_BYTES
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(NVML_NVLINK_COUNTER_UNIT_CYCLES); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 49, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_UNIT_CYCLES, __pyx_t_10) < (0)) __PYX_ERR(0, 49, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":50
 *     """See `nvmlNvLinkUtilizationCountUnits_t`."""
 *     NVLINK_COUNTER_UNIT_CYCLES = NVML_NVLINK_COUNTER_UNIT_CYCLES
 *     NVLINK_COUNTER_UNIT_PACKETS = NVML_NVLINK_COUNTER_UNIT_PACKETS             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_UNIT_BYTES = NVML_NVLINK_COUNTER_UNIT_BYTES
 *     NVLINK_COUNTER_UNIT_RESERVED = NVML_NVLINK_COUNTER_UNIT_RESERVED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(NVML_NVLINK_COUNTER_UNIT_PACKETS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 50, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_UNIT_PACKETS, __pyx_t_10) < (0)) __PYX_ERR(0, 50, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":51
 *     NVLINK_COUNTER_UNIT_CYCLES = NVML_NVLINK_COUNTER_UNIT_CYCLES
 *     NVLINK_COUNTER_UNIT_PACKETS = NVML_NVLINK_COUNTER_UNIT_PACKETS
 *     NVLINK_COUNTER_UNIT_BYTES = NVML_NVLINK_COUNTER_UNIT_BYTES             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_UNIT_RESERVED = NVML_NVLINK_COUNTER_UNIT_RESERVED
 *     NVLINK_COUNTER_UNIT_COUNT = NVML_NVLINK_COUNTER_UNIT_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(NVML_NVLINK_COUNTER_UNIT_BYTES); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 51, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_UNIT_BYTES, __pyx_t_10) < (0)) __PYX_ERR(0, 51, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":52
 *     NVLINK_COUNTER_UNIT_PACKETS = NVML_NVLINK_COUNTER_UNIT_PACKETS
 *     NVLINK_COUNTER_UNIT_BYTES = NVML_NVLINK_COUNTER_UNIT_BYTES
 *     NVLINK_COUNTER_UNIT_RESERVED = NVML_NVLINK_COUNTER_UNIT_RESERVED             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_UNIT_COUNT = NVML_NVLINK_COUNTER_UNIT_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(NVML_NVLINK_COUNTER_UNIT_RESERVED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_UNIT_RESERVED, __pyx_t_10) < (0)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":53
 *     NVLINK_COUNTER_UNIT_BYTES = NVML_NVLINK_COUNTER_UNIT_BYTES
 *     NVLINK_COUNTER_UNIT_RESERVED = NVML_NVLINK_COUNTER_UNIT_RESERVED
 *     NVLINK_COUNTER_UNIT_COUNT = NVML_NVLINK_COUNTER_UNIT_COUNT             # <<<<<<<<<<<<<<
 * 
 * class NvLinkUtilizationCountPktTypes(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(NVML_NVLINK_COUNTER_UNIT_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 53, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_UNIT_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 53, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":47
 *     BRIDGE_CHIP_BRO4 = NVML_BRIDGE_CHIP_BRO4
 * 
 * class NvLinkUtilizationCountUnits(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkUtilizationCountUnits_t`."""
 *     NVLINK_COUNTER_UNIT_CYCLES = NVML_NVLINK_COUNTER_UNIT_CYCLES
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountUnits, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountUnits, __pyx_t_10) < (0)) __PYX_ERR(0, 47, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":55
 *     NVLINK_COUNTER_UNIT_COUNT = NVML_NVLINK_COUNTER_UNIT_COUNT
 * 
 * class NvLinkUtilizationCountPktTypes(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkUtilizationCountPktTypes_t`."""
 *     NVLINK_COUNTER_PKTFILTER_NOP = NVML_NVLINK_COUNTER_PKTFILTER_NOP
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountPktTypes, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountPktTypes, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlNvLinkUtilizationCountPk); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 55, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":57
 * class NvLinkUtilizationCountPktTypes(_IntEnum):
 *     """See `nvmlNvLinkUtilizationCountPktTypes_t`."""
 *     NVLINK_COUNTER_PKTFILTER_NOP = NVML_NVLINK_COUNTER_PKTFILTER_NOP             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_READ = NVML_NVLINK_COUNTER_PKTFILTER_READ
 *     NVLINK_COUNTER_PKTFILTER_WRITE = NVML_NVLINK_COUNTER_PKTFILTER_WRITE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_NOP); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 57, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_NOP, __pyx_t_11) < (0)) __PYX_ERR(0, 57, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":58
 *     """See `nvmlNvLinkUtilizationCountPktTypes_t`."""
 *     NVLINK_COUNTER_PKTFILTER_NOP = NVML_NVLINK_COUNTER_PKTFILTER_NOP
 *     NVLINK_COUNTER_PKTFILTER_READ = NVML_NVLINK_COUNTER_PKTFILTER_READ             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_WRITE = NVML_NVLINK_COUNTER_PKTFILTER_WRITE
 *     NVLINK_COUNTER_PKTFILTER_RATOM = NVML_NVLINK_COUNTER_PKTFILTER_RATOM
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_READ); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_READ, __pyx_t_11) < (0)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":59
 *     NVLINK_COUNTER_PKTFILTER_NOP = NVML_NVLINK_COUNTER_PKTFILTER_NOP
 *     NVLINK_COUNTER_PKTFILTER_READ = NVML_NVLINK_COUNTER_PKTFILTER_READ
 *     NVLINK_COUNTER_PKTFILTER_WRITE = NVML_NVLINK_COUNTER_PKTFILTER_WRITE             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_RATOM = NVML_NVLINK_COUNTER_PKTFILTER_RATOM
 *     NVLINK_COUNTER_PKTFILTER_NRATOM = NVML_NVLINK_COUNTER_PKTFILTER_NRATOM
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_WRITE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 59, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_WRITE, __pyx_t_11) < (0)) __PYX_ERR(0, 59, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":60
 *     NVLINK_COUNTER_PKTFILTER_READ = NVML_NVLINK_COUNTER_PKTFILTER_READ
 *     NVLINK_COUNTER_PKTFILTER_WRITE = NVML_NVLINK_COUNTER_PKTFILTER_WRITE
 *     NVLINK_COUNTER_PKTFILTER_RATOM = NVML_NVLINK_COUNTER_PKTFILTER_RATOM             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_NRATOM = NVML_NVLINK_COUNTER_PKTFILTER_NRATOM
 *     NVLINK_COUNTER_PKTFILTER_FLUSH = NVML_NVLINK_COUNTER_PKTFILTER_FLUSH
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_RATOM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 60, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_RATOM, __pyx_t_11) < (0)) __PYX_ERR(0, 60, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":61
 *     NVLINK_COUNTER_PKTFILTER_WRITE = NVML_NVLINK_COUNTER_PKTFILTER_WRITE
 *     NVLINK_COUNTER_PKTFILTER_RATOM = NVML_NVLINK_COUNTER_PKTFILTER_RATOM
 *     NVLINK_COUNTER_PKTFILTER_NRATOM = NVML_NVLINK_COUNTER_PKTFILTER_NRATOM             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_FLUSH = NVML_NVLINK_COUNTER_PKTFILTER_FLUSH
 *     NVLINK_COUNTER_PKTFILTER_RESPDATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_NRATOM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 61, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_NRATOM, __pyx_t_11) < (0)) __PYX_ERR(0, 61, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":62
 *     NVLINK_COUNTER_PKTFILTER_RATOM = NVML_NVLINK_COUNTER_PKTFILTER_RATOM
 *     NVLINK_COUNTER_PKTFILTER_NRATOM = NVML_NVLINK_COUNTER_PKTFILTER_NRATOM
 *     NVLINK_COUNTER_PKTFILTER_FLUSH = NVML_NVLINK_COUNTER_PKTFILTER_FLUSH             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_RESPDATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA
 *     NVLINK_COUNTER_PKTFILTER_RESPNODATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_FLUSH); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 62, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_FLUSH, __pyx_t_11) < (0)) __PYX_ERR(0, 62, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":63
 *     NVLINK_COUNTER_PKTFILTER_NRATOM = NVML_NVLINK_COUNTER_PKTFILTER_NRATOM
 *     NVLINK_COUNTER_PKTFILTER_FLUSH = NVML_NVLINK_COUNTER_PKTFILTER_FLUSH
 *     NVLINK_COUNTER_PKTFILTER_RESPDATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_RESPNODATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA
 *     NVLINK_COUNTER_PKTFILTER_ALL = NVML_NVLINK_COUNTER_PKTFILTER_ALL
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 63, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_RESPDAT, __pyx_t_11) < (0)) __PYX_ERR(0, 63, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":64
 *     NVLINK_COUNTER_PKTFILTER_FLUSH = NVML_NVLINK_COUNTER_PKTFILTER_FLUSH
 *     NVLINK_COUNTER_PKTFILTER_RESPDATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA
 *     NVLINK_COUNTER_PKTFILTER_RESPNODATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA             # <<<<<<<<<<<<<<
 *     NVLINK_COUNTER_PKTFILTER_ALL = NVML_NVLINK_COUNTER_PKTFILTER_ALL
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 64, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_RESPNOD, __pyx_t_11) < (0)) __PYX_ERR(0, 64, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":65
 *     NVLINK_COUNTER_PKTFILTER_RESPDATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA
 *     NVLINK_COUNTER_PKTFILTER_RESPNODATA = NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA
 *     NVLINK_COUNTER_PKTFILTER_ALL = NVML_NVLINK_COUNTER_PKTFILTER_ALL             # <<<<<<<<<<<<<<
 * 
 * class NvLinkCapability(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(NVML_NVLINK_COUNTER_PKTFILTER_ALL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 65, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_COUNTER_PKTFILTER_ALL, __pyx_t_11) < (0)) __PYX_ERR(0, 65, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":55
 *     NVLINK_COUNTER_UNIT_COUNT = NVML_NVLINK_COUNTER_UNIT_COUNT
 * 
 * class NvLinkUtilizationCountPktTypes(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkUtilizationCountPktTypes_t`."""
 *     NVLINK_COUNTER_PKTFILTER_NOP = NVML_NVLINK_COUNTER_PKTFILTER_NOP
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountPktTypes, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NvLinkUtilizationCountPktTypes, __pyx_t_11) < (0)) __PYX_ERR(0, 55, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":67
 *     NVLINK_COUNTER_PKTFILTER_ALL = NVML_NVLINK_COUNTER_PKTFILTER_ALL
 * 
 * class NvLinkCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkCapability_t`."""
 *     NVLINK_CAP_P2P_SUPPORTED = NVML_NVLINK_CAP_P2P_SUPPORTED
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_NvLinkCapability, __pyx_mstate_global->__pyx_n_u_NvLinkCapability, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlNvLinkCapability_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 67, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":69
 * class NvLinkCapability(_IntEnum):
 *     """See `nvmlNvLinkCapability_t`."""
 *     NVLINK_CAP_P2P_SUPPORTED = NVML_NVLINK_CAP_P2P_SUPPORTED             # <<<<<<<<<<<<<<
 *     NVLINK_CAP_SYSMEM_ACCESS = NVML_NVLINK_CAP_SYSMEM_ACCESS
 *     NVLINK_CAP_P2P_ATOMICS = NVML_NVLINK_CAP_P2P_ATOMICS
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_P2P_SUPPORTED); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 69, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_P2P_SUPPORTED, __pyx_t_5) < (0)) __PYX_ERR(0, 69, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":70
 *     """See `nvmlNvLinkCapability_t`."""
 *     NVLINK_CAP_P2P_SUPPORTED = NVML_NVLINK_CAP_P2P_SUPPORTED
 *     NVLINK_CAP_SYSMEM_ACCESS = NVML_NVLINK_CAP_SYSMEM_ACCESS             # <<<<<<<<<<<<<<
 *     NVLINK_CAP_P2P_ATOMICS = NVML_NVLINK_CAP_P2P_ATOMICS
 *     NVLINK_CAP_SYSMEM_ATOMICS = NVML_NVLINK_CAP_SYSMEM_ATOMICS
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_SYSMEM_ACCESS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 70, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_SYSMEM_ACCESS, __pyx_t_5) < (0)) __PYX_ERR(0, 70, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":71
 *     NVLINK_CAP_P2P_SUPPORTED = NVML_NVLINK_CAP_P2P_SUPPORTED
 *     NVLINK_CAP_SYSMEM_ACCESS = NVML_NVLINK_CAP_SYSMEM_ACCESS
 *     NVLINK_CAP_P2P_ATOMICS = NVML_NVLINK_CAP_P2P_ATOMICS             # <<<<<<<<<<<<<<
 *     NVLINK_CAP_SYSMEM_ATOMICS = NVML_NVLINK_CAP_SYSMEM_ATOMICS
 *     NVLINK_CAP_SLI_BRIDGE = NVML_NVLINK_CAP_SLI_BRIDGE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_P2P_ATOMICS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_P2P_ATOMICS, __pyx_t_5) < (0)) __PYX_ERR(0, 71, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":72
 *     NVLINK_CAP_SYSMEM_ACCESS = NVML_NVLINK_CAP_SYSMEM_ACCESS
 *     NVLINK_CAP_P2P_ATOMICS = NVML_NVLINK_CAP_P2P_ATOMICS
 *     NVLINK_CAP_SYSMEM_ATOMICS = NVML_NVLINK_CAP_SYSMEM_ATOMICS             # <<<<<<<<<<<<<<
 *     NVLINK_CAP_SLI_BRIDGE = NVML_NVLINK_CAP_SLI_BRIDGE
 *     NVLINK_CAP_VALID = NVML_NVLINK_CAP_VALID
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_SYSMEM_ATOMICS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_SYSMEM_ATOMICS, __pyx_t_5) < (0)) __PYX_ERR(0, 72, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":73
 *     NVLINK_CAP_P2P_ATOMICS = NVML_NVLINK_CAP_P2P_ATOMICS
 *     NVLINK_CAP_SYSMEM_ATOMICS = NVML_NVLINK_CAP_SYSMEM_ATOMICS
 *     NVLINK_CAP_SLI_BRIDGE = NVML_NVLINK_CAP_SLI_BRIDGE             # <<<<<<<<<<<<<<
 *     NVLINK_CAP_VALID = NVML_NVLINK_CAP_VALID
 *     NVLINK_CAP_COUNT = NVML_NVLINK_CAP_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_SLI_BRIDGE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 73, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_SLI_BRIDGE, __pyx_t_5) < (0)) __PYX_ERR(0, 73, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":74
 *     NVLINK_CAP_SYSMEM_ATOMICS = NVML_NVLINK_CAP_SYSMEM_ATOMICS
 *     NVLINK_CAP_SLI_BRIDGE = NVML_NVLINK_CAP_SLI_BRIDGE
 *     NVLINK_CAP_VALID = NVML_NVLINK_CAP_VALID             # <<<<<<<<<<<<<<
 *     NVLINK_CAP_COUNT = NVML_NVLINK_CAP_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_VALID); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_VALID, __pyx_t_5) < (0)) __PYX_ERR(0, 74, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":75
 *     NVLINK_CAP_SLI_BRIDGE = NVML_NVLINK_CAP_SLI_BRIDGE
 *     NVLINK_CAP_VALID = NVML_NVLINK_CAP_VALID
 *     NVLINK_CAP_COUNT = NVML_NVLINK_CAP_COUNT             # <<<<<<<<<<<<<<
 * 
 * class NvLinkErrorCounter(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvLinkCapability_t(NVML_NVLINK_CAP_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 75, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NVLINK_CAP_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 75, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":67
 *     NVLINK_COUNTER_PKTFILTER_ALL = NVML_NVLINK_COUNTER_PKTFILTER_ALL
 * 
 * class NvLinkCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkCapability_t`."""
 *     NVLINK_CAP_P2P_SUPPORTED = NVML_NVLINK_CAP_P2P_SUPPORTED
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NvLinkCapability, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NvLinkCapability, __pyx_t_5) < (0)) __PYX_ERR(0, 67, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":77
 *     NVLINK_CAP_COUNT = NVML_NVLINK_CAP_COUNT
 * 
 * class NvLinkErrorCounter(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkErrorCounter_t`."""
 *     NVLINK_ERROR_DL_REPLAY = NVML_NVLINK_ERROR_DL_REPLAY
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_NvLinkErrorCounter, __pyx_mstate_global->__pyx_n_u_NvLinkErrorCounter, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlNvLinkErrorCounter_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 77, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":79
 * class NvLinkErrorCounter(_IntEnum):
 *     """See `nvmlNvLinkErrorCounter_t`."""
 *     NVLINK_ERROR_DL_REPLAY = NVML_NVLINK_ERROR_DL_REPLAY             # <<<<<<<<<<<<<<
 *     NVLINK_ERROR_DL_RECOVERY = NVML_NVLINK_ERROR_DL_RECOVERY
 *     NVLINK_ERROR_DL_CRC_FLIT = NVML_NVLINK_ERROR_DL_CRC_FLIT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(NVML_NVLINK_ERROR_DL_REPLAY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 79, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_ERROR_DL_REPLAY, __pyx_t_10) < (0)) __PYX_ERR(0, 79, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":80
 *     """See `nvmlNvLinkErrorCounter_t`."""
 *     NVLINK_ERROR_DL_REPLAY = NVML_NVLINK_ERROR_DL_REPLAY
 *     NVLINK_ERROR_DL_RECOVERY = NVML_NVLINK_ERROR_DL_RECOVERY             # <<<<<<<<<<<<<<
 *     NVLINK_ERROR_DL_CRC_FLIT = NVML_NVLINK_ERROR_DL_CRC_FLIT
 *     NVLINK_ERROR_DL_CRC_DATA = NVML_NVLINK_ERROR_DL_CRC_DATA
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(NVML_NVLINK_ERROR_DL_RECOVERY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 80, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_ERROR_DL_RECOVERY, __pyx_t_10) < (0)) __PYX_ERR(0, 80, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":81
 *     NVLINK_ERROR_DL_REPLAY = NVML_NVLINK_ERROR_DL_REPLAY
 *     NVLINK_ERROR_DL_RECOVERY = NVML_NVLINK_ERROR_DL_RECOVERY
 *     NVLINK_ERROR_DL_CRC_FLIT = NVML_NVLINK_ERROR_DL_CRC_FLIT             # <<<<<<<<<<<<<<
 *     NVLINK_ERROR_DL_CRC_DATA = NVML_NVLINK_ERROR_DL_CRC_DATA
 *     NVLINK_ERROR_DL_ECC_DATA = NVML_NVLINK_ERROR_DL_ECC_DATA
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(NVML_NVLINK_ERROR_DL_CRC_FLIT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 81, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_ERROR_DL_CRC_FLIT, __pyx_t_10) < (0)) __PYX_ERR(0, 81, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":82
 *     NVLINK_ERROR_DL_RECOVERY = NVML_NVLINK_ERROR_DL_RECOVERY
 *     NVLINK_ERROR_DL_CRC_FLIT = NVML_NVLINK_ERROR_DL_CRC_FLIT
 *     NVLINK_ERROR_DL_CRC_DATA = NVML_NVLINK_ERROR_DL_CRC_DATA             # <<<<<<<<<<<<<<
 *     NVLINK_ERROR_DL_ECC_DATA = NVML_NVLINK_ERROR_DL_ECC_DATA
 *     NVLINK_ERROR_COUNT = NVML_NVLINK_ERROR_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(NVML_NVLINK_ERROR_DL_CRC_DATA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 82, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_ERROR_DL_CRC_DATA, __pyx_t_10) < (0)) __PYX_ERR(0, 82, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":83
 *     NVLINK_ERROR_DL_CRC_FLIT = NVML_NVLINK_ERROR_DL_CRC_FLIT
 *     NVLINK_ERROR_DL_CRC_DATA = NVML_NVLINK_ERROR_DL_CRC_DATA
 *     NVLINK_ERROR_DL_ECC_DATA = NVML_NVLINK_ERROR_DL_ECC_DATA             # <<<<<<<<<<<<<<
 *     NVLINK_ERROR_COUNT = NVML_NVLINK_ERROR_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(NVML_NVLINK_ERROR_DL_ECC_DATA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 83, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_ERROR_DL_ECC_DATA, __pyx_t_10) < (0)) __PYX_ERR(0, 83, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":84
 *     NVLINK_ERROR_DL_CRC_DATA = NVML_NVLINK_ERROR_DL_CRC_DATA
 *     NVLINK_ERROR_DL_ECC_DATA = NVML_NVLINK_ERROR_DL_ECC_DATA
 *     NVLINK_ERROR_COUNT = NVML_NVLINK_ERROR_COUNT             # <<<<<<<<<<<<<<
 * 
 * class IntNvLinkDeviceType(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(NVML_NVLINK_ERROR_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 84, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NVLINK_ERROR_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 84, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":77
 *     NVLINK_CAP_COUNT = NVML_NVLINK_CAP_COUNT
 * 
 * class NvLinkErrorCounter(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvLinkErrorCounter_t`."""
 *     NVLINK_ERROR_DL_REPLAY = NVML_NVLINK_ERROR_DL_REPLAY
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NvLinkErrorCounter, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NvLinkErrorCounter, __pyx_t_10) < (0)) __PYX_ERR(0, 77, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":86
 *     NVLINK_ERROR_COUNT = NVML_NVLINK_ERROR_COUNT
 * 
 * class IntNvLinkDeviceType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlIntNvLinkDeviceType_t`."""
 *     NVLINK_DEVICE_TYPE_GPU = NVML_NVLINK_DEVICE_TYPE_GPU
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntNvLinkDeviceType, __pyx_mstate_global->__pyx_n_u_IntNvLinkDeviceType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlIntNvLinkDeviceType_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 86, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":88
 * class IntNvLinkDeviceType(_IntEnum):
 *     """See `nvmlIntNvLinkDeviceType_t`."""
 *     NVLINK_DEVICE_TYPE_GPU = NVML_NVLINK_DEVICE_TYPE_GPU             # <<<<<<<<<<<<<<
 *     NVLINK_DEVICE_TYPE_IBMNPU = NVML_NVLINK_DEVICE_TYPE_IBMNPU
 *     NVLINK_DEVICE_TYPE_SWITCH = NVML_NVLINK_DEVICE_TYPE_SWITCH
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlIntNvLinkDeviceType_t(NVML_NVLINK_DEVICE_TYPE_GPU); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 88, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_DEVICE_TYPE_GPU, __pyx_t_11) < (0)) __PYX_ERR(0, 88, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":89
 *     """See `nvmlIntNvLinkDeviceType_t`."""
 *     NVLINK_DEVICE_TYPE_GPU = NVML_NVLINK_DEVICE_TYPE_GPU
 *     NVLINK_DEVICE_TYPE_IBMNPU = NVML_NVLINK_DEVICE_TYPE_IBMNPU             # <<<<<<<<<<<<<<
 *     NVLINK_DEVICE_TYPE_SWITCH = NVML_NVLINK_DEVICE_TYPE_SWITCH
 *     NVLINK_DEVICE_TYPE_UNKNOWN = NVML_NVLINK_DEVICE_TYPE_UNKNOWN
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlIntNvLinkDeviceType_t(NVML_NVLINK_DEVICE_TYPE_IBMNPU); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 89, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_DEVICE_TYPE_IBMNPU, __pyx_t_11) < (0)) __PYX_ERR(0, 89, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":90
 *     NVLINK_DEVICE_TYPE_GPU = NVML_NVLINK_DEVICE_TYPE_GPU
 *     NVLINK_DEVICE_TYPE_IBMNPU = NVML_NVLINK_DEVICE_TYPE_IBMNPU
 *     NVLINK_DEVICE_TYPE_SWITCH = NVML_NVLINK_DEVICE_TYPE_SWITCH             # <<<<<<<<<<<<<<
 *     NVLINK_DEVICE_TYPE_UNKNOWN = NVML_NVLINK_DEVICE_TYPE_UNKNOWN
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlIntNvLinkDeviceType_t(NVML_NVLINK_DEVICE_TYPE_SWITCH); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 90, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_DEVICE_TYPE_SWITCH, __pyx_t_11) < (0)) __PYX_ERR(0, 90, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":91
 *     NVLINK_DEVICE_TYPE_IBMNPU = NVML_NVLINK_DEVICE_TYPE_IBMNPU
 *     NVLINK_DEVICE_TYPE_SWITCH = NVML_NVLINK_DEVICE_TYPE_SWITCH
 *     NVLINK_DEVICE_TYPE_UNKNOWN = NVML_NVLINK_DEVICE_TYPE_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class GpuTopologyLevel(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlIntNvLinkDeviceType_t(NVML_NVLINK_DEVICE_TYPE_UNKNOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 91, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVLINK_DEVICE_TYPE_UNKNOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 91, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":86
 *     NVLINK_ERROR_COUNT = NVML_NVLINK_ERROR_COUNT
 * 
 * class IntNvLinkDeviceType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlIntNvLinkDeviceType_t`."""
 *     NVLINK_DEVICE_TYPE_GPU = NVML_NVLINK_DEVICE_TYPE_GPU
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_IntNvLinkDeviceType, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_IntNvLinkDeviceType, __pyx_t_11) < (0)) __PYX_ERR(0, 86, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":93
 *     NVLINK_DEVICE_TYPE_UNKNOWN = NVML_NVLINK_DEVICE_TYPE_UNKNOWN
 * 
 * class GpuTopologyLevel(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuTopologyLevel_t`."""
 *     TOPOLOGY_INTERNAL = NVML_TOPOLOGY_INTERNAL
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpuTopologyLevel, __pyx_mstate_global->__pyx_n_u_GpuTopologyLevel, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpuTopologyLevel_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 93, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":95
 * class GpuTopologyLevel(_IntEnum):
 *     """See `nvmlGpuTopologyLevel_t`."""
 *     TOPOLOGY_INTERNAL = NVML_TOPOLOGY_INTERNAL             # <<<<<<<<<<<<<<
 *     TOPOLOGY_SINGLE = NVML_TOPOLOGY_SINGLE
 *     TOPOLOGY_MULTIPLE = NVML_TOPOLOGY_MULTIPLE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(NVML_TOPOLOGY_INTERNAL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 95, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOPOLOGY_INTERNAL, __pyx_t_5) < (0)) __PYX_ERR(0, 95, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":96
 *     """See `nvmlGpuTopologyLevel_t`."""
 *     TOPOLOGY_INTERNAL = NVML_TOPOLOGY_INTERNAL
 *     TOPOLOGY_SINGLE = NVML_TOPOLOGY_SINGLE             # <<<<<<<<<<<<<<
 *     TOPOLOGY_MULTIPLE = NVML_TOPOLOGY_MULTIPLE
 *     TOPOLOGY_HOSTBRIDGE = NVML_TOPOLOGY_HOSTBRIDGE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(NVML_TOPOLOGY_SINGLE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 96, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOPOLOGY_SINGLE, __pyx_t_5) < (0)) __PYX_ERR(0, 96, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":97
 *     TOPOLOGY_INTERNAL = NVML_TOPOLOGY_INTERNAL
 *     TOPOLOGY_SINGLE = NVML_TOPOLOGY_SINGLE
 *     TOPOLOGY_MULTIPLE = NVML_TOPOLOGY_MULTIPLE             # <<<<<<<<<<<<<<
 *     TOPOLOGY_HOSTBRIDGE = NVML_TOPOLOGY_HOSTBRIDGE
 *     TOPOLOGY_NODE = NVML_TOPOLOGY_NODE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(NVML_TOPOLOGY_MULTIPLE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOPOLOGY_MULTIPLE, __pyx_t_5) < (0)) __PYX_ERR(0, 97, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":98
 *     TOPOLOGY_SINGLE = NVML_TOPOLOGY_SINGLE
 *     TOPOLOGY_MULTIPLE = NVML_TOPOLOGY_MULTIPLE
 *     TOPOLOGY_HOSTBRIDGE = NVML_TOPOLOGY_HOSTBRIDGE             # <<<<<<<<<<<<<<
 *     TOPOLOGY_NODE = NVML_TOPOLOGY_NODE
 *     TOPOLOGY_SYSTEM = NVML_TOPOLOGY_SYSTEM
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(NVML_TOPOLOGY_HOSTBRIDGE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOPOLOGY_HOSTBRIDGE, __pyx_t_5) < (0)) __PYX_ERR(0, 98, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":99
 *     TOPOLOGY_MULTIPLE = NVML_TOPOLOGY_MULTIPLE
 *     TOPOLOGY_HOSTBRIDGE = NVML_TOPOLOGY_HOSTBRIDGE
 *     TOPOLOGY_NODE = NVML_TOPOLOGY_NODE             # <<<<<<<<<<<<<<
 *     TOPOLOGY_SYSTEM = NVML_TOPOLOGY_SYSTEM
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(NVML_TOPOLOGY_NODE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOPOLOGY_NODE, __pyx_t_5) < (0)) __PYX_ERR(0, 99, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":100
 *     TOPOLOGY_HOSTBRIDGE = NVML_TOPOLOGY_HOSTBRIDGE
 *     TOPOLOGY_NODE = NVML_TOPOLOGY_NODE
 *     TOPOLOGY_SYSTEM = NVML_TOPOLOGY_SYSTEM             # <<<<<<<<<<<<<<
 * 
 * class GpuP2PStatus(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(NVML_TOPOLOGY_SYSTEM); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOPOLOGY_SYSTEM, __pyx_t_5) < (0)) __PYX_ERR(0, 100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":93
 *     NVLINK_DEVICE_TYPE_UNKNOWN = NVML_NVLINK_DEVICE_TYPE_UNKNOWN
 * 
 * class GpuTopologyLevel(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuTopologyLevel_t`."""
 *     TOPOLOGY_INTERNAL = NVML_TOPOLOGY_INTERNAL
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GpuTopologyLevel, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuTopologyLevel, __pyx_t_5) < (0)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":102
 *     TOPOLOGY_SYSTEM = NVML_TOPOLOGY_SYSTEM
 * 
 * class GpuP2PStatus(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuP2PStatus_t`."""
 *     P2P_STATUS_OK = NVML_P2P_STATUS_OK
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpuP2PStatus, __pyx_mstate_global->__pyx_n_u_GpuP2PStatus, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpuP2PStatus_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 102, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":104
 * class GpuP2PStatus(_IntEnum):
 *     """See `nvmlGpuP2PStatus_t`."""
 *     P2P_STATUS_OK = NVML_P2P_STATUS_OK             # <<<<<<<<<<<<<<
 *     P2P_STATUS_CHIPSET_NOT_SUPPORED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED
 *     P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_OK); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_OK, __pyx_t_10) < (0)) __PYX_ERR(0, 104, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":105
 *     """See `nvmlGpuP2PStatus_t`."""
 *     P2P_STATUS_OK = NVML_P2P_STATUS_OK
 *     P2P_STATUS_CHIPSET_NOT_SUPPORED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED             # <<<<<<<<<<<<<<
 *     P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED
 *     P2P_STATUS_GPU_NOT_SUPPORTED = NVML_P2P_STATUS_GPU_NOT_SUPPORTED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_CHIPSET_NOT_SUPPORED, __pyx_t_10) < (0)) __PYX_ERR(0, 105, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":106
 *     P2P_STATUS_OK = NVML_P2P_STATUS_OK
 *     P2P_STATUS_CHIPSET_NOT_SUPPORED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED
 *     P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     P2P_STATUS_GPU_NOT_SUPPORTED = NVML_P2P_STATUS_GPU_NOT_SUPPORTED
 *     P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED = NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_CHIPSET_NOT_SUPPORTED, __pyx_t_10) < (0)) __PYX_ERR(0, 106, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":107
 *     P2P_STATUS_CHIPSET_NOT_SUPPORED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED
 *     P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED
 *     P2P_STATUS_GPU_NOT_SUPPORTED = NVML_P2P_STATUS_GPU_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED = NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED
 *     P2P_STATUS_DISABLED_BY_REGKEY = NVML_P2P_STATUS_DISABLED_BY_REGKEY
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_GPU_NOT_SUPPORTED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_GPU_NOT_SUPPORTED, __pyx_t_10) < (0)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":108
 *     P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED
 *     P2P_STATUS_GPU_NOT_SUPPORTED = NVML_P2P_STATUS_GPU_NOT_SUPPORTED
 *     P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED = NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     P2P_STATUS_DISABLED_BY_REGKEY = NVML_P2P_STATUS_DISABLED_BY_REGKEY
 *     P2P_STATUS_NOT_SUPPORTED = NVML_P2P_STATUS_NOT_SUPPORTED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 108, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPP, __pyx_t_10) < (0)) __PYX_ERR(0, 108, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":109
 *     P2P_STATUS_GPU_NOT_SUPPORTED = NVML_P2P_STATUS_GPU_NOT_SUPPORTED
 *     P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED = NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED
 *     P2P_STATUS_DISABLED_BY_REGKEY = NVML_P2P_STATUS_DISABLED_BY_REGKEY             # <<<<<<<<<<<<<<
 *     P2P_STATUS_NOT_SUPPORTED = NVML_P2P_STATUS_NOT_SUPPORTED
 *     P2P_STATUS_UNKNOWN = NVML_P2P_STATUS_UNKNOWN
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_DISABLED_BY_REGKEY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_DISABLED_BY_REGKEY, __pyx_t_10) < (0)) __PYX_ERR(0, 109, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":110
 *     P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED = NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED
 *     P2P_STATUS_DISABLED_BY_REGKEY = NVML_P2P_STATUS_DISABLED_BY_REGKEY
 *     P2P_STATUS_NOT_SUPPORTED = NVML_P2P_STATUS_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     P2P_STATUS_UNKNOWN = NVML_P2P_STATUS_UNKNOWN
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_NOT_SUPPORTED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 110, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_NOT_SUPPORTED, __pyx_t_10) < (0)) __PYX_ERR(0, 110, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":111
 *     P2P_STATUS_DISABLED_BY_REGKEY = NVML_P2P_STATUS_DISABLED_BY_REGKEY
 *     P2P_STATUS_NOT_SUPPORTED = NVML_P2P_STATUS_NOT_SUPPORTED
 *     P2P_STATUS_UNKNOWN = NVML_P2P_STATUS_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class GpuP2PCapsIndex(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuP2PStatus_t(NVML_P2P_STATUS_UNKNOWN); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 111, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_P2P_STATUS_UNKNOWN, __pyx_t_10) < (0)) __PYX_ERR(0, 111, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":102
 *     TOPOLOGY_SYSTEM = NVML_TOPOLOGY_SYSTEM
 * 
 * class GpuP2PStatus(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuP2PStatus_t`."""
 *     P2P_STATUS_OK = NVML_P2P_STATUS_OK
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GpuP2PStatus, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuP2PStatus, __pyx_t_10) < (0)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":113
 *     P2P_STATUS_UNKNOWN = NVML_P2P_STATUS_UNKNOWN
 * 
 * class GpuP2PCapsIndex(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuP2PCapsIndex_t`."""
 *     P2P_CAPS_INDEX_READ = NVML_P2P_CAPS_INDEX_READ
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpuP2PCapsIndex, __pyx_mstate_global->__pyx_n_u_GpuP2PCapsIndex, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpuP2PCapsIndex_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 113, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":115
 * class GpuP2PCapsIndex(_IntEnum):
 *     """See `nvmlGpuP2PCapsIndex_t`."""
 *     P2P_CAPS_INDEX_READ = NVML_P2P_CAPS_INDEX_READ             # <<<<<<<<<<<<<<
 *     P2P_CAPS_INDEX_WRITE = NVML_P2P_CAPS_INDEX_WRITE
 *     P2P_CAPS_INDEX_NVLINK = NVML_P2P_CAPS_INDEX_NVLINK
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_READ); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_READ, __pyx_t_11) < (0)) __PYX_ERR(0, 115, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":116
 *     """See `nvmlGpuP2PCapsIndex_t`."""
 *     P2P_CAPS_INDEX_READ = NVML_P2P_CAPS_INDEX_READ
 *     P2P_CAPS_INDEX_WRITE = NVML_P2P_CAPS_INDEX_WRITE             # <<<<<<<<<<<<<<
 *     P2P_CAPS_INDEX_NVLINK = NVML_P2P_CAPS_INDEX_NVLINK
 *     P2P_CAPS_INDEX_ATOMICS = NVML_P2P_CAPS_INDEX_ATOMICS
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_WRITE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_WRITE, __pyx_t_11) < (0)) __PYX_ERR(0, 116, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":117
 *     P2P_CAPS_INDEX_READ = NVML_P2P_CAPS_INDEX_READ
 *     P2P_CAPS_INDEX_WRITE = NVML_P2P_CAPS_INDEX_WRITE
 *     P2P_CAPS_INDEX_NVLINK = NVML_P2P_CAPS_INDEX_NVLINK             # <<<<<<<<<<<<<<
 *     P2P_CAPS_INDEX_ATOMICS = NVML_P2P_CAPS_INDEX_ATOMICS
 *     P2P_CAPS_INDEX_PCI = NVML_P2P_CAPS_INDEX_PCI
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_NVLINK); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 117, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_NVLINK, __pyx_t_11) < (0)) __PYX_ERR(0, 117, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":118
 *     P2P_CAPS_INDEX_WRITE = NVML_P2P_CAPS_INDEX_WRITE
 *     P2P_CAPS_INDEX_NVLINK = NVML_P2P_CAPS_INDEX_NVLINK
 *     P2P_CAPS_INDEX_ATOMICS = NVML_P2P_CAPS_INDEX_ATOMICS             # <<<<<<<<<<<<<<
 *     P2P_CAPS_INDEX_PCI = NVML_P2P_CAPS_INDEX_PCI
 *     P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PROP
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_ATOMICS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 118, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_ATOMICS, __pyx_t_11) < (0)) __PYX_ERR(0, 118, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":119
 *     P2P_CAPS_INDEX_NVLINK = NVML_P2P_CAPS_INDEX_NVLINK
 *     P2P_CAPS_INDEX_ATOMICS = NVML_P2P_CAPS_INDEX_ATOMICS
 *     P2P_CAPS_INDEX_PCI = NVML_P2P_CAPS_INDEX_PCI             # <<<<<<<<<<<<<<
 *     P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PROP
 *     P2P_CAPS_INDEX_UNKNOWN = NVML_P2P_CAPS_INDEX_UNKNOWN
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_PCI); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 119, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_PCI, __pyx_t_11) < (0)) __PYX_ERR(0, 119, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":120
 *     P2P_CAPS_INDEX_ATOMICS = NVML_P2P_CAPS_INDEX_ATOMICS
 *     P2P_CAPS_INDEX_PCI = NVML_P2P_CAPS_INDEX_PCI
 *     P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PROP             # <<<<<<<<<<<<<<
 *     P2P_CAPS_INDEX_UNKNOWN = NVML_P2P_CAPS_INDEX_UNKNOWN
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_PROP); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 120, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_PROP, __pyx_t_11) < (0)) __PYX_ERR(0, 120, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":121
 *     P2P_CAPS_INDEX_PCI = NVML_P2P_CAPS_INDEX_PCI
 *     P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PROP
 *     P2P_CAPS_INDEX_UNKNOWN = NVML_P2P_CAPS_INDEX_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class SamplingType(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(NVML_P2P_CAPS_INDEX_UNKNOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 121, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_P2P_CAPS_INDEX_UNKNOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 121, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":113
 *     P2P_STATUS_UNKNOWN = NVML_P2P_STATUS_UNKNOWN
 * 
 * class GpuP2PCapsIndex(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuP2PCapsIndex_t`."""
 *     P2P_CAPS_INDEX_READ = NVML_P2P_CAPS_INDEX_READ
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GpuP2PCapsIndex, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuP2PCapsIndex, __pyx_t_11) < (0)) __PYX_ERR(0, 113, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":123
 *     P2P_CAPS_INDEX_UNKNOWN = NVML_P2P_CAPS_INDEX_UNKNOWN
 * 
 * class SamplingType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlSamplingType_t`."""
 *     TOTAL_POWER_SAMPLES = NVML_TOTAL_POWER_SAMPLES
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_SamplingType, __pyx_mstate_global->__pyx_n_u_SamplingType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlSamplingType_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 123, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":125
 * class SamplingType(_IntEnum):
 *     """See `nvmlSamplingType_t`."""
 *     TOTAL_POWER_SAMPLES = NVML_TOTAL_POWER_SAMPLES             # <<<<<<<<<<<<<<
 *     GPU_UTILIZATION_SAMPLES = NVML_GPU_UTILIZATION_SAMPLES
 *     MEMORY_UTILIZATION_SAMPLES = NVML_MEMORY_UTILIZATION_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_TOTAL_POWER_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TOTAL_POWER_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":126
 *     """See `nvmlSamplingType_t`."""
 *     TOTAL_POWER_SAMPLES = NVML_TOTAL_POWER_SAMPLES
 *     GPU_UTILIZATION_SAMPLES = NVML_GPU_UTILIZATION_SAMPLES             # <<<<<<<<<<<<<<
 *     MEMORY_UTILIZATION_SAMPLES = NVML_MEMORY_UTILIZATION_SAMPLES
 *     ENC_UTILIZATION_SAMPLES = NVML_ENC_UTILIZATION_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_GPU_UTILIZATION_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_UTILIZATION_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 126, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":127
 *     TOTAL_POWER_SAMPLES = NVML_TOTAL_POWER_SAMPLES
 *     GPU_UTILIZATION_SAMPLES = NVML_GPU_UTILIZATION_SAMPLES
 *     MEMORY_UTILIZATION_SAMPLES = NVML_MEMORY_UTILIZATION_SAMPLES             # <<<<<<<<<<<<<<
 *     ENC_UTILIZATION_SAMPLES = NVML_ENC_UTILIZATION_SAMPLES
 *     DEC_UTILIZATION_SAMPLES = NVML_DEC_UTILIZATION_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_MEMORY_UTILIZATION_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 127, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_MEMORY_UTILIZATION_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 127, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":128
 *     GPU_UTILIZATION_SAMPLES = NVML_GPU_UTILIZATION_SAMPLES
 *     MEMORY_UTILIZATION_SAMPLES = NVML_MEMORY_UTILIZATION_SAMPLES
 *     ENC_UTILIZATION_SAMPLES = NVML_ENC_UTILIZATION_SAMPLES             # <<<<<<<<<<<<<<
 *     DEC_UTILIZATION_SAMPLES = NVML_DEC_UTILIZATION_SAMPLES
 *     PROCESSOR_CLK_SAMPLES = NVML_PROCESSOR_CLK_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_ENC_UTILIZATION_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 128, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ENC_UTILIZATION_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 128, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":129
 *     MEMORY_UTILIZATION_SAMPLES = NVML_MEMORY_UTILIZATION_SAMPLES
 *     ENC_UTILIZATION_SAMPLES = NVML_ENC_UTILIZATION_SAMPLES
 *     DEC_UTILIZATION_SAMPLES = NVML_DEC_UTILIZATION_SAMPLES             # <<<<<<<<<<<<<<
 *     PROCESSOR_CLK_SAMPLES = NVML_PROCESSOR_CLK_SAMPLES
 *     MEMORY_CLK_SAMPLES = NVML_MEMORY_CLK_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_DEC_UTILIZATION_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 129, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEC_UTILIZATION_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 129, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":130
 *     ENC_UTILIZATION_SAMPLES = NVML_ENC_UTILIZATION_SAMPLES
 *     DEC_UTILIZATION_SAMPLES = NVML_DEC_UTILIZATION_SAMPLES
 *     PROCESSOR_CLK_SAMPLES = NVML_PROCESSOR_CLK_SAMPLES             # <<<<<<<<<<<<<<
 *     MEMORY_CLK_SAMPLES = NVML_MEMORY_CLK_SAMPLES
 *     MODULE_POWER_SAMPLES = NVML_MODULE_POWER_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_PROCESSOR_CLK_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 130, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PROCESSOR_CLK_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 130, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":131
 *     DEC_UTILIZATION_SAMPLES = NVML_DEC_UTILIZATION_SAMPLES
 *     PROCESSOR_CLK_SAMPLES = NVML_PROCESSOR_CLK_SAMPLES
 *     MEMORY_CLK_SAMPLES = NVML_MEMORY_CLK_SAMPLES             # <<<<<<<<<<<<<<
 *     MODULE_POWER_SAMPLES = NVML_MODULE_POWER_SAMPLES
 *     JPG_UTILIZATION_SAMPLES = NVML_JPG_UTILIZATION_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_MEMORY_CLK_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_MEMORY_CLK_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 131, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":132
 *     PROCESSOR_CLK_SAMPLES = NVML_PROCESSOR_CLK_SAMPLES
 *     MEMORY_CLK_SAMPLES = NVML_MEMORY_CLK_SAMPLES
 *     MODULE_POWER_SAMPLES = NVML_MODULE_POWER_SAMPLES             # <<<<<<<<<<<<<<
 *     JPG_UTILIZATION_SAMPLES = NVML_JPG_UTILIZATION_SAMPLES
 *     OFA_UTILIZATION_SAMPLES = NVML_OFA_UTILIZATION_SAMPLES
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_MODULE_POWER_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 132, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_MODULE_POWER_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 132, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":133
 *     MEMORY_CLK_SAMPLES = NVML_MEMORY_CLK_SAMPLES
 *     MODULE_POWER_SAMPLES = NVML_MODULE_POWER_SAMPLES
 *     JPG_UTILIZATION_SAMPLES = NVML_JPG_UTILIZATION_SAMPLES             # <<<<<<<<<<<<<<
 *     OFA_UTILIZATION_SAMPLES = NVML_OFA_UTILIZATION_SAMPLES
 *     SAMPLINGTYPE_COUNT = NVML_SAMPLINGTYPE_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_JPG_UTILIZATION_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_JPG_UTILIZATION_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 133, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":134
 *     MODULE_POWER_SAMPLES = NVML_MODULE_POWER_SAMPLES
 *     JPG_UTILIZATION_SAMPLES = NVML_JPG_UTILIZATION_SAMPLES
 *     OFA_UTILIZATION_SAMPLES = NVML_OFA_UTILIZATION_SAMPLES             # <<<<<<<<<<<<<<
 *     SAMPLINGTYPE_COUNT = NVML_SAMPLINGTYPE_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_OFA_UTILIZATION_SAMPLES); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_OFA_UTILIZATION_SAMPLES, __pyx_t_5) < (0)) __PYX_ERR(0, 134, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":135
 *     JPG_UTILIZATION_SAMPLES = NVML_JPG_UTILIZATION_SAMPLES
 *     OFA_UTILIZATION_SAMPLES = NVML_OFA_UTILIZATION_SAMPLES
 *     SAMPLINGTYPE_COUNT = NVML_SAMPLINGTYPE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class PcieUtilCounter(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlSamplingType_t(NVML_SAMPLINGTYPE_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_SAMPLINGTYPE_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":123
 *     P2P_CAPS_INDEX_UNKNOWN = NVML_P2P_CAPS_INDEX_UNKNOWN
 * 
 * class SamplingType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlSamplingType_t`."""
 *     TOTAL_POWER_SAMPLES = NVML_TOTAL_POWER_SAMPLES
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SamplingType, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_SamplingType, __pyx_t_5) < (0)) __PYX_ERR(0, 123, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":137
 *     SAMPLINGTYPE_COUNT = NVML_SAMPLINGTYPE_COUNT
 * 
 * class PcieUtilCounter(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPcieUtilCounter_t`."""
 *     PCIE_UTIL_TX_BYTES = NVML_PCIE_UTIL_TX_BYTES
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_PcieUtilCounter, __pyx_mstate_global->__pyx_n_u_PcieUtilCounter, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlPcieUtilCounter_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 137, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":139
 * class PcieUtilCounter(_IntEnum):
 *     """See `nvmlPcieUtilCounter_t`."""
 *     PCIE_UTIL_TX_BYTES = NVML_PCIE_UTIL_TX_BYTES             # <<<<<<<<<<<<<<
 *     PCIE_UTIL_RX_BYTES = NVML_PCIE_UTIL_RX_BYTES
 *     PCIE_UTIL_COUNT = NVML_PCIE_UTIL_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlPcieUtilCounter_t(NVML_PCIE_UTIL_TX_BYTES); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_PCIE_UTIL_TX_BYTES, __pyx_t_10) < (0)) __PYX_ERR(0, 139, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":140
 *     """See `nvmlPcieUtilCounter_t`."""
 *     PCIE_UTIL_TX_BYTES = NVML_PCIE_UTIL_TX_BYTES
 *     PCIE_UTIL_RX_BYTES = NVML_PCIE_UTIL_RX_BYTES             # <<<<<<<<<<<<<<
 *     PCIE_UTIL_COUNT = NVML_PCIE_UTIL_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlPcieUtilCounter_t(NVML_PCIE_UTIL_RX_BYTES); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 140, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_PCIE_UTIL_RX_BYTES, __pyx_t_10) < (0)) __PYX_ERR(0, 140, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":141
 *     PCIE_UTIL_TX_BYTES = NVML_PCIE_UTIL_TX_BYTES
 *     PCIE_UTIL_RX_BYTES = NVML_PCIE_UTIL_RX_BYTES
 *     PCIE_UTIL_COUNT = NVML_PCIE_UTIL_COUNT             # <<<<<<<<<<<<<<
 * 
 * class ValueType(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlPcieUtilCounter_t(NVML_PCIE_UTIL_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 141, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_PCIE_UTIL_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 141, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":137
 *     SAMPLINGTYPE_COUNT = NVML_SAMPLINGTYPE_COUNT
 * 
 * class PcieUtilCounter(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPcieUtilCounter_t`."""
 *     PCIE_UTIL_TX_BYTES = NVML_PCIE_UTIL_TX_BYTES
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PcieUtilCounter, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_PcieUtilCounter, __pyx_t_10) < (0)) __PYX_ERR(0, 137, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":143
 *     PCIE_UTIL_COUNT = NVML_PCIE_UTIL_COUNT
 * 
 * class ValueType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlValueType_t`."""
 *     DOUBLE = NVML_VALUE_TYPE_DOUBLE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ValueType, __pyx_mstate_global->__pyx_n_u_ValueType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlValueType_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 143, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":145
 * class ValueType(_IntEnum):
 *     """See `nvmlValueType_t`."""
 *     DOUBLE = NVML_VALUE_TYPE_DOUBLE             # <<<<<<<<<<<<<<
 *     UNSIGNED_INT = NVML_VALUE_TYPE_UNSIGNED_INT
 *     UNSIGNED_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_DOUBLE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DOUBLE, __pyx_t_11) < (0)) __PYX_ERR(0, 145, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":146
 *     """See `nvmlValueType_t`."""
 *     DOUBLE = NVML_VALUE_TYPE_DOUBLE
 *     UNSIGNED_INT = NVML_VALUE_TYPE_UNSIGNED_INT             # <<<<<<<<<<<<<<
 *     UNSIGNED_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG
 *     UNSIGNED_LONG_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG_LONG
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_UNSIGNED_INT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNSIGNED_INT, __pyx_t_11) < (0)) __PYX_ERR(0, 146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":147
 *     DOUBLE = NVML_VALUE_TYPE_DOUBLE
 *     UNSIGNED_INT = NVML_VALUE_TYPE_UNSIGNED_INT
 *     UNSIGNED_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG             # <<<<<<<<<<<<<<
 *     UNSIGNED_LONG_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG_LONG
 *     SIGNED_LONG_LONG = NVML_VALUE_TYPE_SIGNED_LONG_LONG
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_UNSIGNED_LONG); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 147, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNSIGNED_LONG, __pyx_t_11) < (0)) __PYX_ERR(0, 147, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":148
 *     UNSIGNED_INT = NVML_VALUE_TYPE_UNSIGNED_INT
 *     UNSIGNED_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG
 *     UNSIGNED_LONG_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG_LONG             # <<<<<<<<<<<<<<
 *     SIGNED_LONG_LONG = NVML_VALUE_TYPE_SIGNED_LONG_LONG
 *     SIGNED_INT = NVML_VALUE_TYPE_SIGNED_INT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_UNSIGNED_LONG_LONG); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNSIGNED_LONG_LONG, __pyx_t_11) < (0)) __PYX_ERR(0, 148, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":149
 *     UNSIGNED_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG
 *     UNSIGNED_LONG_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG_LONG
 *     SIGNED_LONG_LONG = NVML_VALUE_TYPE_SIGNED_LONG_LONG             # <<<<<<<<<<<<<<
 *     SIGNED_INT = NVML_VALUE_TYPE_SIGNED_INT
 *     UNSIGNED_SHORT = NVML_VALUE_TYPE_UNSIGNED_SHORT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_SIGNED_LONG_LONG); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 149, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SIGNED_LONG_LONG, __pyx_t_11) < (0)) __PYX_ERR(0, 149, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":150
 *     UNSIGNED_LONG_LONG = NVML_VALUE_TYPE_UNSIGNED_LONG_LONG
 *     SIGNED_LONG_LONG = NVML_VALUE_TYPE_SIGNED_LONG_LONG
 *     SIGNED_INT = NVML_VALUE_TYPE_SIGNED_INT             # <<<<<<<<<<<<<<
 *     UNSIGNED_SHORT = NVML_VALUE_TYPE_UNSIGNED_SHORT
 *     COUNT = NVML_VALUE_TYPE_COUNT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_SIGNED_INT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SIGNED_INT, __pyx_t_11) < (0)) __PYX_ERR(0, 150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":151
 *     SIGNED_LONG_LONG = NVML_VALUE_TYPE_SIGNED_LONG_LONG
 *     SIGNED_INT = NVML_VALUE_TYPE_SIGNED_INT
 *     UNSIGNED_SHORT = NVML_VALUE_TYPE_UNSIGNED_SHORT             # <<<<<<<<<<<<<<
 *     COUNT = NVML_VALUE_TYPE_COUNT
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_UNSIGNED_SHORT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 151, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNSIGNED_SHORT, __pyx_t_11) < (0)) __PYX_ERR(0, 151, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":152
 *     SIGNED_INT = NVML_VALUE_TYPE_SIGNED_INT
 *     UNSIGNED_SHORT = NVML_VALUE_TYPE_UNSIGNED_SHORT
 *     COUNT = NVML_VALUE_TYPE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class PerfPolicyType(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlValueType_t(NVML_VALUE_TYPE_COUNT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_11) < (0)) __PYX_ERR(0, 152, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":143
 *     PCIE_UTIL_COUNT = NVML_PCIE_UTIL_COUNT
 * 
 * class ValueType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlValueType_t`."""
 *     DOUBLE = NVML_VALUE_TYPE_DOUBLE
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_ValueType, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ValueType, __pyx_t_11) < (0)) __PYX_ERR(0, 143, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":154
 *     COUNT = NVML_VALUE_TYPE_COUNT
 * 
 * class PerfPolicyType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPerfPolicyType_t`."""
 *     PERF_POLICY_POWER = NVML_PERF_POLICY_POWER
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_PerfPolicyType, __pyx_mstate_global->__pyx_n_u_PerfPolicyType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlPerfPolicyType_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 154, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":156
 * class PerfPolicyType(_IntEnum):
 *     """See `nvmlPerfPolicyType_t`."""
 *     PERF_POLICY_POWER = NVML_PERF_POLICY_POWER             # <<<<<<<<<<<<<<
 *     PERF_POLICY_THERMAL = NVML_PERF_POLICY_THERMAL
 *     PERF_POLICY_SYNC_BOOST = NVML_PERF_POLICY_SYNC_BOOST
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_POWER); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_POWER, __pyx_t_5) < (0)) __PYX_ERR(0, 156, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":157
 *     """See `nvmlPerfPolicyType_t`."""
 *     PERF_POLICY_POWER = NVML_PERF_POLICY_POWER
 *     PERF_POLICY_THERMAL = NVML_PERF_POLICY_THERMAL             # <<<<<<<<<<<<<<
 *     PERF_POLICY_SYNC_BOOST = NVML_PERF_POLICY_SYNC_BOOST
 *     PERF_POLICY_BOARD_LIMIT = NVML_PERF_POLICY_BOARD_LIMIT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_THERMAL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_THERMAL, __pyx_t_5) < (0)) __PYX_ERR(0, 157, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":158
 *     PERF_POLICY_POWER = NVML_PERF_POLICY_POWER
 *     PERF_POLICY_THERMAL = NVML_PERF_POLICY_THERMAL
 *     PERF_POLICY_SYNC_BOOST = NVML_PERF_POLICY_SYNC_BOOST             # <<<<<<<<<<<<<<
 *     PERF_POLICY_BOARD_LIMIT = NVML_PERF_POLICY_BOARD_LIMIT
 *     PERF_POLICY_LOW_UTILIZATION = NVML_PERF_POLICY_LOW_UTILIZATION
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_SYNC_BOOST); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_SYNC_BOOST, __pyx_t_5) < (0)) __PYX_ERR(0, 158, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":159
 *     PERF_POLICY_THERMAL = NVML_PERF_POLICY_THERMAL
 *     PERF_POLICY_SYNC_BOOST = NVML_PERF_POLICY_SYNC_BOOST
 *     PERF_POLICY_BOARD_LIMIT = NVML_PERF_POLICY_BOARD_LIMIT             # <<<<<<<<<<<<<<
 *     PERF_POLICY_LOW_UTILIZATION = NVML_PERF_POLICY_LOW_UTILIZATION
 *     PERF_POLICY_RELIABILITY = NVML_PERF_POLICY_RELIABILITY
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_BOARD_LIMIT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_BOARD_LIMIT, __pyx_t_5) < (0)) __PYX_ERR(0, 159, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":160
 *     PERF_POLICY_SYNC_BOOST = NVML_PERF_POLICY_SYNC_BOOST
 *     PERF_POLICY_BOARD_LIMIT = NVML_PERF_POLICY_BOARD_LIMIT
 *     PERF_POLICY_LOW_UTILIZATION = NVML_PERF_POLICY_LOW_UTILIZATION             # <<<<<<<<<<<<<<
 *     PERF_POLICY_RELIABILITY = NVML_PERF_POLICY_RELIABILITY
 *     PERF_POLICY_TOTAL_APP_CLOCKS = NVML_PERF_POLICY_TOTAL_APP_CLOCKS
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_LOW_UTILIZATION); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 160, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_LOW_UTILIZATION, __pyx_t_5) < (0)) __PYX_ERR(0, 160, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":161
 *     PERF_POLICY_BOARD_LIMIT = NVML_PERF_POLICY_BOARD_LIMIT
 *     PERF_POLICY_LOW_UTILIZATION = NVML_PERF_POLICY_LOW_UTILIZATION
 *     PERF_POLICY_RELIABILITY = NVML_PERF_POLICY_RELIABILITY             # <<<<<<<<<<<<<<
 *     PERF_POLICY_TOTAL_APP_CLOCKS = NVML_PERF_POLICY_TOTAL_APP_CLOCKS
 *     PERF_POLICY_TOTAL_BASE_CLOCKS = NVML_PERF_POLICY_TOTAL_BASE_CLOCKS
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_RELIABILITY); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_RELIABILITY, __pyx_t_5) < (0)) __PYX_ERR(0, 161, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":162
 *     PERF_POLICY_LOW_UTILIZATION = NVML_PERF_POLICY_LOW_UTILIZATION
 *     PERF_POLICY_RELIABILITY = NVML_PERF_POLICY_RELIABILITY
 *     PERF_POLICY_TOTAL_APP_CLOCKS = NVML_PERF_POLICY_TOTAL_APP_CLOCKS             # <<<<<<<<<<<<<<
 *     PERF_POLICY_TOTAL_BASE_CLOCKS = NVML_PERF_POLICY_TOTAL_BASE_CLOCKS
 *     PERF_POLICY_COUNT = NVML_PERF_POLICY_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_TOTAL_APP_CLOCKS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 162, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_TOTAL_APP_CLOCKS, __pyx_t_5) < (0)) __PYX_ERR(0, 162, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":163
 *     PERF_POLICY_RELIABILITY = NVML_PERF_POLICY_RELIABILITY
 *     PERF_POLICY_TOTAL_APP_CLOCKS = NVML_PERF_POLICY_TOTAL_APP_CLOCKS
 *     PERF_POLICY_TOTAL_BASE_CLOCKS = NVML_PERF_POLICY_TOTAL_BASE_CLOCKS             # <<<<<<<<<<<<<<
 *     PERF_POLICY_COUNT = NVML_PERF_POLICY_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_TOTAL_BASE_CLOCKS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 163, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_TOTAL_BASE_CLOCKS, __pyx_t_5) < (0)) __PYX_ERR(0, 163, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":164
 *     PERF_POLICY_TOTAL_APP_CLOCKS = NVML_PERF_POLICY_TOTAL_APP_CLOCKS
 *     PERF_POLICY_TOTAL_BASE_CLOCKS = NVML_PERF_POLICY_TOTAL_BASE_CLOCKS
 *     PERF_POLICY_COUNT = NVML_PERF_POLICY_COUNT             # <<<<<<<<<<<<<<
 * 
 * class ThermalTarget(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPerfPolicyType_t(NVML_PERF_POLICY_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PERF_POLICY_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":154
 *     COUNT = NVML_VALUE_TYPE_COUNT
 * 
 * class PerfPolicyType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPerfPolicyType_t`."""
 *     PERF_POLICY_POWER = NVML_PERF_POLICY_POWER
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PerfPolicyType, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_PerfPolicyType, __pyx_t_5) < (0)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":166
 *     PERF_POLICY_COUNT = NVML_PERF_POLICY_COUNT
 * 
 * class ThermalTarget(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlThermalTarget_t`."""
 *     NONE = NVML_THERMAL_TARGET_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ThermalTarget, __pyx_mstate_global->__pyx_n_u_ThermalTarget, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlThermalTarget_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 166, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":168
 * class ThermalTarget(_IntEnum):
 *     """See `nvmlThermalTarget_t`."""
 *     NONE = NVML_THERMAL_TARGET_NONE             # <<<<<<<<<<<<<<
 *     GPU = NVML_THERMAL_TARGET_GPU
 *     MEMORY = NVML_THERMAL_TARGET_MEMORY
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_NONE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NONE, __pyx_t_10) < (0)) __PYX_ERR(0, 168, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":169
 *     """See `nvmlThermalTarget_t`."""
 *     NONE = NVML_THERMAL_TARGET_NONE
 *     GPU = NVML_THERMAL_TARGET_GPU             # <<<<<<<<<<<<<<
 *     MEMORY = NVML_THERMAL_TARGET_MEMORY
 *     POWER_SUPPLY = NVML_THERMAL_TARGET_POWER_SUPPLY
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_GPU); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 169, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPU, __pyx_t_10) < (0)) __PYX_ERR(0, 169, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":170
 *     NONE = NVML_THERMAL_TARGET_NONE
 *     GPU = NVML_THERMAL_TARGET_GPU
 *     MEMORY = NVML_THERMAL_TARGET_MEMORY             # <<<<<<<<<<<<<<
 *     POWER_SUPPLY = NVML_THERMAL_TARGET_POWER_SUPPLY
 *     BOARD = NVML_THERMAL_TARGET_BOARD
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_MEMORY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MEMORY, __pyx_t_10) < (0)) __PYX_ERR(0, 170, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":171
 *     GPU = NVML_THERMAL_TARGET_GPU
 *     MEMORY = NVML_THERMAL_TARGET_MEMORY
 *     POWER_SUPPLY = NVML_THERMAL_TARGET_POWER_SUPPLY             # <<<<<<<<<<<<<<
 *     BOARD = NVML_THERMAL_TARGET_BOARD
 *     VCD_BOARD = NVML_THERMAL_TARGET_VCD_BOARD
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_POWER_SUPPLY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 171, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_POWER_SUPPLY, __pyx_t_10) < (0)) __PYX_ERR(0, 171, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":172
 *     MEMORY = NVML_THERMAL_TARGET_MEMORY
 *     POWER_SUPPLY = NVML_THERMAL_TARGET_POWER_SUPPLY
 *     BOARD = NVML_THERMAL_TARGET_BOARD             # <<<<<<<<<<<<<<
 *     VCD_BOARD = NVML_THERMAL_TARGET_VCD_BOARD
 *     VCD_INLET = NVML_THERMAL_TARGET_VCD_INLET
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_BOARD); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BOARD, __pyx_t_10) < (0)) __PYX_ERR(0, 172, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":173
 *     POWER_SUPPLY = NVML_THERMAL_TARGET_POWER_SUPPLY
 *     BOARD = NVML_THERMAL_TARGET_BOARD
 *     VCD_BOARD = NVML_THERMAL_TARGET_VCD_BOARD             # <<<<<<<<<<<<<<
 *     VCD_INLET = NVML_THERMAL_TARGET_VCD_INLET
 *     VCD_OUTLET = NVML_THERMAL_TARGET_VCD_OUTLET
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_VCD_BOARD); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VCD_BOARD, __pyx_t_10) < (0)) __PYX_ERR(0, 173, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":174
 *     BOARD = NVML_THERMAL_TARGET_BOARD
 *     VCD_BOARD = NVML_THERMAL_TARGET_VCD_BOARD
 *     VCD_INLET = NVML_THERMAL_TARGET_VCD_INLET             # <<<<<<<<<<<<<<
 *     VCD_OUTLET = NVML_THERMAL_TARGET_VCD_OUTLET
 *     ALL = NVML_THERMAL_TARGET_ALL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_VCD_INLET); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 174, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VCD_INLET, __pyx_t_10) < (0)) __PYX_ERR(0, 174, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":175
 *     VCD_BOARD = NVML_THERMAL_TARGET_VCD_BOARD
 *     VCD_INLET = NVML_THERMAL_TARGET_VCD_INLET
 *     VCD_OUTLET = NVML_THERMAL_TARGET_VCD_OUTLET             # <<<<<<<<<<<<<<
 *     ALL = NVML_THERMAL_TARGET_ALL
 *     UNKNOWN = NVML_THERMAL_TARGET_UNKNOWN
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_VCD_OUTLET); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 175, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VCD_OUTLET, __pyx_t_10) < (0)) __PYX_ERR(0, 175, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":176
 *     VCD_INLET = NVML_THERMAL_TARGET_VCD_INLET
 *     VCD_OUTLET = NVML_THERMAL_TARGET_VCD_OUTLET
 *     ALL = NVML_THERMAL_TARGET_ALL             # <<<<<<<<<<<<<<
 *     UNKNOWN = NVML_THERMAL_TARGET_UNKNOWN
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_ALL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_ALL, __pyx_t_10) < (0)) __PYX_ERR(0, 176, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":177
 *     VCD_OUTLET = NVML_THERMAL_TARGET_VCD_OUTLET
 *     ALL = NVML_THERMAL_TARGET_ALL
 *     UNKNOWN = NVML_THERMAL_TARGET_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class ThermalController(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlThermalTarget_t(NVML_THERMAL_TARGET_UNKNOWN); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_UNKNOWN, __pyx_t_10) < (0)) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":166
 *     PERF_POLICY_COUNT = NVML_PERF_POLICY_COUNT
 * 
 * class ThermalTarget(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlThermalTarget_t`."""
 *     NONE = NVML_THERMAL_TARGET_NONE
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ThermalTarget, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ThermalTarget, __pyx_t_10) < (0)) __PYX_ERR(0, 166, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":179
 *     UNKNOWN = NVML_THERMAL_TARGET_UNKNOWN
 * 
 * class ThermalController(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlThermalController_t`."""
 *     NONE = NVML_THERMAL_CONTROLLER_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ThermalController, __pyx_mstate_global->__pyx_n_u_ThermalController, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlThermalController_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 179, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":181
 * class ThermalController(_IntEnum):
 *     """See `nvmlThermalController_t`."""
 *     NONE = NVML_THERMAL_CONTROLLER_NONE             # <<<<<<<<<<<<<<
 *     GPU_INTERNAL = NVML_THERMAL_CONTROLLER_GPU_INTERNAL
 *     ADM1032 = NVML_THERMAL_CONTROLLER_ADM1032
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_NONE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NONE, __pyx_t_11) < (0)) __PYX_ERR(0, 181, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":182
 *     """See `nvmlThermalController_t`."""
 *     NONE = NVML_THERMAL_CONTROLLER_NONE
 *     GPU_INTERNAL = NVML_THERMAL_CONTROLLER_GPU_INTERNAL             # <<<<<<<<<<<<<<
 *     ADM1032 = NVML_THERMAL_CONTROLLER_ADM1032
 *     ADT7461 = NVML_THERMAL_CONTROLLER_ADT7461
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_GPU_INTERNAL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GPU_INTERNAL, __pyx_t_11) < (0)) __PYX_ERR(0, 182, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":183
 *     NONE = NVML_THERMAL_CONTROLLER_NONE
 *     GPU_INTERNAL = NVML_THERMAL_CONTROLLER_GPU_INTERNAL
 *     ADM1032 = NVML_THERMAL_CONTROLLER_ADM1032             # <<<<<<<<<<<<<<
 *     ADT7461 = NVML_THERMAL_CONTROLLER_ADT7461
 *     MAX6649 = NVML_THERMAL_CONTROLLER_MAX6649
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_ADM1032); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 183, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ADM1032, __pyx_t_11) < (0)) __PYX_ERR(0, 183, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":184
 *     GPU_INTERNAL = NVML_THERMAL_CONTROLLER_GPU_INTERNAL
 *     ADM1032 = NVML_THERMAL_CONTROLLER_ADM1032
 *     ADT7461 = NVML_THERMAL_CONTROLLER_ADT7461             # <<<<<<<<<<<<<<
 *     MAX6649 = NVML_THERMAL_CONTROLLER_MAX6649
 *     MAX1617 = NVML_THERMAL_CONTROLLER_MAX1617
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_ADT7461); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ADT7461, __pyx_t_11) < (0)) __PYX_ERR(0, 184, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":185
 *     ADM1032 = NVML_THERMAL_CONTROLLER_ADM1032
 *     ADT7461 = NVML_THERMAL_CONTROLLER_ADT7461
 *     MAX6649 = NVML_THERMAL_CONTROLLER_MAX6649             # <<<<<<<<<<<<<<
 *     MAX1617 = NVML_THERMAL_CONTROLLER_MAX1617
 *     LM99 = NVML_THERMAL_CONTROLLER_LM99
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_MAX6649); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_MAX6649, __pyx_t_11) < (0)) __PYX_ERR(0, 185, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":186
 *     ADT7461 = NVML_THERMAL_CONTROLLER_ADT7461
 *     MAX6649 = NVML_THERMAL_CONTROLLER_MAX6649
 *     MAX1617 = NVML_THERMAL_CONTROLLER_MAX1617             # <<<<<<<<<<<<<<
 *     LM99 = NVML_THERMAL_CONTROLLER_LM99
 *     LM89 = NVML_THERMAL_CONTROLLER_LM89
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_MAX1617); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 186, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_MAX1617, __pyx_t_11) < (0)) __PYX_ERR(0, 186, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":187
 *     MAX6649 = NVML_THERMAL_CONTROLLER_MAX6649
 *     MAX1617 = NVML_THERMAL_CONTROLLER_MAX1617
 *     LM99 = NVML_THERMAL_CONTROLLER_LM99             # <<<<<<<<<<<<<<
 *     LM89 = NVML_THERMAL_CONTROLLER_LM89
 *     LM64 = NVML_THERMAL_CONTROLLER_LM64
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_LM99); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 187, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_LM99, __pyx_t_11) < (0)) __PYX_ERR(0, 187, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":188
 *     MAX1617 = NVML_THERMAL_CONTROLLER_MAX1617
 *     LM99 = NVML_THERMAL_CONTROLLER_LM99
 *     LM89 = NVML_THERMAL_CONTROLLER_LM89             # <<<<<<<<<<<<<<
 *     LM64 = NVML_THERMAL_CONTROLLER_LM64
 *     G781 = NVML_THERMAL_CONTROLLER_G781
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_LM89); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_LM89, __pyx_t_11) < (0)) __PYX_ERR(0, 188, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":189
 *     LM99 = NVML_THERMAL_CONTROLLER_LM99
 *     LM89 = NVML_THERMAL_CONTROLLER_LM89
 *     LM64 = NVML_THERMAL_CONTROLLER_LM64             # <<<<<<<<<<<<<<
 *     G781 = NVML_THERMAL_CONTROLLER_G781
 *     ADT7473 = NVML_THERMAL_CONTROLLER_ADT7473
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_LM64); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_LM64, __pyx_t_11) < (0)) __PYX_ERR(0, 189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":190
 *     LM89 = NVML_THERMAL_CONTROLLER_LM89
 *     LM64 = NVML_THERMAL_CONTROLLER_LM64
 *     G781 = NVML_THERMAL_CONTROLLER_G781             # <<<<<<<<<<<<<<
 *     ADT7473 = NVML_THERMAL_CONTROLLER_ADT7473
 *     SBMAX6649 = NVML_THERMAL_CONTROLLER_SBMAX6649
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_G781); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_G781, __pyx_t_11) < (0)) __PYX_ERR(0, 190, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":191
 *     LM64 = NVML_THERMAL_CONTROLLER_LM64
 *     G781 = NVML_THERMAL_CONTROLLER_G781
 *     ADT7473 = NVML_THERMAL_CONTROLLER_ADT7473             # <<<<<<<<<<<<<<
 *     SBMAX6649 = NVML_THERMAL_CONTROLLER_SBMAX6649
 *     VBIOSEVT = NVML_THERMAL_CONTROLLER_VBIOSEVT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_ADT7473); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ADT7473, __pyx_t_11) < (0)) __PYX_ERR(0, 191, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":192
 *     G781 = NVML_THERMAL_CONTROLLER_G781
 *     ADT7473 = NVML_THERMAL_CONTROLLER_ADT7473
 *     SBMAX6649 = NVML_THERMAL_CONTROLLER_SBMAX6649             # <<<<<<<<<<<<<<
 *     VBIOSEVT = NVML_THERMAL_CONTROLLER_VBIOSEVT
 *     OS = NVML_THERMAL_CONTROLLER_OS
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_SBMAX6649); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SBMAX6649, __pyx_t_11) < (0)) __PYX_ERR(0, 192, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":193
 *     ADT7473 = NVML_THERMAL_CONTROLLER_ADT7473
 *     SBMAX6649 = NVML_THERMAL_CONTROLLER_SBMAX6649
 *     VBIOSEVT = NVML_THERMAL_CONTROLLER_VBIOSEVT             # <<<<<<<<<<<<<<
 *     OS = NVML_THERMAL_CONTROLLER_OS
 *     NVSYSCON_CANOAS = NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_VBIOSEVT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 193, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_VBIOSEVT, __pyx_t_11) < (0)) __PYX_ERR(0, 193, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":194
 *     SBMAX6649 = NVML_THERMAL_CONTROLLER_SBMAX6649
 *     VBIOSEVT = NVML_THERMAL_CONTROLLER_VBIOSEVT
 *     OS = NVML_THERMAL_CONTROLLER_OS             # <<<<<<<<<<<<<<
 *     NVSYSCON_CANOAS = NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS
 *     NVSYSCON_E551 = NVML_THERMAL_CONTROLLER_NVSYSCON_E551
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_OS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_OS, __pyx_t_11) < (0)) __PYX_ERR(0, 194, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":195
 *     VBIOSEVT = NVML_THERMAL_CONTROLLER_VBIOSEVT
 *     OS = NVML_THERMAL_CONTROLLER_OS
 *     NVSYSCON_CANOAS = NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS             # <<<<<<<<<<<<<<
 *     NVSYSCON_E551 = NVML_THERMAL_CONTROLLER_NVSYSCON_E551
 *     MAX6649R = NVML_THERMAL_CONTROLLER_MAX6649R
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 195, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVSYSCON_CANOAS, __pyx_t_11) < (0)) __PYX_ERR(0, 195, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":196
 *     OS = NVML_THERMAL_CONTROLLER_OS
 *     NVSYSCON_CANOAS = NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS
 *     NVSYSCON_E551 = NVML_THERMAL_CONTROLLER_NVSYSCON_E551             # <<<<<<<<<<<<<<
 *     MAX6649R = NVML_THERMAL_CONTROLLER_MAX6649R
 *     ADT7473S = NVML_THERMAL_CONTROLLER_ADT7473S
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_NVSYSCON_E551); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVSYSCON_E551, __pyx_t_11) < (0)) __PYX_ERR(0, 196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":197
 *     NVSYSCON_CANOAS = NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS
 *     NVSYSCON_E551 = NVML_THERMAL_CONTROLLER_NVSYSCON_E551
 *     MAX6649R = NVML_THERMAL_CONTROLLER_MAX6649R             # <<<<<<<<<<<<<<
 *     ADT7473S = NVML_THERMAL_CONTROLLER_ADT7473S
 *     UNKNOWN = NVML_THERMAL_CONTROLLER_UNKNOWN
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_MAX6649R); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_MAX6649R, __pyx_t_11) < (0)) __PYX_ERR(0, 197, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":198
 *     NVSYSCON_E551 = NVML_THERMAL_CONTROLLER_NVSYSCON_E551
 *     MAX6649R = NVML_THERMAL_CONTROLLER_MAX6649R
 *     ADT7473S = NVML_THERMAL_CONTROLLER_ADT7473S             # <<<<<<<<<<<<<<
 *     UNKNOWN = NVML_THERMAL_CONTROLLER_UNKNOWN
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_ADT7473S); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ADT7473S, __pyx_t_11) < (0)) __PYX_ERR(0, 198, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":199
 *     MAX6649R = NVML_THERMAL_CONTROLLER_MAX6649R
 *     ADT7473S = NVML_THERMAL_CONTROLLER_ADT7473S
 *     UNKNOWN = NVML_THERMAL_CONTROLLER_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class CoolerControl(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlThermalController_t(NVML_THERMAL_CONTROLLER_UNKNOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNKNOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 199, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":179
 *     UNKNOWN = NVML_THERMAL_TARGET_UNKNOWN
 * 
 * class ThermalController(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlThermalController_t`."""
 *     NONE = NVML_THERMAL_CONTROLLER_NONE
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_ThermalController, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ThermalController, __pyx_t_11) < (0)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":201
 *     UNKNOWN = NVML_THERMAL_CONTROLLER_UNKNOWN
 * 
 * class CoolerControl(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlCoolerControl_t`."""
 *     THERMAL_COOLER_SIGNAL_NONE = NVML_THERMAL_COOLER_SIGNAL_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_CoolerControl, __pyx_mstate_global->__pyx_n_u_CoolerControl, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlCoolerControl_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 201, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":203
 * class CoolerControl(_IntEnum):
 *     """See `nvmlCoolerControl_t`."""
 *     THERMAL_COOLER_SIGNAL_NONE = NVML_THERMAL_COOLER_SIGNAL_NONE             # <<<<<<<<<<<<<<
 *     THERMAL_COOLER_SIGNAL_TOGGLE = NVML_THERMAL_COOLER_SIGNAL_TOGGLE
 *     THERMAL_COOLER_SIGNAL_VARIABLE = NVML_THERMAL_COOLER_SIGNAL_VARIABLE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlCoolerControl_t(NVML_THERMAL_COOLER_SIGNAL_NONE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_THERMAL_COOLER_SIGNAL_NONE, __pyx_t_5) < (0)) __PYX_ERR(0, 203, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":204
 *     """See `nvmlCoolerControl_t`."""
 *     THERMAL_COOLER_SIGNAL_NONE = NVML_THERMAL_COOLER_SIGNAL_NONE
 *     THERMAL_COOLER_SIGNAL_TOGGLE = NVML_THERMAL_COOLER_SIGNAL_TOGGLE             # <<<<<<<<<<<<<<
 *     THERMAL_COOLER_SIGNAL_VARIABLE = NVML_THERMAL_COOLER_SIGNAL_VARIABLE
 *     THERMAL_COOLER_SIGNAL_COUNT = NVML_THERMAL_COOLER_SIGNAL_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlCoolerControl_t(NVML_THERMAL_COOLER_SIGNAL_TOGGLE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 204, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_THERMAL_COOLER_SIGNAL_TOGGLE, __pyx_t_5) < (0)) __PYX_ERR(0, 204, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":205
 *     THERMAL_COOLER_SIGNAL_NONE = NVML_THERMAL_COOLER_SIGNAL_NONE
 *     THERMAL_COOLER_SIGNAL_TOGGLE = NVML_THERMAL_COOLER_SIGNAL_TOGGLE
 *     THERMAL_COOLER_SIGNAL_VARIABLE = NVML_THERMAL_COOLER_SIGNAL_VARIABLE             # <<<<<<<<<<<<<<
 *     THERMAL_COOLER_SIGNAL_COUNT = NVML_THERMAL_COOLER_SIGNAL_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlCoolerControl_t(NVML_THERMAL_COOLER_SIGNAL_VARIABLE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 205, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_THERMAL_COOLER_SIGNAL_VARIABLE, __pyx_t_5) < (0)) __PYX_ERR(0, 205, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":206
 *     THERMAL_COOLER_SIGNAL_TOGGLE = NVML_THERMAL_COOLER_SIGNAL_TOGGLE
 *     THERMAL_COOLER_SIGNAL_VARIABLE = NVML_THERMAL_COOLER_SIGNAL_VARIABLE
 *     THERMAL_COOLER_SIGNAL_COUNT = NVML_THERMAL_COOLER_SIGNAL_COUNT             # <<<<<<<<<<<<<<
 * 
 * class CoolerTarget(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlCoolerControl_t(NVML_THERMAL_COOLER_SIGNAL_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_THERMAL_COOLER_SIGNAL_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 206, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":201
 *     UNKNOWN = NVML_THERMAL_CONTROLLER_UNKNOWN
 * 
 * class CoolerControl(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlCoolerControl_t`."""
 *     THERMAL_COOLER_SIGNAL_NONE = NVML_THERMAL_COOLER_SIGNAL_NONE
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CoolerControl, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_CoolerControl, __pyx_t_5) < (0)) __PYX_ERR(0, 201, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":208
 *     THERMAL_COOLER_SIGNAL_COUNT = NVML_THERMAL_COOLER_SIGNAL_COUNT
 * 
 * class CoolerTarget(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlCoolerTarget_t`."""
 *     THERMAL_NONE = NVML_THERMAL_COOLER_TARGET_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_CoolerTarget, __pyx_mstate_global->__pyx_n_u_CoolerTarget, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlCoolerTarget_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 208, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":210
 * class CoolerTarget(_IntEnum):
 *     """See `nvmlCoolerTarget_t`."""
 *     THERMAL_NONE = NVML_THERMAL_COOLER_TARGET_NONE             # <<<<<<<<<<<<<<
 *     THERMAL_GPU = NVML_THERMAL_COOLER_TARGET_GPU
 *     THERMAL_MEMORY = NVML_THERMAL_COOLER_TARGET_MEMORY
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlCoolerTarget_t(NVML_THERMAL_COOLER_TARGET_NONE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 210, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_THERMAL_NONE, __pyx_t_10) < (0)) __PYX_ERR(0, 210, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":211
 *     """See `nvmlCoolerTarget_t`."""
 *     THERMAL_NONE = NVML_THERMAL_COOLER_TARGET_NONE
 *     THERMAL_GPU = NVML_THERMAL_COOLER_TARGET_GPU             # <<<<<<<<<<<<<<
 *     THERMAL_MEMORY = NVML_THERMAL_COOLER_TARGET_MEMORY
 *     THERMAL_POWER_SUPPLY = NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlCoolerTarget_t(NVML_THERMAL_COOLER_TARGET_GPU); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 211, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_THERMAL_GPU, __pyx_t_10) < (0)) __PYX_ERR(0, 211, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":212
 *     THERMAL_NONE = NVML_THERMAL_COOLER_TARGET_NONE
 *     THERMAL_GPU = NVML_THERMAL_COOLER_TARGET_GPU
 *     THERMAL_MEMORY = NVML_THERMAL_COOLER_TARGET_MEMORY             # <<<<<<<<<<<<<<
 *     THERMAL_POWER_SUPPLY = NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY
 *     THERMAL_GPU_RELATED = NVML_THERMAL_COOLER_TARGET_GPU_RELATED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlCoolerTarget_t(NVML_THERMAL_COOLER_TARGET_MEMORY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_THERMAL_MEMORY, __pyx_t_10) < (0)) __PYX_ERR(0, 212, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":213
 *     THERMAL_GPU = NVML_THERMAL_COOLER_TARGET_GPU
 *     THERMAL_MEMORY = NVML_THERMAL_COOLER_TARGET_MEMORY
 *     THERMAL_POWER_SUPPLY = NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY             # <<<<<<<<<<<<<<
 *     THERMAL_GPU_RELATED = NVML_THERMAL_COOLER_TARGET_GPU_RELATED
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlCoolerTarget_t(NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_THERMAL_POWER_SUPPLY, __pyx_t_10) < (0)) __PYX_ERR(0, 213, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":214
 *     THERMAL_MEMORY = NVML_THERMAL_COOLER_TARGET_MEMORY
 *     THERMAL_POWER_SUPPLY = NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY
 *     THERMAL_GPU_RELATED = NVML_THERMAL_COOLER_TARGET_GPU_RELATED             # <<<<<<<<<<<<<<
 * 
 * class UUIDType(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlCoolerTarget_t(NVML_THERMAL_COOLER_TARGET_GPU_RELATED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_THERMAL_GPU_RELATED, __pyx_t_10) < (0)) __PYX_ERR(0, 214, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":208
 *     THERMAL_COOLER_SIGNAL_COUNT = NVML_THERMAL_COOLER_SIGNAL_COUNT
 * 
 * class CoolerTarget(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlCoolerTarget_t`."""
 *     THERMAL_NONE = NVML_THERMAL_COOLER_TARGET_NONE
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_CoolerTarget, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_CoolerTarget, __pyx_t_10) < (0)) __PYX_ERR(0, 208, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":216
 *     THERMAL_GPU_RELATED = NVML_THERMAL_COOLER_TARGET_GPU_RELATED
 * 
 * class UUIDType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlUUIDType_t`."""
 *     NONE = NVML_UUID_TYPE_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_UUIDType, __pyx_mstate_global->__pyx_n_u_UUIDType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlUUIDType_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 216, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":218
 * class UUIDType(_IntEnum):
 *     """See `nvmlUUIDType_t`."""
 *     NONE = NVML_UUID_TYPE_NONE             # <<<<<<<<<<<<<<
 *     ASCII = NVML_UUID_TYPE_ASCII
 *     BINARY = NVML_UUID_TYPE_BINARY
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlUUIDType_t(NVML_UUID_TYPE_NONE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NONE, __pyx_t_11) < (0)) __PYX_ERR(0, 218, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":219
 *     """See `nvmlUUIDType_t`."""
 *     NONE = NVML_UUID_TYPE_NONE
 *     ASCII = NVML_UUID_TYPE_ASCII             # <<<<<<<<<<<<<<
 *     BINARY = NVML_UUID_TYPE_BINARY
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlUUIDType_t(NVML_UUID_TYPE_ASCII); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 219, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ASCII, __pyx_t_11) < (0)) __PYX_ERR(0, 219, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":220
 *     NONE = NVML_UUID_TYPE_NONE
 *     ASCII = NVML_UUID_TYPE_ASCII
 *     BINARY = NVML_UUID_TYPE_BINARY             # <<<<<<<<<<<<<<
 * 
 * class EnableState(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlUUIDType_t(NVML_UUID_TYPE_BINARY); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_BINARY, __pyx_t_11) < (0)) __PYX_ERR(0, 220, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":216
 *     THERMAL_GPU_RELATED = NVML_THERMAL_COOLER_TARGET_GPU_RELATED
 * 
 * class UUIDType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlUUIDType_t`."""
 *     NONE = NVML_UUID_TYPE_NONE
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_UUIDType, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_UUIDType, __pyx_t_11) < (0)) __PYX_ERR(0, 216, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":222
 *     BINARY = NVML_UUID_TYPE_BINARY
 * 
 * class EnableState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlEnableState_t`."""
 *     FEATURE_DISABLED = NVML_FEATURE_DISABLED
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_EnableState, __pyx_mstate_global->__pyx_n_u_EnableState, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlEnableState_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 222, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":224
 * class EnableState(_IntEnum):
 *     """See `nvmlEnableState_t`."""
 *     FEATURE_DISABLED = NVML_FEATURE_DISABLED             # <<<<<<<<<<<<<<
 *     FEATURE_ENABLED = NVML_FEATURE_ENABLED
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlEnableState_t(NVML_FEATURE_DISABLED); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_FEATURE_DISABLED, __pyx_t_5) < (0)) __PYX_ERR(0, 224, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":225
 *     """See `nvmlEnableState_t`."""
 *     FEATURE_DISABLED = NVML_FEATURE_DISABLED
 *     FEATURE_ENABLED = NVML_FEATURE_ENABLED             # <<<<<<<<<<<<<<
 * 
 * class BrandType(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlEnableState_t(NVML_FEATURE_ENABLED); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 225, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_FEATURE_ENABLED, __pyx_t_5) < (0)) __PYX_ERR(0, 225, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":222
 *     BINARY = NVML_UUID_TYPE_BINARY
 * 
 * class EnableState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlEnableState_t`."""
 *     FEATURE_DISABLED = NVML_FEATURE_DISABLED
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_EnableState, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_EnableState, __pyx_t_5) < (0)) __PYX_ERR(0, 222, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":227
 *     FEATURE_ENABLED = NVML_FEATURE_ENABLED
 * 
 * class BrandType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlBrandType_t`."""
 *     BRAND_UNKNOWN = NVML_BRAND_UNKNOWN
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_BrandType, __pyx_mstate_global->__pyx_n_u_BrandType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlBrandType_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 227, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":229
 * class BrandType(_IntEnum):
 *     """See `nvmlBrandType_t`."""
 *     BRAND_UNKNOWN = NVML_BRAND_UNKNOWN             # <<<<<<<<<<<<<<
 *     BRAND_QUADRO = NVML_BRAND_QUADRO
 *     BRAND_TESLA = NVML_BRAND_TESLA
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_UNKNOWN); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_UNKNOWN, __pyx_t_10) < (0)) __PYX_ERR(0, 229, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":230
 *     """See `nvmlBrandType_t`."""
 *     BRAND_UNKNOWN = NVML_BRAND_UNKNOWN
 *     BRAND_QUADRO = NVML_BRAND_QUADRO             # <<<<<<<<<<<<<<
 *     BRAND_TESLA = NVML_BRAND_TESLA
 *     BRAND_NVS = NVML_BRAND_NVS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_QUADRO); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_QUADRO, __pyx_t_10) < (0)) __PYX_ERR(0, 230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":231
 *     BRAND_UNKNOWN = NVML_BRAND_UNKNOWN
 *     BRAND_QUADRO = NVML_BRAND_QUADRO
 *     BRAND_TESLA = NVML_BRAND_TESLA             # <<<<<<<<<<<<<<
 *     BRAND_NVS = NVML_BRAND_NVS
 *     BRAND_GRID = NVML_BRAND_GRID
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_TESLA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 231, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_TESLA, __pyx_t_10) < (0)) __PYX_ERR(0, 231, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":232
 *     BRAND_QUADRO = NVML_BRAND_QUADRO
 *     BRAND_TESLA = NVML_BRAND_TESLA
 *     BRAND_NVS = NVML_BRAND_NVS             # <<<<<<<<<<<<<<
 *     BRAND_GRID = NVML_BRAND_GRID
 *     BRAND_GEFORCE = NVML_BRAND_GEFORCE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 232, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVS, __pyx_t_10) < (0)) __PYX_ERR(0, 232, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":233
 *     BRAND_TESLA = NVML_BRAND_TESLA
 *     BRAND_NVS = NVML_BRAND_NVS
 *     BRAND_GRID = NVML_BRAND_GRID             # <<<<<<<<<<<<<<
 *     BRAND_GEFORCE = NVML_BRAND_GEFORCE
 *     BRAND_TITAN = NVML_BRAND_TITAN
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_GRID); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_GRID, __pyx_t_10) < (0)) __PYX_ERR(0, 233, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":234
 *     BRAND_NVS = NVML_BRAND_NVS
 *     BRAND_GRID = NVML_BRAND_GRID
 *     BRAND_GEFORCE = NVML_BRAND_GEFORCE             # <<<<<<<<<<<<<<
 *     BRAND_TITAN = NVML_BRAND_TITAN
 *     BRAND_NVIDIA_VAPPS = NVML_BRAND_NVIDIA_VAPPS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_GEFORCE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_GEFORCE, __pyx_t_10) < (0)) __PYX_ERR(0, 234, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":235
 *     BRAND_GRID = NVML_BRAND_GRID
 *     BRAND_GEFORCE = NVML_BRAND_GEFORCE
 *     BRAND_TITAN = NVML_BRAND_TITAN             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_VAPPS = NVML_BRAND_NVIDIA_VAPPS
 *     BRAND_NVIDIA_VPC = NVML_BRAND_NVIDIA_VPC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_TITAN); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 235, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_TITAN, __pyx_t_10) < (0)) __PYX_ERR(0, 235, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":236
 *     BRAND_GEFORCE = NVML_BRAND_GEFORCE
 *     BRAND_TITAN = NVML_BRAND_TITAN
 *     BRAND_NVIDIA_VAPPS = NVML_BRAND_NVIDIA_VAPPS             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_VPC = NVML_BRAND_NVIDIA_VPC
 *     BRAND_NVIDIA_VCS = NVML_BRAND_NVIDIA_VCS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_VAPPS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_VAPPS, __pyx_t_10) < (0)) __PYX_ERR(0, 236, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":237
 *     BRAND_TITAN = NVML_BRAND_TITAN
 *     BRAND_NVIDIA_VAPPS = NVML_BRAND_NVIDIA_VAPPS
 *     BRAND_NVIDIA_VPC = NVML_BRAND_NVIDIA_VPC             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_VCS = NVML_BRAND_NVIDIA_VCS
 *     BRAND_NVIDIA_VWS = NVML_BRAND_NVIDIA_VWS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_VPC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_VPC, __pyx_t_10) < (0)) __PYX_ERR(0, 237, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":238
 *     BRAND_NVIDIA_VAPPS = NVML_BRAND_NVIDIA_VAPPS
 *     BRAND_NVIDIA_VPC = NVML_BRAND_NVIDIA_VPC
 *     BRAND_NVIDIA_VCS = NVML_BRAND_NVIDIA_VCS             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_VWS = NVML_BRAND_NVIDIA_VWS
 *     BRAND_NVIDIA_CLOUD_GAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_VCS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_VCS, __pyx_t_10) < (0)) __PYX_ERR(0, 238, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":239
 *     BRAND_NVIDIA_VPC = NVML_BRAND_NVIDIA_VPC
 *     BRAND_NVIDIA_VCS = NVML_BRAND_NVIDIA_VCS
 *     BRAND_NVIDIA_VWS = NVML_BRAND_NVIDIA_VWS             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_CLOUD_GAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING
 *     BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_VGAMING
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_VWS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_VWS, __pyx_t_10) < (0)) __PYX_ERR(0, 239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":240
 *     BRAND_NVIDIA_VCS = NVML_BRAND_NVIDIA_VCS
 *     BRAND_NVIDIA_VWS = NVML_BRAND_NVIDIA_VWS
 *     BRAND_NVIDIA_CLOUD_GAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_VGAMING
 *     BRAND_QUADRO_RTX = NVML_BRAND_QUADRO_RTX
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_CLOUD_GAMING); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 240, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_CLOUD_GAMING, __pyx_t_10) < (0)) __PYX_ERR(0, 240, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":241
 *     BRAND_NVIDIA_VWS = NVML_BRAND_NVIDIA_VWS
 *     BRAND_NVIDIA_CLOUD_GAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING
 *     BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_VGAMING             # <<<<<<<<<<<<<<
 *     BRAND_QUADRO_RTX = NVML_BRAND_QUADRO_RTX
 *     BRAND_NVIDIA_RTX = NVML_BRAND_NVIDIA_RTX
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_VGAMING); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 241, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_VGAMING, __pyx_t_10) < (0)) __PYX_ERR(0, 241, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":242
 *     BRAND_NVIDIA_CLOUD_GAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING
 *     BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_VGAMING
 *     BRAND_QUADRO_RTX = NVML_BRAND_QUADRO_RTX             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA_RTX = NVML_BRAND_NVIDIA_RTX
 *     BRAND_NVIDIA = NVML_BRAND_NVIDIA
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_QUADRO_RTX); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_QUADRO_RTX, __pyx_t_10) < (0)) __PYX_ERR(0, 242, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":243
 *     BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_VGAMING
 *     BRAND_QUADRO_RTX = NVML_BRAND_QUADRO_RTX
 *     BRAND_NVIDIA_RTX = NVML_BRAND_NVIDIA_RTX             # <<<<<<<<<<<<<<
 *     BRAND_NVIDIA = NVML_BRAND_NVIDIA
 *     BRAND_GEFORCE_RTX = NVML_BRAND_GEFORCE_RTX
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA_RTX); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 243, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA_RTX, __pyx_t_10) < (0)) __PYX_ERR(0, 243, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":244
 *     BRAND_QUADRO_RTX = NVML_BRAND_QUADRO_RTX
 *     BRAND_NVIDIA_RTX = NVML_BRAND_NVIDIA_RTX
 *     BRAND_NVIDIA = NVML_BRAND_NVIDIA             # <<<<<<<<<<<<<<
 *     BRAND_GEFORCE_RTX = NVML_BRAND_GEFORCE_RTX
 *     BRAND_TITAN_RTX = NVML_BRAND_TITAN_RTX
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_NVIDIA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 244, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_NVIDIA, __pyx_t_10) < (0)) __PYX_ERR(0, 244, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":245
 *     BRAND_NVIDIA_RTX = NVML_BRAND_NVIDIA_RTX
 *     BRAND_NVIDIA = NVML_BRAND_NVIDIA
 *     BRAND_GEFORCE_RTX = NVML_BRAND_GEFORCE_RTX             # <<<<<<<<<<<<<<
 *     BRAND_TITAN_RTX = NVML_BRAND_TITAN_RTX
 *     BRAND_COUNT = NVML_BRAND_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_GEFORCE_RTX); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 245, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_GEFORCE_RTX, __pyx_t_10) < (0)) __PYX_ERR(0, 245, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":246
 *     BRAND_NVIDIA = NVML_BRAND_NVIDIA
 *     BRAND_GEFORCE_RTX = NVML_BRAND_GEFORCE_RTX
 *     BRAND_TITAN_RTX = NVML_BRAND_TITAN_RTX             # <<<<<<<<<<<<<<
 *     BRAND_COUNT = NVML_BRAND_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_TITAN_RTX); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_TITAN_RTX, __pyx_t_10) < (0)) __PYX_ERR(0, 246, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":247
 *     BRAND_GEFORCE_RTX = NVML_BRAND_GEFORCE_RTX
 *     BRAND_TITAN_RTX = NVML_BRAND_TITAN_RTX
 *     BRAND_COUNT = NVML_BRAND_COUNT             # <<<<<<<<<<<<<<
 * 
 * class TemperatureThresholds(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlBrandType_t(NVML_BRAND_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 247, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_BRAND_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 247, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":227
 *     FEATURE_ENABLED = NVML_FEATURE_ENABLED
 * 
 * class BrandType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlBrandType_t`."""
 *     BRAND_UNKNOWN = NVML_BRAND_UNKNOWN
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_BrandType, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_BrandType, __pyx_t_10) < (0)) __PYX_ERR(0, 227, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":249
 *     BRAND_COUNT = NVML_BRAND_COUNT
 * 
 * class TemperatureThresholds(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlTemperatureThresholds_t`."""
 *     TEMPERATURE_THRESHOLD_SHUTDOWN = NVML_TEMPERATURE_THRESHOLD_SHUTDOWN
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_TemperatureThresholds, __pyx_mstate_global->__pyx_n_u_TemperatureThresholds, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlTemperatureThresholds_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 249, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":251
 * class TemperatureThresholds(_IntEnum):
 *     """See `nvmlTemperatureThresholds_t`."""
 *     TEMPERATURE_THRESHOLD_SHUTDOWN = NVML_TEMPERATURE_THRESHOLD_SHUTDOWN             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_SLOWDOWN = NVML_TEMPERATURE_THRESHOLD_SLOWDOWN
 *     TEMPERATURE_THRESHOLD_MEM_MAX = NVML_TEMPERATURE_THRESHOLD_MEM_MAX
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_SHUTDOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 251, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_SHUTDOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 251, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":252
 *     """See `nvmlTemperatureThresholds_t`."""
 *     TEMPERATURE_THRESHOLD_SHUTDOWN = NVML_TEMPERATURE_THRESHOLD_SHUTDOWN
 *     TEMPERATURE_THRESHOLD_SLOWDOWN = NVML_TEMPERATURE_THRESHOLD_SLOWDOWN             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_MEM_MAX = NVML_TEMPERATURE_THRESHOLD_MEM_MAX
 *     TEMPERATURE_THRESHOLD_GPU_MAX = NVML_TEMPERATURE_THRESHOLD_GPU_MAX
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_SLOWDOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 252, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_SLOWDOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 252, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":253
 *     TEMPERATURE_THRESHOLD_SHUTDOWN = NVML_TEMPERATURE_THRESHOLD_SHUTDOWN
 *     TEMPERATURE_THRESHOLD_SLOWDOWN = NVML_TEMPERATURE_THRESHOLD_SLOWDOWN
 *     TEMPERATURE_THRESHOLD_MEM_MAX = NVML_TEMPERATURE_THRESHOLD_MEM_MAX             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_GPU_MAX = NVML_TEMPERATURE_THRESHOLD_GPU_MAX
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_MEM_MAX); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_MEM_MAX, __pyx_t_11) < (0)) __PYX_ERR(0, 253, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":254
 *     TEMPERATURE_THRESHOLD_SLOWDOWN = NVML_TEMPERATURE_THRESHOLD_SLOWDOWN
 *     TEMPERATURE_THRESHOLD_MEM_MAX = NVML_TEMPERATURE_THRESHOLD_MEM_MAX
 *     TEMPERATURE_THRESHOLD_GPU_MAX = NVML_TEMPERATURE_THRESHOLD_GPU_MAX             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_GPU_MAX); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_GPU_MAX, __pyx_t_11) < (0)) __PYX_ERR(0, 254, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":255
 *     TEMPERATURE_THRESHOLD_MEM_MAX = NVML_TEMPERATURE_THRESHOLD_MEM_MAX
 *     TEMPERATURE_THRESHOLD_GPU_MAX = NVML_TEMPERATURE_THRESHOLD_GPU_MAX
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_ACOUSTIC_M, __pyx_t_11) < (0)) __PYX_ERR(0, 255, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":256
 *     TEMPERATURE_THRESHOLD_GPU_MAX = NVML_TEMPERATURE_THRESHOLD_GPU_MAX
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX
 *     TEMPERATURE_THRESHOLD_GPS_CURR = NVML_TEMPERATURE_THRESHOLD_GPS_CURR
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 256, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_ACOUSTIC_C, __pyx_t_11) < (0)) __PYX_ERR(0, 256, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":257
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_GPS_CURR = NVML_TEMPERATURE_THRESHOLD_GPS_CURR
 *     TEMPERATURE_THRESHOLD_COUNT = NVML_TEMPERATURE_THRESHOLD_COUNT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_ACOUSTIC_M_2, __pyx_t_11) < (0)) __PYX_ERR(0, 257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":258
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX
 *     TEMPERATURE_THRESHOLD_GPS_CURR = NVML_TEMPERATURE_THRESHOLD_GPS_CURR             # <<<<<<<<<<<<<<
 *     TEMPERATURE_THRESHOLD_COUNT = NVML_TEMPERATURE_THRESHOLD_COUNT
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_GPS_CURR); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_GPS_CURR, __pyx_t_11) < (0)) __PYX_ERR(0, 258, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":259
 *     TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX
 *     TEMPERATURE_THRESHOLD_GPS_CURR = NVML_TEMPERATURE_THRESHOLD_GPS_CURR
 *     TEMPERATURE_THRESHOLD_COUNT = NVML_TEMPERATURE_THRESHOLD_COUNT             # <<<<<<<<<<<<<<
 * 
 * class TemperatureSensors(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlTemperatureThresholds_t(NVML_TEMPERATURE_THRESHOLD_COUNT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_THRESHOLD_COUNT, __pyx_t_11) < (0)) __PYX_ERR(0, 259, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":249
 *     BRAND_COUNT = NVML_BRAND_COUNT
 * 
 * class TemperatureThresholds(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlTemperatureThresholds_t`."""
 *     TEMPERATURE_THRESHOLD_SHUTDOWN = NVML_TEMPERATURE_THRESHOLD_SHUTDOWN
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_TemperatureThresholds, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_TemperatureThresholds, __pyx_t_11) < (0)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":261
 *     TEMPERATURE_THRESHOLD_COUNT = NVML_TEMPERATURE_THRESHOLD_COUNT
 * 
 * class TemperatureSensors(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlTemperatureSensors_t`."""
 *     TEMPERATURE_GPU = NVML_TEMPERATURE_GPU
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_TemperatureSensors, __pyx_mstate_global->__pyx_n_u_TemperatureSensors, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlTemperatureSensors_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 261, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":263
 * class TemperatureSensors(_IntEnum):
 *     """See `nvmlTemperatureSensors_t`."""
 *     TEMPERATURE_GPU = NVML_TEMPERATURE_GPU             # <<<<<<<<<<<<<<
 *     TEMPERATURE_COUNT = NVML_TEMPERATURE_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlTemperatureSensors_t(NVML_TEMPERATURE_GPU); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 263, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_GPU, __pyx_t_5) < (0)) __PYX_ERR(0, 263, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":264
 *     """See `nvmlTemperatureSensors_t`."""
 *     TEMPERATURE_GPU = NVML_TEMPERATURE_GPU
 *     TEMPERATURE_COUNT = NVML_TEMPERATURE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class ComputeMode(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlTemperatureSensors_t(NVML_TEMPERATURE_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 264, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TEMPERATURE_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 264, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":261
 *     TEMPERATURE_THRESHOLD_COUNT = NVML_TEMPERATURE_THRESHOLD_COUNT
 * 
 * class TemperatureSensors(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlTemperatureSensors_t`."""
 *     TEMPERATURE_GPU = NVML_TEMPERATURE_GPU
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_TemperatureSensors, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_TemperatureSensors, __pyx_t_5) < (0)) __PYX_ERR(0, 261, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":266
 *     TEMPERATURE_COUNT = NVML_TEMPERATURE_COUNT
 * 
 * class ComputeMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlComputeMode_t`."""
 *     COMPUTEMODE_DEFAULT = NVML_COMPUTEMODE_DEFAULT
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ComputeMode, __pyx_mstate_global->__pyx_n_u_ComputeMode, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlComputeMode_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 266, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":268
 * class ComputeMode(_IntEnum):
 *     """See `nvmlComputeMode_t`."""
 *     COMPUTEMODE_DEFAULT = NVML_COMPUTEMODE_DEFAULT             # <<<<<<<<<<<<<<
 *     COMPUTEMODE_EXCLUSIVE_THREAD = NVML_COMPUTEMODE_EXCLUSIVE_THREAD
 *     COMPUTEMODE_PROHIBITED = NVML_COMPUTEMODE_PROHIBITED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlComputeMode_t(NVML_COMPUTEMODE_DEFAULT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COMPUTEMODE_DEFAULT, __pyx_t_10) < (0)) __PYX_ERR(0, 268, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":269
 *     """See `nvmlComputeMode_t`."""
 *     COMPUTEMODE_DEFAULT = NVML_COMPUTEMODE_DEFAULT
 *     COMPUTEMODE_EXCLUSIVE_THREAD = NVML_COMPUTEMODE_EXCLUSIVE_THREAD             # <<<<<<<<<<<<<<
 *     COMPUTEMODE_PROHIBITED = NVML_COMPUTEMODE_PROHIBITED
 *     COMPUTEMODE_EXCLUSIVE_PROCESS = NVML_COMPUTEMODE_EXCLUSIVE_PROCESS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlComputeMode_t(NVML_COMPUTEMODE_EXCLUSIVE_THREAD); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 269, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COMPUTEMODE_EXCLUSIVE_THREAD, __pyx_t_10) < (0)) __PYX_ERR(0, 269, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":270
 *     COMPUTEMODE_DEFAULT = NVML_COMPUTEMODE_DEFAULT
 *     COMPUTEMODE_EXCLUSIVE_THREAD = NVML_COMPUTEMODE_EXCLUSIVE_THREAD
 *     COMPUTEMODE_PROHIBITED = NVML_COMPUTEMODE_PROHIBITED             # <<<<<<<<<<<<<<
 *     COMPUTEMODE_EXCLUSIVE_PROCESS = NVML_COMPUTEMODE_EXCLUSIVE_PROCESS
 *     COMPUTEMODE_COUNT = NVML_COMPUTEMODE_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlComputeMode_t(NVML_COMPUTEMODE_PROHIBITED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 270, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COMPUTEMODE_PROHIBITED, __pyx_t_10) < (0)) __PYX_ERR(0, 270, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":271
 *     COMPUTEMODE_EXCLUSIVE_THREAD = NVML_COMPUTEMODE_EXCLUSIVE_THREAD
 *     COMPUTEMODE_PROHIBITED = NVML_COMPUTEMODE_PROHIBITED
 *     COMPUTEMODE_EXCLUSIVE_PROCESS = NVML_COMPUTEMODE_EXCLUSIVE_PROCESS             # <<<<<<<<<<<<<<
 *     COMPUTEMODE_COUNT = NVML_COMPUTEMODE_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlComputeMode_t(NVML_COMPUTEMODE_EXCLUSIVE_PROCESS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 271, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COMPUTEMODE_EXCLUSIVE_PROCESS, __pyx_t_10) < (0)) __PYX_ERR(0, 271, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":272
 *     COMPUTEMODE_PROHIBITED = NVML_COMPUTEMODE_PROHIBITED
 *     COMPUTEMODE_EXCLUSIVE_PROCESS = NVML_COMPUTEMODE_EXCLUSIVE_PROCESS
 *     COMPUTEMODE_COUNT = NVML_COMPUTEMODE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class MemoryErrorType(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlComputeMode_t(NVML_COMPUTEMODE_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 272, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COMPUTEMODE_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 272, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":266
 *     TEMPERATURE_COUNT = NVML_TEMPERATURE_COUNT
 * 
 * class ComputeMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlComputeMode_t`."""
 *     COMPUTEMODE_DEFAULT = NVML_COMPUTEMODE_DEFAULT
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ComputeMode, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ComputeMode, __pyx_t_10) < (0)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":274
 *     COMPUTEMODE_COUNT = NVML_COMPUTEMODE_COUNT
 * 
 * class MemoryErrorType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlMemoryErrorType_t`."""
 *     CORRECTED = NVML_MEMORY_ERROR_TYPE_CORRECTED
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryErrorType, __pyx_mstate_global->__pyx_n_u_MemoryErrorType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlMemoryErrorType_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 274, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":276
 * class MemoryErrorType(_IntEnum):
 *     """See `nvmlMemoryErrorType_t`."""
 *     CORRECTED = NVML_MEMORY_ERROR_TYPE_CORRECTED             # <<<<<<<<<<<<<<
 *     UNCORRECTED = NVML_MEMORY_ERROR_TYPE_UNCORRECTED
 *     COUNT = NVML_MEMORY_ERROR_TYPE_COUNT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlMemoryErrorType_t(NVML_MEMORY_ERROR_TYPE_CORRECTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 276, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CORRECTED, __pyx_t_11) < (0)) __PYX_ERR(0, 276, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":277
 *     """See `nvmlMemoryErrorType_t`."""
 *     CORRECTED = NVML_MEMORY_ERROR_TYPE_CORRECTED
 *     UNCORRECTED = NVML_MEMORY_ERROR_TYPE_UNCORRECTED             # <<<<<<<<<<<<<<
 *     COUNT = NVML_MEMORY_ERROR_TYPE_COUNT
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlMemoryErrorType_t(NVML_MEMORY_ERROR_TYPE_UNCORRECTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNCORRECTED, __pyx_t_11) < (0)) __PYX_ERR(0, 277, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":278
 *     CORRECTED = NVML_MEMORY_ERROR_TYPE_CORRECTED
 *     UNCORRECTED = NVML_MEMORY_ERROR_TYPE_UNCORRECTED
 *     COUNT = NVML_MEMORY_ERROR_TYPE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class NvlinkVersion(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlMemoryErrorType_t(NVML_MEMORY_ERROR_TYPE_COUNT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 278, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_11) < (0)) __PYX_ERR(0, 278, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":274
 *     COMPUTEMODE_COUNT = NVML_COMPUTEMODE_COUNT
 * 
 * class MemoryErrorType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlMemoryErrorType_t`."""
 *     CORRECTED = NVML_MEMORY_ERROR_TYPE_CORRECTED
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryErrorType, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_MemoryErrorType, __pyx_t_11) < (0)) __PYX_ERR(0, 274, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":280
 *     COUNT = NVML_MEMORY_ERROR_TYPE_COUNT
 * 
 * class NvlinkVersion(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvlinkVersion_t`."""
 *     VERSION_INVALID = NVML_NVLINK_VERSION_INVALID
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_NvlinkVersion, __pyx_mstate_global->__pyx_n_u_NvlinkVersion, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlNvlinkVersion_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 280, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":282
 * class NvlinkVersion(_IntEnum):
 *     """See `nvmlNvlinkVersion_t`."""
 *     VERSION_INVALID = NVML_NVLINK_VERSION_INVALID             # <<<<<<<<<<<<<<
 *     VERSION_1_0 = NVML_NVLINK_VERSION_1_0
 *     VERSION_2_0 = NVML_NVLINK_VERSION_2_0
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_INVALID); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 282, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_INVALID, __pyx_t_5) < (0)) __PYX_ERR(0, 282, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":283
 *     """See `nvmlNvlinkVersion_t`."""
 *     VERSION_INVALID = NVML_NVLINK_VERSION_INVALID
 *     VERSION_1_0 = NVML_NVLINK_VERSION_1_0             # <<<<<<<<<<<<<<
 *     VERSION_2_0 = NVML_NVLINK_VERSION_2_0
 *     VERSION_2_2 = NVML_NVLINK_VERSION_2_2
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_1_0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 283, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_1_0, __pyx_t_5) < (0)) __PYX_ERR(0, 283, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":284
 *     VERSION_INVALID = NVML_NVLINK_VERSION_INVALID
 *     VERSION_1_0 = NVML_NVLINK_VERSION_1_0
 *     VERSION_2_0 = NVML_NVLINK_VERSION_2_0             # <<<<<<<<<<<<<<
 *     VERSION_2_2 = NVML_NVLINK_VERSION_2_2
 *     VERSION_3_0 = NVML_NVLINK_VERSION_3_0
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_2_0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 284, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_2_0, __pyx_t_5) < (0)) __PYX_ERR(0, 284, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":285
 *     VERSION_1_0 = NVML_NVLINK_VERSION_1_0
 *     VERSION_2_0 = NVML_NVLINK_VERSION_2_0
 *     VERSION_2_2 = NVML_NVLINK_VERSION_2_2             # <<<<<<<<<<<<<<
 *     VERSION_3_0 = NVML_NVLINK_VERSION_3_0
 *     VERSION_3_1 = NVML_NVLINK_VERSION_3_1
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_2_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 285, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_2_2, __pyx_t_5) < (0)) __PYX_ERR(0, 285, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":286
 *     VERSION_2_0 = NVML_NVLINK_VERSION_2_0
 *     VERSION_2_2 = NVML_NVLINK_VERSION_2_2
 *     VERSION_3_0 = NVML_NVLINK_VERSION_3_0             # <<<<<<<<<<<<<<
 *     VERSION_3_1 = NVML_NVLINK_VERSION_3_1
 *     VERSION_4_0 = NVML_NVLINK_VERSION_4_0
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_3_0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_3_0, __pyx_t_5) < (0)) __PYX_ERR(0, 286, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":287
 *     VERSION_2_2 = NVML_NVLINK_VERSION_2_2
 *     VERSION_3_0 = NVML_NVLINK_VERSION_3_0
 *     VERSION_3_1 = NVML_NVLINK_VERSION_3_1             # <<<<<<<<<<<<<<
 *     VERSION_4_0 = NVML_NVLINK_VERSION_4_0
 *     VERSION_5_0 = NVML_NVLINK_VERSION_5_0
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_3_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 287, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_3_1, __pyx_t_5) < (0)) __PYX_ERR(0, 287, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":288
 *     VERSION_3_0 = NVML_NVLINK_VERSION_3_0
 *     VERSION_3_1 = NVML_NVLINK_VERSION_3_1
 *     VERSION_4_0 = NVML_NVLINK_VERSION_4_0             # <<<<<<<<<<<<<<
 *     VERSION_5_0 = NVML_NVLINK_VERSION_5_0
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_4_0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_4_0, __pyx_t_5) < (0)) __PYX_ERR(0, 288, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":289
 *     VERSION_3_1 = NVML_NVLINK_VERSION_3_1
 *     VERSION_4_0 = NVML_NVLINK_VERSION_4_0
 *     VERSION_5_0 = NVML_NVLINK_VERSION_5_0             # <<<<<<<<<<<<<<
 * 
 * class EccCounterType(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlNvlinkVersion_t(NVML_NVLINK_VERSION_5_0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VERSION_5_0, __pyx_t_5) < (0)) __PYX_ERR(0, 289, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":280
 *     COUNT = NVML_MEMORY_ERROR_TYPE_COUNT
 * 
 * class NvlinkVersion(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlNvlinkVersion_t`."""
 *     VERSION_INVALID = NVML_NVLINK_VERSION_INVALID
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NvlinkVersion, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NvlinkVersion, __pyx_t_5) < (0)) __PYX_ERR(0, 280, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":291
 *     VERSION_5_0 = NVML_NVLINK_VERSION_5_0
 * 
 * class EccCounterType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlEccCounterType_t`."""
 *     VOLATILE_ECC = NVML_VOLATILE_ECC
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_EccCounterType, __pyx_mstate_global->__pyx_n_u_EccCounterType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlEccCounterType_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 291, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":293
 * class EccCounterType(_IntEnum):
 *     """See `nvmlEccCounterType_t`."""
 *     VOLATILE_ECC = NVML_VOLATILE_ECC             # <<<<<<<<<<<<<<
 *     AGGREGATE_ECC = NVML_AGGREGATE_ECC
 *     COUNT = NVML_ECC_COUNTER_TYPE_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlEccCounterType_t(NVML_VOLATILE_ECC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 293, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VOLATILE_ECC, __pyx_t_10) < (0)) __PYX_ERR(0, 293, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":294
 *     """See `nvmlEccCounterType_t`."""
 *     VOLATILE_ECC = NVML_VOLATILE_ECC
 *     AGGREGATE_ECC = NVML_AGGREGATE_ECC             # <<<<<<<<<<<<<<
 *     COUNT = NVML_ECC_COUNTER_TYPE_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlEccCounterType_t(NVML_AGGREGATE_ECC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_AGGREGATE_ECC, __pyx_t_10) < (0)) __PYX_ERR(0, 294, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":295
 *     VOLATILE_ECC = NVML_VOLATILE_ECC
 *     AGGREGATE_ECC = NVML_AGGREGATE_ECC
 *     COUNT = NVML_ECC_COUNTER_TYPE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class ClockType(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlEccCounterType_t(NVML_ECC_COUNTER_TYPE_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 295, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":291
 *     VERSION_5_0 = NVML_NVLINK_VERSION_5_0
 * 
 * class EccCounterType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlEccCounterType_t`."""
 *     VOLATILE_ECC = NVML_VOLATILE_ECC
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_EccCounterType, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_EccCounterType, __pyx_t_10) < (0)) __PYX_ERR(0, 291, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":297
 *     COUNT = NVML_ECC_COUNTER_TYPE_COUNT
 * 
 * class ClockType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlClockType_t`."""
 *     CLOCK_GRAPHICS = NVML_CLOCK_GRAPHICS
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ClockType, __pyx_mstate_global->__pyx_n_u_ClockType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlClockType_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 297, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":299
 * class ClockType(_IntEnum):
 *     """See `nvmlClockType_t`."""
 *     CLOCK_GRAPHICS = NVML_CLOCK_GRAPHICS             # <<<<<<<<<<<<<<
 *     CLOCK_SM = NVML_CLOCK_SM
 *     CLOCK_MEM = NVML_CLOCK_MEM
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlClockType_t(NVML_CLOCK_GRAPHICS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 299, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CLOCK_GRAPHICS, __pyx_t_11) < (0)) __PYX_ERR(0, 299, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":300
 *     """See `nvmlClockType_t`."""
 *     CLOCK_GRAPHICS = NVML_CLOCK_GRAPHICS
 *     CLOCK_SM = NVML_CLOCK_SM             # <<<<<<<<<<<<<<
 *     CLOCK_MEM = NVML_CLOCK_MEM
 *     CLOCK_VIDEO = NVML_CLOCK_VIDEO
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlClockType_t(NVML_CLOCK_SM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 300, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CLOCK_SM, __pyx_t_11) < (0)) __PYX_ERR(0, 300, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":301
 *     CLOCK_GRAPHICS = NVML_CLOCK_GRAPHICS
 *     CLOCK_SM = NVML_CLOCK_SM
 *     CLOCK_MEM = NVML_CLOCK_MEM             # <<<<<<<<<<<<<<
 *     CLOCK_VIDEO = NVML_CLOCK_VIDEO
 *     CLOCK_COUNT = NVML_CLOCK_COUNT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlClockType_t(NVML_CLOCK_MEM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 301, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CLOCK_MEM, __pyx_t_11) < (0)) __PYX_ERR(0, 301, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":302
 *     CLOCK_SM = NVML_CLOCK_SM
 *     CLOCK_MEM = NVML_CLOCK_MEM
 *     CLOCK_VIDEO = NVML_CLOCK_VIDEO             # <<<<<<<<<<<<<<
 *     CLOCK_COUNT = NVML_CLOCK_COUNT
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlClockType_t(NVML_CLOCK_VIDEO); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CLOCK_VIDEO, __pyx_t_11) < (0)) __PYX_ERR(0, 302, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":303
 *     CLOCK_MEM = NVML_CLOCK_MEM
 *     CLOCK_VIDEO = NVML_CLOCK_VIDEO
 *     CLOCK_COUNT = NVML_CLOCK_COUNT             # <<<<<<<<<<<<<<
 * 
 * class ClockId(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlClockType_t(NVML_CLOCK_COUNT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_CLOCK_COUNT, __pyx_t_11) < (0)) __PYX_ERR(0, 303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":297
 *     COUNT = NVML_ECC_COUNTER_TYPE_COUNT
 * 
 * class ClockType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlClockType_t`."""
 *     CLOCK_GRAPHICS = NVML_CLOCK_GRAPHICS
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_ClockType, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ClockType, __pyx_t_11) < (0)) __PYX_ERR(0, 297, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":305
 *     CLOCK_COUNT = NVML_CLOCK_COUNT
 * 
 * class ClockId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlClockId_t`."""
 *     CURRENT = NVML_CLOCK_ID_CURRENT
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ClockId, __pyx_mstate_global->__pyx_n_u_ClockId, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlClockId_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 305, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":307
 * class ClockId(_IntEnum):
 *     """See `nvmlClockId_t`."""
 *     CURRENT = NVML_CLOCK_ID_CURRENT             # <<<<<<<<<<<<<<
 *     APP_CLOCK_TARGET = NVML_CLOCK_ID_APP_CLOCK_TARGET
 *     APP_CLOCK_DEFAULT = NVML_CLOCK_ID_APP_CLOCK_DEFAULT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlClockId_t(NVML_CLOCK_ID_CURRENT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_CURRENT, __pyx_t_5) < (0)) __PYX_ERR(0, 307, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":308
 *     """See `nvmlClockId_t`."""
 *     CURRENT = NVML_CLOCK_ID_CURRENT
 *     APP_CLOCK_TARGET = NVML_CLOCK_ID_APP_CLOCK_TARGET             # <<<<<<<<<<<<<<
 *     APP_CLOCK_DEFAULT = NVML_CLOCK_ID_APP_CLOCK_DEFAULT
 *     CUSTOMER_BOOST_MAX = NVML_CLOCK_ID_CUSTOMER_BOOST_MAX
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlClockId_t(NVML_CLOCK_ID_APP_CLOCK_TARGET); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 308, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_APP_CLOCK_TARGET, __pyx_t_5) < (0)) __PYX_ERR(0, 308, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":309
 *     CURRENT = NVML_CLOCK_ID_CURRENT
 *     APP_CLOCK_TARGET = NVML_CLOCK_ID_APP_CLOCK_TARGET
 *     APP_CLOCK_DEFAULT = NVML_CLOCK_ID_APP_CLOCK_DEFAULT             # <<<<<<<<<<<<<<
 *     CUSTOMER_BOOST_MAX = NVML_CLOCK_ID_CUSTOMER_BOOST_MAX
 *     COUNT = NVML_CLOCK_ID_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlClockId_t(NVML_CLOCK_ID_APP_CLOCK_DEFAULT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 309, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_APP_CLOCK_DEFAULT, __pyx_t_5) < (0)) __PYX_ERR(0, 309, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":310
 *     APP_CLOCK_TARGET = NVML_CLOCK_ID_APP_CLOCK_TARGET
 *     APP_CLOCK_DEFAULT = NVML_CLOCK_ID_APP_CLOCK_DEFAULT
 *     CUSTOMER_BOOST_MAX = NVML_CLOCK_ID_CUSTOMER_BOOST_MAX             # <<<<<<<<<<<<<<
 *     COUNT = NVML_CLOCK_ID_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlClockId_t(NVML_CLOCK_ID_CUSTOMER_BOOST_MAX); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 310, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_CUSTOMER_BOOST_MAX, __pyx_t_5) < (0)) __PYX_ERR(0, 310, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":311
 *     APP_CLOCK_DEFAULT = NVML_CLOCK_ID_APP_CLOCK_DEFAULT
 *     CUSTOMER_BOOST_MAX = NVML_CLOCK_ID_CUSTOMER_BOOST_MAX
 *     COUNT = NVML_CLOCK_ID_COUNT             # <<<<<<<<<<<<<<
 * 
 * class DriverModel(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlClockId_t(NVML_CLOCK_ID_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 311, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 311, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":305
 *     CLOCK_COUNT = NVML_CLOCK_COUNT
 * 
 * class ClockId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlClockId_t`."""
 *     CURRENT = NVML_CLOCK_ID_CURRENT
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ClockId, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ClockId, __pyx_t_5) < (0)) __PYX_ERR(0, 305, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":313
 *     COUNT = NVML_CLOCK_ID_COUNT
 * 
 * class DriverModel(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDriverModel_t`."""
 *     DRIVER_WDDM = NVML_DRIVER_WDDM
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_DriverModel, __pyx_mstate_global->__pyx_n_u_DriverModel, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlDriverModel_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 313, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":315
 * class DriverModel(_IntEnum):
 *     """See `nvmlDriverModel_t`."""
 *     DRIVER_WDDM = NVML_DRIVER_WDDM             # <<<<<<<<<<<<<<
 *     DRIVER_WDM = NVML_DRIVER_WDM
 *     DRIVER_MCDM = NVML_DRIVER_MCDM
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlDriverModel_t(NVML_DRIVER_WDDM); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 315, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_DRIVER_WDDM, __pyx_t_10) < (0)) __PYX_ERR(0, 315, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":316
 *     """See `nvmlDriverModel_t`."""
 *     DRIVER_WDDM = NVML_DRIVER_WDDM
 *     DRIVER_WDM = NVML_DRIVER_WDM             # <<<<<<<<<<<<<<
 *     DRIVER_MCDM = NVML_DRIVER_MCDM
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlDriverModel_t(NVML_DRIVER_WDM); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 316, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_DRIVER_WDM, __pyx_t_10) < (0)) __PYX_ERR(0, 316, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":317
 *     DRIVER_WDDM = NVML_DRIVER_WDDM
 *     DRIVER_WDM = NVML_DRIVER_WDM
 *     DRIVER_MCDM = NVML_DRIVER_MCDM             # <<<<<<<<<<<<<<
 * 
 * class Pstates(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlDriverModel_t(NVML_DRIVER_MCDM); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 317, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_DRIVER_MCDM, __pyx_t_10) < (0)) __PYX_ERR(0, 317, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":313
 *     COUNT = NVML_CLOCK_ID_COUNT
 * 
 * class DriverModel(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDriverModel_t`."""
 *     DRIVER_WDDM = NVML_DRIVER_WDDM
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DriverModel, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DriverModel, __pyx_t_10) < (0)) __PYX_ERR(0, 313, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":319
 *     DRIVER_MCDM = NVML_DRIVER_MCDM
 * 
 * class Pstates(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPstates_t`."""
 *     PSTATE_0 = NVML_PSTATE_0
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_Pstates, __pyx_mstate_global->__pyx_n_u_Pstates, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlPstates_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 319, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":321
 * class Pstates(_IntEnum):
 *     """See `nvmlPstates_t`."""
 *     PSTATE_0 = NVML_PSTATE_0             # <<<<<<<<<<<<<<
 *     PSTATE_1 = NVML_PSTATE_1
 *     PSTATE_2 = NVML_PSTATE_2
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 321, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_0, __pyx_t_11) < (0)) __PYX_ERR(0, 321, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":322
 *     """See `nvmlPstates_t`."""
 *     PSTATE_0 = NVML_PSTATE_0
 *     PSTATE_1 = NVML_PSTATE_1             # <<<<<<<<<<<<<<
 *     PSTATE_2 = NVML_PSTATE_2
 *     PSTATE_3 = NVML_PSTATE_3
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 322, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_1, __pyx_t_11) < (0)) __PYX_ERR(0, 322, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":323
 *     PSTATE_0 = NVML_PSTATE_0
 *     PSTATE_1 = NVML_PSTATE_1
 *     PSTATE_2 = NVML_PSTATE_2             # <<<<<<<<<<<<<<
 *     PSTATE_3 = NVML_PSTATE_3
 *     PSTATE_4 = NVML_PSTATE_4
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 323, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_2, __pyx_t_11) < (0)) __PYX_ERR(0, 323, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":324
 *     PSTATE_1 = NVML_PSTATE_1
 *     PSTATE_2 = NVML_PSTATE_2
 *     PSTATE_3 = NVML_PSTATE_3             # <<<<<<<<<<<<<<
 *     PSTATE_4 = NVML_PSTATE_4
 *     PSTATE_5 = NVML_PSTATE_5
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 324, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_3, __pyx_t_11) < (0)) __PYX_ERR(0, 324, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":325
 *     PSTATE_2 = NVML_PSTATE_2
 *     PSTATE_3 = NVML_PSTATE_3
 *     PSTATE_4 = NVML_PSTATE_4             # <<<<<<<<<<<<<<
 *     PSTATE_5 = NVML_PSTATE_5
 *     PSTATE_6 = NVML_PSTATE_6
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 325, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_4, __pyx_t_11) < (0)) __PYX_ERR(0, 325, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":326
 *     PSTATE_3 = NVML_PSTATE_3
 *     PSTATE_4 = NVML_PSTATE_4
 *     PSTATE_5 = NVML_PSTATE_5             # <<<<<<<<<<<<<<
 *     PSTATE_6 = NVML_PSTATE_6
 *     PSTATE_7 = NVML_PSTATE_7
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 326, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_5, __pyx_t_11) < (0)) __PYX_ERR(0, 326, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":327
 *     PSTATE_4 = NVML_PSTATE_4
 *     PSTATE_5 = NVML_PSTATE_5
 *     PSTATE_6 = NVML_PSTATE_6             # <<<<<<<<<<<<<<
 *     PSTATE_7 = NVML_PSTATE_7
 *     PSTATE_8 = NVML_PSTATE_8
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 327, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_6, __pyx_t_11) < (0)) __PYX_ERR(0, 327, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":328
 *     PSTATE_5 = NVML_PSTATE_5
 *     PSTATE_6 = NVML_PSTATE_6
 *     PSTATE_7 = NVML_PSTATE_7             # <<<<<<<<<<<<<<
 *     PSTATE_8 = NVML_PSTATE_8
 *     PSTATE_9 = NVML_PSTATE_9
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_7, __pyx_t_11) < (0)) __PYX_ERR(0, 328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":329
 *     PSTATE_6 = NVML_PSTATE_6
 *     PSTATE_7 = NVML_PSTATE_7
 *     PSTATE_8 = NVML_PSTATE_8             # <<<<<<<<<<<<<<
 *     PSTATE_9 = NVML_PSTATE_9
 *     PSTATE_10 = NVML_PSTATE_10
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_8, __pyx_t_11) < (0)) __PYX_ERR(0, 329, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":330
 *     PSTATE_7 = NVML_PSTATE_7
 *     PSTATE_8 = NVML_PSTATE_8
 *     PSTATE_9 = NVML_PSTATE_9             # <<<<<<<<<<<<<<
 *     PSTATE_10 = NVML_PSTATE_10
 *     PSTATE_11 = NVML_PSTATE_11
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_9, __pyx_t_11) < (0)) __PYX_ERR(0, 330, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":331
 *     PSTATE_8 = NVML_PSTATE_8
 *     PSTATE_9 = NVML_PSTATE_9
 *     PSTATE_10 = NVML_PSTATE_10             # <<<<<<<<<<<<<<
 *     PSTATE_11 = NVML_PSTATE_11
 *     PSTATE_12 = NVML_PSTATE_12
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 331, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_10, __pyx_t_11) < (0)) __PYX_ERR(0, 331, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":332
 *     PSTATE_9 = NVML_PSTATE_9
 *     PSTATE_10 = NVML_PSTATE_10
 *     PSTATE_11 = NVML_PSTATE_11             # <<<<<<<<<<<<<<
 *     PSTATE_12 = NVML_PSTATE_12
 *     PSTATE_13 = NVML_PSTATE_13
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_11); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_11, __pyx_t_11) < (0)) __PYX_ERR(0, 332, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":333
 *     PSTATE_10 = NVML_PSTATE_10
 *     PSTATE_11 = NVML_PSTATE_11
 *     PSTATE_12 = NVML_PSTATE_12             # <<<<<<<<<<<<<<
 *     PSTATE_13 = NVML_PSTATE_13
 *     PSTATE_14 = NVML_PSTATE_14
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_12); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_12, __pyx_t_11) < (0)) __PYX_ERR(0, 333, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":334
 *     PSTATE_11 = NVML_PSTATE_11
 *     PSTATE_12 = NVML_PSTATE_12
 *     PSTATE_13 = NVML_PSTATE_13             # <<<<<<<<<<<<<<
 *     PSTATE_14 = NVML_PSTATE_14
 *     PSTATE_15 = NVML_PSTATE_15
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_13); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 334, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_13, __pyx_t_11) < (0)) __PYX_ERR(0, 334, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":335
 *     PSTATE_12 = NVML_PSTATE_12
 *     PSTATE_13 = NVML_PSTATE_13
 *     PSTATE_14 = NVML_PSTATE_14             # <<<<<<<<<<<<<<
 *     PSTATE_15 = NVML_PSTATE_15
 *     PSTATE_UNKNOWN = NVML_PSTATE_UNKNOWN
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_14); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_14, __pyx_t_11) < (0)) __PYX_ERR(0, 335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":336
 *     PSTATE_13 = NVML_PSTATE_13
 *     PSTATE_14 = NVML_PSTATE_14
 *     PSTATE_15 = NVML_PSTATE_15             # <<<<<<<<<<<<<<
 *     PSTATE_UNKNOWN = NVML_PSTATE_UNKNOWN
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_15); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_15, __pyx_t_11) < (0)) __PYX_ERR(0, 336, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":337
 *     PSTATE_14 = NVML_PSTATE_14
 *     PSTATE_15 = NVML_PSTATE_15
 *     PSTATE_UNKNOWN = NVML_PSTATE_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class GpuOperationMode(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPstates_t(NVML_PSTATE_UNKNOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PSTATE_UNKNOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 337, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":319
 *     DRIVER_MCDM = NVML_DRIVER_MCDM
 * 
 * class Pstates(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPstates_t`."""
 *     PSTATE_0 = NVML_PSTATE_0
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_Pstates, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_Pstates, __pyx_t_11) < (0)) __PYX_ERR(0, 319, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":339
 *     PSTATE_UNKNOWN = NVML_PSTATE_UNKNOWN
 * 
 * class GpuOperationMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuOperationMode_t`."""
 *     GOM_ALL_ON = NVML_GOM_ALL_ON
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpuOperationMode, __pyx_mstate_global->__pyx_n_u_GpuOperationMode, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpuOperationMode_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 339, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":341
 * class GpuOperationMode(_IntEnum):
 *     """See `nvmlGpuOperationMode_t`."""
 *     GOM_ALL_ON = NVML_GOM_ALL_ON             # <<<<<<<<<<<<<<
 *     GOM_COMPUTE = NVML_GOM_COMPUTE
 *     GOM_LOW_DP = NVML_GOM_LOW_DP
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuOperationMode_t(NVML_GOM_ALL_ON); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GOM_ALL_ON, __pyx_t_5) < (0)) __PYX_ERR(0, 341, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":342
 *     """See `nvmlGpuOperationMode_t`."""
 *     GOM_ALL_ON = NVML_GOM_ALL_ON
 *     GOM_COMPUTE = NVML_GOM_COMPUTE             # <<<<<<<<<<<<<<
 *     GOM_LOW_DP = NVML_GOM_LOW_DP
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuOperationMode_t(NVML_GOM_COMPUTE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 342, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GOM_COMPUTE, __pyx_t_5) < (0)) __PYX_ERR(0, 342, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":343
 *     GOM_ALL_ON = NVML_GOM_ALL_ON
 *     GOM_COMPUTE = NVML_GOM_COMPUTE
 *     GOM_LOW_DP = NVML_GOM_LOW_DP             # <<<<<<<<<<<<<<
 * 
 * class InforomObject(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuOperationMode_t(NVML_GOM_LOW_DP); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GOM_LOW_DP, __pyx_t_5) < (0)) __PYX_ERR(0, 343, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":339
 *     PSTATE_UNKNOWN = NVML_PSTATE_UNKNOWN
 * 
 * class GpuOperationMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuOperationMode_t`."""
 *     GOM_ALL_ON = NVML_GOM_ALL_ON
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GpuOperationMode, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuOperationMode, __pyx_t_5) < (0)) __PYX_ERR(0, 339, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":345
 *     GOM_LOW_DP = NVML_GOM_LOW_DP
 * 
 * class InforomObject(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlInforomObject_t`."""
 *     INFOROM_OEM = NVML_INFOROM_OEM
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_InforomObject, __pyx_mstate_global->__pyx_n_u_InforomObject, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlInforomObject_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 345, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":347
 * class InforomObject(_IntEnum):
 *     """See `nvmlInforomObject_t`."""
 *     INFOROM_OEM = NVML_INFOROM_OEM             # <<<<<<<<<<<<<<
 *     INFOROM_ECC = NVML_INFOROM_ECC
 *     INFOROM_POWER = NVML_INFOROM_POWER
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlInforomObject_t(NVML_INFOROM_OEM); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_INFOROM_OEM, __pyx_t_10) < (0)) __PYX_ERR(0, 347, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":348
 *     """See `nvmlInforomObject_t`."""
 *     INFOROM_OEM = NVML_INFOROM_OEM
 *     INFOROM_ECC = NVML_INFOROM_ECC             # <<<<<<<<<<<<<<
 *     INFOROM_POWER = NVML_INFOROM_POWER
 *     INFOROM_DEN = NVML_INFOROM_DEN
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlInforomObject_t(NVML_INFOROM_ECC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_INFOROM_ECC, __pyx_t_10) < (0)) __PYX_ERR(0, 348, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":349
 *     INFOROM_OEM = NVML_INFOROM_OEM
 *     INFOROM_ECC = NVML_INFOROM_ECC
 *     INFOROM_POWER = NVML_INFOROM_POWER             # <<<<<<<<<<<<<<
 *     INFOROM_DEN = NVML_INFOROM_DEN
 *     INFOROM_COUNT = NVML_INFOROM_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlInforomObject_t(NVML_INFOROM_POWER); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 349, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_INFOROM_POWER, __pyx_t_10) < (0)) __PYX_ERR(0, 349, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":350
 *     INFOROM_ECC = NVML_INFOROM_ECC
 *     INFOROM_POWER = NVML_INFOROM_POWER
 *     INFOROM_DEN = NVML_INFOROM_DEN             # <<<<<<<<<<<<<<
 *     INFOROM_COUNT = NVML_INFOROM_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlInforomObject_t(NVML_INFOROM_DEN); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 350, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_INFOROM_DEN, __pyx_t_10) < (0)) __PYX_ERR(0, 350, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":351
 *     INFOROM_POWER = NVML_INFOROM_POWER
 *     INFOROM_DEN = NVML_INFOROM_DEN
 *     INFOROM_COUNT = NVML_INFOROM_COUNT             # <<<<<<<<<<<<<<
 * 
 * class Return(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlInforomObject_t(NVML_INFOROM_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 351, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_INFOROM_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 351, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":345
 *     GOM_LOW_DP = NVML_GOM_LOW_DP
 * 
 * class InforomObject(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlInforomObject_t`."""
 *     INFOROM_OEM = NVML_INFOROM_OEM
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_InforomObject, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InforomObject, __pyx_t_10) < (0)) __PYX_ERR(0, 345, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":353
 *     INFOROM_COUNT = NVML_INFOROM_COUNT
 * 
 * class Return(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlReturn_t`."""
 *     SUCCESS = NVML_SUCCESS
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_Return, __pyx_mstate_global->__pyx_n_u_Return, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlReturn_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 353, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":355
 * class Return(_IntEnum):
 *     """See `nvmlReturn_t`."""
 *     SUCCESS = NVML_SUCCESS             # <<<<<<<<<<<<<<
 *     ERROR_UNINITIALIZED = NVML_ERROR_UNINITIALIZED
 *     ERROR_INVALID_ARGUMENT = NVML_ERROR_INVALID_ARGUMENT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_SUCCESS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SUCCESS, __pyx_t_11) < (0)) __PYX_ERR(0, 355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":356
 *     """See `nvmlReturn_t`."""
 *     SUCCESS = NVML_SUCCESS
 *     ERROR_UNINITIALIZED = NVML_ERROR_UNINITIALIZED             # <<<<<<<<<<<<<<
 *     ERROR_INVALID_ARGUMENT = NVML_ERROR_INVALID_ARGUMENT
 *     ERROR_NOT_SUPPORTED = NVML_ERROR_NOT_SUPPORTED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_UNINITIALIZED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 356, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_UNINITIALIZED, __pyx_t_11) < (0)) __PYX_ERR(0, 356, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":357
 *     SUCCESS = NVML_SUCCESS
 *     ERROR_UNINITIALIZED = NVML_ERROR_UNINITIALIZED
 *     ERROR_INVALID_ARGUMENT = NVML_ERROR_INVALID_ARGUMENT             # <<<<<<<<<<<<<<
 *     ERROR_NOT_SUPPORTED = NVML_ERROR_NOT_SUPPORTED
 *     ERROR_NO_PERMISSION = NVML_ERROR_NO_PERMISSION
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_INVALID_ARGUMENT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_INVALID_ARGUMENT, __pyx_t_11) < (0)) __PYX_ERR(0, 357, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":358
 *     ERROR_UNINITIALIZED = NVML_ERROR_UNINITIALIZED
 *     ERROR_INVALID_ARGUMENT = NVML_ERROR_INVALID_ARGUMENT
 *     ERROR_NOT_SUPPORTED = NVML_ERROR_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     ERROR_NO_PERMISSION = NVML_ERROR_NO_PERMISSION
 *     ERROR_ALREADY_INITIALIZED = NVML_ERROR_ALREADY_INITIALIZED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_NOT_SUPPORTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 358, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_NOT_SUPPORTED, __pyx_t_11) < (0)) __PYX_ERR(0, 358, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":359
 *     ERROR_INVALID_ARGUMENT = NVML_ERROR_INVALID_ARGUMENT
 *     ERROR_NOT_SUPPORTED = NVML_ERROR_NOT_SUPPORTED
 *     ERROR_NO_PERMISSION = NVML_ERROR_NO_PERMISSION             # <<<<<<<<<<<<<<
 *     ERROR_ALREADY_INITIALIZED = NVML_ERROR_ALREADY_INITIALIZED
 *     ERROR_NOT_FOUND = NVML_ERROR_NOT_FOUND
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_NO_PERMISSION); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 359, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_NO_PERMISSION, __pyx_t_11) < (0)) __PYX_ERR(0, 359, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":360
 *     ERROR_NOT_SUPPORTED = NVML_ERROR_NOT_SUPPORTED
 *     ERROR_NO_PERMISSION = NVML_ERROR_NO_PERMISSION
 *     ERROR_ALREADY_INITIALIZED = NVML_ERROR_ALREADY_INITIALIZED             # <<<<<<<<<<<<<<
 *     ERROR_NOT_FOUND = NVML_ERROR_NOT_FOUND
 *     ERROR_INSUFFICIENT_SIZE = NVML_ERROR_INSUFFICIENT_SIZE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_ALREADY_INITIALIZED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 360, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_ALREADY_INITIALIZED, __pyx_t_11) < (0)) __PYX_ERR(0, 360, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":361
 *     ERROR_NO_PERMISSION = NVML_ERROR_NO_PERMISSION
 *     ERROR_ALREADY_INITIALIZED = NVML_ERROR_ALREADY_INITIALIZED
 *     ERROR_NOT_FOUND = NVML_ERROR_NOT_FOUND             # <<<<<<<<<<<<<<
 *     ERROR_INSUFFICIENT_SIZE = NVML_ERROR_INSUFFICIENT_SIZE
 *     ERROR_INSUFFICIENT_POWER = NVML_ERROR_INSUFFICIENT_POWER
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_NOT_FOUND); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 361, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_NOT_FOUND, __pyx_t_11) < (0)) __PYX_ERR(0, 361, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":362
 *     ERROR_ALREADY_INITIALIZED = NVML_ERROR_ALREADY_INITIALIZED
 *     ERROR_NOT_FOUND = NVML_ERROR_NOT_FOUND
 *     ERROR_INSUFFICIENT_SIZE = NVML_ERROR_INSUFFICIENT_SIZE             # <<<<<<<<<<<<<<
 *     ERROR_INSUFFICIENT_POWER = NVML_ERROR_INSUFFICIENT_POWER
 *     ERROR_DRIVER_NOT_LOADED = NVML_ERROR_DRIVER_NOT_LOADED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_INSUFFICIENT_SIZE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 362, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_INSUFFICIENT_SIZE, __pyx_t_11) < (0)) __PYX_ERR(0, 362, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":363
 *     ERROR_NOT_FOUND = NVML_ERROR_NOT_FOUND
 *     ERROR_INSUFFICIENT_SIZE = NVML_ERROR_INSUFFICIENT_SIZE
 *     ERROR_INSUFFICIENT_POWER = NVML_ERROR_INSUFFICIENT_POWER             # <<<<<<<<<<<<<<
 *     ERROR_DRIVER_NOT_LOADED = NVML_ERROR_DRIVER_NOT_LOADED
 *     ERROR_TIMEOUT = NVML_ERROR_TIMEOUT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_INSUFFICIENT_POWER); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 363, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_INSUFFICIENT_POWER, __pyx_t_11) < (0)) __PYX_ERR(0, 363, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":364
 *     ERROR_INSUFFICIENT_SIZE = NVML_ERROR_INSUFFICIENT_SIZE
 *     ERROR_INSUFFICIENT_POWER = NVML_ERROR_INSUFFICIENT_POWER
 *     ERROR_DRIVER_NOT_LOADED = NVML_ERROR_DRIVER_NOT_LOADED             # <<<<<<<<<<<<<<
 *     ERROR_TIMEOUT = NVML_ERROR_TIMEOUT
 *     ERROR_IRQ_ISSUE = NVML_ERROR_IRQ_ISSUE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_DRIVER_NOT_LOADED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_DRIVER_NOT_LOADED, __pyx_t_11) < (0)) __PYX_ERR(0, 364, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":365
 *     ERROR_INSUFFICIENT_POWER = NVML_ERROR_INSUFFICIENT_POWER
 *     ERROR_DRIVER_NOT_LOADED = NVML_ERROR_DRIVER_NOT_LOADED
 *     ERROR_TIMEOUT = NVML_ERROR_TIMEOUT             # <<<<<<<<<<<<<<
 *     ERROR_IRQ_ISSUE = NVML_ERROR_IRQ_ISSUE
 *     ERROR_LIBRARY_NOT_FOUND = NVML_ERROR_LIBRARY_NOT_FOUND
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_TIMEOUT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_TIMEOUT, __pyx_t_11) < (0)) __PYX_ERR(0, 365, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":366
 *     ERROR_DRIVER_NOT_LOADED = NVML_ERROR_DRIVER_NOT_LOADED
 *     ERROR_TIMEOUT = NVML_ERROR_TIMEOUT
 *     ERROR_IRQ_ISSUE = NVML_ERROR_IRQ_ISSUE             # <<<<<<<<<<<<<<
 *     ERROR_LIBRARY_NOT_FOUND = NVML_ERROR_LIBRARY_NOT_FOUND
 *     ERROR_FUNCTION_NOT_FOUND = NVML_ERROR_FUNCTION_NOT_FOUND
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_IRQ_ISSUE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_IRQ_ISSUE, __pyx_t_11) < (0)) __PYX_ERR(0, 366, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":367
 *     ERROR_TIMEOUT = NVML_ERROR_TIMEOUT
 *     ERROR_IRQ_ISSUE = NVML_ERROR_IRQ_ISSUE
 *     ERROR_LIBRARY_NOT_FOUND = NVML_ERROR_LIBRARY_NOT_FOUND             # <<<<<<<<<<<<<<
 *     ERROR_FUNCTION_NOT_FOUND = NVML_ERROR_FUNCTION_NOT_FOUND
 *     ERROR_CORRUPTED_INFOROM = NVML_ERROR_CORRUPTED_INFOROM
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_LIBRARY_NOT_FOUND); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 367, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_LIBRARY_NOT_FOUND, __pyx_t_11) < (0)) __PYX_ERR(0, 367, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":368
 *     ERROR_IRQ_ISSUE = NVML_ERROR_IRQ_ISSUE
 *     ERROR_LIBRARY_NOT_FOUND = NVML_ERROR_LIBRARY_NOT_FOUND
 *     ERROR_FUNCTION_NOT_FOUND = NVML_ERROR_FUNCTION_NOT_FOUND             # <<<<<<<<<<<<<<
 *     ERROR_CORRUPTED_INFOROM = NVML_ERROR_CORRUPTED_INFOROM
 *     ERROR_GPU_IS_LOST = NVML_ERROR_GPU_IS_LOST
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_FUNCTION_NOT_FOUND); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_FUNCTION_NOT_FOUND, __pyx_t_11) < (0)) __PYX_ERR(0, 368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":369
 *     ERROR_LIBRARY_NOT_FOUND = NVML_ERROR_LIBRARY_NOT_FOUND
 *     ERROR_FUNCTION_NOT_FOUND = NVML_ERROR_FUNCTION_NOT_FOUND
 *     ERROR_CORRUPTED_INFOROM = NVML_ERROR_CORRUPTED_INFOROM             # <<<<<<<<<<<<<<
 *     ERROR_GPU_IS_LOST = NVML_ERROR_GPU_IS_LOST
 *     ERROR_RESET_REQUIRED = NVML_ERROR_RESET_REQUIRED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_CORRUPTED_INFOROM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 369, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_CORRUPTED_INFOROM, __pyx_t_11) < (0)) __PYX_ERR(0, 369, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":370
 *     ERROR_FUNCTION_NOT_FOUND = NVML_ERROR_FUNCTION_NOT_FOUND
 *     ERROR_CORRUPTED_INFOROM = NVML_ERROR_CORRUPTED_INFOROM
 *     ERROR_GPU_IS_LOST = NVML_ERROR_GPU_IS_LOST             # <<<<<<<<<<<<<<
 *     ERROR_RESET_REQUIRED = NVML_ERROR_RESET_REQUIRED
 *     ERROR_OPERATING_SYSTEM = NVML_ERROR_OPERATING_SYSTEM
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_GPU_IS_LOST); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_GPU_IS_LOST, __pyx_t_11) < (0)) __PYX_ERR(0, 370, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":371
 *     ERROR_CORRUPTED_INFOROM = NVML_ERROR_CORRUPTED_INFOROM
 *     ERROR_GPU_IS_LOST = NVML_ERROR_GPU_IS_LOST
 *     ERROR_RESET_REQUIRED = NVML_ERROR_RESET_REQUIRED             # <<<<<<<<<<<<<<
 *     ERROR_OPERATING_SYSTEM = NVML_ERROR_OPERATING_SYSTEM
 *     ERROR_LIB_RM_VERSION_MISMATCH = NVML_ERROR_LIB_RM_VERSION_MISMATCH
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_RESET_REQUIRED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 371, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_RESET_REQUIRED, __pyx_t_11) < (0)) __PYX_ERR(0, 371, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":372
 *     ERROR_GPU_IS_LOST = NVML_ERROR_GPU_IS_LOST
 *     ERROR_RESET_REQUIRED = NVML_ERROR_RESET_REQUIRED
 *     ERROR_OPERATING_SYSTEM = NVML_ERROR_OPERATING_SYSTEM             # <<<<<<<<<<<<<<
 *     ERROR_LIB_RM_VERSION_MISMATCH = NVML_ERROR_LIB_RM_VERSION_MISMATCH
 *     ERROR_IN_USE = NVML_ERROR_IN_USE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_OPERATING_SYSTEM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 372, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_OPERATING_SYSTEM, __pyx_t_11) < (0)) __PYX_ERR(0, 372, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":373
 *     ERROR_RESET_REQUIRED = NVML_ERROR_RESET_REQUIRED
 *     ERROR_OPERATING_SYSTEM = NVML_ERROR_OPERATING_SYSTEM
 *     ERROR_LIB_RM_VERSION_MISMATCH = NVML_ERROR_LIB_RM_VERSION_MISMATCH             # <<<<<<<<<<<<<<
 *     ERROR_IN_USE = NVML_ERROR_IN_USE
 *     ERROR_MEMORY = NVML_ERROR_MEMORY
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_LIB_RM_VERSION_MISMATCH); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_LIB_RM_VERSION_MISMATCH, __pyx_t_11) < (0)) __PYX_ERR(0, 373, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":374
 *     ERROR_OPERATING_SYSTEM = NVML_ERROR_OPERATING_SYSTEM
 *     ERROR_LIB_RM_VERSION_MISMATCH = NVML_ERROR_LIB_RM_VERSION_MISMATCH
 *     ERROR_IN_USE = NVML_ERROR_IN_USE             # <<<<<<<<<<<<<<
 *     ERROR_MEMORY = NVML_ERROR_MEMORY
 *     ERROR_NO_DATA = NVML_ERROR_NO_DATA
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_IN_USE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 374, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_IN_USE, __pyx_t_11) < (0)) __PYX_ERR(0, 374, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":375
 *     ERROR_LIB_RM_VERSION_MISMATCH = NVML_ERROR_LIB_RM_VERSION_MISMATCH
 *     ERROR_IN_USE = NVML_ERROR_IN_USE
 *     ERROR_MEMORY = NVML_ERROR_MEMORY             # <<<<<<<<<<<<<<
 *     ERROR_NO_DATA = NVML_ERROR_NO_DATA
 *     ERROR_VGPU_ECC_NOT_SUPPORTED = NVML_ERROR_VGPU_ECC_NOT_SUPPORTED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_MEMORY); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_MEMORY, __pyx_t_11) < (0)) __PYX_ERR(0, 375, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":376
 *     ERROR_IN_USE = NVML_ERROR_IN_USE
 *     ERROR_MEMORY = NVML_ERROR_MEMORY
 *     ERROR_NO_DATA = NVML_ERROR_NO_DATA             # <<<<<<<<<<<<<<
 *     ERROR_VGPU_ECC_NOT_SUPPORTED = NVML_ERROR_VGPU_ECC_NOT_SUPPORTED
 *     ERROR_INSUFFICIENT_RESOURCES = NVML_ERROR_INSUFFICIENT_RESOURCES
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_NO_DATA); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 376, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_NO_DATA, __pyx_t_11) < (0)) __PYX_ERR(0, 376, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":377
 *     ERROR_MEMORY = NVML_ERROR_MEMORY
 *     ERROR_NO_DATA = NVML_ERROR_NO_DATA
 *     ERROR_VGPU_ECC_NOT_SUPPORTED = NVML_ERROR_VGPU_ECC_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     ERROR_INSUFFICIENT_RESOURCES = NVML_ERROR_INSUFFICIENT_RESOURCES
 *     ERROR_FREQ_NOT_SUPPORTED = NVML_ERROR_FREQ_NOT_SUPPORTED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_VGPU_ECC_NOT_SUPPORTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 377, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_VGPU_ECC_NOT_SUPPORTED, __pyx_t_11) < (0)) __PYX_ERR(0, 377, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":378
 *     ERROR_NO_DATA = NVML_ERROR_NO_DATA
 *     ERROR_VGPU_ECC_NOT_SUPPORTED = NVML_ERROR_VGPU_ECC_NOT_SUPPORTED
 *     ERROR_INSUFFICIENT_RESOURCES = NVML_ERROR_INSUFFICIENT_RESOURCES             # <<<<<<<<<<<<<<
 *     ERROR_FREQ_NOT_SUPPORTED = NVML_ERROR_FREQ_NOT_SUPPORTED
 *     ERROR_ARGUMENT_VERSION_MISMATCH = NVML_ERROR_ARGUMENT_VERSION_MISMATCH
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_INSUFFICIENT_RESOURCES); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 378, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_INSUFFICIENT_RESOURCES, __pyx_t_11) < (0)) __PYX_ERR(0, 378, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":379
 *     ERROR_VGPU_ECC_NOT_SUPPORTED = NVML_ERROR_VGPU_ECC_NOT_SUPPORTED
 *     ERROR_INSUFFICIENT_RESOURCES = NVML_ERROR_INSUFFICIENT_RESOURCES
 *     ERROR_FREQ_NOT_SUPPORTED = NVML_ERROR_FREQ_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     ERROR_ARGUMENT_VERSION_MISMATCH = NVML_ERROR_ARGUMENT_VERSION_MISMATCH
 *     ERROR_DEPRECATED = NVML_ERROR_DEPRECATED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_FREQ_NOT_SUPPORTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 379, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_FREQ_NOT_SUPPORTED, __pyx_t_11) < (0)) __PYX_ERR(0, 379, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":380
 *     ERROR_INSUFFICIENT_RESOURCES = NVML_ERROR_INSUFFICIENT_RESOURCES
 *     ERROR_FREQ_NOT_SUPPORTED = NVML_ERROR_FREQ_NOT_SUPPORTED
 *     ERROR_ARGUMENT_VERSION_MISMATCH = NVML_ERROR_ARGUMENT_VERSION_MISMATCH             # <<<<<<<<<<<<<<
 *     ERROR_DEPRECATED = NVML_ERROR_DEPRECATED
 *     ERROR_NOT_READY = NVML_ERROR_NOT_READY
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_ARGUMENT_VERSION_MISMATCH); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 380, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_ARGUMENT_VERSION_MISMATCH, __pyx_t_11) < (0)) __PYX_ERR(0, 380, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":381
 *     ERROR_FREQ_NOT_SUPPORTED = NVML_ERROR_FREQ_NOT_SUPPORTED
 *     ERROR_ARGUMENT_VERSION_MISMATCH = NVML_ERROR_ARGUMENT_VERSION_MISMATCH
 *     ERROR_DEPRECATED = NVML_ERROR_DEPRECATED             # <<<<<<<<<<<<<<
 *     ERROR_NOT_READY = NVML_ERROR_NOT_READY
 *     ERROR_GPU_NOT_FOUND = NVML_ERROR_GPU_NOT_FOUND
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_DEPRECATED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_DEPRECATED, __pyx_t_11) < (0)) __PYX_ERR(0, 381, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":382
 *     ERROR_ARGUMENT_VERSION_MISMATCH = NVML_ERROR_ARGUMENT_VERSION_MISMATCH
 *     ERROR_DEPRECATED = NVML_ERROR_DEPRECATED
 *     ERROR_NOT_READY = NVML_ERROR_NOT_READY             # <<<<<<<<<<<<<<
 *     ERROR_GPU_NOT_FOUND = NVML_ERROR_GPU_NOT_FOUND
 *     ERROR_INVALID_STATE = NVML_ERROR_INVALID_STATE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_NOT_READY); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_NOT_READY, __pyx_t_11) < (0)) __PYX_ERR(0, 382, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":383
 *     ERROR_DEPRECATED = NVML_ERROR_DEPRECATED
 *     ERROR_NOT_READY = NVML_ERROR_NOT_READY
 *     ERROR_GPU_NOT_FOUND = NVML_ERROR_GPU_NOT_FOUND             # <<<<<<<<<<<<<<
 *     ERROR_INVALID_STATE = NVML_ERROR_INVALID_STATE
 *     ERROR_RESET_TYPE_NOT_SUPPORTED = NVML_ERROR_RESET_TYPE_NOT_SUPPORTED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_GPU_NOT_FOUND); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 383, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_GPU_NOT_FOUND, __pyx_t_11) < (0)) __PYX_ERR(0, 383, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":384
 *     ERROR_NOT_READY = NVML_ERROR_NOT_READY
 *     ERROR_GPU_NOT_FOUND = NVML_ERROR_GPU_NOT_FOUND
 *     ERROR_INVALID_STATE = NVML_ERROR_INVALID_STATE             # <<<<<<<<<<<<<<
 *     ERROR_RESET_TYPE_NOT_SUPPORTED = NVML_ERROR_RESET_TYPE_NOT_SUPPORTED
 *     ERROR_UNKNOWN = NVML_ERROR_UNKNOWN
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_INVALID_STATE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 384, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_INVALID_STATE, __pyx_t_11) < (0)) __PYX_ERR(0, 384, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":385
 *     ERROR_GPU_NOT_FOUND = NVML_ERROR_GPU_NOT_FOUND
 *     ERROR_INVALID_STATE = NVML_ERROR_INVALID_STATE
 *     ERROR_RESET_TYPE_NOT_SUPPORTED = NVML_ERROR_RESET_TYPE_NOT_SUPPORTED             # <<<<<<<<<<<<<<
 *     ERROR_UNKNOWN = NVML_ERROR_UNKNOWN
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_RESET_TYPE_NOT_SUPPORTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 385, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_RESET_TYPE_NOT_SUPPORTED, __pyx_t_11) < (0)) __PYX_ERR(0, 385, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":386
 *     ERROR_INVALID_STATE = NVML_ERROR_INVALID_STATE
 *     ERROR_RESET_TYPE_NOT_SUPPORTED = NVML_ERROR_RESET_TYPE_NOT_SUPPORTED
 *     ERROR_UNKNOWN = NVML_ERROR_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class MemoryLocation(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlReturn_t(NVML_ERROR_UNKNOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 386, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ERROR_UNKNOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 386, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":353
 *     INFOROM_COUNT = NVML_INFOROM_COUNT
 * 
 * class Return(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlReturn_t`."""
 *     SUCCESS = NVML_SUCCESS
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_Return, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_Return, __pyx_t_11) < (0)) __PYX_ERR(0, 353, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":388
 *     ERROR_UNKNOWN = NVML_ERROR_UNKNOWN
 * 
 * class MemoryLocation(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlMemoryLocation_t`."""
 *     L1_CACHE = NVML_MEMORY_LOCATION_L1_CACHE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryLocation, __pyx_mstate_global->__pyx_n_u_MemoryLocation, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlMemoryLocation_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 388, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":390
 * class MemoryLocation(_IntEnum):
 *     """See `nvmlMemoryLocation_t`."""
 *     L1_CACHE = NVML_MEMORY_LOCATION_L1_CACHE             # <<<<<<<<<<<<<<
 *     L2_CACHE = NVML_MEMORY_LOCATION_L2_CACHE
 *     DRAM = NVML_MEMORY_LOCATION_DRAM
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_L1_CACHE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 390, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_L1_CACHE, __pyx_t_5) < (0)) __PYX_ERR(0, 390, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":391
 *     """See `nvmlMemoryLocation_t`."""
 *     L1_CACHE = NVML_MEMORY_LOCATION_L1_CACHE
 *     L2_CACHE = NVML_MEMORY_LOCATION_L2_CACHE             # <<<<<<<<<<<<<<
 *     DRAM = NVML_MEMORY_LOCATION_DRAM
 *     DEVICE_MEMORY = NVML_MEMORY_LOCATION_DEVICE_MEMORY
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_L2_CACHE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_L2_CACHE, __pyx_t_5) < (0)) __PYX_ERR(0, 391, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":392
 *     L1_CACHE = NVML_MEMORY_LOCATION_L1_CACHE
 *     L2_CACHE = NVML_MEMORY_LOCATION_L2_CACHE
 *     DRAM = NVML_MEMORY_LOCATION_DRAM             # <<<<<<<<<<<<<<
 *     DEVICE_MEMORY = NVML_MEMORY_LOCATION_DEVICE_MEMORY
 *     REGISTER_FILE = NVML_MEMORY_LOCATION_REGISTER_FILE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_DRAM); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DRAM, __pyx_t_5) < (0)) __PYX_ERR(0, 392, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":393
 *     L2_CACHE = NVML_MEMORY_LOCATION_L2_CACHE
 *     DRAM = NVML_MEMORY_LOCATION_DRAM
 *     DEVICE_MEMORY = NVML_MEMORY_LOCATION_DEVICE_MEMORY             # <<<<<<<<<<<<<<
 *     REGISTER_FILE = NVML_MEMORY_LOCATION_REGISTER_FILE
 *     TEXTURE_MEMORY = NVML_MEMORY_LOCATION_TEXTURE_MEMORY
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_DEVICE_MEMORY); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEVICE_MEMORY, __pyx_t_5) < (0)) __PYX_ERR(0, 393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":394
 *     DRAM = NVML_MEMORY_LOCATION_DRAM
 *     DEVICE_MEMORY = NVML_MEMORY_LOCATION_DEVICE_MEMORY
 *     REGISTER_FILE = NVML_MEMORY_LOCATION_REGISTER_FILE             # <<<<<<<<<<<<<<
 *     TEXTURE_MEMORY = NVML_MEMORY_LOCATION_TEXTURE_MEMORY
 *     TEXTURE_SHM = NVML_MEMORY_LOCATION_TEXTURE_SHM
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_REGISTER_FILE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 394, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_REGISTER_FILE, __pyx_t_5) < (0)) __PYX_ERR(0, 394, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":395
 *     DEVICE_MEMORY = NVML_MEMORY_LOCATION_DEVICE_MEMORY
 *     REGISTER_FILE = NVML_MEMORY_LOCATION_REGISTER_FILE
 *     TEXTURE_MEMORY = NVML_MEMORY_LOCATION_TEXTURE_MEMORY             # <<<<<<<<<<<<<<
 *     TEXTURE_SHM = NVML_MEMORY_LOCATION_TEXTURE_SHM
 *     CBU = NVML_MEMORY_LOCATION_CBU
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_TEXTURE_MEMORY); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 395, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TEXTURE_MEMORY, __pyx_t_5) < (0)) __PYX_ERR(0, 395, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":396
 *     REGISTER_FILE = NVML_MEMORY_LOCATION_REGISTER_FILE
 *     TEXTURE_MEMORY = NVML_MEMORY_LOCATION_TEXTURE_MEMORY
 *     TEXTURE_SHM = NVML_MEMORY_LOCATION_TEXTURE_SHM             # <<<<<<<<<<<<<<
 *     CBU = NVML_MEMORY_LOCATION_CBU
 *     SRAM = NVML_MEMORY_LOCATION_SRAM
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_TEXTURE_SHM); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TEXTURE_SHM, __pyx_t_5) < (0)) __PYX_ERR(0, 396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":397
 *     TEXTURE_MEMORY = NVML_MEMORY_LOCATION_TEXTURE_MEMORY
 *     TEXTURE_SHM = NVML_MEMORY_LOCATION_TEXTURE_SHM
 *     CBU = NVML_MEMORY_LOCATION_CBU             # <<<<<<<<<<<<<<
 *     SRAM = NVML_MEMORY_LOCATION_SRAM
 *     COUNT = NVML_MEMORY_LOCATION_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_CBU); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_CBU, __pyx_t_5) < (0)) __PYX_ERR(0, 397, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":398
 *     TEXTURE_SHM = NVML_MEMORY_LOCATION_TEXTURE_SHM
 *     CBU = NVML_MEMORY_LOCATION_CBU
 *     SRAM = NVML_MEMORY_LOCATION_SRAM             # <<<<<<<<<<<<<<
 *     COUNT = NVML_MEMORY_LOCATION_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_SRAM); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_SRAM, __pyx_t_5) < (0)) __PYX_ERR(0, 398, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":399
 *     CBU = NVML_MEMORY_LOCATION_CBU
 *     SRAM = NVML_MEMORY_LOCATION_SRAM
 *     COUNT = NVML_MEMORY_LOCATION_COUNT             # <<<<<<<<<<<<<<
 * 
 * class PageRetirementCause(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlMemoryLocation_t(NVML_MEMORY_LOCATION_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 399, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 399, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":388
 *     ERROR_UNKNOWN = NVML_ERROR_UNKNOWN
 * 
 * class MemoryLocation(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlMemoryLocation_t`."""
 *     L1_CACHE = NVML_MEMORY_LOCATION_L1_CACHE
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_MemoryLocation, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_MemoryLocation, __pyx_t_5) < (0)) __PYX_ERR(0, 388, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":401
 *     COUNT = NVML_MEMORY_LOCATION_COUNT
 * 
 * class PageRetirementCause(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPageRetirementCause_t`."""
 *     MULTIPLE_SINGLE_BIT_ECC_ERRORS = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_PageRetirementCause, __pyx_mstate_global->__pyx_n_u_PageRetirementCause, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlPageRetirementCause_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 401, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":403
 * class PageRetirementCause(_IntEnum):
 *     """See `nvmlPageRetirementCause_t`."""
 *     MULTIPLE_SINGLE_BIT_ECC_ERRORS = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS             # <<<<<<<<<<<<<<
 *     DOUBLE_BIT_ECC_ERROR = NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR
 *     COUNT = NVML_PAGE_RETIREMENT_CAUSE_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlPageRetirementCause_t(NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_MULTIPLE_SINGLE_BIT_ECC_ERRORS, __pyx_t_10) < (0)) __PYX_ERR(0, 403, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":404
 *     """See `nvmlPageRetirementCause_t`."""
 *     MULTIPLE_SINGLE_BIT_ECC_ERRORS = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS
 *     DOUBLE_BIT_ECC_ERROR = NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR             # <<<<<<<<<<<<<<
 *     COUNT = NVML_PAGE_RETIREMENT_CAUSE_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlPageRetirementCause_t(NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_DOUBLE_BIT_ECC_ERROR, __pyx_t_10) < (0)) __PYX_ERR(0, 404, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":405
 *     MULTIPLE_SINGLE_BIT_ECC_ERRORS = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS
 *     DOUBLE_BIT_ECC_ERROR = NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR
 *     COUNT = NVML_PAGE_RETIREMENT_CAUSE_COUNT             # <<<<<<<<<<<<<<
 * 
 * class RestrictedAPI(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlPageRetirementCause_t(NVML_PAGE_RETIREMENT_CAUSE_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":401
 *     COUNT = NVML_MEMORY_LOCATION_COUNT
 * 
 * class PageRetirementCause(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPageRetirementCause_t`."""
 *     MULTIPLE_SINGLE_BIT_ECC_ERRORS = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PageRetirementCause, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_PageRetirementCause, __pyx_t_10) < (0)) __PYX_ERR(0, 401, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":407
 *     COUNT = NVML_PAGE_RETIREMENT_CAUSE_COUNT
 * 
 * class RestrictedAPI(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlRestrictedAPI_t`."""
 *     SET_APPLICATION_CLOCKS = NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_RestrictedAPI, __pyx_mstate_global->__pyx_n_u_RestrictedAPI, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlRestrictedAPI_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 407, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":409
 * class RestrictedAPI(_IntEnum):
 *     """See `nvmlRestrictedAPI_t`."""
 *     SET_APPLICATION_CLOCKS = NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS             # <<<<<<<<<<<<<<
 *     SET_AUTO_BOOSTED_CLOCKS = NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS
 *     COUNT = NVML_RESTRICTED_API_COUNT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlRestrictedAPI_t(NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 409, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SET_APPLICATION_CLOCKS, __pyx_t_11) < (0)) __PYX_ERR(0, 409, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":410
 *     """See `nvmlRestrictedAPI_t`."""
 *     SET_APPLICATION_CLOCKS = NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS
 *     SET_AUTO_BOOSTED_CLOCKS = NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS             # <<<<<<<<<<<<<<
 *     COUNT = NVML_RESTRICTED_API_COUNT
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlRestrictedAPI_t(NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 410, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SET_AUTO_BOOSTED_CLOCKS, __pyx_t_11) < (0)) __PYX_ERR(0, 410, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":411
 *     SET_APPLICATION_CLOCKS = NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS
 *     SET_AUTO_BOOSTED_CLOCKS = NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS
 *     COUNT = NVML_RESTRICTED_API_COUNT             # <<<<<<<<<<<<<<
 * 
 * class GpuUtilizationDomainId(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlRestrictedAPI_t(NVML_RESTRICTED_API_COUNT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 411, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_COUNT, __pyx_t_11) < (0)) __PYX_ERR(0, 411, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":407
 *     COUNT = NVML_PAGE_RETIREMENT_CAUSE_COUNT
 * 
 * class RestrictedAPI(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlRestrictedAPI_t`."""
 *     SET_APPLICATION_CLOCKS = NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_RestrictedAPI, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_RestrictedAPI, __pyx_t_11) < (0)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":413
 *     COUNT = NVML_RESTRICTED_API_COUNT
 * 
 * class GpuUtilizationDomainId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuUtilizationDomainId_t`."""
 *     GPU_UTILIZATION_DOMAIN_GPU = NVML_GPU_UTILIZATION_DOMAIN_GPU
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpuUtilizationDomainId, __pyx_mstate_global->__pyx_n_u_GpuUtilizationDomainId, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpuUtilizationDomainId_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 413, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":415
 * class GpuUtilizationDomainId(_IntEnum):
 *     """See `nvmlGpuUtilizationDomainId_t`."""
 *     GPU_UTILIZATION_DOMAIN_GPU = NVML_GPU_UTILIZATION_DOMAIN_GPU             # <<<<<<<<<<<<<<
 *     GPU_UTILIZATION_DOMAIN_FB = NVML_GPU_UTILIZATION_DOMAIN_FB
 *     GPU_UTILIZATION_DOMAIN_VID = NVML_GPU_UTILIZATION_DOMAIN_VID
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuUtilizationDomainId_t(NVML_GPU_UTILIZATION_DOMAIN_GPU); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 415, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_UTILIZATION_DOMAIN_GPU, __pyx_t_5) < (0)) __PYX_ERR(0, 415, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":416
 *     """See `nvmlGpuUtilizationDomainId_t`."""
 *     GPU_UTILIZATION_DOMAIN_GPU = NVML_GPU_UTILIZATION_DOMAIN_GPU
 *     GPU_UTILIZATION_DOMAIN_FB = NVML_GPU_UTILIZATION_DOMAIN_FB             # <<<<<<<<<<<<<<
 *     GPU_UTILIZATION_DOMAIN_VID = NVML_GPU_UTILIZATION_DOMAIN_VID
 *     GPU_UTILIZATION_DOMAIN_BUS = NVML_GPU_UTILIZATION_DOMAIN_BUS
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuUtilizationDomainId_t(NVML_GPU_UTILIZATION_DOMAIN_FB); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_UTILIZATION_DOMAIN_FB, __pyx_t_5) < (0)) __PYX_ERR(0, 416, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":417
 *     GPU_UTILIZATION_DOMAIN_GPU = NVML_GPU_UTILIZATION_DOMAIN_GPU
 *     GPU_UTILIZATION_DOMAIN_FB = NVML_GPU_UTILIZATION_DOMAIN_FB
 *     GPU_UTILIZATION_DOMAIN_VID = NVML_GPU_UTILIZATION_DOMAIN_VID             # <<<<<<<<<<<<<<
 *     GPU_UTILIZATION_DOMAIN_BUS = NVML_GPU_UTILIZATION_DOMAIN_BUS
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuUtilizationDomainId_t(NVML_GPU_UTILIZATION_DOMAIN_VID); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_UTILIZATION_DOMAIN_VID, __pyx_t_5) < (0)) __PYX_ERR(0, 417, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":418
 *     GPU_UTILIZATION_DOMAIN_FB = NVML_GPU_UTILIZATION_DOMAIN_FB
 *     GPU_UTILIZATION_DOMAIN_VID = NVML_GPU_UTILIZATION_DOMAIN_VID
 *     GPU_UTILIZATION_DOMAIN_BUS = NVML_GPU_UTILIZATION_DOMAIN_BUS             # <<<<<<<<<<<<<<
 * 
 * class GpuVirtualizationMode(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlGpuUtilizationDomainId_t(NVML_GPU_UTILIZATION_DOMAIN_BUS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_UTILIZATION_DOMAIN_BUS, __pyx_t_5) < (0)) __PYX_ERR(0, 418, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":413
 *     COUNT = NVML_RESTRICTED_API_COUNT
 * 
 * class GpuUtilizationDomainId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuUtilizationDomainId_t`."""
 *     GPU_UTILIZATION_DOMAIN_GPU = NVML_GPU_UTILIZATION_DOMAIN_GPU
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GpuUtilizationDomainId, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuUtilizationDomainId, __pyx_t_5) < (0)) __PYX_ERR(0, 413, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":420
 *     GPU_UTILIZATION_DOMAIN_BUS = NVML_GPU_UTILIZATION_DOMAIN_BUS
 * 
 * class GpuVirtualizationMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuVirtualizationMode_t`."""
 *     NONE = NVML_GPU_VIRTUALIZATION_MODE_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpuVirtualizationMode, __pyx_mstate_global->__pyx_n_u_GpuVirtualizationMode, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpuVirtualizationMode_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 420, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":422
 * class GpuVirtualizationMode(_IntEnum):
 *     """See `nvmlGpuVirtualizationMode_t`."""
 *     NONE = NVML_GPU_VIRTUALIZATION_MODE_NONE             # <<<<<<<<<<<<<<
 *     PASSTHROUGH = NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH
 *     VGPU = NVML_GPU_VIRTUALIZATION_MODE_VGPU
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(NVML_GPU_VIRTUALIZATION_MODE_NONE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NONE, __pyx_t_10) < (0)) __PYX_ERR(0, 422, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":423
 *     """See `nvmlGpuVirtualizationMode_t`."""
 *     NONE = NVML_GPU_VIRTUALIZATION_MODE_NONE
 *     PASSTHROUGH = NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH             # <<<<<<<<<<<<<<
 *     VGPU = NVML_GPU_VIRTUALIZATION_MODE_VGPU
 *     HOST_VGPU = NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 423, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_PASSTHROUGH, __pyx_t_10) < (0)) __PYX_ERR(0, 423, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":424
 *     NONE = NVML_GPU_VIRTUALIZATION_MODE_NONE
 *     PASSTHROUGH = NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH
 *     VGPU = NVML_GPU_VIRTUALIZATION_MODE_VGPU             # <<<<<<<<<<<<<<
 *     HOST_VGPU = NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU
 *     HOST_VSGA = NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(NVML_GPU_VIRTUALIZATION_MODE_VGPU); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 424, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VGPU, __pyx_t_10) < (0)) __PYX_ERR(0, 424, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":425
 *     PASSTHROUGH = NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH
 *     VGPU = NVML_GPU_VIRTUALIZATION_MODE_VGPU
 *     HOST_VGPU = NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU             # <<<<<<<<<<<<<<
 *     HOST_VSGA = NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 425, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_HOST_VGPU, __pyx_t_10) < (0)) __PYX_ERR(0, 425, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":426
 *     VGPU = NVML_GPU_VIRTUALIZATION_MODE_VGPU
 *     HOST_VGPU = NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU
 *     HOST_VSGA = NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA             # <<<<<<<<<<<<<<
 * 
 * class HostVgpuMode(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 426, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_HOST_VSGA, __pyx_t_10) < (0)) __PYX_ERR(0, 426, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":420
 *     GPU_UTILIZATION_DOMAIN_BUS = NVML_GPU_UTILIZATION_DOMAIN_BUS
 * 
 * class GpuVirtualizationMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpuVirtualizationMode_t`."""
 *     NONE = NVML_GPU_VIRTUALIZATION_MODE_NONE
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GpuVirtualizationMode, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuVirtualizationMode, __pyx_t_10) < (0)) __PYX_ERR(0, 420, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":428
 *     HOST_VSGA = NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA
 * 
 * class HostVgpuMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlHostVgpuMode_t`."""
 *     NON_SRIOV = NVML_HOST_VGPU_MODE_NON_SRIOV
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_HostVgpuMode, __pyx_mstate_global->__pyx_n_u_HostVgpuMode, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlHostVgpuMode_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 428, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":430
 * class HostVgpuMode(_IntEnum):
 *     """See `nvmlHostVgpuMode_t`."""
 *     NON_SRIOV = NVML_HOST_VGPU_MODE_NON_SRIOV             # <<<<<<<<<<<<<<
 *     SRIOV = NVML_HOST_VGPU_MODE_SRIOV
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlHostVgpuMode_t(NVML_HOST_VGPU_MODE_NON_SRIOV); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 430, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NON_SRIOV, __pyx_t_11) < (0)) __PYX_ERR(0, 430, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":431
 *     """See `nvmlHostVgpuMode_t`."""
 *     NON_SRIOV = NVML_HOST_VGPU_MODE_NON_SRIOV
 *     SRIOV = NVML_HOST_VGPU_MODE_SRIOV             # <<<<<<<<<<<<<<
 * 
 * class VgpuVmIdType(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlHostVgpuMode_t(NVML_HOST_VGPU_MODE_SRIOV); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 431, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SRIOV, __pyx_t_11) < (0)) __PYX_ERR(0, 431, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":428
 *     HOST_VSGA = NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA
 * 
 * class HostVgpuMode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlHostVgpuMode_t`."""
 *     NON_SRIOV = NVML_HOST_VGPU_MODE_NON_SRIOV
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_HostVgpuMode, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_HostVgpuMode, __pyx_t_11) < (0)) __PYX_ERR(0, 428, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":433
 *     SRIOV = NVML_HOST_VGPU_MODE_SRIOV
 * 
 * class VgpuVmIdType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuVmIdType_t`."""
 *     VGPU_VM_ID_DOMAIN_ID = NVML_VGPU_VM_ID_DOMAIN_ID
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_VgpuVmIdType, __pyx_mstate_global->__pyx_n_u_VgpuVmIdType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlVgpuVmIdType_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 433, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":435
 * class VgpuVmIdType(_IntEnum):
 *     """See `nvmlVgpuVmIdType_t`."""
 *     VGPU_VM_ID_DOMAIN_ID = NVML_VGPU_VM_ID_DOMAIN_ID             # <<<<<<<<<<<<<<
 *     VGPU_VM_ID_UUID = NVML_VGPU_VM_ID_UUID
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuVmIdType_t(NVML_VGPU_VM_ID_DOMAIN_ID); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_VM_ID_DOMAIN_ID, __pyx_t_5) < (0)) __PYX_ERR(0, 435, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":436
 *     """See `nvmlVgpuVmIdType_t`."""
 *     VGPU_VM_ID_DOMAIN_ID = NVML_VGPU_VM_ID_DOMAIN_ID
 *     VGPU_VM_ID_UUID = NVML_VGPU_VM_ID_UUID             # <<<<<<<<<<<<<<
 * 
 * class VgpuGuestInfoState(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuVmIdType_t(NVML_VGPU_VM_ID_UUID); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 436, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_VM_ID_UUID, __pyx_t_5) < (0)) __PYX_ERR(0, 436, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":433
 *     SRIOV = NVML_HOST_VGPU_MODE_SRIOV
 * 
 * class VgpuVmIdType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuVmIdType_t`."""
 *     VGPU_VM_ID_DOMAIN_ID = NVML_VGPU_VM_ID_DOMAIN_ID
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_VgpuVmIdType, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuVmIdType, __pyx_t_5) < (0)) __PYX_ERR(0, 433, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":438
 *     VGPU_VM_ID_UUID = NVML_VGPU_VM_ID_UUID
 * 
 * class VgpuGuestInfoState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuGuestInfoState_t`."""
 *     VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_VgpuGuestInfoState, __pyx_mstate_global->__pyx_n_u_VgpuGuestInfoState, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlVgpuGuestInfoState_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 438, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":440
 * class VgpuGuestInfoState(_IntEnum):
 *     """See `nvmlVgpuGuestInfoState_t`."""
 *     VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED             # <<<<<<<<<<<<<<
 *     VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlVgpuGuestInfoState_t(NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 440, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VGPU_INSTANCE_GUEST_INFO_STATE_U, __pyx_t_10) < (0)) __PYX_ERR(0, 440, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":441
 *     """See `nvmlVgpuGuestInfoState_t`."""
 *     VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED
 *     VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED             # <<<<<<<<<<<<<<
 * 
 * class GridLicenseFeatureCode(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlVgpuGuestInfoState_t(NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VGPU_INSTANCE_GUEST_INFO_STATE_I, __pyx_t_10) < (0)) __PYX_ERR(0, 441, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":438
 *     VGPU_VM_ID_UUID = NVML_VGPU_VM_ID_UUID
 * 
 * class VgpuGuestInfoState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuGuestInfoState_t`."""
 *     VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VgpuGuestInfoState, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuGuestInfoState, __pyx_t_10) < (0)) __PYX_ERR(0, 438, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":443
 *     VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED
 * 
 * class GridLicenseFeatureCode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGridLicenseFeatureCode_t`."""
 *     UNKNOWN = NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GridLicenseFeatureCode, __pyx_mstate_global->__pyx_n_u_GridLicenseFeatureCode, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGridLicenseFeatureCode_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 443, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":445
 * class GridLicenseFeatureCode(_IntEnum):
 *     """See `nvmlGridLicenseFeatureCode_t`."""
 *     UNKNOWN = NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN             # <<<<<<<<<<<<<<
 *     VGPU = NVML_GRID_LICENSE_FEATURE_CODE_VGPU
 *     NVIDIA_RTX = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_UNKNOWN, __pyx_t_11) < (0)) __PYX_ERR(0, 445, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":446
 *     """See `nvmlGridLicenseFeatureCode_t`."""
 *     UNKNOWN = NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN
 *     VGPU = NVML_GRID_LICENSE_FEATURE_CODE_VGPU             # <<<<<<<<<<<<<<
 *     NVIDIA_RTX = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX
 *     VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(NVML_GRID_LICENSE_FEATURE_CODE_VGPU); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_VGPU, __pyx_t_11) < (0)) __PYX_ERR(0, 446, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":447
 *     UNKNOWN = NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN
 *     VGPU = NVML_GRID_LICENSE_FEATURE_CODE_VGPU
 *     NVIDIA_RTX = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX             # <<<<<<<<<<<<<<
 *     VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION
 *     GAMING = NVML_GRID_LICENSE_FEATURE_CODE_GAMING
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NVIDIA_RTX, __pyx_t_11) < (0)) __PYX_ERR(0, 447, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":448
 *     VGPU = NVML_GRID_LICENSE_FEATURE_CODE_VGPU
 *     NVIDIA_RTX = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX
 *     VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION             # <<<<<<<<<<<<<<
 *     GAMING = NVML_GRID_LICENSE_FEATURE_CODE_GAMING
 *     COMPUTE = NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 448, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_VWORKSTATION, __pyx_t_11) < (0)) __PYX_ERR(0, 448, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":449
 *     NVIDIA_RTX = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX
 *     VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION
 *     GAMING = NVML_GRID_LICENSE_FEATURE_CODE_GAMING             # <<<<<<<<<<<<<<
 *     COMPUTE = NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(NVML_GRID_LICENSE_FEATURE_CODE_GAMING); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 449, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GAMING, __pyx_t_11) < (0)) __PYX_ERR(0, 449, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":450
 *     VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION
 *     GAMING = NVML_GRID_LICENSE_FEATURE_CODE_GAMING
 *     COMPUTE = NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE             # <<<<<<<<<<<<<<
 * 
 * class VgpuCapability(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_COMPUTE, __pyx_t_11) < (0)) __PYX_ERR(0, 450, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":443
 *     VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED
 * 
 * class GridLicenseFeatureCode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGridLicenseFeatureCode_t`."""
 *     UNKNOWN = NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GridLicenseFeatureCode, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GridLicenseFeatureCode, __pyx_t_11) < (0)) __PYX_ERR(0, 443, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":452
 *     COMPUTE = NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE
 * 
 * class VgpuCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuCapability_t`."""
 *     VGPU_CAP_NVLINK_P2P = NVML_VGPU_CAP_NVLINK_P2P
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_VgpuCapability, __pyx_mstate_global->__pyx_n_u_VgpuCapability, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlVgpuCapability_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 452, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":454
 * class VgpuCapability(_IntEnum):
 *     """See `nvmlVgpuCapability_t`."""
 *     VGPU_CAP_NVLINK_P2P = NVML_VGPU_CAP_NVLINK_P2P             # <<<<<<<<<<<<<<
 *     VGPU_CAP_GPUDIRECT = NVML_VGPU_CAP_GPUDIRECT
 *     VGPU_CAP_MULTI_VGPU_EXCLUSIVE = NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuCapability_t(NVML_VGPU_CAP_NVLINK_P2P); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_CAP_NVLINK_P2P, __pyx_t_5) < (0)) __PYX_ERR(0, 454, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":455
 *     """See `nvmlVgpuCapability_t`."""
 *     VGPU_CAP_NVLINK_P2P = NVML_VGPU_CAP_NVLINK_P2P
 *     VGPU_CAP_GPUDIRECT = NVML_VGPU_CAP_GPUDIRECT             # <<<<<<<<<<<<<<
 *     VGPU_CAP_MULTI_VGPU_EXCLUSIVE = NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE
 *     VGPU_CAP_EXCLUSIVE_TYPE = NVML_VGPU_CAP_EXCLUSIVE_TYPE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuCapability_t(NVML_VGPU_CAP_GPUDIRECT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_CAP_GPUDIRECT, __pyx_t_5) < (0)) __PYX_ERR(0, 455, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":456
 *     VGPU_CAP_NVLINK_P2P = NVML_VGPU_CAP_NVLINK_P2P
 *     VGPU_CAP_GPUDIRECT = NVML_VGPU_CAP_GPUDIRECT
 *     VGPU_CAP_MULTI_VGPU_EXCLUSIVE = NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE             # <<<<<<<<<<<<<<
 *     VGPU_CAP_EXCLUSIVE_TYPE = NVML_VGPU_CAP_EXCLUSIVE_TYPE
 *     VGPU_CAP_EXCLUSIVE_SIZE = NVML_VGPU_CAP_EXCLUSIVE_SIZE
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuCapability_t(NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_CAP_MULTI_VGPU_EXCLUSIVE, __pyx_t_5) < (0)) __PYX_ERR(0, 456, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":457
 *     VGPU_CAP_GPUDIRECT = NVML_VGPU_CAP_GPUDIRECT
 *     VGPU_CAP_MULTI_VGPU_EXCLUSIVE = NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE
 *     VGPU_CAP_EXCLUSIVE_TYPE = NVML_VGPU_CAP_EXCLUSIVE_TYPE             # <<<<<<<<<<<<<<
 *     VGPU_CAP_EXCLUSIVE_SIZE = NVML_VGPU_CAP_EXCLUSIVE_SIZE
 *     VGPU_CAP_COUNT = NVML_VGPU_CAP_COUNT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuCapability_t(NVML_VGPU_CAP_EXCLUSIVE_TYPE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_CAP_EXCLUSIVE_TYPE, __pyx_t_5) < (0)) __PYX_ERR(0, 457, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":458
 *     VGPU_CAP_MULTI_VGPU_EXCLUSIVE = NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE
 *     VGPU_CAP_EXCLUSIVE_TYPE = NVML_VGPU_CAP_EXCLUSIVE_TYPE
 *     VGPU_CAP_EXCLUSIVE_SIZE = NVML_VGPU_CAP_EXCLUSIVE_SIZE             # <<<<<<<<<<<<<<
 *     VGPU_CAP_COUNT = NVML_VGPU_CAP_COUNT
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuCapability_t(NVML_VGPU_CAP_EXCLUSIVE_SIZE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 458, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_CAP_EXCLUSIVE_SIZE, __pyx_t_5) < (0)) __PYX_ERR(0, 458, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":459
 *     VGPU_CAP_EXCLUSIVE_TYPE = NVML_VGPU_CAP_EXCLUSIVE_TYPE
 *     VGPU_CAP_EXCLUSIVE_SIZE = NVML_VGPU_CAP_EXCLUSIVE_SIZE
 *     VGPU_CAP_COUNT = NVML_VGPU_CAP_COUNT             # <<<<<<<<<<<<<<
 * 
 * class VgpuDriverCapability(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuCapability_t(NVML_VGPU_CAP_COUNT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 459, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_CAP_COUNT, __pyx_t_5) < (0)) __PYX_ERR(0, 459, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":452
 *     COMPUTE = NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE
 * 
 * class VgpuCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuCapability_t`."""
 *     VGPU_CAP_NVLINK_P2P = NVML_VGPU_CAP_NVLINK_P2P
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_VgpuCapability, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuCapability, __pyx_t_5) < (0)) __PYX_ERR(0, 452, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":461
 *     VGPU_CAP_COUNT = NVML_VGPU_CAP_COUNT
 * 
 * class VgpuDriverCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuDriverCapability_t`."""
 *     VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_VgpuDriverCapability, __pyx_mstate_global->__pyx_n_u_VgpuDriverCapability, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlVgpuDriverCapability_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 461, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":463
 * class VgpuDriverCapability(_IntEnum):
 *     """See `nvmlVgpuDriverCapability_t`."""
 *     VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU             # <<<<<<<<<<<<<<
 *     VGPU_DRIVER_CAP_WARM_UPDATE = NVML_VGPU_DRIVER_CAP_WARM_UPDATE
 *     VGPU_DRIVER_CAP_COUNT = NVML_VGPU_DRIVER_CAP_COUNT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlVgpuDriverCapability_t(NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 463, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VGPU_DRIVER_CAP_HETEROGENEOUS_MU, __pyx_t_10) < (0)) __PYX_ERR(0, 463, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":464
 *     """See `nvmlVgpuDriverCapability_t`."""
 *     VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU
 *     VGPU_DRIVER_CAP_WARM_UPDATE = NVML_VGPU_DRIVER_CAP_WARM_UPDATE             # <<<<<<<<<<<<<<
 *     VGPU_DRIVER_CAP_COUNT = NVML_VGPU_DRIVER_CAP_COUNT
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlVgpuDriverCapability_t(NVML_VGPU_DRIVER_CAP_WARM_UPDATE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VGPU_DRIVER_CAP_WARM_UPDATE, __pyx_t_10) < (0)) __PYX_ERR(0, 464, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":465
 *     VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU
 *     VGPU_DRIVER_CAP_WARM_UPDATE = NVML_VGPU_DRIVER_CAP_WARM_UPDATE
 *     VGPU_DRIVER_CAP_COUNT = NVML_VGPU_DRIVER_CAP_COUNT             # <<<<<<<<<<<<<<
 * 
 * class DeviceVgpuCapability(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlVgpuDriverCapability_t(NVML_VGPU_DRIVER_CAP_COUNT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 465, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VGPU_DRIVER_CAP_COUNT, __pyx_t_10) < (0)) __PYX_ERR(0, 465, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":461
 *     VGPU_CAP_COUNT = NVML_VGPU_CAP_COUNT
 * 
 * class VgpuDriverCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuDriverCapability_t`."""
 *     VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VgpuDriverCapability, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuDriverCapability, __pyx_t_10) < (0)) __PYX_ERR(0, 461, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":467
 *     VGPU_DRIVER_CAP_COUNT = NVML_VGPU_DRIVER_CAP_COUNT
 * 
 * class DeviceVgpuCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDeviceVgpuCapability_t`."""
 *     DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_DeviceVgpuCapability, __pyx_mstate_global->__pyx_n_u_DeviceVgpuCapability, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlDeviceVgpuCapability_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 467, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":469
 * class DeviceVgpuCapability(_IntEnum):
 *     """See `nvmlDeviceVgpuCapability_t`."""
 *     DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 469, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_FRACTIONAL_MULTI, __pyx_t_11) < (0)) __PYX_ERR(0, 469, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":470
 *     """See `nvmlDeviceVgpuCapability_t`."""
 *     DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES
 *     DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 470, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_HETEROGENEOUS_TI, __pyx_t_11) < (0)) __PYX_ERR(0, 470, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":471
 *     DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW
 *     DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 471, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_HETEROGENEOUS_TI_2, __pyx_t_11) < (0)) __PYX_ERR(0, 471, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":472
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES
 *     DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW
 *     DEVICE_VGPU_CAP_DEVICE_STREAMING = NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_READ_DEVICE_BUFF, __pyx_t_11) < (0)) __PYX_ERR(0, 472, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":473
 *     DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES
 *     DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW
 *     DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_DEVICE_STREAMING = NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING
 *     DEVICE_VGPU_CAP_MINI_QUARTER_GPU = NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_WRITE_DEVICE_BUF, __pyx_t_11) < (0)) __PYX_ERR(0, 473, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":474
 *     DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW
 *     DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW
 *     DEVICE_VGPU_CAP_DEVICE_STREAMING = NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_MINI_QUARTER_GPU = NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU
 *     DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 474, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_DEVICE_STREAMING, __pyx_t_11) < (0)) __PYX_ERR(0, 474, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":475
 *     DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW
 *     DEVICE_VGPU_CAP_DEVICE_STREAMING = NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING
 *     DEVICE_VGPU_CAP_MINI_QUARTER_GPU = NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU
 *     DEVICE_VGPU_CAP_WARM_UPDATE = NVML_DEVICE_VGPU_CAP_WARM_UPDATE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 475, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_MINI_QUARTER_GPU, __pyx_t_11) < (0)) __PYX_ERR(0, 475, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":476
 *     DEVICE_VGPU_CAP_DEVICE_STREAMING = NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING
 *     DEVICE_VGPU_CAP_MINI_QUARTER_GPU = NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU
 *     DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_WARM_UPDATE = NVML_DEVICE_VGPU_CAP_WARM_UPDATE
 *     DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_COMPUTE_MEDIA_EN, __pyx_t_11) < (0)) __PYX_ERR(0, 476, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":477
 *     DEVICE_VGPU_CAP_MINI_QUARTER_GPU = NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU
 *     DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU
 *     DEVICE_VGPU_CAP_WARM_UPDATE = NVML_DEVICE_VGPU_CAP_WARM_UPDATE             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_WARM_UPDATE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 477, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_WARM_UPDATE, __pyx_t_11) < (0)) __PYX_ERR(0, 477, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":478
 *     DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU
 *     DEVICE_VGPU_CAP_WARM_UPDATE = NVML_DEVICE_VGPU_CAP_WARM_UPDATE
 *     DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 478, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_HOMOGENEOUS_PLAC, __pyx_t_11) < (0)) __PYX_ERR(0, 478, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":479
 *     DEVICE_VGPU_CAP_WARM_UPDATE = NVML_DEVICE_VGPU_CAP_WARM_UPDATE
 *     DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED
 *     DEVICE_VGPU_CAP_COUNT = NVML_DEVICE_VGPU_CAP_COUNT
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_MIG_TIMESLICING, __pyx_t_11) < (0)) __PYX_ERR(0, 479, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":480
 *     DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED             # <<<<<<<<<<<<<<
 *     DEVICE_VGPU_CAP_COUNT = NVML_DEVICE_VGPU_CAP_COUNT
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_MIG_TIMESLICING_2, __pyx_t_11) < (0)) __PYX_ERR(0, 480, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":481
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED
 *     DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED
 *     DEVICE_VGPU_CAP_COUNT = NVML_DEVICE_VGPU_CAP_COUNT             # <<<<<<<<<<<<<<
 * 
 * class DeviceGpuRecoveryAction(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(NVML_DEVICE_VGPU_CAP_COUNT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 481, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEVICE_VGPU_CAP_COUNT, __pyx_t_11) < (0)) __PYX_ERR(0, 481, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":467
 *     VGPU_DRIVER_CAP_COUNT = NVML_VGPU_DRIVER_CAP_COUNT
 * 
 * class DeviceVgpuCapability(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDeviceVgpuCapability_t`."""
 *     DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_DeviceVgpuCapability, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DeviceVgpuCapability, __pyx_t_11) < (0)) __PYX_ERR(0, 467, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":483
 *     DEVICE_VGPU_CAP_COUNT = NVML_DEVICE_VGPU_CAP_COUNT
 * 
 * class DeviceGpuRecoveryAction(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDeviceGpuRecoveryAction_t`."""
 *     GPU_RECOVERY_ACTION_NONE = NVML_GPU_RECOVERY_ACTION_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_DeviceGpuRecoveryAction, __pyx_mstate_global->__pyx_n_u_DeviceGpuRecoveryAction, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlDeviceGpuRecoveryAction); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 483, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":485
 * class DeviceGpuRecoveryAction(_IntEnum):
 *     """See `nvmlDeviceGpuRecoveryAction_t`."""
 *     GPU_RECOVERY_ACTION_NONE = NVML_GPU_RECOVERY_ACTION_NONE             # <<<<<<<<<<<<<<
 *     GPU_RECOVERY_ACTION_GPU_RESET = NVML_GPU_RECOVERY_ACTION_GPU_RESET
 *     GPU_RECOVERY_ACTION_NODE_REBOOT = NVML_GPU_RECOVERY_ACTION_NODE_REBOOT
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(NVML_GPU_RECOVERY_ACTION_NONE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_RECOVERY_ACTION_NONE, __pyx_t_5) < (0)) __PYX_ERR(0, 485, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":486
 *     """See `nvmlDeviceGpuRecoveryAction_t`."""
 *     GPU_RECOVERY_ACTION_NONE = NVML_GPU_RECOVERY_ACTION_NONE
 *     GPU_RECOVERY_ACTION_GPU_RESET = NVML_GPU_RECOVERY_ACTION_GPU_RESET             # <<<<<<<<<<<<<<
 *     GPU_RECOVERY_ACTION_NODE_REBOOT = NVML_GPU_RECOVERY_ACTION_NODE_REBOOT
 *     GPU_RECOVERY_ACTION_DRAIN_P2P = NVML_GPU_RECOVERY_ACTION_DRAIN_P2P
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(NVML_GPU_RECOVERY_ACTION_GPU_RESET); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 486, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_RECOVERY_ACTION_GPU_RESET, __pyx_t_5) < (0)) __PYX_ERR(0, 486, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":487
 *     GPU_RECOVERY_ACTION_NONE = NVML_GPU_RECOVERY_ACTION_NONE
 *     GPU_RECOVERY_ACTION_GPU_RESET = NVML_GPU_RECOVERY_ACTION_GPU_RESET
 *     GPU_RECOVERY_ACTION_NODE_REBOOT = NVML_GPU_RECOVERY_ACTION_NODE_REBOOT             # <<<<<<<<<<<<<<
 *     GPU_RECOVERY_ACTION_DRAIN_P2P = NVML_GPU_RECOVERY_ACTION_DRAIN_P2P
 *     GPU_RECOVERY_ACTION_DRAIN_AND_RESET = NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(NVML_GPU_RECOVERY_ACTION_NODE_REBOOT); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_RECOVERY_ACTION_NODE_REBOOT, __pyx_t_5) < (0)) __PYX_ERR(0, 487, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":488
 *     GPU_RECOVERY_ACTION_GPU_RESET = NVML_GPU_RECOVERY_ACTION_GPU_RESET
 *     GPU_RECOVERY_ACTION_NODE_REBOOT = NVML_GPU_RECOVERY_ACTION_NODE_REBOOT
 *     GPU_RECOVERY_ACTION_DRAIN_P2P = NVML_GPU_RECOVERY_ACTION_DRAIN_P2P             # <<<<<<<<<<<<<<
 *     GPU_RECOVERY_ACTION_DRAIN_AND_RESET = NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(NVML_GPU_RECOVERY_ACTION_DRAIN_P2P); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_RECOVERY_ACTION_DRAIN_P2P, __pyx_t_5) < (0)) __PYX_ERR(0, 488, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":489
 *     GPU_RECOVERY_ACTION_NODE_REBOOT = NVML_GPU_RECOVERY_ACTION_NODE_REBOOT
 *     GPU_RECOVERY_ACTION_DRAIN_P2P = NVML_GPU_RECOVERY_ACTION_DRAIN_P2P
 *     GPU_RECOVERY_ACTION_DRAIN_AND_RESET = NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET             # <<<<<<<<<<<<<<
 * 
 * class FanState(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 489, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GPU_RECOVERY_ACTION_DRAIN_AND_RE, __pyx_t_5) < (0)) __PYX_ERR(0, 489, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":483
 *     DEVICE_VGPU_CAP_COUNT = NVML_DEVICE_VGPU_CAP_COUNT
 * 
 * class DeviceGpuRecoveryAction(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDeviceGpuRecoveryAction_t`."""
 *     GPU_RECOVERY_ACTION_NONE = NVML_GPU_RECOVERY_ACTION_NONE
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DeviceGpuRecoveryAction, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DeviceGpuRecoveryAction, __pyx_t_5) < (0)) __PYX_ERR(0, 483, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":491
 *     GPU_RECOVERY_ACTION_DRAIN_AND_RESET = NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET
 * 
 * class FanState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlFanState_t`."""
 *     FAN_NORMAL = NVML_FAN_NORMAL
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_FanState, __pyx_mstate_global->__pyx_n_u_FanState, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlFanState_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 491, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":493
 * class FanState(_IntEnum):
 *     """See `nvmlFanState_t`."""
 *     FAN_NORMAL = NVML_FAN_NORMAL             # <<<<<<<<<<<<<<
 *     FAN_FAILED = NVML_FAN_FAILED
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFanState_t(NVML_FAN_NORMAL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 493, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_FAN_NORMAL, __pyx_t_10) < (0)) __PYX_ERR(0, 493, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":494
 *     """See `nvmlFanState_t`."""
 *     FAN_NORMAL = NVML_FAN_NORMAL
 *     FAN_FAILED = NVML_FAN_FAILED             # <<<<<<<<<<<<<<
 * 
 * class LedColor(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFanState_t(NVML_FAN_FAILED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_FAN_FAILED, __pyx_t_10) < (0)) __PYX_ERR(0, 494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":491
 *     GPU_RECOVERY_ACTION_DRAIN_AND_RESET = NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET
 * 
 * class FanState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlFanState_t`."""
 *     FAN_NORMAL = NVML_FAN_NORMAL
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_FanState, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_FanState, __pyx_t_10) < (0)) __PYX_ERR(0, 491, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":496
 *     FAN_FAILED = NVML_FAN_FAILED
 * 
 * class LedColor(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlLedColor_t`."""
 *     GREEN = NVML_LED_COLOR_GREEN
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_LedColor, __pyx_mstate_global->__pyx_n_u_LedColor, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlLedColor_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 496, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":498
 * class LedColor(_IntEnum):
 *     """See `nvmlLedColor_t`."""
 *     GREEN = NVML_LED_COLOR_GREEN             # <<<<<<<<<<<<<<
 *     AMBER = NVML_LED_COLOR_AMBER
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlLedColor_t(NVML_LED_COLOR_GREEN); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 498, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GREEN, __pyx_t_11) < (0)) __PYX_ERR(0, 498, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":499
 *     """See `nvmlLedColor_t`."""
 *     GREEN = NVML_LED_COLOR_GREEN
 *     AMBER = NVML_LED_COLOR_AMBER             # <<<<<<<<<<<<<<
 * 
 * class EncoderType(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlLedColor_t(NVML_LED_COLOR_AMBER); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 499, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_AMBER, __pyx_t_11) < (0)) __PYX_ERR(0, 499, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":496
 *     FAN_FAILED = NVML_FAN_FAILED
 * 
 * class LedColor(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlLedColor_t`."""
 *     GREEN = NVML_LED_COLOR_GREEN
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_LedColor, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_LedColor, __pyx_t_11) < (0)) __PYX_ERR(0, 496, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":501
 *     AMBER = NVML_LED_COLOR_AMBER
 * 
 * class EncoderType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlEncoderType_t`."""
 *     ENCODER_QUERY_H264 = NVML_ENCODER_QUERY_H264
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_EncoderType, __pyx_mstate_global->__pyx_n_u_EncoderType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlEncoderType_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 501, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":503
 * class EncoderType(_IntEnum):
 *     """See `nvmlEncoderType_t`."""
 *     ENCODER_QUERY_H264 = NVML_ENCODER_QUERY_H264             # <<<<<<<<<<<<<<
 *     ENCODER_QUERY_HEVC = NVML_ENCODER_QUERY_HEVC
 *     ENCODER_QUERY_AV1 = NVML_ENCODER_QUERY_AV1
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlEncoderType_t(NVML_ENCODER_QUERY_H264); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 503, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ENCODER_QUERY_H264, __pyx_t_5) < (0)) __PYX_ERR(0, 503, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":504
 *     """See `nvmlEncoderType_t`."""
 *     ENCODER_QUERY_H264 = NVML_ENCODER_QUERY_H264
 *     ENCODER_QUERY_HEVC = NVML_ENCODER_QUERY_HEVC             # <<<<<<<<<<<<<<
 *     ENCODER_QUERY_AV1 = NVML_ENCODER_QUERY_AV1
 *     ENCODER_QUERY_UNKNOWN = NVML_ENCODER_QUERY_UNKNOWN
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlEncoderType_t(NVML_ENCODER_QUERY_HEVC); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ENCODER_QUERY_HEVC, __pyx_t_5) < (0)) __PYX_ERR(0, 504, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":505
 *     ENCODER_QUERY_H264 = NVML_ENCODER_QUERY_H264
 *     ENCODER_QUERY_HEVC = NVML_ENCODER_QUERY_HEVC
 *     ENCODER_QUERY_AV1 = NVML_ENCODER_QUERY_AV1             # <<<<<<<<<<<<<<
 *     ENCODER_QUERY_UNKNOWN = NVML_ENCODER_QUERY_UNKNOWN
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlEncoderType_t(NVML_ENCODER_QUERY_AV1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 505, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ENCODER_QUERY_AV1, __pyx_t_5) < (0)) __PYX_ERR(0, 505, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":506
 *     ENCODER_QUERY_HEVC = NVML_ENCODER_QUERY_HEVC
 *     ENCODER_QUERY_AV1 = NVML_ENCODER_QUERY_AV1
 *     ENCODER_QUERY_UNKNOWN = NVML_ENCODER_QUERY_UNKNOWN             # <<<<<<<<<<<<<<
 * 
 * class FBCSessionType(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlEncoderType_t(NVML_ENCODER_QUERY_UNKNOWN); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 506, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ENCODER_QUERY_UNKNOWN, __pyx_t_5) < (0)) __PYX_ERR(0, 506, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":501
 *     AMBER = NVML_LED_COLOR_AMBER
 * 
 * class EncoderType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlEncoderType_t`."""
 *     ENCODER_QUERY_H264 = NVML_ENCODER_QUERY_H264
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_EncoderType, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_EncoderType, __pyx_t_5) < (0)) __PYX_ERR(0, 501, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":508
 *     ENCODER_QUERY_UNKNOWN = NVML_ENCODER_QUERY_UNKNOWN
 * 
 * class FBCSessionType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlFBCSessionType_t`."""
 *     UNKNOWN = NVML_FBC_SESSION_TYPE_UNKNOWN
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_FBCSessionType, __pyx_mstate_global->__pyx_n_u_FBCSessionType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlFBCSessionType_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 508, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":510
 * class FBCSessionType(_IntEnum):
 *     """See `nvmlFBCSessionType_t`."""
 *     UNKNOWN = NVML_FBC_SESSION_TYPE_UNKNOWN             # <<<<<<<<<<<<<<
 *     TOSYS = NVML_FBC_SESSION_TYPE_TOSYS
 *     CUDA = NVML_FBC_SESSION_TYPE_CUDA
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFBCSessionType_t(NVML_FBC_SESSION_TYPE_UNKNOWN); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 510, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_UNKNOWN, __pyx_t_10) < (0)) __PYX_ERR(0, 510, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":511
 *     """See `nvmlFBCSessionType_t`."""
 *     UNKNOWN = NVML_FBC_SESSION_TYPE_UNKNOWN
 *     TOSYS = NVML_FBC_SESSION_TYPE_TOSYS             # <<<<<<<<<<<<<<
 *     CUDA = NVML_FBC_SESSION_TYPE_CUDA
 *     VID = NVML_FBC_SESSION_TYPE_VID
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFBCSessionType_t(NVML_FBC_SESSION_TYPE_TOSYS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_TOSYS, __pyx_t_10) < (0)) __PYX_ERR(0, 511, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":512
 *     UNKNOWN = NVML_FBC_SESSION_TYPE_UNKNOWN
 *     TOSYS = NVML_FBC_SESSION_TYPE_TOSYS
 *     CUDA = NVML_FBC_SESSION_TYPE_CUDA             # <<<<<<<<<<<<<<
 *     VID = NVML_FBC_SESSION_TYPE_VID
 *     HWENC = NVML_FBC_SESSION_TYPE_HWENC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFBCSessionType_t(NVML_FBC_SESSION_TYPE_CUDA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 512, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_CUDA, __pyx_t_10) < (0)) __PYX_ERR(0, 512, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":513
 *     TOSYS = NVML_FBC_SESSION_TYPE_TOSYS
 *     CUDA = NVML_FBC_SESSION_TYPE_CUDA
 *     VID = NVML_FBC_SESSION_TYPE_VID             # <<<<<<<<<<<<<<
 *     HWENC = NVML_FBC_SESSION_TYPE_HWENC
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFBCSessionType_t(NVML_FBC_SESSION_TYPE_VID); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VID, __pyx_t_10) < (0)) __PYX_ERR(0, 513, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":514
 *     CUDA = NVML_FBC_SESSION_TYPE_CUDA
 *     VID = NVML_FBC_SESSION_TYPE_VID
 *     HWENC = NVML_FBC_SESSION_TYPE_HWENC             # <<<<<<<<<<<<<<
 * 
 * class DetachGpuState(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlFBCSessionType_t(NVML_FBC_SESSION_TYPE_HWENC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_HWENC, __pyx_t_10) < (0)) __PYX_ERR(0, 514, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":508
 *     ENCODER_QUERY_UNKNOWN = NVML_ENCODER_QUERY_UNKNOWN
 * 
 * class FBCSessionType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlFBCSessionType_t`."""
 *     UNKNOWN = NVML_FBC_SESSION_TYPE_UNKNOWN
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_FBCSessionType, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_FBCSessionType, __pyx_t_10) < (0)) __PYX_ERR(0, 508, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":516
 *     HWENC = NVML_FBC_SESSION_TYPE_HWENC
 * 
 * class DetachGpuState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDetachGpuState_t`."""
 *     DETACH_GPU_KEEP = NVML_DETACH_GPU_KEEP
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_DetachGpuState, __pyx_mstate_global->__pyx_n_u_DetachGpuState, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlDetachGpuState_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 516, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":518
 * class DetachGpuState(_IntEnum):
 *     """See `nvmlDetachGpuState_t`."""
 *     DETACH_GPU_KEEP = NVML_DETACH_GPU_KEEP             # <<<<<<<<<<<<<<
 *     DETACH_GPU_REMOVE = NVML_DETACH_GPU_REMOVE
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDetachGpuState_t(NVML_DETACH_GPU_KEEP); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 518, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DETACH_GPU_KEEP, __pyx_t_11) < (0)) __PYX_ERR(0, 518, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":519
 *     """See `nvmlDetachGpuState_t`."""
 *     DETACH_GPU_KEEP = NVML_DETACH_GPU_KEEP
 *     DETACH_GPU_REMOVE = NVML_DETACH_GPU_REMOVE             # <<<<<<<<<<<<<<
 * 
 * class PcieLinkState(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlDetachGpuState_t(NVML_DETACH_GPU_REMOVE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 519, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DETACH_GPU_REMOVE, __pyx_t_11) < (0)) __PYX_ERR(0, 519, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":516
 *     HWENC = NVML_FBC_SESSION_TYPE_HWENC
 * 
 * class DetachGpuState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDetachGpuState_t`."""
 *     DETACH_GPU_KEEP = NVML_DETACH_GPU_KEEP
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_DetachGpuState, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DetachGpuState, __pyx_t_11) < (0)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":521
 *     DETACH_GPU_REMOVE = NVML_DETACH_GPU_REMOVE
 * 
 * class PcieLinkState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPcieLinkState_t`."""
 *     PCIE_LINK_KEEP = NVML_PCIE_LINK_KEEP
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_PcieLinkState, __pyx_mstate_global->__pyx_n_u_PcieLinkState, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlPcieLinkState_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 521, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":523
 * class PcieLinkState(_IntEnum):
 *     """See `nvmlPcieLinkState_t`."""
 *     PCIE_LINK_KEEP = NVML_PCIE_LINK_KEEP             # <<<<<<<<<<<<<<
 *     PCIE_LINK_SHUT_DOWN = NVML_PCIE_LINK_SHUT_DOWN
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPcieLinkState_t(NVML_PCIE_LINK_KEEP); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PCIE_LINK_KEEP, __pyx_t_5) < (0)) __PYX_ERR(0, 523, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":524
 *     """See `nvmlPcieLinkState_t`."""
 *     PCIE_LINK_KEEP = NVML_PCIE_LINK_KEEP
 *     PCIE_LINK_SHUT_DOWN = NVML_PCIE_LINK_SHUT_DOWN             # <<<<<<<<<<<<<<
 * 
 * class ClockLimitId(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlPcieLinkState_t(NVML_PCIE_LINK_SHUT_DOWN); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 524, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_PCIE_LINK_SHUT_DOWN, __pyx_t_5) < (0)) __PYX_ERR(0, 524, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":521
 *     DETACH_GPU_REMOVE = NVML_DETACH_GPU_REMOVE
 * 
 * class PcieLinkState(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPcieLinkState_t`."""
 *     PCIE_LINK_KEEP = NVML_PCIE_LINK_KEEP
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PcieLinkState, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_PcieLinkState, __pyx_t_5) < (0)) __PYX_ERR(0, 521, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":526
 *     PCIE_LINK_SHUT_DOWN = NVML_PCIE_LINK_SHUT_DOWN
 * 
 * class ClockLimitId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlClockLimitId_t`."""
 *     RANGE_START = NVML_CLOCK_LIMIT_ID_RANGE_START
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_ClockLimitId, __pyx_mstate_global->__pyx_n_u_ClockLimitId, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlClockLimitId_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 526, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":528
 * class ClockLimitId(_IntEnum):
 *     """See `nvmlClockLimitId_t`."""
 *     RANGE_START = NVML_CLOCK_LIMIT_ID_RANGE_START             # <<<<<<<<<<<<<<
 *     TDP = NVML_CLOCK_LIMIT_ID_TDP
 *     UNLIMITED = NVML_CLOCK_LIMIT_ID_UNLIMITED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlClockLimitId_t(NVML_CLOCK_LIMIT_ID_RANGE_START); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 528, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_RANGE_START, __pyx_t_10) < (0)) __PYX_ERR(0, 528, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":529
 *     """See `nvmlClockLimitId_t`."""
 *     RANGE_START = NVML_CLOCK_LIMIT_ID_RANGE_START
 *     TDP = NVML_CLOCK_LIMIT_ID_TDP             # <<<<<<<<<<<<<<
 *     UNLIMITED = NVML_CLOCK_LIMIT_ID_UNLIMITED
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlClockLimitId_t(NVML_CLOCK_LIMIT_ID_TDP); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 529, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_TDP, __pyx_t_10) < (0)) __PYX_ERR(0, 529, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":530
 *     RANGE_START = NVML_CLOCK_LIMIT_ID_RANGE_START
 *     TDP = NVML_CLOCK_LIMIT_ID_TDP
 *     UNLIMITED = NVML_CLOCK_LIMIT_ID_UNLIMITED             # <<<<<<<<<<<<<<
 * 
 * class VgpuVmCompatibility(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlClockLimitId_t(NVML_CLOCK_LIMIT_ID_UNLIMITED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 530, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_UNLIMITED, __pyx_t_10) < (0)) __PYX_ERR(0, 530, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":526
 *     PCIE_LINK_SHUT_DOWN = NVML_PCIE_LINK_SHUT_DOWN
 * 
 * class ClockLimitId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlClockLimitId_t`."""
 *     RANGE_START = NVML_CLOCK_LIMIT_ID_RANGE_START
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ClockLimitId, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ClockLimitId, __pyx_t_10) < (0)) __PYX_ERR(0, 526, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":532
 *     UNLIMITED = NVML_CLOCK_LIMIT_ID_UNLIMITED
 * 
 * class VgpuVmCompatibility(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuVmCompatibility_t`."""
 *     NONE = NVML_VGPU_VM_COMPATIBILITY_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_VgpuVmCompatibility, __pyx_mstate_global->__pyx_n_u_VgpuVmCompatibility, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlVgpuVmCompatibility_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 532, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":534
 * class VgpuVmCompatibility(_IntEnum):
 *     """See `nvmlVgpuVmCompatibility_t`."""
 *     NONE = NVML_VGPU_VM_COMPATIBILITY_NONE             # <<<<<<<<<<<<<<
 *     COLD = NVML_VGPU_VM_COMPATIBILITY_COLD
 *     HIBERNATE = NVML_VGPU_VM_COMPATIBILITY_HIBERNATE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(NVML_VGPU_VM_COMPATIBILITY_NONE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 534, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NONE, __pyx_t_11) < (0)) __PYX_ERR(0, 534, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":535
 *     """See `nvmlVgpuVmCompatibility_t`."""
 *     NONE = NVML_VGPU_VM_COMPATIBILITY_NONE
 *     COLD = NVML_VGPU_VM_COMPATIBILITY_COLD             # <<<<<<<<<<<<<<
 *     HIBERNATE = NVML_VGPU_VM_COMPATIBILITY_HIBERNATE
 *     SLEEP = NVML_VGPU_VM_COMPATIBILITY_SLEEP
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(NVML_VGPU_VM_COMPATIBILITY_COLD); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 535, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_COLD, __pyx_t_11) < (0)) __PYX_ERR(0, 535, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":536
 *     NONE = NVML_VGPU_VM_COMPATIBILITY_NONE
 *     COLD = NVML_VGPU_VM_COMPATIBILITY_COLD
 *     HIBERNATE = NVML_VGPU_VM_COMPATIBILITY_HIBERNATE             # <<<<<<<<<<<<<<
 *     SLEEP = NVML_VGPU_VM_COMPATIBILITY_SLEEP
 *     LIVE = NVML_VGPU_VM_COMPATIBILITY_LIVE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(NVML_VGPU_VM_COMPATIBILITY_HIBERNATE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 536, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_HIBERNATE, __pyx_t_11) < (0)) __PYX_ERR(0, 536, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":537
 *     COLD = NVML_VGPU_VM_COMPATIBILITY_COLD
 *     HIBERNATE = NVML_VGPU_VM_COMPATIBILITY_HIBERNATE
 *     SLEEP = NVML_VGPU_VM_COMPATIBILITY_SLEEP             # <<<<<<<<<<<<<<
 *     LIVE = NVML_VGPU_VM_COMPATIBILITY_LIVE
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(NVML_VGPU_VM_COMPATIBILITY_SLEEP); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_SLEEP, __pyx_t_11) < (0)) __PYX_ERR(0, 537, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":538
 *     HIBERNATE = NVML_VGPU_VM_COMPATIBILITY_HIBERNATE
 *     SLEEP = NVML_VGPU_VM_COMPATIBILITY_SLEEP
 *     LIVE = NVML_VGPU_VM_COMPATIBILITY_LIVE             # <<<<<<<<<<<<<<
 * 
 * class VgpuPgpuCompatibilityLimitCode(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(NVML_VGPU_VM_COMPATIBILITY_LIVE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 538, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_LIVE, __pyx_t_11) < (0)) __PYX_ERR(0, 538, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":532
 *     UNLIMITED = NVML_CLOCK_LIMIT_ID_UNLIMITED
 * 
 * class VgpuVmCompatibility(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuVmCompatibility_t`."""
 *     NONE = NVML_VGPU_VM_COMPATIBILITY_NONE
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_VgpuVmCompatibility, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuVmCompatibility, __pyx_t_11) < (0)) __PYX_ERR(0, 532, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":540
 *     LIVE = NVML_VGPU_VM_COMPATIBILITY_LIVE
 * 
 * class VgpuPgpuCompatibilityLimitCode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuPgpuCompatibilityLimitCode_t`."""
 *     VGPU_COMPATIBILITY_LIMIT_NONE = NVML_VGPU_COMPATIBILITY_LIMIT_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibilityLimitCode, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibilityLimitCode, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlVgpuPgpuCompatibilityLim); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 540, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":542
 * class VgpuPgpuCompatibilityLimitCode(_IntEnum):
 *     """See `nvmlVgpuPgpuCompatibilityLimitCode_t`."""
 *     VGPU_COMPATIBILITY_LIMIT_NONE = NVML_VGPU_COMPATIBILITY_LIMIT_NONE             # <<<<<<<<<<<<<<
 *     VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER
 *     VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(NVML_VGPU_COMPATIBILITY_LIMIT_NONE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 542, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_COMPATIBILITY_LIMIT_NONE, __pyx_t_5) < (0)) __PYX_ERR(0, 542, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":543
 *     """See `nvmlVgpuPgpuCompatibilityLimitCode_t`."""
 *     VGPU_COMPATIBILITY_LIMIT_NONE = NVML_VGPU_COMPATIBILITY_LIMIT_NONE
 *     VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER             # <<<<<<<<<<<<<<
 *     VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER
 *     VGPU_COMPATIBILITY_LIMIT_GPU = NVML_VGPU_COMPATIBILITY_LIMIT_GPU
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 543, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_COMPATIBILITY_LIMIT_HOST_DR, __pyx_t_5) < (0)) __PYX_ERR(0, 543, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":544
 *     VGPU_COMPATIBILITY_LIMIT_NONE = NVML_VGPU_COMPATIBILITY_LIMIT_NONE
 *     VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER
 *     VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER             # <<<<<<<<<<<<<<
 *     VGPU_COMPATIBILITY_LIMIT_GPU = NVML_VGPU_COMPATIBILITY_LIMIT_GPU
 *     VGPU_COMPATIBILITY_LIMIT_OTHER = NVML_VGPU_COMPATIBILITY_LIMIT_OTHER
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 544, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_COMPATIBILITY_LIMIT_GUEST_D, __pyx_t_5) < (0)) __PYX_ERR(0, 544, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":545
 *     VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER
 *     VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER
 *     VGPU_COMPATIBILITY_LIMIT_GPU = NVML_VGPU_COMPATIBILITY_LIMIT_GPU             # <<<<<<<<<<<<<<
 *     VGPU_COMPATIBILITY_LIMIT_OTHER = NVML_VGPU_COMPATIBILITY_LIMIT_OTHER
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(NVML_VGPU_COMPATIBILITY_LIMIT_GPU); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 545, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_COMPATIBILITY_LIMIT_GPU, __pyx_t_5) < (0)) __PYX_ERR(0, 545, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":546
 *     VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER
 *     VGPU_COMPATIBILITY_LIMIT_GPU = NVML_VGPU_COMPATIBILITY_LIMIT_GPU
 *     VGPU_COMPATIBILITY_LIMIT_OTHER = NVML_VGPU_COMPATIBILITY_LIMIT_OTHER             # <<<<<<<<<<<<<<
 * 
 * class GpmMetricId(_IntEnum):
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(NVML_VGPU_COMPATIBILITY_LIMIT_OTHER); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 546, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VGPU_COMPATIBILITY_LIMIT_OTHER, __pyx_t_5) < (0)) __PYX_ERR(0, 546, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":540
 *     LIVE = NVML_VGPU_VM_COMPATIBILITY_LIVE
 * 
 * class VgpuPgpuCompatibilityLimitCode(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlVgpuPgpuCompatibilityLimitCode_t`."""
 *     VGPU_COMPATIBILITY_LIMIT_NONE = NVML_VGPU_COMPATIBILITY_LIMIT_NONE
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibilityLimitCode, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibilityLimitCode, __pyx_t_5) < (0)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":548
 *     VGPU_COMPATIBILITY_LIMIT_OTHER = NVML_VGPU_COMPATIBILITY_LIMIT_OTHER
 * 
 * class GpmMetricId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpmMetricId_t`."""
 *     GPM_METRIC_GRAPHICS_UTIL = NVML_GPM_METRIC_GRAPHICS_UTIL
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_GpmMetricId, __pyx_mstate_global->__pyx_n_u_GpmMetricId, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlGpmMetricId_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 548, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":550
 * class GpmMetricId(_IntEnum):
 *     """See `nvmlGpmMetricId_t`."""
 *     GPM_METRIC_GRAPHICS_UTIL = NVML_GPM_METRIC_GRAPHICS_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_SM_UTIL = NVML_GPM_METRIC_SM_UTIL
 *     GPM_METRIC_SM_OCCUPANCY = NVML_GPM_METRIC_SM_OCCUPANCY
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GRAPHICS_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 550, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GRAPHICS_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 550, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":551
 *     """See `nvmlGpmMetricId_t`."""
 *     GPM_METRIC_GRAPHICS_UTIL = NVML_GPM_METRIC_GRAPHICS_UTIL
 *     GPM_METRIC_SM_UTIL = NVML_GPM_METRIC_SM_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_SM_OCCUPANCY = NVML_GPM_METRIC_SM_OCCUPANCY
 *     GPM_METRIC_INTEGER_UTIL = NVML_GPM_METRIC_INTEGER_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_SM_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 551, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_SM_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 551, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":552
 *     GPM_METRIC_GRAPHICS_UTIL = NVML_GPM_METRIC_GRAPHICS_UTIL
 *     GPM_METRIC_SM_UTIL = NVML_GPM_METRIC_SM_UTIL
 *     GPM_METRIC_SM_OCCUPANCY = NVML_GPM_METRIC_SM_OCCUPANCY             # <<<<<<<<<<<<<<
 *     GPM_METRIC_INTEGER_UTIL = NVML_GPM_METRIC_INTEGER_UTIL
 *     GPM_METRIC_ANY_TENSOR_UTIL = NVML_GPM_METRIC_ANY_TENSOR_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_SM_OCCUPANCY); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 552, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_SM_OCCUPANCY, __pyx_t_10) < (0)) __PYX_ERR(0, 552, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":553
 *     GPM_METRIC_SM_UTIL = NVML_GPM_METRIC_SM_UTIL
 *     GPM_METRIC_SM_OCCUPANCY = NVML_GPM_METRIC_SM_OCCUPANCY
 *     GPM_METRIC_INTEGER_UTIL = NVML_GPM_METRIC_INTEGER_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_ANY_TENSOR_UTIL = NVML_GPM_METRIC_ANY_TENSOR_UTIL
 *     GPM_METRIC_DFMA_TENSOR_UTIL = NVML_GPM_METRIC_DFMA_TENSOR_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_INTEGER_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 553, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_INTEGER_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 553, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":554
 *     GPM_METRIC_SM_OCCUPANCY = NVML_GPM_METRIC_SM_OCCUPANCY
 *     GPM_METRIC_INTEGER_UTIL = NVML_GPM_METRIC_INTEGER_UTIL
 *     GPM_METRIC_ANY_TENSOR_UTIL = NVML_GPM_METRIC_ANY_TENSOR_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_DFMA_TENSOR_UTIL = NVML_GPM_METRIC_DFMA_TENSOR_UTIL
 *     GPM_METRIC_HMMA_TENSOR_UTIL = NVML_GPM_METRIC_HMMA_TENSOR_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_ANY_TENSOR_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 554, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_ANY_TENSOR_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 554, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":555
 *     GPM_METRIC_INTEGER_UTIL = NVML_GPM_METRIC_INTEGER_UTIL
 *     GPM_METRIC_ANY_TENSOR_UTIL = NVML_GPM_METRIC_ANY_TENSOR_UTIL
 *     GPM_METRIC_DFMA_TENSOR_UTIL = NVML_GPM_METRIC_DFMA_TENSOR_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_HMMA_TENSOR_UTIL = NVML_GPM_METRIC_HMMA_TENSOR_UTIL
 *     GPM_METRIC_IMMA_TENSOR_UTIL = NVML_GPM_METRIC_IMMA_TENSOR_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_DFMA_TENSOR_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 555, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_DFMA_TENSOR_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 555, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":556
 *     GPM_METRIC_ANY_TENSOR_UTIL = NVML_GPM_METRIC_ANY_TENSOR_UTIL
 *     GPM_METRIC_DFMA_TENSOR_UTIL = NVML_GPM_METRIC_DFMA_TENSOR_UTIL
 *     GPM_METRIC_HMMA_TENSOR_UTIL = NVML_GPM_METRIC_HMMA_TENSOR_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_IMMA_TENSOR_UTIL = NVML_GPM_METRIC_IMMA_TENSOR_UTIL
 *     GPM_METRIC_DRAM_BW_UTIL = NVML_GPM_METRIC_DRAM_BW_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_HMMA_TENSOR_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_HMMA_TENSOR_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 556, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":557
 *     GPM_METRIC_DFMA_TENSOR_UTIL = NVML_GPM_METRIC_DFMA_TENSOR_UTIL
 *     GPM_METRIC_HMMA_TENSOR_UTIL = NVML_GPM_METRIC_HMMA_TENSOR_UTIL
 *     GPM_METRIC_IMMA_TENSOR_UTIL = NVML_GPM_METRIC_IMMA_TENSOR_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_DRAM_BW_UTIL = NVML_GPM_METRIC_DRAM_BW_UTIL
 *     GPM_METRIC_FP64_UTIL = NVML_GPM_METRIC_FP64_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_IMMA_TENSOR_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_IMMA_TENSOR_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 557, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":558
 *     GPM_METRIC_HMMA_TENSOR_UTIL = NVML_GPM_METRIC_HMMA_TENSOR_UTIL
 *     GPM_METRIC_IMMA_TENSOR_UTIL = NVML_GPM_METRIC_IMMA_TENSOR_UTIL
 *     GPM_METRIC_DRAM_BW_UTIL = NVML_GPM_METRIC_DRAM_BW_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_FP64_UTIL = NVML_GPM_METRIC_FP64_UTIL
 *     GPM_METRIC_FP32_UTIL = NVML_GPM_METRIC_FP32_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_DRAM_BW_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_DRAM_BW_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 558, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":559
 *     GPM_METRIC_IMMA_TENSOR_UTIL = NVML_GPM_METRIC_IMMA_TENSOR_UTIL
 *     GPM_METRIC_DRAM_BW_UTIL = NVML_GPM_METRIC_DRAM_BW_UTIL
 *     GPM_METRIC_FP64_UTIL = NVML_GPM_METRIC_FP64_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_FP32_UTIL = NVML_GPM_METRIC_FP32_UTIL
 *     GPM_METRIC_FP16_UTIL = NVML_GPM_METRIC_FP16_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_FP64_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_FP64_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 559, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":560
 *     GPM_METRIC_DRAM_BW_UTIL = NVML_GPM_METRIC_DRAM_BW_UTIL
 *     GPM_METRIC_FP64_UTIL = NVML_GPM_METRIC_FP64_UTIL
 *     GPM_METRIC_FP32_UTIL = NVML_GPM_METRIC_FP32_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_FP16_UTIL = NVML_GPM_METRIC_FP16_UTIL
 *     GPM_METRIC_PCIE_TX_PER_SEC = NVML_GPM_METRIC_PCIE_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_FP32_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_FP32_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 560, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":561
 *     GPM_METRIC_FP64_UTIL = NVML_GPM_METRIC_FP64_UTIL
 *     GPM_METRIC_FP32_UTIL = NVML_GPM_METRIC_FP32_UTIL
 *     GPM_METRIC_FP16_UTIL = NVML_GPM_METRIC_FP16_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_PCIE_TX_PER_SEC = NVML_GPM_METRIC_PCIE_TX_PER_SEC
 *     GPM_METRIC_PCIE_RX_PER_SEC = NVML_GPM_METRIC_PCIE_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_FP16_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 561, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_FP16_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 561, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":562
 *     GPM_METRIC_FP32_UTIL = NVML_GPM_METRIC_FP32_UTIL
 *     GPM_METRIC_FP16_UTIL = NVML_GPM_METRIC_FP16_UTIL
 *     GPM_METRIC_PCIE_TX_PER_SEC = NVML_GPM_METRIC_PCIE_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_PCIE_RX_PER_SEC = NVML_GPM_METRIC_PCIE_RX_PER_SEC
 *     GPM_METRIC_NVDEC_0_UTIL = NVML_GPM_METRIC_NVDEC_0_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_PCIE_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 562, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_PCIE_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 562, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":563
 *     GPM_METRIC_FP16_UTIL = NVML_GPM_METRIC_FP16_UTIL
 *     GPM_METRIC_PCIE_TX_PER_SEC = NVML_GPM_METRIC_PCIE_TX_PER_SEC
 *     GPM_METRIC_PCIE_RX_PER_SEC = NVML_GPM_METRIC_PCIE_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_0_UTIL = NVML_GPM_METRIC_NVDEC_0_UTIL
 *     GPM_METRIC_NVDEC_1_UTIL = NVML_GPM_METRIC_NVDEC_1_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_PCIE_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_PCIE_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 563, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":564
 *     GPM_METRIC_PCIE_TX_PER_SEC = NVML_GPM_METRIC_PCIE_TX_PER_SEC
 *     GPM_METRIC_PCIE_RX_PER_SEC = NVML_GPM_METRIC_PCIE_RX_PER_SEC
 *     GPM_METRIC_NVDEC_0_UTIL = NVML_GPM_METRIC_NVDEC_0_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_1_UTIL = NVML_GPM_METRIC_NVDEC_1_UTIL
 *     GPM_METRIC_NVDEC_2_UTIL = NVML_GPM_METRIC_NVDEC_2_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_0_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_0_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 564, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":565
 *     GPM_METRIC_PCIE_RX_PER_SEC = NVML_GPM_METRIC_PCIE_RX_PER_SEC
 *     GPM_METRIC_NVDEC_0_UTIL = NVML_GPM_METRIC_NVDEC_0_UTIL
 *     GPM_METRIC_NVDEC_1_UTIL = NVML_GPM_METRIC_NVDEC_1_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_2_UTIL = NVML_GPM_METRIC_NVDEC_2_UTIL
 *     GPM_METRIC_NVDEC_3_UTIL = NVML_GPM_METRIC_NVDEC_3_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_1_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 565, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_1_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 565, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":566
 *     GPM_METRIC_NVDEC_0_UTIL = NVML_GPM_METRIC_NVDEC_0_UTIL
 *     GPM_METRIC_NVDEC_1_UTIL = NVML_GPM_METRIC_NVDEC_1_UTIL
 *     GPM_METRIC_NVDEC_2_UTIL = NVML_GPM_METRIC_NVDEC_2_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_3_UTIL = NVML_GPM_METRIC_NVDEC_3_UTIL
 *     GPM_METRIC_NVDEC_4_UTIL = NVML_GPM_METRIC_NVDEC_4_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_2_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_2_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 566, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":567
 *     GPM_METRIC_NVDEC_1_UTIL = NVML_GPM_METRIC_NVDEC_1_UTIL
 *     GPM_METRIC_NVDEC_2_UTIL = NVML_GPM_METRIC_NVDEC_2_UTIL
 *     GPM_METRIC_NVDEC_3_UTIL = NVML_GPM_METRIC_NVDEC_3_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_4_UTIL = NVML_GPM_METRIC_NVDEC_4_UTIL
 *     GPM_METRIC_NVDEC_5_UTIL = NVML_GPM_METRIC_NVDEC_5_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_3_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_3_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":568
 *     GPM_METRIC_NVDEC_2_UTIL = NVML_GPM_METRIC_NVDEC_2_UTIL
 *     GPM_METRIC_NVDEC_3_UTIL = NVML_GPM_METRIC_NVDEC_3_UTIL
 *     GPM_METRIC_NVDEC_4_UTIL = NVML_GPM_METRIC_NVDEC_4_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_5_UTIL = NVML_GPM_METRIC_NVDEC_5_UTIL
 *     GPM_METRIC_NVDEC_6_UTIL = NVML_GPM_METRIC_NVDEC_6_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_4_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_4_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":569
 *     GPM_METRIC_NVDEC_3_UTIL = NVML_GPM_METRIC_NVDEC_3_UTIL
 *     GPM_METRIC_NVDEC_4_UTIL = NVML_GPM_METRIC_NVDEC_4_UTIL
 *     GPM_METRIC_NVDEC_5_UTIL = NVML_GPM_METRIC_NVDEC_5_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_6_UTIL = NVML_GPM_METRIC_NVDEC_6_UTIL
 *     GPM_METRIC_NVDEC_7_UTIL = NVML_GPM_METRIC_NVDEC_7_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_5_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_5_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 569, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":570
 *     GPM_METRIC_NVDEC_4_UTIL = NVML_GPM_METRIC_NVDEC_4_UTIL
 *     GPM_METRIC_NVDEC_5_UTIL = NVML_GPM_METRIC_NVDEC_5_UTIL
 *     GPM_METRIC_NVDEC_6_UTIL = NVML_GPM_METRIC_NVDEC_6_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVDEC_7_UTIL = NVML_GPM_METRIC_NVDEC_7_UTIL
 *     GPM_METRIC_NVJPG_0_UTIL = NVML_GPM_METRIC_NVJPG_0_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_6_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 570, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_6_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 570, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":571
 *     GPM_METRIC_NVDEC_5_UTIL = NVML_GPM_METRIC_NVDEC_5_UTIL
 *     GPM_METRIC_NVDEC_6_UTIL = NVML_GPM_METRIC_NVDEC_6_UTIL
 *     GPM_METRIC_NVDEC_7_UTIL = NVML_GPM_METRIC_NVDEC_7_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_0_UTIL = NVML_GPM_METRIC_NVJPG_0_UTIL
 *     GPM_METRIC_NVJPG_1_UTIL = NVML_GPM_METRIC_NVJPG_1_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVDEC_7_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVDEC_7_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 571, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":572
 *     GPM_METRIC_NVDEC_6_UTIL = NVML_GPM_METRIC_NVDEC_6_UTIL
 *     GPM_METRIC_NVDEC_7_UTIL = NVML_GPM_METRIC_NVDEC_7_UTIL
 *     GPM_METRIC_NVJPG_0_UTIL = NVML_GPM_METRIC_NVJPG_0_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_1_UTIL = NVML_GPM_METRIC_NVJPG_1_UTIL
 *     GPM_METRIC_NVJPG_2_UTIL = NVML_GPM_METRIC_NVJPG_2_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_0_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_0_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 572, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":573
 *     GPM_METRIC_NVDEC_7_UTIL = NVML_GPM_METRIC_NVDEC_7_UTIL
 *     GPM_METRIC_NVJPG_0_UTIL = NVML_GPM_METRIC_NVJPG_0_UTIL
 *     GPM_METRIC_NVJPG_1_UTIL = NVML_GPM_METRIC_NVJPG_1_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_2_UTIL = NVML_GPM_METRIC_NVJPG_2_UTIL
 *     GPM_METRIC_NVJPG_3_UTIL = NVML_GPM_METRIC_NVJPG_3_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_1_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_1_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 573, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":574
 *     GPM_METRIC_NVJPG_0_UTIL = NVML_GPM_METRIC_NVJPG_0_UTIL
 *     GPM_METRIC_NVJPG_1_UTIL = NVML_GPM_METRIC_NVJPG_1_UTIL
 *     GPM_METRIC_NVJPG_2_UTIL = NVML_GPM_METRIC_NVJPG_2_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_3_UTIL = NVML_GPM_METRIC_NVJPG_3_UTIL
 *     GPM_METRIC_NVJPG_4_UTIL = NVML_GPM_METRIC_NVJPG_4_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_2_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 574, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_2_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 574, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":575
 *     GPM_METRIC_NVJPG_1_UTIL = NVML_GPM_METRIC_NVJPG_1_UTIL
 *     GPM_METRIC_NVJPG_2_UTIL = NVML_GPM_METRIC_NVJPG_2_UTIL
 *     GPM_METRIC_NVJPG_3_UTIL = NVML_GPM_METRIC_NVJPG_3_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_4_UTIL = NVML_GPM_METRIC_NVJPG_4_UTIL
 *     GPM_METRIC_NVJPG_5_UTIL = NVML_GPM_METRIC_NVJPG_5_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_3_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 575, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_3_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 575, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":576
 *     GPM_METRIC_NVJPG_2_UTIL = NVML_GPM_METRIC_NVJPG_2_UTIL
 *     GPM_METRIC_NVJPG_3_UTIL = NVML_GPM_METRIC_NVJPG_3_UTIL
 *     GPM_METRIC_NVJPG_4_UTIL = NVML_GPM_METRIC_NVJPG_4_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_5_UTIL = NVML_GPM_METRIC_NVJPG_5_UTIL
 *     GPM_METRIC_NVJPG_6_UTIL = NVML_GPM_METRIC_NVJPG_6_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_4_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_4_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 576, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":577
 *     GPM_METRIC_NVJPG_3_UTIL = NVML_GPM_METRIC_NVJPG_3_UTIL
 *     GPM_METRIC_NVJPG_4_UTIL = NVML_GPM_METRIC_NVJPG_4_UTIL
 *     GPM_METRIC_NVJPG_5_UTIL = NVML_GPM_METRIC_NVJPG_5_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_6_UTIL = NVML_GPM_METRIC_NVJPG_6_UTIL
 *     GPM_METRIC_NVJPG_7_UTIL = NVML_GPM_METRIC_NVJPG_7_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_5_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_5_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 577, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":578
 *     GPM_METRIC_NVJPG_4_UTIL = NVML_GPM_METRIC_NVJPG_4_UTIL
 *     GPM_METRIC_NVJPG_5_UTIL = NVML_GPM_METRIC_NVJPG_5_UTIL
 *     GPM_METRIC_NVJPG_6_UTIL = NVML_GPM_METRIC_NVJPG_6_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVJPG_7_UTIL = NVML_GPM_METRIC_NVJPG_7_UTIL
 *     GPM_METRIC_NVOFA_0_UTIL = NVML_GPM_METRIC_NVOFA_0_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_6_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_6_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 578, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":579
 *     GPM_METRIC_NVJPG_5_UTIL = NVML_GPM_METRIC_NVJPG_5_UTIL
 *     GPM_METRIC_NVJPG_6_UTIL = NVML_GPM_METRIC_NVJPG_6_UTIL
 *     GPM_METRIC_NVJPG_7_UTIL = NVML_GPM_METRIC_NVJPG_7_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVOFA_0_UTIL = NVML_GPM_METRIC_NVOFA_0_UTIL
 *     GPM_METRIC_NVOFA_1_UTIL = NVML_GPM_METRIC_NVOFA_1_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVJPG_7_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVJPG_7_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 579, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":580
 *     GPM_METRIC_NVJPG_6_UTIL = NVML_GPM_METRIC_NVJPG_6_UTIL
 *     GPM_METRIC_NVJPG_7_UTIL = NVML_GPM_METRIC_NVJPG_7_UTIL
 *     GPM_METRIC_NVOFA_0_UTIL = NVML_GPM_METRIC_NVOFA_0_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVOFA_1_UTIL = NVML_GPM_METRIC_NVOFA_1_UTIL
 *     GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVOFA_0_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 580, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVOFA_0_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 580, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":581
 *     GPM_METRIC_NVJPG_7_UTIL = NVML_GPM_METRIC_NVJPG_7_UTIL
 *     GPM_METRIC_NVOFA_0_UTIL = NVML_GPM_METRIC_NVOFA_0_UTIL
 *     GPM_METRIC_NVOFA_1_UTIL = NVML_GPM_METRIC_NVOFA_1_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC
 *     GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVOFA_1_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVOFA_1_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 581, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":582
 *     GPM_METRIC_NVOFA_0_UTIL = NVML_GPM_METRIC_NVOFA_0_UTIL
 *     GPM_METRIC_NVOFA_1_UTIL = NVML_GPM_METRIC_NVOFA_1_UTIL
 *     GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L0_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_TOTAL_RX_PER_S, __pyx_t_10) < (0)) __PYX_ERR(0, 582, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":583
 *     GPM_METRIC_NVOFA_1_UTIL = NVML_GPM_METRIC_NVOFA_1_UTIL
 *     GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC
 *     GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L0_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L0_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_TOTAL_TX_PER_S, __pyx_t_10) < (0)) __PYX_ERR(0, 583, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":584
 *     GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC
 *     GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L0_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L0_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L1_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 584, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L0_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 584, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":585
 *     GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L0_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L0_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L1_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L1_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L0_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 585, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":586
 *     GPM_METRIC_NVLINK_L0_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L0_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L1_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L1_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L2_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 586, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L1_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 586, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":587
 *     GPM_METRIC_NVLINK_L0_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L1_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L1_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L2_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L2_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L1_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 587, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":588
 *     GPM_METRIC_NVLINK_L1_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L1_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L2_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L2_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L3_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 588, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L2_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 588, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":589
 *     GPM_METRIC_NVLINK_L1_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L2_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L2_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L3_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L3_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 589, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L2_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 589, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":590
 *     GPM_METRIC_NVLINK_L2_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L2_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L3_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L3_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L4_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 590, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L3_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 590, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":591
 *     GPM_METRIC_NVLINK_L2_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L3_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L3_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L4_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L4_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L3_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 591, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":592
 *     GPM_METRIC_NVLINK_L3_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L3_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L4_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L4_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L5_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 592, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L4_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 592, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":593
 *     GPM_METRIC_NVLINK_L3_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L4_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L4_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L5_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L5_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 593, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L4_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 593, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":594
 *     GPM_METRIC_NVLINK_L4_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L4_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L5_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L5_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L6_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 594, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L5_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 594, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":595
 *     GPM_METRIC_NVLINK_L4_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L5_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L5_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L6_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L6_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 595, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L5_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 595, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":596
 *     GPM_METRIC_NVLINK_L5_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L5_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L6_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L6_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L7_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 596, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L6_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 596, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":597
 *     GPM_METRIC_NVLINK_L5_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L6_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L6_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L7_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L7_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 597, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L6_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 597, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":598
 *     GPM_METRIC_NVLINK_L6_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L6_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L7_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L7_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L8_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L7_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 598, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":599
 *     GPM_METRIC_NVLINK_L6_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L7_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L7_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L8_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L8_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 599, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L7_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 599, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":600
 *     GPM_METRIC_NVLINK_L7_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L7_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L8_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L8_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L9_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L8_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 600, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":601
 *     GPM_METRIC_NVLINK_L7_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L8_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L8_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L9_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L9_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 601, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L8_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 601, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":602
 *     GPM_METRIC_NVLINK_L8_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L8_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L9_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L9_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L10_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 602, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L9_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 602, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":603
 *     GPM_METRIC_NVLINK_L8_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L9_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L9_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L10_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L10_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 603, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L9_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 603, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":604
 *     GPM_METRIC_NVLINK_L9_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L9_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L10_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L10_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L11_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L10_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 604, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":605
 *     GPM_METRIC_NVLINK_L9_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L10_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L10_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L11_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L11_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L10_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 605, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":606
 *     GPM_METRIC_NVLINK_L10_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L10_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L11_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L11_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L12_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 606, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L11_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 606, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":607
 *     GPM_METRIC_NVLINK_L10_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L11_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L11_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L12_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L12_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L11_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 607, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":608
 *     GPM_METRIC_NVLINK_L11_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L11_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L12_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L12_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L13_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 608, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L12_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 608, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":609
 *     GPM_METRIC_NVLINK_L11_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L12_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L12_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L13_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L13_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 609, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L12_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 609, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":610
 *     GPM_METRIC_NVLINK_L12_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L12_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L13_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L13_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L14_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 610, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L13_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 610, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":611
 *     GPM_METRIC_NVLINK_L12_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L13_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L13_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L14_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L14_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 611, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L13_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 611, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":612
 *     GPM_METRIC_NVLINK_L13_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L13_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L14_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L14_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L15_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L14_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 612, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":613
 *     GPM_METRIC_NVLINK_L13_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L14_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L14_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L15_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L15_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 613, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L14_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 613, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":614
 *     GPM_METRIC_NVLINK_L14_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L14_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L15_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L15_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L16_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 614, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L15_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 614, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":615
 *     GPM_METRIC_NVLINK_L14_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L15_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L15_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L16_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L16_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L15_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 615, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":616
 *     GPM_METRIC_NVLINK_L15_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L15_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L16_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L16_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L17_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 616, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L16_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 616, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":617
 *     GPM_METRIC_NVLINK_L15_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L16_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L16_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L17_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L17_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 617, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L16_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 617, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":618
 *     GPM_METRIC_NVLINK_L16_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L16_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L17_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVLINK_L17_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC
 *     GPM_METRIC_C2C_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 618, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L17_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 618, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":619
 *     GPM_METRIC_NVLINK_L16_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC
 *     GPM_METRIC_NVLINK_L17_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L17_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVLINK_L17_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 619, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":620
 *     GPM_METRIC_NVLINK_L17_RX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC
 *     GPM_METRIC_NVLINK_L17_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC
 *     GPM_METRIC_C2C_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_TOTAL_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 620, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":621
 *     GPM_METRIC_NVLINK_L17_TX_PER_SEC = NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC
 *     GPM_METRIC_C2C_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 621, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_TOTAL_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 621, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":622
 *     GPM_METRIC_C2C_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_DATA_TX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 622, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":623
 *     GPM_METRIC_C2C_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 623, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_DATA_RX_PER_SEC, __pyx_t_10) < (0)) __PYX_ERR(0, 623, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":624
 *     GPM_METRIC_C2C_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 624, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK0_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 624, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":625
 *     GPM_METRIC_C2C_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK0_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 625, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":626
 *     GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 626, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK0_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 626, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":627
 *     GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK0_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 627, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":628
 *     GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK1_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":629
 *     GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK1_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":630
 *     GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 630, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK1_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 630, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":631
 *     GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 631, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK1_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 631, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":632
 *     GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 632, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK2_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 632, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":633
 *     GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 633, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK2_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 633, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":634
 *     GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 634, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK2_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 634, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":635
 *     GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK2_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 635, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":636
 *     GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK3_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 636, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":637
 *     GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 637, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK3_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 637, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":638
 *     GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 638, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK3_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 638, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":639
 *     GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 639, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK3_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 639, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":640
 *     GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 640, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK4_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 640, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":641
 *     GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK4_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":642
 *     GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK4_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 642, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":643
 *     GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK4_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 643, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":644
 *     GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK5_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 644, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":645
 *     GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 645, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK5_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 645, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":646
 *     GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 646, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK5_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 646, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":647
 *     GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK5_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 647, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":648
 *     GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 648, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK6_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 648, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":649
 *     GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK6_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 649, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":650
 *     GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK6_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 650, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":651
 *     GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK6_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 651, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":652
 *     GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 652, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK7_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 652, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":653
 *     GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 653, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK7_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 653, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":654
 *     GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 654, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK7_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 654, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":655
 *     GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 655, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK7_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 655, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":656
 *     GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 656, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK8_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 656, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":657
 *     GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 657, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK8_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 657, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":658
 *     GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK8_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 658, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":659
 *     GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK8_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 659, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":660
 *     GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK9_TOTAL_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 660, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":661
 *     GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK9_TOTAL_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 661, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":662
 *     GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 662, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK9_DATA_TX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 662, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":663
 *     GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 663, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK9_DATA_RX_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 663, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":664
 *     GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 664, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK10_TOTAL_TX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 664, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":665
 *     GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 665, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK10_TOTAL_RX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 665, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":666
 *     GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK10_DATA_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 666, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":667
 *     GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK10_DATA_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 667, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":668
 *     GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 668, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK11_TOTAL_TX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 668, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":669
 *     GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK11_TOTAL_RX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 669, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":670
 *     GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK11_DATA_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 670, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":671
 *     GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 671, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK11_DATA_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 671, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":672
 *     GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 672, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK12_TOTAL_TX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 672, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":673
 *     GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 673, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK12_TOTAL_RX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 673, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":674
 *     GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 674, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK12_DATA_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 674, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":675
 *     GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 675, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK12_DATA_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 675, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":676
 *     GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK13_TOTAL_TX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 676, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":677
 *     GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 677, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK13_TOTAL_RX_P, __pyx_t_10) < (0)) __PYX_ERR(0, 677, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":678
 *     GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC
 *     GPM_METRIC_HOSTMEM_CACHE_HIT = NVML_GPM_METRIC_HOSTMEM_CACHE_HIT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK13_DATA_TX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 678, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":679
 *     GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC             # <<<<<<<<<<<<<<
 *     GPM_METRIC_HOSTMEM_CACHE_HIT = NVML_GPM_METRIC_HOSTMEM_CACHE_HIT
 *     GPM_METRIC_HOSTMEM_CACHE_MISS = NVML_GPM_METRIC_HOSTMEM_CACHE_MISS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 679, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_C2C_LINK13_DATA_RX_PE, __pyx_t_10) < (0)) __PYX_ERR(0, 679, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":680
 *     GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC
 *     GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC
 *     GPM_METRIC_HOSTMEM_CACHE_HIT = NVML_GPM_METRIC_HOSTMEM_CACHE_HIT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_HOSTMEM_CACHE_MISS = NVML_GPM_METRIC_HOSTMEM_CACHE_MISS
 *     GPM_METRIC_PEERMEM_CACHE_HIT = NVML_GPM_METRIC_PEERMEM_CACHE_HIT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_HOSTMEM_CACHE_HIT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_HOSTMEM_CACHE_HIT, __pyx_t_10) < (0)) __PYX_ERR(0, 680, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":681
 *     GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC
 *     GPM_METRIC_HOSTMEM_CACHE_HIT = NVML_GPM_METRIC_HOSTMEM_CACHE_HIT
 *     GPM_METRIC_HOSTMEM_CACHE_MISS = NVML_GPM_METRIC_HOSTMEM_CACHE_MISS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_PEERMEM_CACHE_HIT = NVML_GPM_METRIC_PEERMEM_CACHE_HIT
 *     GPM_METRIC_PEERMEM_CACHE_MISS = NVML_GPM_METRIC_PEERMEM_CACHE_MISS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_HOSTMEM_CACHE_MISS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_HOSTMEM_CACHE_MISS, __pyx_t_10) < (0)) __PYX_ERR(0, 681, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":682
 *     GPM_METRIC_HOSTMEM_CACHE_HIT = NVML_GPM_METRIC_HOSTMEM_CACHE_HIT
 *     GPM_METRIC_HOSTMEM_CACHE_MISS = NVML_GPM_METRIC_HOSTMEM_CACHE_MISS
 *     GPM_METRIC_PEERMEM_CACHE_HIT = NVML_GPM_METRIC_PEERMEM_CACHE_HIT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_PEERMEM_CACHE_MISS = NVML_GPM_METRIC_PEERMEM_CACHE_MISS
 *     GPM_METRIC_DRAM_CACHE_HIT = NVML_GPM_METRIC_DRAM_CACHE_HIT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_PEERMEM_CACHE_HIT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 682, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_PEERMEM_CACHE_HIT, __pyx_t_10) < (0)) __PYX_ERR(0, 682, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":683
 *     GPM_METRIC_HOSTMEM_CACHE_MISS = NVML_GPM_METRIC_HOSTMEM_CACHE_MISS
 *     GPM_METRIC_PEERMEM_CACHE_HIT = NVML_GPM_METRIC_PEERMEM_CACHE_HIT
 *     GPM_METRIC_PEERMEM_CACHE_MISS = NVML_GPM_METRIC_PEERMEM_CACHE_MISS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_DRAM_CACHE_HIT = NVML_GPM_METRIC_DRAM_CACHE_HIT
 *     GPM_METRIC_DRAM_CACHE_MISS = NVML_GPM_METRIC_DRAM_CACHE_MISS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_PEERMEM_CACHE_MISS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 683, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_PEERMEM_CACHE_MISS, __pyx_t_10) < (0)) __PYX_ERR(0, 683, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":684
 *     GPM_METRIC_PEERMEM_CACHE_HIT = NVML_GPM_METRIC_PEERMEM_CACHE_HIT
 *     GPM_METRIC_PEERMEM_CACHE_MISS = NVML_GPM_METRIC_PEERMEM_CACHE_MISS
 *     GPM_METRIC_DRAM_CACHE_HIT = NVML_GPM_METRIC_DRAM_CACHE_HIT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_DRAM_CACHE_MISS = NVML_GPM_METRIC_DRAM_CACHE_MISS
 *     GPM_METRIC_NVENC_0_UTIL = NVML_GPM_METRIC_NVENC_0_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_DRAM_CACHE_HIT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 684, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_DRAM_CACHE_HIT, __pyx_t_10) < (0)) __PYX_ERR(0, 684, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":685
 *     GPM_METRIC_PEERMEM_CACHE_MISS = NVML_GPM_METRIC_PEERMEM_CACHE_MISS
 *     GPM_METRIC_DRAM_CACHE_HIT = NVML_GPM_METRIC_DRAM_CACHE_HIT
 *     GPM_METRIC_DRAM_CACHE_MISS = NVML_GPM_METRIC_DRAM_CACHE_MISS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVENC_0_UTIL = NVML_GPM_METRIC_NVENC_0_UTIL
 *     GPM_METRIC_NVENC_1_UTIL = NVML_GPM_METRIC_NVENC_1_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_DRAM_CACHE_MISS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_DRAM_CACHE_MISS, __pyx_t_10) < (0)) __PYX_ERR(0, 685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":686
 *     GPM_METRIC_DRAM_CACHE_HIT = NVML_GPM_METRIC_DRAM_CACHE_HIT
 *     GPM_METRIC_DRAM_CACHE_MISS = NVML_GPM_METRIC_DRAM_CACHE_MISS
 *     GPM_METRIC_NVENC_0_UTIL = NVML_GPM_METRIC_NVENC_0_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVENC_1_UTIL = NVML_GPM_METRIC_NVENC_1_UTIL
 *     GPM_METRIC_NVENC_2_UTIL = NVML_GPM_METRIC_NVENC_2_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVENC_0_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVENC_0_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 686, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":687
 *     GPM_METRIC_DRAM_CACHE_MISS = NVML_GPM_METRIC_DRAM_CACHE_MISS
 *     GPM_METRIC_NVENC_0_UTIL = NVML_GPM_METRIC_NVENC_0_UTIL
 *     GPM_METRIC_NVENC_1_UTIL = NVML_GPM_METRIC_NVENC_1_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVENC_2_UTIL = NVML_GPM_METRIC_NVENC_2_UTIL
 *     GPM_METRIC_NVENC_3_UTIL = NVML_GPM_METRIC_NVENC_3_UTIL
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVENC_1_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVENC_1_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 687, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":688
 *     GPM_METRIC_NVENC_0_UTIL = NVML_GPM_METRIC_NVENC_0_UTIL
 *     GPM_METRIC_NVENC_1_UTIL = NVML_GPM_METRIC_NVENC_1_UTIL
 *     GPM_METRIC_NVENC_2_UTIL = NVML_GPM_METRIC_NVENC_2_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_NVENC_3_UTIL = NVML_GPM_METRIC_NVENC_3_UTIL
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVENC_2_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVENC_2_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 688, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":689
 *     GPM_METRIC_NVENC_1_UTIL = NVML_GPM_METRIC_NVENC_1_UTIL
 *     GPM_METRIC_NVENC_2_UTIL = NVML_GPM_METRIC_NVENC_2_UTIL
 *     GPM_METRIC_NVENC_3_UTIL = NVML_GPM_METRIC_NVENC_3_UTIL             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_NVENC_3_UTIL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_NVENC_3_UTIL, __pyx_t_10) < (0)) __PYX_ERR(0, 689, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":690
 *     GPM_METRIC_NVENC_2_UTIL = NVML_GPM_METRIC_NVENC_2_UTIL
 *     GPM_METRIC_NVENC_3_UTIL = NVML_GPM_METRIC_NVENC_3_UTIL
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR0_CTXSW_REQUESTS = NVML_GPM_METRIC_GR0_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR0_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":691
 *     GPM_METRIC_NVENC_3_UTIL = NVML_GPM_METRIC_NVENC_3_UTIL
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR0_CTXSW_REQUESTS = NVML_GPM_METRIC_GR0_CTXSW_REQUESTS
 *     GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 691, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR0_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 691, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":692
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR0_CTXSW_REQUESTS = NVML_GPM_METRIC_GR0_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR0_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 692, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR0_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 692, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":693
 *     GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR0_CTXSW_REQUESTS = NVML_GPM_METRIC_GR0_CTXSW_REQUESTS
 *     GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 693, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR0_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 693, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":694
 *     GPM_METRIC_GR0_CTXSW_REQUESTS = NVML_GPM_METRIC_GR0_CTXSW_REQUESTS
 *     GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 694, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 694, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":695
 *     GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR1_CTXSW_REQUESTS = NVML_GPM_METRIC_GR1_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR1_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 695, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":696
 *     GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR1_CTXSW_REQUESTS = NVML_GPM_METRIC_GR1_CTXSW_REQUESTS
 *     GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 696, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR1_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 696, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":697
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR1_CTXSW_REQUESTS = NVML_GPM_METRIC_GR1_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR1_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 697, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR1_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 697, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":698
 *     GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR1_CTXSW_REQUESTS = NVML_GPM_METRIC_GR1_CTXSW_REQUESTS
 *     GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR1_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 698, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":699
 *     GPM_METRIC_GR1_CTXSW_REQUESTS = NVML_GPM_METRIC_GR1_CTXSW_REQUESTS
 *     GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 699, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":700
 *     GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR2_CTXSW_REQUESTS = NVML_GPM_METRIC_GR2_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR2_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":701
 *     GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR2_CTXSW_REQUESTS = NVML_GPM_METRIC_GR2_CTXSW_REQUESTS
 *     GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR2_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 701, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":702
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR2_CTXSW_REQUESTS = NVML_GPM_METRIC_GR2_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR2_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 702, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR2_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 702, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":703
 *     GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR2_CTXSW_REQUESTS = NVML_GPM_METRIC_GR2_CTXSW_REQUESTS
 *     GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 703, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR2_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 703, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":704
 *     GPM_METRIC_GR2_CTXSW_REQUESTS = NVML_GPM_METRIC_GR2_CTXSW_REQUESTS
 *     GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 704, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 704, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":705
 *     GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR3_CTXSW_REQUESTS = NVML_GPM_METRIC_GR3_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 705, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR3_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 705, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":706
 *     GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR3_CTXSW_REQUESTS = NVML_GPM_METRIC_GR3_CTXSW_REQUESTS
 *     GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 706, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR3_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 706, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":707
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR3_CTXSW_REQUESTS = NVML_GPM_METRIC_GR3_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR3_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 707, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR3_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 707, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":708
 *     GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR3_CTXSW_REQUESTS = NVML_GPM_METRIC_GR3_CTXSW_REQUESTS
 *     GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR3_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 708, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":709
 *     GPM_METRIC_GR3_CTXSW_REQUESTS = NVML_GPM_METRIC_GR3_CTXSW_REQUESTS
 *     GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 709, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 709, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":710
 *     GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR4_CTXSW_REQUESTS = NVML_GPM_METRIC_GR4_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 710, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR4_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 710, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":711
 *     GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR4_CTXSW_REQUESTS = NVML_GPM_METRIC_GR4_CTXSW_REQUESTS
 *     GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR4_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 711, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":712
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR4_CTXSW_REQUESTS = NVML_GPM_METRIC_GR4_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR4_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 712, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR4_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 712, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":713
 *     GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR4_CTXSW_REQUESTS = NVML_GPM_METRIC_GR4_CTXSW_REQUESTS
 *     GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 713, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR4_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 713, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":714
 *     GPM_METRIC_GR4_CTXSW_REQUESTS = NVML_GPM_METRIC_GR4_CTXSW_REQUESTS
 *     GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 714, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":715
 *     GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR5_CTXSW_REQUESTS = NVML_GPM_METRIC_GR5_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 715, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR5_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 715, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":716
 *     GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR5_CTXSW_REQUESTS = NVML_GPM_METRIC_GR5_CTXSW_REQUESTS
 *     GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR5_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 716, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":717
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR5_CTXSW_REQUESTS = NVML_GPM_METRIC_GR5_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR5_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR5_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":718
 *     GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR5_CTXSW_REQUESTS = NVML_GPM_METRIC_GR5_CTXSW_REQUESTS
 *     GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 718, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR5_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 718, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":719
 *     GPM_METRIC_GR5_CTXSW_REQUESTS = NVML_GPM_METRIC_GR5_CTXSW_REQUESTS
 *     GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 719, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 719, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":720
 *     GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR6_CTXSW_REQUESTS = NVML_GPM_METRIC_GR6_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 720, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR6_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 720, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":721
 *     GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR6_CTXSW_REQUESTS = NVML_GPM_METRIC_GR6_CTXSW_REQUESTS
 *     GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR6_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 721, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":722
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR6_CTXSW_REQUESTS = NVML_GPM_METRIC_GR6_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR6_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 722, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR6_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 722, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":723
 *     GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR6_CTXSW_REQUESTS = NVML_GPM_METRIC_GR6_CTXSW_REQUESTS
 *     GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 723, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR6_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 723, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":724
 *     GPM_METRIC_GR6_CTXSW_REQUESTS = NVML_GPM_METRIC_GR6_CTXSW_REQUESTS
 *     GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 724, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 724, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":725
 *     GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR7_CTXSW_REQUESTS = NVML_GPM_METRIC_GR7_CTXSW_REQUESTS
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR7_CTXSW_CYCLES_ELAP, __pyx_t_10) < (0)) __PYX_ERR(0, 725, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":726
 *     GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR7_CTXSW_REQUESTS = NVML_GPM_METRIC_GR7_CTXSW_REQUESTS
 *     GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR7_CTXSW_CYCLES_ACTI, __pyx_t_10) < (0)) __PYX_ERR(0, 726, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":727
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR7_CTXSW_REQUESTS = NVML_GPM_METRIC_GR7_CTXSW_REQUESTS             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR7_CTXSW_REQUESTS); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 727, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR7_CTXSW_REQUESTS, __pyx_t_10) < (0)) __PYX_ERR(0, 727, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":728
 *     GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE
 *     GPM_METRIC_GR7_CTXSW_REQUESTS = NVML_GPM_METRIC_GR7_CTXSW_REQUESTS
 *     GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ             # <<<<<<<<<<<<<<
 *     GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_MAX = NVML_GPM_METRIC_MAX
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 728, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR7_CTXSW_CYCLES_PER, __pyx_t_10) < (0)) __PYX_ERR(0, 728, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":729
 *     GPM_METRIC_GR7_CTXSW_REQUESTS = NVML_GPM_METRIC_GR7_CTXSW_REQUESTS
 *     GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT             # <<<<<<<<<<<<<<
 *     GPM_METRIC_MAX = NVML_GPM_METRIC_MAX
 * 
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 729, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT, __pyx_t_10) < (0)) __PYX_ERR(0, 729, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":730
 *     GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ
 *     GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT
 *     GPM_METRIC_MAX = NVML_GPM_METRIC_MAX             # <<<<<<<<<<<<<<
 * 
 * class PowerProfileType(_IntEnum):
*/
  __pyx_t_10 = __Pyx_PyLong_From_nvmlGpmMetricId_t(NVML_GPM_METRIC_MAX); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_GPM_METRIC_MAX, __pyx_t_10) < (0)) __PYX_ERR(0, 730, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":548
 *     VGPU_COMPATIBILITY_LIMIT_OTHER = NVML_VGPU_COMPATIBILITY_LIMIT_OTHER
 * 
 * class GpmMetricId(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlGpmMetricId_t`."""
 *     GPM_METRIC_GRAPHICS_UTIL = NVML_GPM_METRIC_GRAPHICS_UTIL
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GpmMetricId, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpmMetricId, __pyx_t_10) < (0)) __PYX_ERR(0, 548, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":732
 *     GPM_METRIC_MAX = NVML_GPM_METRIC_MAX
 * 
 * class PowerProfileType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPowerProfileType_t`."""
 *     POWER_PROFILE_MAX_P = NVML_POWER_PROFILE_MAX_P
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_PowerProfileType, __pyx_mstate_global->__pyx_n_u_PowerProfileType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlPowerProfileType_t); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 732, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":734
 * class PowerProfileType(_IntEnum):
 *     """See `nvmlPowerProfileType_t`."""
 *     POWER_PROFILE_MAX_P = NVML_POWER_PROFILE_MAX_P             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_MAX_Q = NVML_POWER_PROFILE_MAX_Q
 *     POWER_PROFILE_COMPUTE = NVML_POWER_PROFILE_COMPUTE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_MAX_P); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_MAX_P, __pyx_t_11) < (0)) __PYX_ERR(0, 734, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":735
 *     """See `nvmlPowerProfileType_t`."""
 *     POWER_PROFILE_MAX_P = NVML_POWER_PROFILE_MAX_P
 *     POWER_PROFILE_MAX_Q = NVML_POWER_PROFILE_MAX_Q             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_COMPUTE = NVML_POWER_PROFILE_COMPUTE
 *     POWER_PROFILE_MEMORY_BOUND = NVML_POWER_PROFILE_MEMORY_BOUND
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_MAX_Q); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 735, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_MAX_Q, __pyx_t_11) < (0)) __PYX_ERR(0, 735, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":736
 *     POWER_PROFILE_MAX_P = NVML_POWER_PROFILE_MAX_P
 *     POWER_PROFILE_MAX_Q = NVML_POWER_PROFILE_MAX_Q
 *     POWER_PROFILE_COMPUTE = NVML_POWER_PROFILE_COMPUTE             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_MEMORY_BOUND = NVML_POWER_PROFILE_MEMORY_BOUND
 *     POWER_PROFILE_NETWORK = NVML_POWER_PROFILE_NETWORK
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_COMPUTE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_COMPUTE, __pyx_t_11) < (0)) __PYX_ERR(0, 736, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":737
 *     POWER_PROFILE_MAX_Q = NVML_POWER_PROFILE_MAX_Q
 *     POWER_PROFILE_COMPUTE = NVML_POWER_PROFILE_COMPUTE
 *     POWER_PROFILE_MEMORY_BOUND = NVML_POWER_PROFILE_MEMORY_BOUND             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_NETWORK = NVML_POWER_PROFILE_NETWORK
 *     POWER_PROFILE_BALANCED = NVML_POWER_PROFILE_BALANCED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_MEMORY_BOUND); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 737, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_MEMORY_BOUND, __pyx_t_11) < (0)) __PYX_ERR(0, 737, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":738
 *     POWER_PROFILE_COMPUTE = NVML_POWER_PROFILE_COMPUTE
 *     POWER_PROFILE_MEMORY_BOUND = NVML_POWER_PROFILE_MEMORY_BOUND
 *     POWER_PROFILE_NETWORK = NVML_POWER_PROFILE_NETWORK             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_BALANCED = NVML_POWER_PROFILE_BALANCED
 *     POWER_PROFILE_LLM_INFERENCE = NVML_POWER_PROFILE_LLM_INFERENCE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_NETWORK); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 738, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_NETWORK, __pyx_t_11) < (0)) __PYX_ERR(0, 738, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":739
 *     POWER_PROFILE_MEMORY_BOUND = NVML_POWER_PROFILE_MEMORY_BOUND
 *     POWER_PROFILE_NETWORK = NVML_POWER_PROFILE_NETWORK
 *     POWER_PROFILE_BALANCED = NVML_POWER_PROFILE_BALANCED             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_LLM_INFERENCE = NVML_POWER_PROFILE_LLM_INFERENCE
 *     POWER_PROFILE_LLM_TRAINING = NVML_POWER_PROFILE_LLM_TRAINING
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_BALANCED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 739, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_BALANCED, __pyx_t_11) < (0)) __PYX_ERR(0, 739, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":740
 *     POWER_PROFILE_NETWORK = NVML_POWER_PROFILE_NETWORK
 *     POWER_PROFILE_BALANCED = NVML_POWER_PROFILE_BALANCED
 *     POWER_PROFILE_LLM_INFERENCE = NVML_POWER_PROFILE_LLM_INFERENCE             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_LLM_TRAINING = NVML_POWER_PROFILE_LLM_TRAINING
 *     POWER_PROFILE_RBM = NVML_POWER_PROFILE_RBM
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_LLM_INFERENCE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 740, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_LLM_INFERENCE, __pyx_t_11) < (0)) __PYX_ERR(0, 740, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":741
 *     POWER_PROFILE_BALANCED = NVML_POWER_PROFILE_BALANCED
 *     POWER_PROFILE_LLM_INFERENCE = NVML_POWER_PROFILE_LLM_INFERENCE
 *     POWER_PROFILE_LLM_TRAINING = NVML_POWER_PROFILE_LLM_TRAINING             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_RBM = NVML_POWER_PROFILE_RBM
 *     POWER_PROFILE_DCPCIE = NVML_POWER_PROFILE_DCPCIE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_LLM_TRAINING); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 741, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_LLM_TRAINING, __pyx_t_11) < (0)) __PYX_ERR(0, 741, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":742
 *     POWER_PROFILE_LLM_INFERENCE = NVML_POWER_PROFILE_LLM_INFERENCE
 *     POWER_PROFILE_LLM_TRAINING = NVML_POWER_PROFILE_LLM_TRAINING
 *     POWER_PROFILE_RBM = NVML_POWER_PROFILE_RBM             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_DCPCIE = NVML_POWER_PROFILE_DCPCIE
 *     POWER_PROFILE_HMMA_SPARSE = NVML_POWER_PROFILE_HMMA_SPARSE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_RBM); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 742, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_RBM, __pyx_t_11) < (0)) __PYX_ERR(0, 742, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":743
 *     POWER_PROFILE_LLM_TRAINING = NVML_POWER_PROFILE_LLM_TRAINING
 *     POWER_PROFILE_RBM = NVML_POWER_PROFILE_RBM
 *     POWER_PROFILE_DCPCIE = NVML_POWER_PROFILE_DCPCIE             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_HMMA_SPARSE = NVML_POWER_PROFILE_HMMA_SPARSE
 *     POWER_PROFILE_HMMA_DENSE = NVML_POWER_PROFILE_HMMA_DENSE
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_DCPCIE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 743, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_DCPCIE, __pyx_t_11) < (0)) __PYX_ERR(0, 743, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":744
 *     POWER_PROFILE_RBM = NVML_POWER_PROFILE_RBM
 *     POWER_PROFILE_DCPCIE = NVML_POWER_PROFILE_DCPCIE
 *     POWER_PROFILE_HMMA_SPARSE = NVML_POWER_PROFILE_HMMA_SPARSE             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_HMMA_DENSE = NVML_POWER_PROFILE_HMMA_DENSE
 *     POWER_PROFILE_SYNC_BALANCED = NVML_POWER_PROFILE_SYNC_BALANCED
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_HMMA_SPARSE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 744, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_HMMA_SPARSE, __pyx_t_11) < (0)) __PYX_ERR(0, 744, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":745
 *     POWER_PROFILE_DCPCIE = NVML_POWER_PROFILE_DCPCIE
 *     POWER_PROFILE_HMMA_SPARSE = NVML_POWER_PROFILE_HMMA_SPARSE
 *     POWER_PROFILE_HMMA_DENSE = NVML_POWER_PROFILE_HMMA_DENSE             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_SYNC_BALANCED = NVML_POWER_PROFILE_SYNC_BALANCED
 *     POWER_PROFILE_HPC = NVML_POWER_PROFILE_HPC
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_HMMA_DENSE); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_HMMA_DENSE, __pyx_t_11) < (0)) __PYX_ERR(0, 745, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":746
 *     POWER_PROFILE_HMMA_SPARSE = NVML_POWER_PROFILE_HMMA_SPARSE
 *     POWER_PROFILE_HMMA_DENSE = NVML_POWER_PROFILE_HMMA_DENSE
 *     POWER_PROFILE_SYNC_BALANCED = NVML_POWER_PROFILE_SYNC_BALANCED             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_HPC = NVML_POWER_PROFILE_HPC
 *     POWER_PROFILE_MIG = NVML_POWER_PROFILE_MIG
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_SYNC_BALANCED); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 746, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_SYNC_BALANCED, __pyx_t_11) < (0)) __PYX_ERR(0, 746, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":747
 *     POWER_PROFILE_HMMA_DENSE = NVML_POWER_PROFILE_HMMA_DENSE
 *     POWER_PROFILE_SYNC_BALANCED = NVML_POWER_PROFILE_SYNC_BALANCED
 *     POWER_PROFILE_HPC = NVML_POWER_PROFILE_HPC             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_MIG = NVML_POWER_PROFILE_MIG
 *     POWER_PROFILE_MAX = NVML_POWER_PROFILE_MAX
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_HPC); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 747, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_HPC, __pyx_t_11) < (0)) __PYX_ERR(0, 747, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":748
 *     POWER_PROFILE_SYNC_BALANCED = NVML_POWER_PROFILE_SYNC_BALANCED
 *     POWER_PROFILE_HPC = NVML_POWER_PROFILE_HPC
 *     POWER_PROFILE_MIG = NVML_POWER_PROFILE_MIG             # <<<<<<<<<<<<<<
 *     POWER_PROFILE_MAX = NVML_POWER_PROFILE_MAX
 * 
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_MIG); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_MIG, __pyx_t_11) < (0)) __PYX_ERR(0, 748, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":749
 *     POWER_PROFILE_HPC = NVML_POWER_PROFILE_HPC
 *     POWER_PROFILE_MIG = NVML_POWER_PROFILE_MIG
 *     POWER_PROFILE_MAX = NVML_POWER_PROFILE_MAX             # <<<<<<<<<<<<<<
 * 
 * class DeviceAddressingModeType(_IntEnum):
*/
  __pyx_t_11 = __Pyx_PyLong_From_nvmlPowerProfileType_t(NVML_POWER_PROFILE_MAX); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_POWER_PROFILE_MAX, __pyx_t_11) < (0)) __PYX_ERR(0, 749, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":732
 *     GPM_METRIC_MAX = NVML_GPM_METRIC_MAX
 * 
 * class PowerProfileType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlPowerProfileType_t`."""
 *     POWER_PROFILE_MAX_P = NVML_POWER_PROFILE_MAX_P
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_PowerProfileType, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_PowerProfileType, __pyx_t_11) < (0)) __PYX_ERR(0, 732, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":751
 *     POWER_PROFILE_MAX = NVML_POWER_PROFILE_MAX
 * 
 * class DeviceAddressingModeType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDeviceAddressingModeType_t`."""
 *     DEVICE_ADDRESSING_MODE_NONE = NVML_DEVICE_ADDRESSING_MODE_NONE
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_DeviceAddressingModeType, __pyx_mstate_global->__pyx_n_u_DeviceAddressingModeType, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_kp_u_See_nvmlDeviceAddressingModeType); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_4 != __pyx_t_5) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_5) < 0))) __PYX_ERR(0, 751, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":753
 * class DeviceAddressingModeType(_IntEnum):
 *     """See `nvmlDeviceAddressingModeType_t`."""
 *     DEVICE_ADDRESSING_MODE_NONE = NVML_DEVICE_ADDRESSING_MODE_NONE             # <<<<<<<<<<<<<<
 *     DEVICE_ADDRESSING_MODE_HMM = NVML_DEVICE_ADDRESSING_MODE_HMM
 *     DEVICE_ADDRESSING_MODE_ATS = NVML_DEVICE_ADDRESSING_MODE_ATS
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceAddressingModeType_t(NVML_DEVICE_ADDRESSING_MODE_NONE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 753, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEVICE_ADDRESSING_MODE_NONE, __pyx_t_5) < (0)) __PYX_ERR(0, 753, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":754
 *     """See `nvmlDeviceAddressingModeType_t`."""
 *     DEVICE_ADDRESSING_MODE_NONE = NVML_DEVICE_ADDRESSING_MODE_NONE
 *     DEVICE_ADDRESSING_MODE_HMM = NVML_DEVICE_ADDRESSING_MODE_HMM             # <<<<<<<<<<<<<<
 *     DEVICE_ADDRESSING_MODE_ATS = NVML_DEVICE_ADDRESSING_MODE_ATS
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceAddressingModeType_t(NVML_DEVICE_ADDRESSING_MODE_HMM); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 754, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEVICE_ADDRESSING_MODE_HMM, __pyx_t_5) < (0)) __PYX_ERR(0, 754, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":755
 *     DEVICE_ADDRESSING_MODE_NONE = NVML_DEVICE_ADDRESSING_MODE_NONE
 *     DEVICE_ADDRESSING_MODE_HMM = NVML_DEVICE_ADDRESSING_MODE_HMM
 *     DEVICE_ADDRESSING_MODE_ATS = NVML_DEVICE_ADDRESSING_MODE_ATS             # <<<<<<<<<<<<<<
 * 
 * 
*/
  __pyx_t_5 = __Pyx_PyLong_From_nvmlDeviceAddressingModeType_t(NVML_DEVICE_ADDRESSING_MODE_ATS); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEVICE_ADDRESSING_MODE_ATS, __pyx_t_5) < (0)) __PYX_ERR(0, 755, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":751
 *     POWER_PROFILE_MAX = NVML_POWER_PROFILE_MAX
 * 
 * class DeviceAddressingModeType(_IntEnum):             # <<<<<<<<<<<<<<
 *     """See `nvmlDeviceAddressingModeType_t`."""
 *     DEVICE_ADDRESSING_MODE_NONE = NVML_DEVICE_ADDRESSING_MODE_NONE
*/
  __pyx_t_5 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DeviceAddressingModeType, __pyx_t_4, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DeviceAddressingModeType, __pyx_t_5) < (0)) __PYX_ERR(0, 751, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":758
 * 
 * 
 * class AffinityScope(_IntEnum):             # <<<<<<<<<<<<<<
 *     NODE = 0     # Scope of NUMA node for affinity queries
 *     SOCKET = 1   # Scope of processor socket for affinity queries
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_5 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_AffinityScope, __pyx_mstate_global->__pyx_n_u_AffinityScope, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (__pyx_t_4 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_5, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 758, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;

  /* "cuda/bindings/_nvml.pyx":759
 * 
 * class AffinityScope(_IntEnum):
 *     NODE = 0     # Scope of NUMA node for affinity queries             # <<<<<<<<<<<<<<
 *     SOCKET = 1   # Scope of processor socket for affinity queries
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NODE, __pyx_mstate_global->__pyx_int_0) < (0)) __PYX_ERR(0, 759, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":760
 * class AffinityScope(_IntEnum):
 *     NODE = 0     # Scope of NUMA node for affinity queries
 *     SOCKET = 1   # Scope of processor socket for affinity queries             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_SOCKET, __pyx_mstate_global->__pyx_int_1) < (0)) __PYX_ERR(0, 760, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":758
 * 
 * 
 * class AffinityScope(_IntEnum):             # <<<<<<<<<<<<<<
 *     NODE = 0     # Scope of NUMA node for affinity queries
 *     SOCKET = 1   # Scope of processor socket for affinity queries
*/
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_AffinityScope, __pyx_t_4, __pyx_t_5, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_AffinityScope, __pyx_t_10) < (0)) __PYX_ERR(0, 758, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":763
 * 
 * 
 * class FI(_IntEnum):             # <<<<<<<<<<<<<<
 *     DEV_ECC_CURRENT =          1   # Current ECC mode. 1=Active. 0=Inactive
 *     DEV_ECC_PENDING =          2   # Pending ECC mode. 1=Active. 0=Inactive
*/
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IntEnum_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_CalculateMetaclass(NULL, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_5, __pyx_t_4, __pyx_mstate_global->__pyx_n_u_FI, __pyx_mstate_global->__pyx_n_u_FI, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_4 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 763, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":764
 * 
 * class FI(_IntEnum):
 *     DEV_ECC_CURRENT =          1   # Current ECC mode. 1=Active. 0=Inactive             # <<<<<<<<<<<<<<
 *     DEV_ECC_PENDING =          2   # Pending ECC mode. 1=Active. 0=Inactive
 *     # ECC Count Totals
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_CURRENT, __pyx_mstate_global->__pyx_int_1) < (0)) __PYX_ERR(0, 764, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":765
 * class FI(_IntEnum):
 *     DEV_ECC_CURRENT =          1   # Current ECC mode. 1=Active. 0=Inactive
 *     DEV_ECC_PENDING =          2   # Pending ECC mode. 1=Active. 0=Inactive             # <<<<<<<<<<<<<<
 *     # ECC Count Totals
 *     DEV_ECC_SBE_VOL_TOTAL =    3   # Total single bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_PENDING, __pyx_mstate_global->__pyx_int_2) < (0)) __PYX_ERR(0, 765, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":767
 *     DEV_ECC_PENDING =          2   # Pending ECC mode. 1=Active. 0=Inactive
 *     # ECC Count Totals
 *     DEV_ECC_SBE_VOL_TOTAL =    3   # Total single bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_TOTAL =    4   # Total double bit volatile ECC errors
 *     DEV_ECC_SBE_AGG_TOTAL =    5   # Total single bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_VOL_TOTAL, __pyx_mstate_global->__pyx_int_3) < (0)) __PYX_ERR(0, 767, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":768
 *     # ECC Count Totals
 *     DEV_ECC_SBE_VOL_TOTAL =    3   # Total single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_TOTAL =    4   # Total double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_AGG_TOTAL =    5   # Total single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_TOTAL =    6   # Total double bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_TOTAL, __pyx_mstate_global->__pyx_int_4) < (0)) __PYX_ERR(0, 768, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":769
 *     DEV_ECC_SBE_VOL_TOTAL =    3   # Total single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_TOTAL =    4   # Total double bit volatile ECC errors
 *     DEV_ECC_SBE_AGG_TOTAL =    5   # Total single bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_TOTAL =    6   # Total double bit aggregate (persistent) ECC errors
 *     # Individual ECC locations
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_AGG_TOTAL, __pyx_mstate_global->__pyx_int_5) < (0)) __PYX_ERR(0, 769, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":770
 *     DEV_ECC_DBE_VOL_TOTAL =    4   # Total double bit volatile ECC errors
 *     DEV_ECC_SBE_AGG_TOTAL =    5   # Total single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_TOTAL =    6   # Total double bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     # Individual ECC locations
 *     DEV_ECC_SBE_VOL_L1 =       7   # L1 cache single bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_TOTAL, __pyx_mstate_global->__pyx_int_6) < (0)) __PYX_ERR(0, 770, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":772
 *     DEV_ECC_DBE_AGG_TOTAL =    6   # Total double bit aggregate (persistent) ECC errors
 *     # Individual ECC locations
 *     DEV_ECC_SBE_VOL_L1 =       7   # L1 cache single bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_L1 =       8   # L1 cache double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_L2 =       9   # L2 cache single bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_VOL_L1, __pyx_mstate_global->__pyx_int_7) < (0)) __PYX_ERR(0, 772, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":773
 *     # Individual ECC locations
 *     DEV_ECC_SBE_VOL_L1 =       7   # L1 cache single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_L1 =       8   # L1 cache double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_VOL_L2 =       9   # L2 cache single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_L2 =       10  # L2 cache double bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_L1, __pyx_mstate_global->__pyx_int_8) < (0)) __PYX_ERR(0, 773, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":774
 *     DEV_ECC_SBE_VOL_L1 =       7   # L1 cache single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_L1 =       8   # L1 cache double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_L2 =       9   # L2 cache single bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_L2 =       10  # L2 cache double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_DEV =      11  # Device memory single bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_VOL_L2, __pyx_mstate_global->__pyx_int_9) < (0)) __PYX_ERR(0, 774, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":775
 *     DEV_ECC_DBE_VOL_L1 =       8   # L1 cache double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_L2 =       9   # L2 cache single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_L2 =       10  # L2 cache double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_VOL_DEV =      11  # Device memory single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_DEV =      12  # Device memory double bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_L2, __pyx_mstate_global->__pyx_int_10) < (0)) __PYX_ERR(0, 775, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":776
 *     DEV_ECC_SBE_VOL_L2 =       9   # L2 cache single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_L2 =       10  # L2 cache double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_DEV =      11  # Device memory single bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_DEV =      12  # Device memory double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_REG =      13  # Register file single bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_VOL_DEV, __pyx_mstate_global->__pyx_int_11) < (0)) __PYX_ERR(0, 776, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":777
 *     DEV_ECC_DBE_VOL_L2 =       10  # L2 cache double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_DEV =      11  # Device memory single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_DEV =      12  # Device memory double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_VOL_REG =      13  # Register file single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_REG =      14  # Register file double bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_DEV, __pyx_mstate_global->__pyx_int_12) < (0)) __PYX_ERR(0, 777, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":778
 *     DEV_ECC_SBE_VOL_DEV =      11  # Device memory single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_DEV =      12  # Device memory double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_REG =      13  # Register file single bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_REG =      14  # Register file double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_TEX =      15  # Texture memory single bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_VOL_REG, __pyx_mstate_global->__pyx_int_13) < (0)) __PYX_ERR(0, 778, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":779
 *     DEV_ECC_DBE_VOL_DEV =      12  # Device memory double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_REG =      13  # Register file single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_REG =      14  # Register file double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_VOL_TEX =      15  # Texture memory single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_TEX =      16  # Texture memory double bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_REG, __pyx_mstate_global->__pyx_int_14) < (0)) __PYX_ERR(0, 779, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":780
 *     DEV_ECC_SBE_VOL_REG =      13  # Register file single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_REG =      14  # Register file double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_TEX =      15  # Texture memory single bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_TEX =      16  # Texture memory double bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_CBU =      17  # CBU double bit volatile ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_VOL_TEX, __pyx_mstate_global->__pyx_int_15) < (0)) __PYX_ERR(0, 780, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":781
 *     DEV_ECC_DBE_VOL_REG =      14  # Register file double bit volatile ECC errors
 *     DEV_ECC_SBE_VOL_TEX =      15  # Texture memory single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_TEX =      16  # Texture memory double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_VOL_CBU =      17  # CBU double bit volatile ECC errors
 *     DEV_ECC_SBE_AGG_L1 =       18  # L1 cache single bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_TEX, __pyx_mstate_global->__pyx_int_16) < (0)) __PYX_ERR(0, 781, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":782
 *     DEV_ECC_SBE_VOL_TEX =      15  # Texture memory single bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_TEX =      16  # Texture memory double bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_CBU =      17  # CBU double bit volatile ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_AGG_L1 =       18  # L1 cache single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_L1 =       19  # L1 cache double bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_VOL_CBU, __pyx_mstate_global->__pyx_int_17) < (0)) __PYX_ERR(0, 782, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":783
 *     DEV_ECC_DBE_VOL_TEX =      16  # Texture memory double bit volatile ECC errors
 *     DEV_ECC_DBE_VOL_CBU =      17  # CBU double bit volatile ECC errors
 *     DEV_ECC_SBE_AGG_L1 =       18  # L1 cache single bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_L1 =       19  # L1 cache double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_L2 =       20  # L2 cache single bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_AGG_L1, __pyx_mstate_global->__pyx_int_18) < (0)) __PYX_ERR(0, 783, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":784
 *     DEV_ECC_DBE_VOL_CBU =      17  # CBU double bit volatile ECC errors
 *     DEV_ECC_SBE_AGG_L1 =       18  # L1 cache single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_L1 =       19  # L1 cache double bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_AGG_L2 =       20  # L2 cache single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_L2 =       21  # L2 cache double bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_L1, __pyx_mstate_global->__pyx_int_19) < (0)) __PYX_ERR(0, 784, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":785
 *     DEV_ECC_SBE_AGG_L1 =       18  # L1 cache single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_L1 =       19  # L1 cache double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_L2 =       20  # L2 cache single bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_L2 =       21  # L2 cache double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_DEV =      22  # Device memory single bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_AGG_L2, __pyx_mstate_global->__pyx_int_20) < (0)) __PYX_ERR(0, 785, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":786
 *     DEV_ECC_DBE_AGG_L1 =       19  # L1 cache double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_L2 =       20  # L2 cache single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_L2 =       21  # L2 cache double bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_AGG_DEV =      22  # Device memory single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_DEV =      23  # Device memory double bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_L2, __pyx_mstate_global->__pyx_int_21) < (0)) __PYX_ERR(0, 786, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":787
 *     DEV_ECC_SBE_AGG_L2 =       20  # L2 cache single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_L2 =       21  # L2 cache double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_DEV =      22  # Device memory single bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_DEV =      23  # Device memory double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_REG =      24  # Register File single bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_AGG_DEV, __pyx_mstate_global->__pyx_int_22) < (0)) __PYX_ERR(0, 787, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":788
 *     DEV_ECC_DBE_AGG_L2 =       21  # L2 cache double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_DEV =      22  # Device memory single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_DEV =      23  # Device memory double bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_AGG_REG =      24  # Register File single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_REG =      25  # Register File double bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_DEV, __pyx_mstate_global->__pyx_int_23) < (0)) __PYX_ERR(0, 788, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":789
 *     DEV_ECC_SBE_AGG_DEV =      22  # Device memory single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_DEV =      23  # Device memory double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_REG =      24  # Register File single bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_REG =      25  # Register File double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_TEX =      26  # Texture memory single bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_AGG_REG, __pyx_mstate_global->__pyx_int_24) < (0)) __PYX_ERR(0, 789, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":790
 *     DEV_ECC_DBE_AGG_DEV =      23  # Device memory double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_REG =      24  # Register File single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_REG =      25  # Register File double bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_SBE_AGG_TEX =      26  # Texture memory single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_TEX =      27  # Texture memory double bit aggregate (persistent) ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_REG, __pyx_mstate_global->__pyx_int_25) < (0)) __PYX_ERR(0, 790, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":791
 *     DEV_ECC_SBE_AGG_REG =      24  # Register File single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_REG =      25  # Register File double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_TEX =      26  # Texture memory single bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_TEX =      27  # Texture memory double bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_CBU =      28  # CBU double bit aggregate ECC errors
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_SBE_AGG_TEX, __pyx_mstate_global->__pyx_int_26) < (0)) __PYX_ERR(0, 791, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":792
 *     DEV_ECC_DBE_AGG_REG =      25  # Register File double bit aggregate (persistent) ECC errors
 *     DEV_ECC_SBE_AGG_TEX =      26  # Texture memory single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_TEX =      27  # Texture memory double bit aggregate (persistent) ECC errors             # <<<<<<<<<<<<<<
 *     DEV_ECC_DBE_AGG_CBU =      28  # CBU double bit aggregate ECC errors
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_TEX, __pyx_mstate_global->__pyx_int_27) < (0)) __PYX_ERR(0, 792, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":793
 *     DEV_ECC_SBE_AGG_TEX =      26  # Texture memory single bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_TEX =      27  # Texture memory double bit aggregate (persistent) ECC errors
 *     DEV_ECC_DBE_AGG_CBU =      28  # CBU double bit aggregate ECC errors             # <<<<<<<<<<<<<<
 * 
 *     # Page Retirement
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ECC_DBE_AGG_CBU, __pyx_mstate_global->__pyx_int_28) < (0)) __PYX_ERR(0, 793, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":796
 * 
 *     # Page Retirement
 *     DEV_RETIRED_SBE =          29  # Number of retired pages because of single bit errors             # <<<<<<<<<<<<<<
 *     DEV_RETIRED_DBE =          30  # Number of retired pages because of double bit errors
 *     DEV_RETIRED_PENDING =      31  # If any pages are pending retirement. 1=yes. 0=no.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_RETIRED_SBE, __pyx_mstate_global->__pyx_int_29) < (0)) __PYX_ERR(0, 796, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":797
 *     # Page Retirement
 *     DEV_RETIRED_SBE =          29  # Number of retired pages because of single bit errors
 *     DEV_RETIRED_DBE =          30  # Number of retired pages because of double bit errors             # <<<<<<<<<<<<<<
 *     DEV_RETIRED_PENDING =      31  # If any pages are pending retirement. 1=yes. 0=no.
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_RETIRED_DBE, __pyx_mstate_global->__pyx_int_30) < (0)) __PYX_ERR(0, 797, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":798
 *     DEV_RETIRED_SBE =          29  # Number of retired pages because of single bit errors
 *     DEV_RETIRED_DBE =          30  # Number of retired pages because of double bit errors
 *     DEV_RETIRED_PENDING =      31  # If any pages are pending retirement. 1=yes. 0=no.             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_RETIRED_PENDING, __pyx_mstate_global->__pyx_int_31) < (0)) __PYX_ERR(0, 798, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":802
 * 
 *     # NVLink Flit Error Counters
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 =   32 # NVLink flow control CRC  Error Counter for Lane 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 =   33 # NVLink flow control CRC  Error Counter for Lane 1
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 =   34 # NVLink flow control CRC  Error Counter for Lane 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT, __pyx_mstate_global->__pyx_int_32) < (0)) __PYX_ERR(0, 802, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":803
 *     # NVLink Flit Error Counters
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 =   32 # NVLink flow control CRC  Error Counter for Lane 0
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 =   33 # NVLink flow control CRC  Error Counter for Lane 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 =   34 # NVLink flow control CRC  Error Counter for Lane 2
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 =   35 # NVLink flow control CRC  Error Counter for Lane 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_2, __pyx_mstate_global->__pyx_int_33) < (0)) __PYX_ERR(0, 803, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":804
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 =   32 # NVLink flow control CRC  Error Counter for Lane 0
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 =   33 # NVLink flow control CRC  Error Counter for Lane 1
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 =   34 # NVLink flow control CRC  Error Counter for Lane 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 =   35 # NVLink flow control CRC  Error Counter for Lane 3
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 =   36 # NVLink flow control CRC  Error Counter for Lane 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_3, __pyx_mstate_global->__pyx_int_34) < (0)) __PYX_ERR(0, 804, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":805
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 =   33 # NVLink flow control CRC  Error Counter for Lane 1
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 =   34 # NVLink flow control CRC  Error Counter for Lane 2
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 =   35 # NVLink flow control CRC  Error Counter for Lane 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 =   36 # NVLink flow control CRC  Error Counter for Lane 4
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 =   37 # NVLink flow control CRC  Error Counter for Lane 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_4, __pyx_mstate_global->__pyx_int_35) < (0)) __PYX_ERR(0, 805, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":806
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 =   34 # NVLink flow control CRC  Error Counter for Lane 2
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 =   35 # NVLink flow control CRC  Error Counter for Lane 3
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 =   36 # NVLink flow control CRC  Error Counter for Lane 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 =   37 # NVLink flow control CRC  Error Counter for Lane 5
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL =38 # NVLink flow control CRC  Error Counter total for all Lanes
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_5, __pyx_mstate_global->__pyx_int_36) < (0)) __PYX_ERR(0, 806, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":807
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 =   35 # NVLink flow control CRC  Error Counter for Lane 3
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 =   36 # NVLink flow control CRC  Error Counter for Lane 4
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 =   37 # NVLink flow control CRC  Error Counter for Lane 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL =38 # NVLink flow control CRC  Error Counter total for all Lanes
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_6, __pyx_mstate_global->__pyx_int_37) < (0)) __PYX_ERR(0, 807, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":808
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 =   36 # NVLink flow control CRC  Error Counter for Lane 4
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 =   37 # NVLink flow control CRC  Error Counter for Lane 5
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL =38 # NVLink flow control CRC  Error Counter total for all Lanes             # <<<<<<<<<<<<<<
 * 
 *     # NVLink CRC Data Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_7, __pyx_mstate_global->__pyx_int_38) < (0)) __PYX_ERR(0, 808, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":811
 * 
 *     # NVLink CRC Data Error Counters
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 =   39 # NVLink data CRC Error Counter for Lane 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 =   40 # NVLink data CRC Error Counter for Lane 1
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 =   41 # NVLink data CRC Error Counter for Lane 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT, __pyx_mstate_global->__pyx_int_39) < (0)) __PYX_ERR(0, 811, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":812
 *     # NVLink CRC Data Error Counters
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 =   39 # NVLink data CRC Error Counter for Lane 0
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 =   40 # NVLink data CRC Error Counter for Lane 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 =   41 # NVLink data CRC Error Counter for Lane 2
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 =   42 # NVLink data CRC Error Counter for Lane 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_2, __pyx_mstate_global->__pyx_int_40) < (0)) __PYX_ERR(0, 812, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":813
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 =   39 # NVLink data CRC Error Counter for Lane 0
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 =   40 # NVLink data CRC Error Counter for Lane 1
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 =   41 # NVLink data CRC Error Counter for Lane 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 =   42 # NVLink data CRC Error Counter for Lane 3
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 =   43 # NVLink data CRC Error Counter for Lane 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_3, __pyx_mstate_global->__pyx_int_41) < (0)) __PYX_ERR(0, 813, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":814
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 =   40 # NVLink data CRC Error Counter for Lane 1
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 =   41 # NVLink data CRC Error Counter for Lane 2
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 =   42 # NVLink data CRC Error Counter for Lane 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 =   43 # NVLink data CRC Error Counter for Lane 4
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 =   44 # NVLink data CRC Error Counter for Lane 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_4, __pyx_mstate_global->__pyx_int_42) < (0)) __PYX_ERR(0, 814, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":815
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 =   41 # NVLink data CRC Error Counter for Lane 2
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 =   42 # NVLink data CRC Error Counter for Lane 3
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 =   43 # NVLink data CRC Error Counter for Lane 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 =   44 # NVLink data CRC Error Counter for Lane 5
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL =45 # NvLink data CRC Error Counter total for all Lanes
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_5, __pyx_mstate_global->__pyx_int_43) < (0)) __PYX_ERR(0, 815, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":816
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 =   42 # NVLink data CRC Error Counter for Lane 3
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 =   43 # NVLink data CRC Error Counter for Lane 4
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 =   44 # NVLink data CRC Error Counter for Lane 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL =45 # NvLink data CRC Error Counter total for all Lanes
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_6, __pyx_mstate_global->__pyx_int_44) < (0)) __PYX_ERR(0, 816, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":817
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 =   43 # NVLink data CRC Error Counter for Lane 4
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 =   44 # NVLink data CRC Error Counter for Lane 5
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL =45 # NvLink data CRC Error Counter total for all Lanes             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_7, __pyx_mstate_global->__pyx_int_45) < (0)) __PYX_ERR(0, 817, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":821
 * 
 *     # NVLink Replay Error Counters
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L0 =     46 # NVLink Replay Error Counter for Lane 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L1 =     47 # NVLink Replay Error Counter for Lane 1
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L2 =     48 # NVLink Replay Error Counter for Lane 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L0, __pyx_mstate_global->__pyx_int_46) < (0)) __PYX_ERR(0, 821, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":822
 *     # NVLink Replay Error Counters
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L0 =     46 # NVLink Replay Error Counter for Lane 0
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L1 =     47 # NVLink Replay Error Counter for Lane 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L2 =     48 # NVLink Replay Error Counter for Lane 2
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L3 =     49 # NVLink Replay Error Counter for Lane 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L1, __pyx_mstate_global->__pyx_int_47) < (0)) __PYX_ERR(0, 822, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":823
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L0 =     46 # NVLink Replay Error Counter for Lane 0
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L1 =     47 # NVLink Replay Error Counter for Lane 1
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L2 =     48 # NVLink Replay Error Counter for Lane 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L3 =     49 # NVLink Replay Error Counter for Lane 3
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L4 =     50 # NVLink Replay Error Counter for Lane 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L2, __pyx_mstate_global->__pyx_int_48) < (0)) __PYX_ERR(0, 823, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":824
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L1 =     47 # NVLink Replay Error Counter for Lane 1
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L2 =     48 # NVLink Replay Error Counter for Lane 2
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L3 =     49 # NVLink Replay Error Counter for Lane 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L4 =     50 # NVLink Replay Error Counter for Lane 4
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L5 =     51 # NVLink Replay Error Counter for Lane 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L3, __pyx_mstate_global->__pyx_int_49) < (0)) __PYX_ERR(0, 824, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":825
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L2 =     48 # NVLink Replay Error Counter for Lane 2
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L3 =     49 # NVLink Replay Error Counter for Lane 3
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L4 =     50 # NVLink Replay Error Counter for Lane 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L5 =     51 # NVLink Replay Error Counter for Lane 5
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL =  52 # NVLink Replay Error Counter total for all Lanes
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L4, __pyx_mstate_global->__pyx_int_50) < (0)) __PYX_ERR(0, 825, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":826
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L3 =     49 # NVLink Replay Error Counter for Lane 3
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L4 =     50 # NVLink Replay Error Counter for Lane 4
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L5 =     51 # NVLink Replay Error Counter for Lane 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL =  52 # NVLink Replay Error Counter total for all Lanes
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L5, __pyx_mstate_global->__pyx_int_51) < (0)) __PYX_ERR(0, 826, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":827
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L4 =     50 # NVLink Replay Error Counter for Lane 4
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L5 =     51 # NVLink Replay Error Counter for Lane 5
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL =  52 # NVLink Replay Error Counter total for all Lanes             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Recovery Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_TO, __pyx_mstate_global->__pyx_int_52) < (0)) __PYX_ERR(0, 827, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":830
 * 
 *     # NVLink Recovery Error Counters
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 =   53 # NVLink Recovery Error Counter for Lane 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 =   54 # NVLink Recovery Error Counter for Lane 1
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 =   55 # NVLink Recovery Error Counter for Lane 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT, __pyx_mstate_global->__pyx_int_53) < (0)) __PYX_ERR(0, 830, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":831
 *     # NVLink Recovery Error Counters
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 =   53 # NVLink Recovery Error Counter for Lane 0
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 =   54 # NVLink Recovery Error Counter for Lane 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 =   55 # NVLink Recovery Error Counter for Lane 2
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 =   56 # NVLink Recovery Error Counter for Lane 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_2, __pyx_mstate_global->__pyx_int_54) < (0)) __PYX_ERR(0, 831, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":832
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 =   53 # NVLink Recovery Error Counter for Lane 0
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 =   54 # NVLink Recovery Error Counter for Lane 1
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 =   55 # NVLink Recovery Error Counter for Lane 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 =   56 # NVLink Recovery Error Counter for Lane 3
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 =   57 # NVLink Recovery Error Counter for Lane 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_3, __pyx_mstate_global->__pyx_int_55) < (0)) __PYX_ERR(0, 832, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":833
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 =   54 # NVLink Recovery Error Counter for Lane 1
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 =   55 # NVLink Recovery Error Counter for Lane 2
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 =   56 # NVLink Recovery Error Counter for Lane 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 =   57 # NVLink Recovery Error Counter for Lane 4
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 =   58 # NVLink Recovery Error Counter for Lane 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_4, __pyx_mstate_global->__pyx_int_56) < (0)) __PYX_ERR(0, 833, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":834
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 =   55 # NVLink Recovery Error Counter for Lane 2
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 =   56 # NVLink Recovery Error Counter for Lane 3
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 =   57 # NVLink Recovery Error Counter for Lane 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 =   58 # NVLink Recovery Error Counter for Lane 5
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL =59 # NVLink Recovery Error Counter total for all Lanes
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_5, __pyx_mstate_global->__pyx_int_57) < (0)) __PYX_ERR(0, 834, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":835
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 =   56 # NVLink Recovery Error Counter for Lane 3
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 =   57 # NVLink Recovery Error Counter for Lane 4
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 =   58 # NVLink Recovery Error Counter for Lane 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL =59 # NVLink Recovery Error Counter total for all Lanes
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_6, __pyx_mstate_global->__pyx_int_58) < (0)) __PYX_ERR(0, 835, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":836
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 =   57 # NVLink Recovery Error Counter for Lane 4
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 =   58 # NVLink Recovery Error Counter for Lane 5
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL =59 # NVLink Recovery Error Counter total for all Lanes             # <<<<<<<<<<<<<<
 * 
 *     # NvLink Bandwidth Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_7, __pyx_mstate_global->__pyx_int_59) < (0)) __PYX_ERR(0, 836, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":839
 * 
 *     # NvLink Bandwidth Counters
 *     DEV_NVLINK_BANDWIDTH_C0_L0 =    60 # NVLink Bandwidth Counter for Counter Set 0, Lane 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L1 =    61 # NVLink Bandwidth Counter for Counter Set 0, Lane 1
 *     DEV_NVLINK_BANDWIDTH_C0_L2 =    62 # NVLink Bandwidth Counter for Counter Set 0, Lane 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L0, __pyx_mstate_global->__pyx_int_60) < (0)) __PYX_ERR(0, 839, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":840
 *     # NvLink Bandwidth Counters
 *     DEV_NVLINK_BANDWIDTH_C0_L0 =    60 # NVLink Bandwidth Counter for Counter Set 0, Lane 0
 *     DEV_NVLINK_BANDWIDTH_C0_L1 =    61 # NVLink Bandwidth Counter for Counter Set 0, Lane 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L2 =    62 # NVLink Bandwidth Counter for Counter Set 0, Lane 2
 *     DEV_NVLINK_BANDWIDTH_C0_L3 =    63 # NVLink Bandwidth Counter for Counter Set 0, Lane 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L1, __pyx_mstate_global->__pyx_int_61) < (0)) __PYX_ERR(0, 840, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":841
 *     DEV_NVLINK_BANDWIDTH_C0_L0 =    60 # NVLink Bandwidth Counter for Counter Set 0, Lane 0
 *     DEV_NVLINK_BANDWIDTH_C0_L1 =    61 # NVLink Bandwidth Counter for Counter Set 0, Lane 1
 *     DEV_NVLINK_BANDWIDTH_C0_L2 =    62 # NVLink Bandwidth Counter for Counter Set 0, Lane 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L3 =    63 # NVLink Bandwidth Counter for Counter Set 0, Lane 3
 *     DEV_NVLINK_BANDWIDTH_C0_L4 =    64 # NVLink Bandwidth Counter for Counter Set 0, Lane 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L2, __pyx_mstate_global->__pyx_int_62) < (0)) __PYX_ERR(0, 841, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":842
 *     DEV_NVLINK_BANDWIDTH_C0_L1 =    61 # NVLink Bandwidth Counter for Counter Set 0, Lane 1
 *     DEV_NVLINK_BANDWIDTH_C0_L2 =    62 # NVLink Bandwidth Counter for Counter Set 0, Lane 2
 *     DEV_NVLINK_BANDWIDTH_C0_L3 =    63 # NVLink Bandwidth Counter for Counter Set 0, Lane 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L4 =    64 # NVLink Bandwidth Counter for Counter Set 0, Lane 4
 *     DEV_NVLINK_BANDWIDTH_C0_L5 =    65 # NVLink Bandwidth Counter for Counter Set 0, Lane 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L3, __pyx_mstate_global->__pyx_int_63) < (0)) __PYX_ERR(0, 842, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":843
 *     DEV_NVLINK_BANDWIDTH_C0_L2 =    62 # NVLink Bandwidth Counter for Counter Set 0, Lane 2
 *     DEV_NVLINK_BANDWIDTH_C0_L3 =    63 # NVLink Bandwidth Counter for Counter Set 0, Lane 3
 *     DEV_NVLINK_BANDWIDTH_C0_L4 =    64 # NVLink Bandwidth Counter for Counter Set 0, Lane 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L5 =    65 # NVLink Bandwidth Counter for Counter Set 0, Lane 5
 *     DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 # NVLink Bandwidth Counter Total for Counter Set 0, All Lanes
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L4, __pyx_mstate_global->__pyx_int_64) < (0)) __PYX_ERR(0, 843, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":844
 *     DEV_NVLINK_BANDWIDTH_C0_L3 =    63 # NVLink Bandwidth Counter for Counter Set 0, Lane 3
 *     DEV_NVLINK_BANDWIDTH_C0_L4 =    64 # NVLink Bandwidth Counter for Counter Set 0, Lane 4
 *     DEV_NVLINK_BANDWIDTH_C0_L5 =    65 # NVLink Bandwidth Counter for Counter Set 0, Lane 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 # NVLink Bandwidth Counter Total for Counter Set 0, All Lanes
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L5, __pyx_mstate_global->__pyx_int_65) < (0)) __PYX_ERR(0, 844, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":845
 *     DEV_NVLINK_BANDWIDTH_C0_L4 =    64 # NVLink Bandwidth Counter for Counter Set 0, Lane 4
 *     DEV_NVLINK_BANDWIDTH_C0_L5 =    65 # NVLink Bandwidth Counter for Counter Set 0, Lane 5
 *     DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 # NVLink Bandwidth Counter Total for Counter Set 0, All Lanes             # <<<<<<<<<<<<<<
 * 
 *     # NvLink Bandwidth Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_TOTAL, __pyx_mstate_global->__pyx_int_66) < (0)) __PYX_ERR(0, 845, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":848
 * 
 *     # NvLink Bandwidth Counters
 *     DEV_NVLINK_BANDWIDTH_C1_L0 =    67 # NVLink Bandwidth Counter for Counter Set 1, Lane 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L1 =    68 # NVLink Bandwidth Counter for Counter Set 1, Lane 1
 *     DEV_NVLINK_BANDWIDTH_C1_L2 =    69 # NVLink Bandwidth Counter for Counter Set 1, Lane 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L0, __pyx_mstate_global->__pyx_int_67) < (0)) __PYX_ERR(0, 848, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":849
 *     # NvLink Bandwidth Counters
 *     DEV_NVLINK_BANDWIDTH_C1_L0 =    67 # NVLink Bandwidth Counter for Counter Set 1, Lane 0
 *     DEV_NVLINK_BANDWIDTH_C1_L1 =    68 # NVLink Bandwidth Counter for Counter Set 1, Lane 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L2 =    69 # NVLink Bandwidth Counter for Counter Set 1, Lane 2
 *     DEV_NVLINK_BANDWIDTH_C1_L3 =    70 # NVLink Bandwidth Counter for Counter Set 1, Lane 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L1, __pyx_mstate_global->__pyx_int_68) < (0)) __PYX_ERR(0, 849, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":850
 *     DEV_NVLINK_BANDWIDTH_C1_L0 =    67 # NVLink Bandwidth Counter for Counter Set 1, Lane 0
 *     DEV_NVLINK_BANDWIDTH_C1_L1 =    68 # NVLink Bandwidth Counter for Counter Set 1, Lane 1
 *     DEV_NVLINK_BANDWIDTH_C1_L2 =    69 # NVLink Bandwidth Counter for Counter Set 1, Lane 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L3 =    70 # NVLink Bandwidth Counter for Counter Set 1, Lane 3
 *     DEV_NVLINK_BANDWIDTH_C1_L4 =    71 # NVLink Bandwidth Counter for Counter Set 1, Lane 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L2, __pyx_mstate_global->__pyx_int_69) < (0)) __PYX_ERR(0, 850, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":851
 *     DEV_NVLINK_BANDWIDTH_C1_L1 =    68 # NVLink Bandwidth Counter for Counter Set 1, Lane 1
 *     DEV_NVLINK_BANDWIDTH_C1_L2 =    69 # NVLink Bandwidth Counter for Counter Set 1, Lane 2
 *     DEV_NVLINK_BANDWIDTH_C1_L3 =    70 # NVLink Bandwidth Counter for Counter Set 1, Lane 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L4 =    71 # NVLink Bandwidth Counter for Counter Set 1, Lane 4
 *     DEV_NVLINK_BANDWIDTH_C1_L5 =    72 # NVLink Bandwidth Counter for Counter Set 1, Lane 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L3, __pyx_mstate_global->__pyx_int_70) < (0)) __PYX_ERR(0, 851, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":852
 *     DEV_NVLINK_BANDWIDTH_C1_L2 =    69 # NVLink Bandwidth Counter for Counter Set 1, Lane 2
 *     DEV_NVLINK_BANDWIDTH_C1_L3 =    70 # NVLink Bandwidth Counter for Counter Set 1, Lane 3
 *     DEV_NVLINK_BANDWIDTH_C1_L4 =    71 # NVLink Bandwidth Counter for Counter Set 1, Lane 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L5 =    72 # NVLink Bandwidth Counter for Counter Set 1, Lane 5
 *     DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 # NVLink Bandwidth Counter Total for Counter Set 1, All Lanes
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L4, __pyx_mstate_global->__pyx_int_71) < (0)) __PYX_ERR(0, 852, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":853
 *     DEV_NVLINK_BANDWIDTH_C1_L3 =    70 # NVLink Bandwidth Counter for Counter Set 1, Lane 3
 *     DEV_NVLINK_BANDWIDTH_C1_L4 =    71 # NVLink Bandwidth Counter for Counter Set 1, Lane 4
 *     DEV_NVLINK_BANDWIDTH_C1_L5 =    72 # NVLink Bandwidth Counter for Counter Set 1, Lane 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 # NVLink Bandwidth Counter Total for Counter Set 1, All Lanes
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L5, __pyx_mstate_global->__pyx_int_72) < (0)) __PYX_ERR(0, 853, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":854
 *     DEV_NVLINK_BANDWIDTH_C1_L4 =    71 # NVLink Bandwidth Counter for Counter Set 1, Lane 4
 *     DEV_NVLINK_BANDWIDTH_C1_L5 =    72 # NVLink Bandwidth Counter for Counter Set 1, Lane 5
 *     DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 # NVLink Bandwidth Counter Total for Counter Set 1, All Lanes             # <<<<<<<<<<<<<<
 * 
 *     # NVML Perf Policy Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_TOTAL, __pyx_mstate_global->__pyx_int_73) < (0)) __PYX_ERR(0, 854, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":857
 * 
 *     # NVML Perf Policy Counters
 *     DEV_PERF_POLICY_POWER =             74   # Perf Policy Counter for Power Policy             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_THERMAL =           75   # Perf Policy Counter for Thermal Policy
 *     DEV_PERF_POLICY_SYNC_BOOST =        76   # Perf Policy Counter for Sync boost Policy
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_POWER, __pyx_mstate_global->__pyx_int_74) < (0)) __PYX_ERR(0, 857, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":858
 *     # NVML Perf Policy Counters
 *     DEV_PERF_POLICY_POWER =             74   # Perf Policy Counter for Power Policy
 *     DEV_PERF_POLICY_THERMAL =           75   # Perf Policy Counter for Thermal Policy             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_SYNC_BOOST =        76   # Perf Policy Counter for Sync boost Policy
 *     DEV_PERF_POLICY_BOARD_LIMIT =       77   # Perf Policy Counter for Board Limit
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_THERMAL, __pyx_mstate_global->__pyx_int_75) < (0)) __PYX_ERR(0, 858, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":859
 *     DEV_PERF_POLICY_POWER =             74   # Perf Policy Counter for Power Policy
 *     DEV_PERF_POLICY_THERMAL =           75   # Perf Policy Counter for Thermal Policy
 *     DEV_PERF_POLICY_SYNC_BOOST =        76   # Perf Policy Counter for Sync boost Policy             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_BOARD_LIMIT =       77   # Perf Policy Counter for Board Limit
 *     DEV_PERF_POLICY_LOW_UTILIZATION =   78   # Perf Policy Counter for Low GPU Utilization Policy
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_SYNC_BOOST, __pyx_mstate_global->__pyx_int_76) < (0)) __PYX_ERR(0, 859, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":860
 *     DEV_PERF_POLICY_THERMAL =           75   # Perf Policy Counter for Thermal Policy
 *     DEV_PERF_POLICY_SYNC_BOOST =        76   # Perf Policy Counter for Sync boost Policy
 *     DEV_PERF_POLICY_BOARD_LIMIT =       77   # Perf Policy Counter for Board Limit             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_LOW_UTILIZATION =   78   # Perf Policy Counter for Low GPU Utilization Policy
 *     DEV_PERF_POLICY_RELIABILITY =       79   # Perf Policy Counter for Reliability Policy
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_BOARD_LIMIT, __pyx_mstate_global->__pyx_int_77) < (0)) __PYX_ERR(0, 860, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":861
 *     DEV_PERF_POLICY_SYNC_BOOST =        76   # Perf Policy Counter for Sync boost Policy
 *     DEV_PERF_POLICY_BOARD_LIMIT =       77   # Perf Policy Counter for Board Limit
 *     DEV_PERF_POLICY_LOW_UTILIZATION =   78   # Perf Policy Counter for Low GPU Utilization Policy             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_RELIABILITY =       79   # Perf Policy Counter for Reliability Policy
 *     DEV_PERF_POLICY_TOTAL_APP_CLOCKS =  80   # Perf Policy Counter for Total App Clock Policy
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_LOW_UTILIZATION, __pyx_mstate_global->__pyx_int_78) < (0)) __PYX_ERR(0, 861, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":862
 *     DEV_PERF_POLICY_BOARD_LIMIT =       77   # Perf Policy Counter for Board Limit
 *     DEV_PERF_POLICY_LOW_UTILIZATION =   78   # Perf Policy Counter for Low GPU Utilization Policy
 *     DEV_PERF_POLICY_RELIABILITY =       79   # Perf Policy Counter for Reliability Policy             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_TOTAL_APP_CLOCKS =  80   # Perf Policy Counter for Total App Clock Policy
 *     DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81   # Perf Policy Counter for Total Base Clocks Policy
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_RELIABILITY, __pyx_mstate_global->__pyx_int_79) < (0)) __PYX_ERR(0, 862, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":863
 *     DEV_PERF_POLICY_LOW_UTILIZATION =   78   # Perf Policy Counter for Low GPU Utilization Policy
 *     DEV_PERF_POLICY_RELIABILITY =       79   # Perf Policy Counter for Reliability Policy
 *     DEV_PERF_POLICY_TOTAL_APP_CLOCKS =  80   # Perf Policy Counter for Total App Clock Policy             # <<<<<<<<<<<<<<
 *     DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81   # Perf Policy Counter for Total Base Clocks Policy
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_TOTAL_APP_CLOCKS, __pyx_mstate_global->__pyx_int_80) < (0)) __PYX_ERR(0, 863, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":864
 *     DEV_PERF_POLICY_RELIABILITY =       79   # Perf Policy Counter for Reliability Policy
 *     DEV_PERF_POLICY_TOTAL_APP_CLOCKS =  80   # Perf Policy Counter for Total App Clock Policy
 *     DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81   # Perf Policy Counter for Total Base Clocks Policy             # <<<<<<<<<<<<<<
 * 
 *     # Memory temperatures
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_TOTAL_BASE_CLOCK, __pyx_mstate_global->__pyx_int_81) < (0)) __PYX_ERR(0, 864, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":867
 * 
 *     # Memory temperatures
 *     DEV_MEMORY_TEMP = 82 # Memory temperature for the device             # <<<<<<<<<<<<<<
 * 
 *     # Energy Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_MEMORY_TEMP, __pyx_mstate_global->__pyx_int_82) < (0)) __PYX_ERR(0, 867, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":870
 * 
 *     # Energy Counter
 *     DEV_TOTAL_ENERGY_CONSUMPTION =83 # Total energy consumption for the GPU in mJ since the driver was last reloaded             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Speed
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_TOTAL_ENERGY_CONSUMPTION, __pyx_mstate_global->__pyx_int_83) < (0)) __PYX_ERR(0, 870, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":873
 * 
 *     # NVLink Speed
 *     DEV_NVLINK_SPEED_MBPS_L0 =    84  # NVLink Speed in MBps for Link 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L1 =    85  # NVLink Speed in MBps for Link 1
 *     DEV_NVLINK_SPEED_MBPS_L2 =    86  # NVLink Speed in MBps for Link 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L0, __pyx_mstate_global->__pyx_int_84) < (0)) __PYX_ERR(0, 873, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":874
 *     # NVLink Speed
 *     DEV_NVLINK_SPEED_MBPS_L0 =    84  # NVLink Speed in MBps for Link 0
 *     DEV_NVLINK_SPEED_MBPS_L1 =    85  # NVLink Speed in MBps for Link 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L2 =    86  # NVLink Speed in MBps for Link 2
 *     DEV_NVLINK_SPEED_MBPS_L3 =    87  # NVLink Speed in MBps for Link 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L1, __pyx_mstate_global->__pyx_int_85) < (0)) __PYX_ERR(0, 874, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":875
 *     DEV_NVLINK_SPEED_MBPS_L0 =    84  # NVLink Speed in MBps for Link 0
 *     DEV_NVLINK_SPEED_MBPS_L1 =    85  # NVLink Speed in MBps for Link 1
 *     DEV_NVLINK_SPEED_MBPS_L2 =    86  # NVLink Speed in MBps for Link 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L3 =    87  # NVLink Speed in MBps for Link 3
 *     DEV_NVLINK_SPEED_MBPS_L4 =    88  # NVLink Speed in MBps for Link 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L2, __pyx_mstate_global->__pyx_int_86) < (0)) __PYX_ERR(0, 875, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":876
 *     DEV_NVLINK_SPEED_MBPS_L1 =    85  # NVLink Speed in MBps for Link 1
 *     DEV_NVLINK_SPEED_MBPS_L2 =    86  # NVLink Speed in MBps for Link 2
 *     DEV_NVLINK_SPEED_MBPS_L3 =    87  # NVLink Speed in MBps for Link 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L4 =    88  # NVLink Speed in MBps for Link 4
 *     DEV_NVLINK_SPEED_MBPS_L5 =    89  # NVLink Speed in MBps for Link 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L3, __pyx_mstate_global->__pyx_int_87) < (0)) __PYX_ERR(0, 876, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":877
 *     DEV_NVLINK_SPEED_MBPS_L2 =    86  # NVLink Speed in MBps for Link 2
 *     DEV_NVLINK_SPEED_MBPS_L3 =    87  # NVLink Speed in MBps for Link 3
 *     DEV_NVLINK_SPEED_MBPS_L4 =    88  # NVLink Speed in MBps for Link 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L5 =    89  # NVLink Speed in MBps for Link 5
 *     DEV_NVLINK_SPEED_MBPS_COMMON =90  # Common NVLink Speed in MBps for active links
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L4, __pyx_mstate_global->__pyx_int_88) < (0)) __PYX_ERR(0, 877, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":878
 *     DEV_NVLINK_SPEED_MBPS_L3 =    87  # NVLink Speed in MBps for Link 3
 *     DEV_NVLINK_SPEED_MBPS_L4 =    88  # NVLink Speed in MBps for Link 4
 *     DEV_NVLINK_SPEED_MBPS_L5 =    89  # NVLink Speed in MBps for Link 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_COMMON =90  # Common NVLink Speed in MBps for active links
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L5, __pyx_mstate_global->__pyx_int_89) < (0)) __PYX_ERR(0, 878, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":879
 *     DEV_NVLINK_SPEED_MBPS_L4 =    88  # NVLink Speed in MBps for Link 4
 *     DEV_NVLINK_SPEED_MBPS_L5 =    89  # NVLink Speed in MBps for Link 5
 *     DEV_NVLINK_SPEED_MBPS_COMMON =90  # Common NVLink Speed in MBps for active links             # <<<<<<<<<<<<<<
 * 
 *     DEV_NVLINK_LINK_COUNT =       91  # Number of NVLinks present on the device
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_COMMON, __pyx_mstate_global->__pyx_int_90) < (0)) __PYX_ERR(0, 879, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":881
 *     DEV_NVLINK_SPEED_MBPS_COMMON =90  # Common NVLink Speed in MBps for active links
 * 
 *     DEV_NVLINK_LINK_COUNT =       91  # Number of NVLinks present on the device             # <<<<<<<<<<<<<<
 * 
 *     DEV_RETIRED_PENDING_SBE =     92  # If any pages are pending retirement due to SBE. 1=yes. 0=no.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_LINK_COUNT, __pyx_mstate_global->__pyx_int_91) < (0)) __PYX_ERR(0, 881, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":883
 *     DEV_NVLINK_LINK_COUNT =       91  # Number of NVLinks present on the device
 * 
 *     DEV_RETIRED_PENDING_SBE =     92  # If any pages are pending retirement due to SBE. 1=yes. 0=no.             # <<<<<<<<<<<<<<
 *     DEV_RETIRED_PENDING_DBE =     93  # If any pages are pending retirement due to DBE. 1=yes. 0=no.
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_RETIRED_PENDING_SBE, __pyx_mstate_global->__pyx_int_92) < (0)) __PYX_ERR(0, 883, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":884
 * 
 *     DEV_RETIRED_PENDING_SBE =     92  # If any pages are pending retirement due to SBE. 1=yes. 0=no.
 *     DEV_RETIRED_PENDING_DBE =     93  # If any pages are pending retirement due to DBE. 1=yes. 0=no.             # <<<<<<<<<<<<<<
 * 
 *     DEV_PCIE_REPLAY_COUNTER =            94  # PCIe replay counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_RETIRED_PENDING_DBE, __pyx_mstate_global->__pyx_int_93) < (0)) __PYX_ERR(0, 884, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":886
 *     DEV_RETIRED_PENDING_DBE =     93  # If any pages are pending retirement due to DBE. 1=yes. 0=no.
 * 
 *     DEV_PCIE_REPLAY_COUNTER =            94  # PCIe replay counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_REPLAY_ROLLOVER_COUNTER =   95  # PCIe replay rollover counter
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_REPLAY_COUNTER, __pyx_mstate_global->__pyx_int_94) < (0)) __PYX_ERR(0, 886, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":887
 * 
 *     DEV_PCIE_REPLAY_COUNTER =            94  # PCIe replay counter
 *     DEV_PCIE_REPLAY_ROLLOVER_COUNTER =   95  # PCIe replay rollover counter             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Flit Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_REPLAY_ROLLOVER_COUNTER, __pyx_mstate_global->__pyx_int_95) < (0)) __PYX_ERR(0, 887, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":890
 * 
 *     # NVLink Flit Error Counters
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 =    96 # NVLink flow control CRC  Error Counter for Lane 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 =    97 # NVLink flow control CRC  Error Counter for Lane 7
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 =    98 # NVLink flow control CRC  Error Counter for Lane 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_8, __pyx_mstate_global->__pyx_int_96) < (0)) __PYX_ERR(0, 890, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":891
 *     # NVLink Flit Error Counters
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 =    96 # NVLink flow control CRC  Error Counter for Lane 6
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 =    97 # NVLink flow control CRC  Error Counter for Lane 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 =    98 # NVLink flow control CRC  Error Counter for Lane 8
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 =    99 # NVLink flow control CRC  Error Counter for Lane 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_9, __pyx_mstate_global->__pyx_int_97) < (0)) __PYX_ERR(0, 891, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":892
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 =    96 # NVLink flow control CRC  Error Counter for Lane 6
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 =    97 # NVLink flow control CRC  Error Counter for Lane 7
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 =    98 # NVLink flow control CRC  Error Counter for Lane 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 =    99 # NVLink flow control CRC  Error Counter for Lane 9
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 =  100 # NVLink flow control CRC  Error Counter for Lane 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_10, __pyx_mstate_global->__pyx_int_98) < (0)) __PYX_ERR(0, 892, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":893
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 =    97 # NVLink flow control CRC  Error Counter for Lane 7
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 =    98 # NVLink flow control CRC  Error Counter for Lane 8
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 =    99 # NVLink flow control CRC  Error Counter for Lane 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 =  100 # NVLink flow control CRC  Error Counter for Lane 10
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 =  101 # NVLink flow control CRC  Error Counter for Lane 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_11, __pyx_mstate_global->__pyx_int_99) < (0)) __PYX_ERR(0, 893, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":894
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 =    98 # NVLink flow control CRC  Error Counter for Lane 8
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 =    99 # NVLink flow control CRC  Error Counter for Lane 9
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 =  100 # NVLink flow control CRC  Error Counter for Lane 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 =  101 # NVLink flow control CRC  Error Counter for Lane 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_12, __pyx_mstate_global->__pyx_int_100) < (0)) __PYX_ERR(0, 894, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":895
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 =    99 # NVLink flow control CRC  Error Counter for Lane 9
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 =  100 # NVLink flow control CRC  Error Counter for Lane 10
 *     DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 =  101 # NVLink flow control CRC  Error Counter for Lane 11             # <<<<<<<<<<<<<<
 * 
 *     # NVLink CRC Data Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_13, __pyx_mstate_global->__pyx_int_101) < (0)) __PYX_ERR(0, 895, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":898
 * 
 *     # NVLink CRC Data Error Counters
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 =   102 # NVLink data CRC Error Counter for Lane 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 =   103 # NVLink data CRC Error Counter for Lane 7
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 =   104 # NVLink data CRC Error Counter for Lane 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_8, __pyx_mstate_global->__pyx_int_102) < (0)) __PYX_ERR(0, 898, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":899
 *     # NVLink CRC Data Error Counters
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 =   102 # NVLink data CRC Error Counter for Lane 6
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 =   103 # NVLink data CRC Error Counter for Lane 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 =   104 # NVLink data CRC Error Counter for Lane 8
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 =   105 # NVLink data CRC Error Counter for Lane 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_9, __pyx_mstate_global->__pyx_int_103) < (0)) __PYX_ERR(0, 899, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":900
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 =   102 # NVLink data CRC Error Counter for Lane 6
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 =   103 # NVLink data CRC Error Counter for Lane 7
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 =   104 # NVLink data CRC Error Counter for Lane 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 =   105 # NVLink data CRC Error Counter for Lane 9
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 =  106 # NVLink data CRC Error Counter for Lane 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_10, __pyx_mstate_global->__pyx_int_104) < (0)) __PYX_ERR(0, 900, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":901
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 =   103 # NVLink data CRC Error Counter for Lane 7
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 =   104 # NVLink data CRC Error Counter for Lane 8
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 =   105 # NVLink data CRC Error Counter for Lane 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 =  106 # NVLink data CRC Error Counter for Lane 10
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 =  107 # NVLink data CRC Error Counter for Lane 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_11, __pyx_mstate_global->__pyx_int_105) < (0)) __PYX_ERR(0, 901, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":902
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 =   104 # NVLink data CRC Error Counter for Lane 8
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 =   105 # NVLink data CRC Error Counter for Lane 9
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 =  106 # NVLink data CRC Error Counter for Lane 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 =  107 # NVLink data CRC Error Counter for Lane 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_12, __pyx_mstate_global->__pyx_int_106) < (0)) __PYX_ERR(0, 902, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":903
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 =   105 # NVLink data CRC Error Counter for Lane 9
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 =  106 # NVLink data CRC Error Counter for Lane 10
 *     DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 =  107 # NVLink data CRC Error Counter for Lane 11             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Replay Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_CRC_DATA_ERROR_COUNT_13, __pyx_mstate_global->__pyx_int_107) < (0)) __PYX_ERR(0, 903, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":906
 * 
 *     # NVLink Replay Error Counters
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L6 =     108 # NVLink Replay Error Counter for Lane 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L7 =     109 # NVLink Replay Error Counter for Lane 7
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L8 =     110 # NVLink Replay Error Counter for Lane 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L6, __pyx_mstate_global->__pyx_int_108) < (0)) __PYX_ERR(0, 906, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":907
 *     # NVLink Replay Error Counters
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L6 =     108 # NVLink Replay Error Counter for Lane 6
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L7 =     109 # NVLink Replay Error Counter for Lane 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L8 =     110 # NVLink Replay Error Counter for Lane 8
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L9 =     111 # NVLink Replay Error Counter for Lane 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L7, __pyx_mstate_global->__pyx_int_109) < (0)) __PYX_ERR(0, 907, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":908
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L6 =     108 # NVLink Replay Error Counter for Lane 6
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L7 =     109 # NVLink Replay Error Counter for Lane 7
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L8 =     110 # NVLink Replay Error Counter for Lane 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L9 =     111 # NVLink Replay Error Counter for Lane 9
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L10 =    112 # NVLink Replay Error Counter for Lane 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L8, __pyx_mstate_global->__pyx_int_110) < (0)) __PYX_ERR(0, 908, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":909
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L7 =     109 # NVLink Replay Error Counter for Lane 7
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L8 =     110 # NVLink Replay Error Counter for Lane 8
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L9 =     111 # NVLink Replay Error Counter for Lane 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L10 =    112 # NVLink Replay Error Counter for Lane 10
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L11 =    113 # NVLink Replay Error Counter for Lane 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L9, __pyx_mstate_global->__pyx_int_111) < (0)) __PYX_ERR(0, 909, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":910
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L8 =     110 # NVLink Replay Error Counter for Lane 8
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L9 =     111 # NVLink Replay Error Counter for Lane 9
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L10 =    112 # NVLink Replay Error Counter for Lane 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L11 =    113 # NVLink Replay Error Counter for Lane 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L1_2, __pyx_mstate_global->__pyx_int_112) < (0)) __PYX_ERR(0, 910, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":911
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L9 =     111 # NVLink Replay Error Counter for Lane 9
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L10 =    112 # NVLink Replay Error Counter for Lane 10
 *     DEV_NVLINK_REPLAY_ERROR_COUNT_L11 =    113 # NVLink Replay Error Counter for Lane 11             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Recovery Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REPLAY_ERROR_COUNT_L1_3, __pyx_mstate_global->__pyx_int_113) < (0)) __PYX_ERR(0, 911, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":914
 * 
 *     # NVLink Recovery Error Counters
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 =   114 # NVLink Recovery Error Counter for Lane 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 =   115 # NVLink Recovery Error Counter for Lane 7
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 =   116 # NVLink Recovery Error Counter for Lane 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_8, __pyx_mstate_global->__pyx_int_114) < (0)) __PYX_ERR(0, 914, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":915
 *     # NVLink Recovery Error Counters
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 =   114 # NVLink Recovery Error Counter for Lane 6
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 =   115 # NVLink Recovery Error Counter for Lane 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 =   116 # NVLink Recovery Error Counter for Lane 8
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 =   117 # NVLink Recovery Error Counter for Lane 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_9, __pyx_mstate_global->__pyx_int_115) < (0)) __PYX_ERR(0, 915, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":916
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 =   114 # NVLink Recovery Error Counter for Lane 6
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 =   115 # NVLink Recovery Error Counter for Lane 7
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 =   116 # NVLink Recovery Error Counter for Lane 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 =   117 # NVLink Recovery Error Counter for Lane 9
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 =  118 # NVLink Recovery Error Counter for Lane 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_10, __pyx_mstate_global->__pyx_int_116) < (0)) __PYX_ERR(0, 916, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":917
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 =   115 # NVLink Recovery Error Counter for Lane 7
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 =   116 # NVLink Recovery Error Counter for Lane 8
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 =   117 # NVLink Recovery Error Counter for Lane 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 =  118 # NVLink Recovery Error Counter for Lane 10
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 =  119 # NVLink Recovery Error Counter for Lane 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_11, __pyx_mstate_global->__pyx_int_117) < (0)) __PYX_ERR(0, 917, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":918
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 =   116 # NVLink Recovery Error Counter for Lane 8
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 =   117 # NVLink Recovery Error Counter for Lane 9
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 =  118 # NVLink Recovery Error Counter for Lane 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 =  119 # NVLink Recovery Error Counter for Lane 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_12, __pyx_mstate_global->__pyx_int_118) < (0)) __PYX_ERR(0, 918, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":919
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 =   117 # NVLink Recovery Error Counter for Lane 9
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 =  118 # NVLink Recovery Error Counter for Lane 10
 *     DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 =  119 # NVLink Recovery Error Counter for Lane 11             # <<<<<<<<<<<<<<
 * 
 *     # NvLink Bandwidth Counters *[inserted by cython to avoid comment closer]/
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_RECOVERY_ERROR_COUNT_13, __pyx_mstate_global->__pyx_int_119) < (0)) __PYX_ERR(0, 919, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":922
 * 
 *     # NvLink Bandwidth Counters *[inserted by cython to avoid comment closer]/
 *     DEV_NVLINK_BANDWIDTH_C0_L6 =    120 # NVLink Bandwidth Counter for Counter Set 0, Lane 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L7 =    121 # NVLink Bandwidth Counter for Counter Set 0, Lane 7
 *     DEV_NVLINK_BANDWIDTH_C0_L8 =    122 # NVLink Bandwidth Counter for Counter Set 0, Lane 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L6, __pyx_mstate_global->__pyx_int_120) < (0)) __PYX_ERR(0, 922, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":923
 *     # NvLink Bandwidth Counters *[inserted by cython to avoid comment closer]/
 *     DEV_NVLINK_BANDWIDTH_C0_L6 =    120 # NVLink Bandwidth Counter for Counter Set 0, Lane 6
 *     DEV_NVLINK_BANDWIDTH_C0_L7 =    121 # NVLink Bandwidth Counter for Counter Set 0, Lane 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L8 =    122 # NVLink Bandwidth Counter for Counter Set 0, Lane 8
 *     DEV_NVLINK_BANDWIDTH_C0_L9 =    123 # NVLink Bandwidth Counter for Counter Set 0, Lane 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L7, __pyx_mstate_global->__pyx_int_121) < (0)) __PYX_ERR(0, 923, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":924
 *     DEV_NVLINK_BANDWIDTH_C0_L6 =    120 # NVLink Bandwidth Counter for Counter Set 0, Lane 6
 *     DEV_NVLINK_BANDWIDTH_C0_L7 =    121 # NVLink Bandwidth Counter for Counter Set 0, Lane 7
 *     DEV_NVLINK_BANDWIDTH_C0_L8 =    122 # NVLink Bandwidth Counter for Counter Set 0, Lane 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L9 =    123 # NVLink Bandwidth Counter for Counter Set 0, Lane 9
 *     DEV_NVLINK_BANDWIDTH_C0_L10 =   124 # NVLink Bandwidth Counter for Counter Set 0, Lane 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L8, __pyx_mstate_global->__pyx_int_122) < (0)) __PYX_ERR(0, 924, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":925
 *     DEV_NVLINK_BANDWIDTH_C0_L7 =    121 # NVLink Bandwidth Counter for Counter Set 0, Lane 7
 *     DEV_NVLINK_BANDWIDTH_C0_L8 =    122 # NVLink Bandwidth Counter for Counter Set 0, Lane 8
 *     DEV_NVLINK_BANDWIDTH_C0_L9 =    123 # NVLink Bandwidth Counter for Counter Set 0, Lane 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L10 =   124 # NVLink Bandwidth Counter for Counter Set 0, Lane 10
 *     DEV_NVLINK_BANDWIDTH_C0_L11 =   125 # NVLink Bandwidth Counter for Counter Set 0, Lane 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L9, __pyx_mstate_global->__pyx_int_123) < (0)) __PYX_ERR(0, 925, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":926
 *     DEV_NVLINK_BANDWIDTH_C0_L8 =    122 # NVLink Bandwidth Counter for Counter Set 0, Lane 8
 *     DEV_NVLINK_BANDWIDTH_C0_L9 =    123 # NVLink Bandwidth Counter for Counter Set 0, Lane 9
 *     DEV_NVLINK_BANDWIDTH_C0_L10 =   124 # NVLink Bandwidth Counter for Counter Set 0, Lane 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C0_L11 =   125 # NVLink Bandwidth Counter for Counter Set 0, Lane 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L10, __pyx_mstate_global->__pyx_int_124) < (0)) __PYX_ERR(0, 926, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":927
 *     DEV_NVLINK_BANDWIDTH_C0_L9 =    123 # NVLink Bandwidth Counter for Counter Set 0, Lane 9
 *     DEV_NVLINK_BANDWIDTH_C0_L10 =   124 # NVLink Bandwidth Counter for Counter Set 0, Lane 10
 *     DEV_NVLINK_BANDWIDTH_C0_L11 =   125 # NVLink Bandwidth Counter for Counter Set 0, Lane 11             # <<<<<<<<<<<<<<
 * 
 *     # NvLink Bandwidth Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C0_L11, __pyx_mstate_global->__pyx_int_125) < (0)) __PYX_ERR(0, 927, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":930
 * 
 *     # NvLink Bandwidth Counters
 *     DEV_NVLINK_BANDWIDTH_C1_L6 =    126 # NVLink Bandwidth Counter for Counter Set 1, Lane 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L7 =    127 # NVLink Bandwidth Counter for Counter Set 1, Lane 7
 *     DEV_NVLINK_BANDWIDTH_C1_L8 =    128 # NVLink Bandwidth Counter for Counter Set 1, Lane 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L6, __pyx_mstate_global->__pyx_int_126) < (0)) __PYX_ERR(0, 930, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":931
 *     # NvLink Bandwidth Counters
 *     DEV_NVLINK_BANDWIDTH_C1_L6 =    126 # NVLink Bandwidth Counter for Counter Set 1, Lane 6
 *     DEV_NVLINK_BANDWIDTH_C1_L7 =    127 # NVLink Bandwidth Counter for Counter Set 1, Lane 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L8 =    128 # NVLink Bandwidth Counter for Counter Set 1, Lane 8
 *     DEV_NVLINK_BANDWIDTH_C1_L9 =    129 # NVLink Bandwidth Counter for Counter Set 1, Lane 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L7, __pyx_mstate_global->__pyx_int_127) < (0)) __PYX_ERR(0, 931, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":932
 *     DEV_NVLINK_BANDWIDTH_C1_L6 =    126 # NVLink Bandwidth Counter for Counter Set 1, Lane 6
 *     DEV_NVLINK_BANDWIDTH_C1_L7 =    127 # NVLink Bandwidth Counter for Counter Set 1, Lane 7
 *     DEV_NVLINK_BANDWIDTH_C1_L8 =    128 # NVLink Bandwidth Counter for Counter Set 1, Lane 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L9 =    129 # NVLink Bandwidth Counter for Counter Set 1, Lane 9
 *     DEV_NVLINK_BANDWIDTH_C1_L10 =   130 # NVLink Bandwidth Counter for Counter Set 1, Lane 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L8, __pyx_mstate_global->__pyx_int_128) < (0)) __PYX_ERR(0, 932, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":933
 *     DEV_NVLINK_BANDWIDTH_C1_L7 =    127 # NVLink Bandwidth Counter for Counter Set 1, Lane 7
 *     DEV_NVLINK_BANDWIDTH_C1_L8 =    128 # NVLink Bandwidth Counter for Counter Set 1, Lane 8
 *     DEV_NVLINK_BANDWIDTH_C1_L9 =    129 # NVLink Bandwidth Counter for Counter Set 1, Lane 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L10 =   130 # NVLink Bandwidth Counter for Counter Set 1, Lane 10
 *     DEV_NVLINK_BANDWIDTH_C1_L11 =   131 # NVLink Bandwidth Counter for Counter Set 1, Lane 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L9, __pyx_mstate_global->__pyx_int_129) < (0)) __PYX_ERR(0, 933, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":934
 *     DEV_NVLINK_BANDWIDTH_C1_L8 =    128 # NVLink Bandwidth Counter for Counter Set 1, Lane 8
 *     DEV_NVLINK_BANDWIDTH_C1_L9 =    129 # NVLink Bandwidth Counter for Counter Set 1, Lane 9
 *     DEV_NVLINK_BANDWIDTH_C1_L10 =   130 # NVLink Bandwidth Counter for Counter Set 1, Lane 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_BANDWIDTH_C1_L11 =   131 # NVLink Bandwidth Counter for Counter Set 1, Lane 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L10, __pyx_mstate_global->__pyx_int_130) < (0)) __PYX_ERR(0, 934, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":935
 *     DEV_NVLINK_BANDWIDTH_C1_L9 =    129 # NVLink Bandwidth Counter for Counter Set 1, Lane 9
 *     DEV_NVLINK_BANDWIDTH_C1_L10 =   130 # NVLink Bandwidth Counter for Counter Set 1, Lane 10
 *     DEV_NVLINK_BANDWIDTH_C1_L11 =   131 # NVLink Bandwidth Counter for Counter Set 1, Lane 11             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Speed
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_BANDWIDTH_C1_L11, __pyx_mstate_global->__pyx_int_131) < (0)) __PYX_ERR(0, 935, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":938
 * 
 *     # NVLink Speed
 *     DEV_NVLINK_SPEED_MBPS_L6 =    132  # NVLink Speed in MBps for Link 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L7 =    133  # NVLink Speed in MBps for Link 7
 *     DEV_NVLINK_SPEED_MBPS_L8 =    134  # NVLink Speed in MBps for Link 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L6, __pyx_mstate_global->__pyx_int_132) < (0)) __PYX_ERR(0, 938, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":939
 *     # NVLink Speed
 *     DEV_NVLINK_SPEED_MBPS_L6 =    132  # NVLink Speed in MBps for Link 6
 *     DEV_NVLINK_SPEED_MBPS_L7 =    133  # NVLink Speed in MBps for Link 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L8 =    134  # NVLink Speed in MBps for Link 8
 *     DEV_NVLINK_SPEED_MBPS_L9 =    135  # NVLink Speed in MBps for Link 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L7, __pyx_mstate_global->__pyx_int_133) < (0)) __PYX_ERR(0, 939, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":940
 *     DEV_NVLINK_SPEED_MBPS_L6 =    132  # NVLink Speed in MBps for Link 6
 *     DEV_NVLINK_SPEED_MBPS_L7 =    133  # NVLink Speed in MBps for Link 7
 *     DEV_NVLINK_SPEED_MBPS_L8 =    134  # NVLink Speed in MBps for Link 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L9 =    135  # NVLink Speed in MBps for Link 9
 *     DEV_NVLINK_SPEED_MBPS_L10 =   136  # NVLink Speed in MBps for Link 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L8, __pyx_mstate_global->__pyx_int_134) < (0)) __PYX_ERR(0, 940, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":941
 *     DEV_NVLINK_SPEED_MBPS_L7 =    133  # NVLink Speed in MBps for Link 7
 *     DEV_NVLINK_SPEED_MBPS_L8 =    134  # NVLink Speed in MBps for Link 8
 *     DEV_NVLINK_SPEED_MBPS_L9 =    135  # NVLink Speed in MBps for Link 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L10 =   136  # NVLink Speed in MBps for Link 10
 *     DEV_NVLINK_SPEED_MBPS_L11 =   137  # NVLink Speed in MBps for Link 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L9, __pyx_mstate_global->__pyx_int_135) < (0)) __PYX_ERR(0, 941, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":942
 *     DEV_NVLINK_SPEED_MBPS_L8 =    134  # NVLink Speed in MBps for Link 8
 *     DEV_NVLINK_SPEED_MBPS_L9 =    135  # NVLink Speed in MBps for Link 9
 *     DEV_NVLINK_SPEED_MBPS_L10 =   136  # NVLink Speed in MBps for Link 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_SPEED_MBPS_L11 =   137  # NVLink Speed in MBps for Link 11
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L10, __pyx_mstate_global->__pyx_int_136) < (0)) __PYX_ERR(0, 942, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":943
 *     DEV_NVLINK_SPEED_MBPS_L9 =    135  # NVLink Speed in MBps for Link 9
 *     DEV_NVLINK_SPEED_MBPS_L10 =   136  # NVLink Speed in MBps for Link 10
 *     DEV_NVLINK_SPEED_MBPS_L11 =   137  # NVLink Speed in MBps for Link 11             # <<<<<<<<<<<<<<
 * 
 *     # NVLink throughput counters field values
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_SPEED_MBPS_L11, __pyx_mstate_global->__pyx_int_137) < (0)) __PYX_ERR(0, 943, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":946
 * 
 *     # NVLink throughput counters field values
 *     DEV_NVLINK_THROUGHPUT_DATA_TX =     138 # NVLink TX Data throughput in KiB             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_THROUGHPUT_DATA_RX =     139 # NVLink RX Data throughput in KiB
 *     DEV_NVLINK_THROUGHPUT_RAW_TX =      140 # NVLink TX Data + protocol overhead in KiB
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_THROUGHPUT_DATA_TX, __pyx_mstate_global->__pyx_int_138) < (0)) __PYX_ERR(0, 946, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":947
 *     # NVLink throughput counters field values
 *     DEV_NVLINK_THROUGHPUT_DATA_TX =     138 # NVLink TX Data throughput in KiB
 *     DEV_NVLINK_THROUGHPUT_DATA_RX =     139 # NVLink RX Data throughput in KiB             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_THROUGHPUT_RAW_TX =      140 # NVLink TX Data + protocol overhead in KiB
 *     DEV_NVLINK_THROUGHPUT_RAW_RX =      141 # NVLink RX Data + protocol overhead in KiB
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_THROUGHPUT_DATA_RX, __pyx_mstate_global->__pyx_int_139) < (0)) __PYX_ERR(0, 947, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":948
 *     DEV_NVLINK_THROUGHPUT_DATA_TX =     138 # NVLink TX Data throughput in KiB
 *     DEV_NVLINK_THROUGHPUT_DATA_RX =     139 # NVLink RX Data throughput in KiB
 *     DEV_NVLINK_THROUGHPUT_RAW_TX =      140 # NVLink TX Data + protocol overhead in KiB             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_THROUGHPUT_RAW_RX =      141 # NVLink RX Data + protocol overhead in KiB
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_THROUGHPUT_RAW_TX, __pyx_mstate_global->__pyx_int_140) < (0)) __PYX_ERR(0, 948, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":949
 *     DEV_NVLINK_THROUGHPUT_DATA_RX =     139 # NVLink RX Data throughput in KiB
 *     DEV_NVLINK_THROUGHPUT_RAW_TX =      140 # NVLink TX Data + protocol overhead in KiB
 *     DEV_NVLINK_THROUGHPUT_RAW_RX =      141 # NVLink RX Data + protocol overhead in KiB             # <<<<<<<<<<<<<<
 * 
 *     # Row Remapper
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_THROUGHPUT_RAW_RX, __pyx_mstate_global->__pyx_int_141) < (0)) __PYX_ERR(0, 949, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":952
 * 
 *     # Row Remapper
 *     DEV_REMAPPED_COR =       142 # Number of remapped rows due to correctable errors             # <<<<<<<<<<<<<<
 *     DEV_REMAPPED_UNC =       143 # Number of remapped rows due to uncorrectable errors
 *     DEV_REMAPPED_PENDING =   144 # If any rows are pending remapping. 1=yes 0=no
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_REMAPPED_COR, __pyx_mstate_global->__pyx_int_142) < (0)) __PYX_ERR(0, 952, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":953
 *     # Row Remapper
 *     DEV_REMAPPED_COR =       142 # Number of remapped rows due to correctable errors
 *     DEV_REMAPPED_UNC =       143 # Number of remapped rows due to uncorrectable errors             # <<<<<<<<<<<<<<
 *     DEV_REMAPPED_PENDING =   144 # If any rows are pending remapping. 1=yes 0=no
 *     DEV_REMAPPED_FAILURE =   145 # If any rows failed to be remapped 1=yes 0=no
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_REMAPPED_UNC, __pyx_mstate_global->__pyx_int_143) < (0)) __PYX_ERR(0, 953, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":954
 *     DEV_REMAPPED_COR =       142 # Number of remapped rows due to correctable errors
 *     DEV_REMAPPED_UNC =       143 # Number of remapped rows due to uncorrectable errors
 *     DEV_REMAPPED_PENDING =   144 # If any rows are pending remapping. 1=yes 0=no             # <<<<<<<<<<<<<<
 *     DEV_REMAPPED_FAILURE =   145 # If any rows failed to be remapped 1=yes 0=no
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_REMAPPED_PENDING, __pyx_mstate_global->__pyx_int_144) < (0)) __PYX_ERR(0, 954, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":955
 *     DEV_REMAPPED_UNC =       143 # Number of remapped rows due to uncorrectable errors
 *     DEV_REMAPPED_PENDING =   144 # If any rows are pending remapping. 1=yes 0=no
 *     DEV_REMAPPED_FAILURE =   145 # If any rows failed to be remapped 1=yes 0=no             # <<<<<<<<<<<<<<
 * 
 *     # Remote device NVLink ID
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_REMAPPED_FAILURE, __pyx_mstate_global->__pyx_int_145) < (0)) __PYX_ERR(0, 955, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":958
 * 
 *     # Remote device NVLink ID
 *     DEV_NVLINK_REMOTE_NVLINK_ID =    146 # Remote device NVLink ID             # <<<<<<<<<<<<<<
 * 
 *     # NVSwitch: connected NVLink count
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_REMOTE_NVLINK_ID, __pyx_mstate_global->__pyx_int_146) < (0)) __PYX_ERR(0, 958, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":961
 * 
 *     # NVSwitch: connected NVLink count
 *     DEV_NVSWITCH_CONNECTED_LINK_COUNT =  147  # Number of NVLinks connected to NVSwitch             # <<<<<<<<<<<<<<
 * 
 *     # NvLink ECC Data Error Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVSWITCH_CONNECTED_LINK_COUN, __pyx_mstate_global->__pyx_int_147) < (0)) __PYX_ERR(0, 961, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":964
 * 
 *     # NvLink ECC Data Error Counters
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 =   148 # NVLink data ECC Error Counter for Link 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 =   149 # NVLink data ECC Error Counter for Link 1
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 =   150 # NVLink data ECC Error Counter for Link 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT, __pyx_mstate_global->__pyx_int_148) < (0)) __PYX_ERR(0, 964, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":965
 *     # NvLink ECC Data Error Counters
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 =   148 # NVLink data ECC Error Counter for Link 0
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 =   149 # NVLink data ECC Error Counter for Link 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 =   150 # NVLink data ECC Error Counter for Link 2
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 =   151 # NVLink data ECC Error Counter for Link 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_2, __pyx_mstate_global->__pyx_int_149) < (0)) __PYX_ERR(0, 965, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":966
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 =   148 # NVLink data ECC Error Counter for Link 0
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 =   149 # NVLink data ECC Error Counter for Link 1
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 =   150 # NVLink data ECC Error Counter for Link 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 =   151 # NVLink data ECC Error Counter for Link 3
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 =   152 # NVLink data ECC Error Counter for Link 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_3, __pyx_mstate_global->__pyx_int_150) < (0)) __PYX_ERR(0, 966, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":967
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 =   149 # NVLink data ECC Error Counter for Link 1
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 =   150 # NVLink data ECC Error Counter for Link 2
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 =   151 # NVLink data ECC Error Counter for Link 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 =   152 # NVLink data ECC Error Counter for Link 4
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 =   153 # NVLink data ECC Error Counter for Link 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_4, __pyx_mstate_global->__pyx_int_151) < (0)) __PYX_ERR(0, 967, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":968
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 =   150 # NVLink data ECC Error Counter for Link 2
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 =   151 # NVLink data ECC Error Counter for Link 3
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 =   152 # NVLink data ECC Error Counter for Link 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 =   153 # NVLink data ECC Error Counter for Link 5
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 =   154 # NVLink data ECC Error Counter for Link 6
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_5, __pyx_mstate_global->__pyx_int_152) < (0)) __PYX_ERR(0, 968, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":969
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 =   151 # NVLink data ECC Error Counter for Link 3
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 =   152 # NVLink data ECC Error Counter for Link 4
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 =   153 # NVLink data ECC Error Counter for Link 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 =   154 # NVLink data ECC Error Counter for Link 6
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 =   155 # NVLink data ECC Error Counter for Link 7
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_6, __pyx_mstate_global->__pyx_int_153) < (0)) __PYX_ERR(0, 969, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":970
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 =   152 # NVLink data ECC Error Counter for Link 4
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 =   153 # NVLink data ECC Error Counter for Link 5
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 =   154 # NVLink data ECC Error Counter for Link 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 =   155 # NVLink data ECC Error Counter for Link 7
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 =   156 # NVLink data ECC Error Counter for Link 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_7, __pyx_mstate_global->__pyx_int_154) < (0)) __PYX_ERR(0, 970, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":971
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 =   153 # NVLink data ECC Error Counter for Link 5
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 =   154 # NVLink data ECC Error Counter for Link 6
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 =   155 # NVLink data ECC Error Counter for Link 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 =   156 # NVLink data ECC Error Counter for Link 8
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 =   157 # NVLink data ECC Error Counter for Link 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_8, __pyx_mstate_global->__pyx_int_155) < (0)) __PYX_ERR(0, 971, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":972
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 =   154 # NVLink data ECC Error Counter for Link 6
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 =   155 # NVLink data ECC Error Counter for Link 7
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 =   156 # NVLink data ECC Error Counter for Link 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 =   157 # NVLink data ECC Error Counter for Link 9
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 =  158 # NVLink data ECC Error Counter for Link 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_9, __pyx_mstate_global->__pyx_int_156) < (0)) __PYX_ERR(0, 972, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":973
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 =   155 # NVLink data ECC Error Counter for Link 7
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 =   156 # NVLink data ECC Error Counter for Link 8
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 =   157 # NVLink data ECC Error Counter for Link 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 =  158 # NVLink data ECC Error Counter for Link 10
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 =  159 # NVLink data ECC Error Counter for Link 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_10, __pyx_mstate_global->__pyx_int_157) < (0)) __PYX_ERR(0, 973, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":974
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 =   156 # NVLink data ECC Error Counter for Link 8
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 =   157 # NVLink data ECC Error Counter for Link 9
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 =  158 # NVLink data ECC Error Counter for Link 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 =  159 # NVLink data ECC Error Counter for Link 11
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL =160 # NVLink data ECC Error Counter total for all Links
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_11, __pyx_mstate_global->__pyx_int_158) < (0)) __PYX_ERR(0, 974, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":975
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 =   157 # NVLink data ECC Error Counter for Link 9
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 =  158 # NVLink data ECC Error Counter for Link 10
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 =  159 # NVLink data ECC Error Counter for Link 11             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL =160 # NVLink data ECC Error Counter total for all Links
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_12, __pyx_mstate_global->__pyx_int_159) < (0)) __PYX_ERR(0, 975, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":976
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 =  158 # NVLink data ECC Error Counter for Link 10
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 =  159 # NVLink data ECC Error Counter for Link 11
 *     DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL =160 # NVLink data ECC Error Counter total for all Links             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Error Replay
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ECC_DATA_ERROR_COUNT_13, __pyx_mstate_global->__pyx_int_160) < (0)) __PYX_ERR(0, 976, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":979
 * 
 *     # NVLink Error Replay
 *     DEV_NVLINK_ERROR_DL_REPLAY =           161 # NVLink Replay Error Counter             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Recovery Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ERROR_DL_REPLAY, __pyx_mstate_global->__pyx_int_161) < (0)) __PYX_ERR(0, 979, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":982
 * 
 *     # NVLink Recovery Error Counter
 *     DEV_NVLINK_ERROR_DL_RECOVERY =         162 # NVLink Recovery Error Counter             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Recovery Error CRC Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ERROR_DL_RECOVERY, __pyx_mstate_global->__pyx_int_162) < (0)) __PYX_ERR(0, 982, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":985
 * 
 *     # NVLink Recovery Error CRC Counter
 *     DEV_NVLINK_ERROR_DL_CRC =              163 # NVLink CRC Error Counter             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_ERROR_DL_CRC, __pyx_mstate_global->__pyx_int_163) < (0)) __PYX_ERR(0, 985, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":989
 * 
 *     # NVLink Speed, State and Version field id 164, 165, and 166
 *     DEV_NVLINK_GET_SPEED =                 164 # NVLink Speed in MBps             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_GET_STATE =                 165 # NVLink State - Active,Inactive
 *     DEV_NVLINK_GET_VERSION =               166 # NVLink Version
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_SPEED, __pyx_mstate_global->__pyx_int_164) < (0)) __PYX_ERR(0, 989, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":990
 *     # NVLink Speed, State and Version field id 164, 165, and 166
 *     DEV_NVLINK_GET_SPEED =                 164 # NVLink Speed in MBps
 *     DEV_NVLINK_GET_STATE =                 165 # NVLink State - Active,Inactive             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_GET_VERSION =               166 # NVLink Version
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_STATE, __pyx_mstate_global->__pyx_int_165) < (0)) __PYX_ERR(0, 990, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":991
 *     DEV_NVLINK_GET_SPEED =                 164 # NVLink Speed in MBps
 *     DEV_NVLINK_GET_STATE =                 165 # NVLink State - Active,Inactive
 *     DEV_NVLINK_GET_VERSION =               166 # NVLink Version             # <<<<<<<<<<<<<<
 * 
 *     DEV_NVLINK_GET_POWER_STATE =           167 # NVLink Power state. 0=HIGH_SPEED 1=LOW_SPEED
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_VERSION, __pyx_mstate_global->__pyx_int_166) < (0)) __PYX_ERR(0, 991, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":993
 *     DEV_NVLINK_GET_VERSION =               166 # NVLink Version
 * 
 *     DEV_NVLINK_GET_POWER_STATE =           167 # NVLink Power state. 0=HIGH_SPEED 1=LOW_SPEED             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_GET_POWER_THRESHOLD =       168 # NVLink length of idle period (units can be found from
 *                                                        # DEV_NVLINK_GET_POWER_THRESHOLD_UNITS) before
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_POWER_STATE, __pyx_mstate_global->__pyx_int_167) < (0)) __PYX_ERR(0, 993, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":994
 * 
 *     DEV_NVLINK_GET_POWER_STATE =           167 # NVLink Power state. 0=HIGH_SPEED 1=LOW_SPEED
 *     DEV_NVLINK_GET_POWER_THRESHOLD =       168 # NVLink length of idle period (units can be found from             # <<<<<<<<<<<<<<
 *                                                        # DEV_NVLINK_GET_POWER_THRESHOLD_UNITS) before
 *                                                        # transitioning links to sleep state
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD, __pyx_mstate_global->__pyx_int_168) < (0)) __PYX_ERR(0, 994, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":998
 *                                                        # transitioning links to sleep state
 * 
 *     DEV_PCIE_L0_TO_RECOVERY_COUNTER =      169 # Device PEX error recovery counter             # <<<<<<<<<<<<<<
 * 
 *     DEV_C2C_LINK_COUNT =                   170 # Number of C2C Links present on the device
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_L0_TO_RECOVERY_COUNTER, __pyx_mstate_global->__pyx_int_169) < (0)) __PYX_ERR(0, 998, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1000
 *     DEV_PCIE_L0_TO_RECOVERY_COUNTER =      169 # Device PEX error recovery counter
 * 
 *     DEV_C2C_LINK_COUNT =                   170 # Number of C2C Links present on the device             # <<<<<<<<<<<<<<
 *     DEV_C2C_LINK_GET_STATUS =              171 # C2C Link Status 0=INACTIVE 1=ACTIVE
 *     DEV_C2C_LINK_GET_MAX_BW =              172 # C2C Link Speed in MBps for active links
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_COUNT, __pyx_mstate_global->__pyx_int_170) < (0)) __PYX_ERR(0, 1000, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1001
 * 
 *     DEV_C2C_LINK_COUNT =                   170 # Number of C2C Links present on the device
 *     DEV_C2C_LINK_GET_STATUS =              171 # C2C Link Status 0=INACTIVE 1=ACTIVE             # <<<<<<<<<<<<<<
 *     DEV_C2C_LINK_GET_MAX_BW =              172 # C2C Link Speed in MBps for active links
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_GET_STATUS, __pyx_mstate_global->__pyx_int_171) < (0)) __PYX_ERR(0, 1001, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1002
 *     DEV_C2C_LINK_COUNT =                   170 # Number of C2C Links present on the device
 *     DEV_C2C_LINK_GET_STATUS =              171 # C2C Link Status 0=INACTIVE 1=ACTIVE
 *     DEV_C2C_LINK_GET_MAX_BW =              172 # C2C Link Speed in MBps for active links             # <<<<<<<<<<<<<<
 * 
 *     DEV_PCIE_COUNT_CORRECTABLE_ERRORS =    173 # PCIe Correctable Errors Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_GET_MAX_BW, __pyx_mstate_global->__pyx_int_172) < (0)) __PYX_ERR(0, 1002, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1004
 *     DEV_C2C_LINK_GET_MAX_BW =              172 # C2C Link Speed in MBps for active links
 * 
 *     DEV_PCIE_COUNT_CORRECTABLE_ERRORS =    173 # PCIe Correctable Errors Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_NAKS_RECEIVED =         174 # PCIe NAK Receive Counter
 *     DEV_PCIE_COUNT_RECEIVER_ERROR =        175 # PCIe Receiver Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_CORRECTABLE_ERROR, __pyx_mstate_global->__pyx_int_173) < (0)) __PYX_ERR(0, 1004, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1005
 * 
 *     DEV_PCIE_COUNT_CORRECTABLE_ERRORS =    173 # PCIe Correctable Errors Counter
 *     DEV_PCIE_COUNT_NAKS_RECEIVED =         174 # PCIe NAK Receive Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_RECEIVER_ERROR =        175 # PCIe Receiver Error Counter
 *     DEV_PCIE_COUNT_BAD_TLP =               176 # PCIe Bad TLP Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_NAKS_RECEIVED, __pyx_mstate_global->__pyx_int_174) < (0)) __PYX_ERR(0, 1005, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1006
 *     DEV_PCIE_COUNT_CORRECTABLE_ERRORS =    173 # PCIe Correctable Errors Counter
 *     DEV_PCIE_COUNT_NAKS_RECEIVED =         174 # PCIe NAK Receive Counter
 *     DEV_PCIE_COUNT_RECEIVER_ERROR =        175 # PCIe Receiver Error Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_BAD_TLP =               176 # PCIe Bad TLP Counter
 *     DEV_PCIE_COUNT_NAKS_SENT =             177 # PCIe NAK Send Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_RECEIVER_ERROR, __pyx_mstate_global->__pyx_int_175) < (0)) __PYX_ERR(0, 1006, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1007
 *     DEV_PCIE_COUNT_NAKS_RECEIVED =         174 # PCIe NAK Receive Counter
 *     DEV_PCIE_COUNT_RECEIVER_ERROR =        175 # PCIe Receiver Error Counter
 *     DEV_PCIE_COUNT_BAD_TLP =               176 # PCIe Bad TLP Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_NAKS_SENT =             177 # PCIe NAK Send Counter
 *     DEV_PCIE_COUNT_BAD_DLLP =              178 # PCIe Bad DLLP Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_BAD_TLP, __pyx_mstate_global->__pyx_int_176) < (0)) __PYX_ERR(0, 1007, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1008
 *     DEV_PCIE_COUNT_RECEIVER_ERROR =        175 # PCIe Receiver Error Counter
 *     DEV_PCIE_COUNT_BAD_TLP =               176 # PCIe Bad TLP Counter
 *     DEV_PCIE_COUNT_NAKS_SENT =             177 # PCIe NAK Send Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_BAD_DLLP =              178 # PCIe Bad DLLP Counter
 *     DEV_PCIE_COUNT_NON_FATAL_ERROR =       179 # PCIe Non Fatal Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_NAKS_SENT, __pyx_mstate_global->__pyx_int_177) < (0)) __PYX_ERR(0, 1008, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1009
 *     DEV_PCIE_COUNT_BAD_TLP =               176 # PCIe Bad TLP Counter
 *     DEV_PCIE_COUNT_NAKS_SENT =             177 # PCIe NAK Send Counter
 *     DEV_PCIE_COUNT_BAD_DLLP =              178 # PCIe Bad DLLP Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_NON_FATAL_ERROR =       179 # PCIe Non Fatal Error Counter
 *     DEV_PCIE_COUNT_FATAL_ERROR =           180 # PCIe Fatal Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_BAD_DLLP, __pyx_mstate_global->__pyx_int_178) < (0)) __PYX_ERR(0, 1009, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1010
 *     DEV_PCIE_COUNT_NAKS_SENT =             177 # PCIe NAK Send Counter
 *     DEV_PCIE_COUNT_BAD_DLLP =              178 # PCIe Bad DLLP Counter
 *     DEV_PCIE_COUNT_NON_FATAL_ERROR =       179 # PCIe Non Fatal Error Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_FATAL_ERROR =           180 # PCIe Fatal Error Counter
 *     DEV_PCIE_COUNT_UNSUPPORTED_REQ =       181 # PCIe Unsupported Request Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_NON_FATAL_ERROR, __pyx_mstate_global->__pyx_int_179) < (0)) __PYX_ERR(0, 1010, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1011
 *     DEV_PCIE_COUNT_BAD_DLLP =              178 # PCIe Bad DLLP Counter
 *     DEV_PCIE_COUNT_NON_FATAL_ERROR =       179 # PCIe Non Fatal Error Counter
 *     DEV_PCIE_COUNT_FATAL_ERROR =           180 # PCIe Fatal Error Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_UNSUPPORTED_REQ =       181 # PCIe Unsupported Request Counter
 *     DEV_PCIE_COUNT_LCRC_ERROR =            182 # PCIe LCRC Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_FATAL_ERROR, __pyx_mstate_global->__pyx_int_180) < (0)) __PYX_ERR(0, 1011, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1012
 *     DEV_PCIE_COUNT_NON_FATAL_ERROR =       179 # PCIe Non Fatal Error Counter
 *     DEV_PCIE_COUNT_FATAL_ERROR =           180 # PCIe Fatal Error Counter
 *     DEV_PCIE_COUNT_UNSUPPORTED_REQ =       181 # PCIe Unsupported Request Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_LCRC_ERROR =            182 # PCIe LCRC Error Counter
 *     DEV_PCIE_COUNT_LANE_ERROR =            183 # PCIe Per Lane Error Counter.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_UNSUPPORTED_REQ, __pyx_mstate_global->__pyx_int_181) < (0)) __PYX_ERR(0, 1012, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1013
 *     DEV_PCIE_COUNT_FATAL_ERROR =           180 # PCIe Fatal Error Counter
 *     DEV_PCIE_COUNT_UNSUPPORTED_REQ =       181 # PCIe Unsupported Request Counter
 *     DEV_PCIE_COUNT_LCRC_ERROR =            182 # PCIe LCRC Error Counter             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_LANE_ERROR =            183 # PCIe Per Lane Error Counter.
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_LCRC_ERROR, __pyx_mstate_global->__pyx_int_182) < (0)) __PYX_ERR(0, 1013, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1014
 *     DEV_PCIE_COUNT_UNSUPPORTED_REQ =       181 # PCIe Unsupported Request Counter
 *     DEV_PCIE_COUNT_LCRC_ERROR =            182 # PCIe LCRC Error Counter
 *     DEV_PCIE_COUNT_LANE_ERROR =            183 # PCIe Per Lane Error Counter.             # <<<<<<<<<<<<<<
 * 
 *     DEV_IS_RESETLESS_MIG_SUPPORTED =       184 # Device's Restless MIG Capability
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_LANE_ERROR, __pyx_mstate_global->__pyx_int_183) < (0)) __PYX_ERR(0, 1014, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1016
 *     DEV_PCIE_COUNT_LANE_ERROR =            183 # PCIe Per Lane Error Counter.
 * 
 *     DEV_IS_RESETLESS_MIG_SUPPORTED =       184 # Device's Restless MIG Capability             # <<<<<<<<<<<<<<
 * 
 *     DEV_POWER_AVERAGE =                    185 # GPU power averaged over 1 sec interval, supported on Ampere (except GA100) or newer architectures.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_IS_RESETLESS_MIG_SUPPORTED, __pyx_mstate_global->__pyx_int_184) < (0)) __PYX_ERR(0, 1016, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1018
 *     DEV_IS_RESETLESS_MIG_SUPPORTED =       184 # Device's Restless MIG Capability
 * 
 *     DEV_POWER_AVERAGE =                    185 # GPU power averaged over 1 sec interval, supported on Ampere (except GA100) or newer architectures.             # <<<<<<<<<<<<<<
 *     DEV_POWER_INSTANT =                    186 # Current GPU power, supported on all architectures.
 *     DEV_POWER_MIN_LIMIT =                  187 # Minimum power limit in milliwatts.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_AVERAGE, __pyx_mstate_global->__pyx_int_185) < (0)) __PYX_ERR(0, 1018, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1019
 * 
 *     DEV_POWER_AVERAGE =                    185 # GPU power averaged over 1 sec interval, supported on Ampere (except GA100) or newer architectures.
 *     DEV_POWER_INSTANT =                    186 # Current GPU power, supported on all architectures.             # <<<<<<<<<<<<<<
 *     DEV_POWER_MIN_LIMIT =                  187 # Minimum power limit in milliwatts.
 *     DEV_POWER_MAX_LIMIT =                  188 # Maximum power limit in milliwatts.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_INSTANT, __pyx_mstate_global->__pyx_int_186) < (0)) __PYX_ERR(0, 1019, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1020
 *     DEV_POWER_AVERAGE =                    185 # GPU power averaged over 1 sec interval, supported on Ampere (except GA100) or newer architectures.
 *     DEV_POWER_INSTANT =                    186 # Current GPU power, supported on all architectures.
 *     DEV_POWER_MIN_LIMIT =                  187 # Minimum power limit in milliwatts.             # <<<<<<<<<<<<<<
 *     DEV_POWER_MAX_LIMIT =                  188 # Maximum power limit in milliwatts.
 *     DEV_POWER_DEFAULT_LIMIT =              189 # Default power limit in milliwatts (limit which device boots with).
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_MIN_LIMIT, __pyx_mstate_global->__pyx_int_187) < (0)) __PYX_ERR(0, 1020, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1021
 *     DEV_POWER_INSTANT =                    186 # Current GPU power, supported on all architectures.
 *     DEV_POWER_MIN_LIMIT =                  187 # Minimum power limit in milliwatts.
 *     DEV_POWER_MAX_LIMIT =                  188 # Maximum power limit in milliwatts.             # <<<<<<<<<<<<<<
 *     DEV_POWER_DEFAULT_LIMIT =              189 # Default power limit in milliwatts (limit which device boots with).
 *     DEV_POWER_CURRENT_LIMIT =              190 # Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band).
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_MAX_LIMIT, __pyx_mstate_global->__pyx_int_188) < (0)) __PYX_ERR(0, 1021, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1022
 *     DEV_POWER_MIN_LIMIT =                  187 # Minimum power limit in milliwatts.
 *     DEV_POWER_MAX_LIMIT =                  188 # Maximum power limit in milliwatts.
 *     DEV_POWER_DEFAULT_LIMIT =              189 # Default power limit in milliwatts (limit which device boots with).             # <<<<<<<<<<<<<<
 *     DEV_POWER_CURRENT_LIMIT =              190 # Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band).
 *     DEV_ENERGY =                           191 # Total energy consumption (in mJ) since the driver was last reloaded. Same as \ref DEV_TOTAL_ENERGY_CONSUMPTION for the GPU.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_DEFAULT_LIMIT, __pyx_mstate_global->__pyx_int_189) < (0)) __PYX_ERR(0, 1022, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1023
 *     DEV_POWER_MAX_LIMIT =                  188 # Maximum power limit in milliwatts.
 *     DEV_POWER_DEFAULT_LIMIT =              189 # Default power limit in milliwatts (limit which device boots with).
 *     DEV_POWER_CURRENT_LIMIT =              190 # Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band).             # <<<<<<<<<<<<<<
 *     DEV_ENERGY =                           191 # Total energy consumption (in mJ) since the driver was last reloaded. Same as \ref DEV_TOTAL_ENERGY_CONSUMPTION for the GPU.
 *     DEV_POWER_REQUESTED_LIMIT =            192 # Power limit requested by NVML or any other userspace client.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_CURRENT_LIMIT, __pyx_mstate_global->__pyx_int_190) < (0)) __PYX_ERR(0, 1023, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1024
 *     DEV_POWER_DEFAULT_LIMIT =              189 # Default power limit in milliwatts (limit which device boots with).
 *     DEV_POWER_CURRENT_LIMIT =              190 # Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band).
 *     DEV_ENERGY =                           191 # Total energy consumption (in mJ) since the driver was last reloaded. Same as \ref DEV_TOTAL_ENERGY_CONSUMPTION for the GPU.             # <<<<<<<<<<<<<<
 *     DEV_POWER_REQUESTED_LIMIT =            192 # Power limit requested by NVML or any other userspace client.
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_ENERGY, __pyx_mstate_global->__pyx_int_191) < (0)) __PYX_ERR(0, 1024, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1025
 *     DEV_POWER_CURRENT_LIMIT =              190 # Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band).
 *     DEV_ENERGY =                           191 # Total energy consumption (in mJ) since the driver was last reloaded. Same as \ref DEV_TOTAL_ENERGY_CONSUMPTION for the GPU.
 *     DEV_POWER_REQUESTED_LIMIT =            192 # Power limit requested by NVML or any other userspace client.             # <<<<<<<<<<<<<<
 * 
 *     # GPU T.Limit temperature thresholds in degree Celsius
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_REQUESTED_LIMIT, __pyx_mstate_global->__pyx_int_192) < (0)) __PYX_ERR(0, 1025, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1028
 * 
 *     # GPU T.Limit temperature thresholds in degree Celsius
 *     DEV_TEMPERATURE_SHUTDOWN_TLIMIT =      193 # T.Limit temperature after which GPU may shut down for HW protection             # <<<<<<<<<<<<<<
 *     DEV_TEMPERATURE_SLOWDOWN_TLIMIT =      194 # T.Limit temperature after which GPU may begin HW slowdown
 *     DEV_TEMPERATURE_MEM_MAX_TLIMIT =       195 # T.Limit temperature after which GPU may begin SW slowdown due to memory temperature
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_TEMPERATURE_SHUTDOWN_TLIMIT, __pyx_mstate_global->__pyx_int_193) < (0)) __PYX_ERR(0, 1028, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1029
 *     # GPU T.Limit temperature thresholds in degree Celsius
 *     DEV_TEMPERATURE_SHUTDOWN_TLIMIT =      193 # T.Limit temperature after which GPU may shut down for HW protection
 *     DEV_TEMPERATURE_SLOWDOWN_TLIMIT =      194 # T.Limit temperature after which GPU may begin HW slowdown             # <<<<<<<<<<<<<<
 *     DEV_TEMPERATURE_MEM_MAX_TLIMIT =       195 # T.Limit temperature after which GPU may begin SW slowdown due to memory temperature
 *     DEV_TEMPERATURE_GPU_MAX_TLIMIT =       196 # T.Limit temperature after which GPU may be throttled below base clock
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_TEMPERATURE_SLOWDOWN_TLIMIT, __pyx_mstate_global->__pyx_int_194) < (0)) __PYX_ERR(0, 1029, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1030
 *     DEV_TEMPERATURE_SHUTDOWN_TLIMIT =      193 # T.Limit temperature after which GPU may shut down for HW protection
 *     DEV_TEMPERATURE_SLOWDOWN_TLIMIT =      194 # T.Limit temperature after which GPU may begin HW slowdown
 *     DEV_TEMPERATURE_MEM_MAX_TLIMIT =       195 # T.Limit temperature after which GPU may begin SW slowdown due to memory temperature             # <<<<<<<<<<<<<<
 *     DEV_TEMPERATURE_GPU_MAX_TLIMIT =       196 # T.Limit temperature after which GPU may be throttled below base clock
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_TEMPERATURE_MEM_MAX_TLIMIT, __pyx_mstate_global->__pyx_int_195) < (0)) __PYX_ERR(0, 1030, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1031
 *     DEV_TEMPERATURE_SLOWDOWN_TLIMIT =      194 # T.Limit temperature after which GPU may begin HW slowdown
 *     DEV_TEMPERATURE_MEM_MAX_TLIMIT =       195 # T.Limit temperature after which GPU may begin SW slowdown due to memory temperature
 *     DEV_TEMPERATURE_GPU_MAX_TLIMIT =       196 # T.Limit temperature after which GPU may be throttled below base clock             # <<<<<<<<<<<<<<
 * 
 *     DEV_PCIE_COUNT_TX_BYTES =              197 # PCIe transmit bytes. Value can be wrapped.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_TEMPERATURE_GPU_MAX_TLIMIT, __pyx_mstate_global->__pyx_int_196) < (0)) __PYX_ERR(0, 1031, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1033
 *     DEV_TEMPERATURE_GPU_MAX_TLIMIT =       196 # T.Limit temperature after which GPU may be throttled below base clock
 * 
 *     DEV_PCIE_COUNT_TX_BYTES =              197 # PCIe transmit bytes. Value can be wrapped.             # <<<<<<<<<<<<<<
 *     DEV_PCIE_COUNT_RX_BYTES =              198 # PCIe receive bytes. Value can be wrapped.
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_TX_BYTES, __pyx_mstate_global->__pyx_int_197) < (0)) __PYX_ERR(0, 1033, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1034
 * 
 *     DEV_PCIE_COUNT_TX_BYTES =              197 # PCIe transmit bytes. Value can be wrapped.
 *     DEV_PCIE_COUNT_RX_BYTES =              198 # PCIe receive bytes. Value can be wrapped.             # <<<<<<<<<<<<<<
 * 
 *     DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE =  199 # MIG mode independent, MIG query capable device. 1=yes. 0=no.
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_COUNT_RX_BYTES, __pyx_mstate_global->__pyx_int_198) < (0)) __PYX_ERR(0, 1034, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1036
 *     DEV_PCIE_COUNT_RX_BYTES =              198 # PCIe receive bytes. Value can be wrapped.
 * 
 *     DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE =  199 # MIG mode independent, MIG query capable device. 1=yes. 0=no.             # <<<<<<<<<<<<<<
 * 
 *     DEV_NVLINK_GET_POWER_THRESHOLD_MAX =             200 # Max Nvlink Power Threshold. See DEV_NVLINK_GET_POWER_THRESHOLD
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_IS_MIG_MODE_INDEPENDENT_MIG, __pyx_mstate_global->__pyx_int_199) < (0)) __PYX_ERR(0, 1036, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1038
 *     DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE =  199 # MIG mode independent, MIG query capable device. 1=yes. 0=no.
 * 
 *     DEV_NVLINK_GET_POWER_THRESHOLD_MAX =             200 # Max Nvlink Power Threshold. See DEV_NVLINK_GET_POWER_THRESHOLD             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_M, __pyx_mstate_global->__pyx_int_200) < (0)) __PYX_ERR(0, 1038, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1042
 * 
 *     # NVLink counter field id 201-225
 *     DEV_NVLINK_COUNT_XMIT_PACKETS =                   201 # Total Tx packets on the link in NVLink5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_XMIT_BYTES =                     202 # Total Tx bytes on the link in NVLink5
 *     DEV_NVLINK_COUNT_RCV_PACKETS =                    203 # Total Rx packets on the link in NVLink5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_XMIT_PACKETS, __pyx_mstate_global->__pyx_int_201) < (0)) __PYX_ERR(0, 1042, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1043
 *     # NVLink counter field id 201-225
 *     DEV_NVLINK_COUNT_XMIT_PACKETS =                   201 # Total Tx packets on the link in NVLink5
 *     DEV_NVLINK_COUNT_XMIT_BYTES =                     202 # Total Tx bytes on the link in NVLink5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RCV_PACKETS =                    203 # Total Rx packets on the link in NVLink5
 *     DEV_NVLINK_COUNT_RCV_BYTES =                      204 # Total Rx bytes on the link in NVLink5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_XMIT_BYTES, __pyx_mstate_global->__pyx_int_202) < (0)) __PYX_ERR(0, 1043, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1044
 *     DEV_NVLINK_COUNT_XMIT_PACKETS =                   201 # Total Tx packets on the link in NVLink5
 *     DEV_NVLINK_COUNT_XMIT_BYTES =                     202 # Total Tx bytes on the link in NVLink5
 *     DEV_NVLINK_COUNT_RCV_PACKETS =                    203 # Total Rx packets on the link in NVLink5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RCV_BYTES =                      204 # Total Rx bytes on the link in NVLink5
 *     DEV_NVLINK_COUNT_VL15_DROPPED =                   205 # Deprecated, do not use
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RCV_PACKETS, __pyx_mstate_global->__pyx_int_203) < (0)) __PYX_ERR(0, 1044, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1045
 *     DEV_NVLINK_COUNT_XMIT_BYTES =                     202 # Total Tx bytes on the link in NVLink5
 *     DEV_NVLINK_COUNT_RCV_PACKETS =                    203 # Total Rx packets on the link in NVLink5
 *     DEV_NVLINK_COUNT_RCV_BYTES =                      204 # Total Rx bytes on the link in NVLink5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_VL15_DROPPED =                   205 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS =        206 # Number of packets Rx on a link where packets are malformed
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RCV_BYTES, __pyx_mstate_global->__pyx_int_204) < (0)) __PYX_ERR(0, 1045, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1046
 *     DEV_NVLINK_COUNT_RCV_PACKETS =                    203 # Total Rx packets on the link in NVLink5
 *     DEV_NVLINK_COUNT_RCV_BYTES =                      204 # Total Rx bytes on the link in NVLink5
 *     DEV_NVLINK_COUNT_VL15_DROPPED =                   205 # Deprecated, do not use             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS =        206 # Number of packets Rx on a link where packets are malformed
 *     DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS =          207 # Number of packets that were discarded on Rx due to buffer overrun
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_VL15_DROPPED, __pyx_mstate_global->__pyx_int_205) < (0)) __PYX_ERR(0, 1046, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1047
 *     DEV_NVLINK_COUNT_RCV_BYTES =                      204 # Total Rx bytes on the link in NVLink5
 *     DEV_NVLINK_COUNT_VL15_DROPPED =                   205 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS =        206 # Number of packets Rx on a link where packets are malformed             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS =          207 # Number of packets that were discarded on Rx due to buffer overrun
 *     DEV_NVLINK_COUNT_RCV_ERRORS =                     208 # Total number of packets with errors Rx on a link
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_MALFORMED_PACKE, __pyx_mstate_global->__pyx_int_206) < (0)) __PYX_ERR(0, 1047, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1048
 *     DEV_NVLINK_COUNT_VL15_DROPPED =                   205 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS =        206 # Number of packets Rx on a link where packets are malformed
 *     DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS =          207 # Number of packets that were discarded on Rx due to buffer overrun             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RCV_ERRORS =                     208 # Total number of packets with errors Rx on a link
 *     DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS =              209 # Total number of packets Rx - stomp/EBP marker
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_BUFFER_OVERRUN, __pyx_mstate_global->__pyx_int_207) < (0)) __PYX_ERR(0, 1048, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1049
 *     DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS =        206 # Number of packets Rx on a link where packets are malformed
 *     DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS =          207 # Number of packets that were discarded on Rx due to buffer overrun
 *     DEV_NVLINK_COUNT_RCV_ERRORS =                     208 # Total number of packets with errors Rx on a link             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS =              209 # Total number of packets Rx - stomp/EBP marker
 *     DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS =             210 # Total number of packets Rx with header mismatch
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RCV_ERRORS, __pyx_mstate_global->__pyx_int_208) < (0)) __PYX_ERR(0, 1049, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1050
 *     DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS =          207 # Number of packets that were discarded on Rx due to buffer overrun
 *     DEV_NVLINK_COUNT_RCV_ERRORS =                     208 # Total number of packets with errors Rx on a link
 *     DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS =              209 # Total number of packets Rx - stomp/EBP marker             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS =             210 # Total number of packets Rx with header mismatch
 *     DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS =    211 # Total number of times that the count of local errors exceeded a threshold
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RCV_REMOTE_ERRO, __pyx_mstate_global->__pyx_int_209) < (0)) __PYX_ERR(0, 1050, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1051
 *     DEV_NVLINK_COUNT_RCV_ERRORS =                     208 # Total number of packets with errors Rx on a link
 *     DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS =              209 # Total number of packets Rx - stomp/EBP marker
 *     DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS =             210 # Total number of packets Rx with header mismatch             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS =    211 # Total number of times that the count of local errors exceeded a threshold
 *     DEV_NVLINK_COUNT_XMIT_DISCARDS =                  212 # Total number of tx error packets that were discarded
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RCV_GENERAL_ERR, __pyx_mstate_global->__pyx_int_210) < (0)) __PYX_ERR(0, 1051, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1052
 *     DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS =              209 # Total number of packets Rx - stomp/EBP marker
 *     DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS =             210 # Total number of packets Rx with header mismatch
 *     DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS =    211 # Total number of times that the count of local errors exceeded a threshold             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_XMIT_DISCARDS =                  212 # Total number of tx error packets that were discarded
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_LOCAL_LINK_INTE, __pyx_mstate_global->__pyx_int_211) < (0)) __PYX_ERR(0, 1052, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1053
 *     DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS =             210 # Total number of packets Rx with header mismatch
 *     DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS =    211 # Total number of times that the count of local errors exceeded a threshold
 *     DEV_NVLINK_COUNT_XMIT_DISCARDS =                  212 # Total number of tx error packets that were discarded             # <<<<<<<<<<<<<<
 * 
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS =213 # Number of times link went from Up to recovery, succeeded and link came back up
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_XMIT_DISCARDS, __pyx_mstate_global->__pyx_int_212) < (0)) __PYX_ERR(0, 1053, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1055
 *     DEV_NVLINK_COUNT_XMIT_DISCARDS =                  212 # Total number of tx error packets that were discarded
 * 
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS =213 # Number of times link went from Up to recovery, succeeded and link came back up             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTS =    214 # Number of times link went from Up to recovery, failed and link was declared down
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS =           215 # Number of times link went from Up to recovery, irrespective of the result
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_LINK_RECOVERY_S, __pyx_mstate_global->__pyx_int_213) < (0)) __PYX_ERR(0, 1055, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1056
 * 
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS =213 # Number of times link went from Up to recovery, succeeded and link came back up
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTS =    214 # Number of times link went from Up to recovery, failed and link was declared down             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS =           215 # Number of times link went from Up to recovery, irrespective of the result
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_LINK_RECOVERY_F, __pyx_mstate_global->__pyx_int_214) < (0)) __PYX_ERR(0, 1056, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1057
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS =213 # Number of times link went from Up to recovery, succeeded and link came back up
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTS =    214 # Number of times link went from Up to recovery, failed and link was declared down
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS =           215 # Number of times link went from Up to recovery, irrespective of the result             # <<<<<<<<<<<<<<
 * 
 *     DEV_NVLINK_COUNT_RAW_BER_LANE0 =                  216 # Deprecated, do not use
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_LINK_RECOVERY_E, __pyx_mstate_global->__pyx_int_215) < (0)) __PYX_ERR(0, 1057, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1059
 *     DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS =           215 # Number of times link went from Up to recovery, irrespective of the result
 * 
 *     DEV_NVLINK_COUNT_RAW_BER_LANE0 =                  216 # Deprecated, do not use             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RAW_BER_LANE1 =                  217 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_RAW_BER =                        218 # Deprecated, do not use
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RAW_BER_LANE0, __pyx_mstate_global->__pyx_int_216) < (0)) __PYX_ERR(0, 1059, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1060
 * 
 *     DEV_NVLINK_COUNT_RAW_BER_LANE0 =                  216 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_RAW_BER_LANE1 =                  217 # Deprecated, do not use             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_RAW_BER =                        218 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_EFFECTIVE_ERRORS =               219 # Sum of the number of errors in each Nvlink packet
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RAW_BER_LANE1, __pyx_mstate_global->__pyx_int_217) < (0)) __PYX_ERR(0, 1060, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1061
 *     DEV_NVLINK_COUNT_RAW_BER_LANE0 =                  216 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_RAW_BER_LANE1 =                  217 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_RAW_BER =                        218 # Deprecated, do not use             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_EFFECTIVE_ERRORS =               219 # Sum of the number of errors in each Nvlink packet
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_RAW_BER, __pyx_mstate_global->__pyx_int_218) < (0)) __PYX_ERR(0, 1061, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1062
 *     DEV_NVLINK_COUNT_RAW_BER_LANE1 =                  217 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_RAW_BER =                        218 # Deprecated, do not use
 *     DEV_NVLINK_COUNT_EFFECTIVE_ERRORS =               219 # Sum of the number of errors in each Nvlink packet             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Effective BER
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_EFFECTIVE_ERROR, __pyx_mstate_global->__pyx_int_219) < (0)) __PYX_ERR(0, 1062, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1065
 * 
 *     # NVLink Effective BER
 *     DEV_NVLINK_COUNT_EFFECTIVE_BER =                  220 # Effective BER for effective errors             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_SYMBOL_ERRORS =                  221 # Number of errors in rx symbols
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_EFFECTIVE_BER, __pyx_mstate_global->__pyx_int_220) < (0)) __PYX_ERR(0, 1065, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1066
 *     # NVLink Effective BER
 *     DEV_NVLINK_COUNT_EFFECTIVE_BER =                  220 # Effective BER for effective errors
 *     DEV_NVLINK_COUNT_SYMBOL_ERRORS =                  221 # Number of errors in rx symbols             # <<<<<<<<<<<<<<
 * 
 *     # NVLink Symbol BER
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_SYMBOL_ERRORS, __pyx_mstate_global->__pyx_int_221) < (0)) __PYX_ERR(0, 1066, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1069
 * 
 *     # NVLink Symbol BER
 *     DEV_NVLINK_COUNT_SYMBOL_BER =                     222 # BER for symbol errors             # <<<<<<<<<<<<<<
 * 
 *     DEV_NVLINK_GET_POWER_THRESHOLD_MIN =              223 # Min Nvlink Power Threshold. See DEV_NVLINK_GET_POWER_THRESHOLD
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_SYMBOL_BER, __pyx_mstate_global->__pyx_int_222) < (0)) __PYX_ERR(0, 1069, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1071
 *     DEV_NVLINK_COUNT_SYMBOL_BER =                     222 # BER for symbol errors
 * 
 *     DEV_NVLINK_GET_POWER_THRESHOLD_MIN =              223 # Min Nvlink Power Threshold. See DEV_NVLINK_GET_POWER_THRESHOLD             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_GET_POWER_THRESHOLD_UNITS =            224 # Values are in the form NVML_NVLINK_LOW_POWER_THRESHOLD_UNIT_*
 *     DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED =        225 # Determine if Nvlink Power Threshold feature is supported
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_M_2, __pyx_mstate_global->__pyx_int_223) < (0)) __PYX_ERR(0, 1071, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1072
 * 
 *     DEV_NVLINK_GET_POWER_THRESHOLD_MIN =              223 # Min Nvlink Power Threshold. See DEV_NVLINK_GET_POWER_THRESHOLD
 *     DEV_NVLINK_GET_POWER_THRESHOLD_UNITS =            224 # Values are in the form NVML_NVLINK_LOW_POWER_THRESHOLD_UNIT_*             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED =        225 # Determine if Nvlink Power Threshold feature is supported
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_U, __pyx_mstate_global->__pyx_int_224) < (0)) __PYX_ERR(0, 1072, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1073
 *     DEV_NVLINK_GET_POWER_THRESHOLD_MIN =              223 # Min Nvlink Power Threshold. See DEV_NVLINK_GET_POWER_THRESHOLD
 *     DEV_NVLINK_GET_POWER_THRESHOLD_UNITS =            224 # Values are in the form NVML_NVLINK_LOW_POWER_THRESHOLD_UNIT_*
 *     DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED =        225 # Determine if Nvlink Power Threshold feature is supported             # <<<<<<<<<<<<<<
 * 
 *     DEV_RESET_STATUS =                                226 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_GET_POWER_THRESHOLD_S, __pyx_mstate_global->__pyx_int_225) < (0)) __PYX_ERR(0, 1073, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1075
 *     DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED =        225 # Determine if Nvlink Power Threshold feature is supported
 * 
 *     DEV_RESET_STATUS =                                226 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)             # <<<<<<<<<<<<<<
 *     DEV_DRAIN_AND_RESET_STATUS =                      227 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)
 *     DEV_PCIE_OUTBOUND_ATOMICS_MASK =                  228
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_RESET_STATUS, __pyx_mstate_global->__pyx_int_226) < (0)) __PYX_ERR(0, 1075, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1076
 * 
 *     DEV_RESET_STATUS =                                226 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)
 *     DEV_DRAIN_AND_RESET_STATUS =                      227 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)             # <<<<<<<<<<<<<<
 *     DEV_PCIE_OUTBOUND_ATOMICS_MASK =                  228
 *     DEV_PCIE_INBOUND_ATOMICS_MASK =                   229
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_DRAIN_AND_RESET_STATUS, __pyx_mstate_global->__pyx_int_227) < (0)) __PYX_ERR(0, 1076, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1077
 *     DEV_RESET_STATUS =                                226 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)
 *     DEV_DRAIN_AND_RESET_STATUS =                      227 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)
 *     DEV_PCIE_OUTBOUND_ATOMICS_MASK =                  228             # <<<<<<<<<<<<<<
 *     DEV_PCIE_INBOUND_ATOMICS_MASK =                   229
 *     DEV_GET_GPU_RECOVERY_ACTION =                     230 # GPU Recovery action - None/Reset/Reboot/Drain P2P/Drain and Reset
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_OUTBOUND_ATOMICS_MASK, __pyx_mstate_global->__pyx_int_228) < (0)) __PYX_ERR(0, 1077, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1078
 *     DEV_DRAIN_AND_RESET_STATUS =                      227 # Deprecated, do not use (use DEV_GET_GPU_RECOVERY_ACTION instead)
 *     DEV_PCIE_OUTBOUND_ATOMICS_MASK =                  228
 *     DEV_PCIE_INBOUND_ATOMICS_MASK =                   229             # <<<<<<<<<<<<<<
 *     DEV_GET_GPU_RECOVERY_ACTION =                     230 # GPU Recovery action - None/Reset/Reboot/Drain P2P/Drain and Reset
 *     DEV_C2C_LINK_ERROR_INTR =                         231 # C2C Link CRC Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PCIE_INBOUND_ATOMICS_MASK, __pyx_mstate_global->__pyx_int_229) < (0)) __PYX_ERR(0, 1078, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1079
 *     DEV_PCIE_OUTBOUND_ATOMICS_MASK =                  228
 *     DEV_PCIE_INBOUND_ATOMICS_MASK =                   229
 *     DEV_GET_GPU_RECOVERY_ACTION =                     230 # GPU Recovery action - None/Reset/Reboot/Drain P2P/Drain and Reset             # <<<<<<<<<<<<<<
 *     DEV_C2C_LINK_ERROR_INTR =                         231 # C2C Link CRC Error Counter
 *     DEV_C2C_LINK_ERROR_REPLAY =                       232 # C2C Link Replay Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_GET_GPU_RECOVERY_ACTION, __pyx_mstate_global->__pyx_int_230) < (0)) __PYX_ERR(0, 1079, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1080
 *     DEV_PCIE_INBOUND_ATOMICS_MASK =                   229
 *     DEV_GET_GPU_RECOVERY_ACTION =                     230 # GPU Recovery action - None/Reset/Reboot/Drain P2P/Drain and Reset
 *     DEV_C2C_LINK_ERROR_INTR =                         231 # C2C Link CRC Error Counter             # <<<<<<<<<<<<<<
 *     DEV_C2C_LINK_ERROR_REPLAY =                       232 # C2C Link Replay Error Counter
 *     DEV_C2C_LINK_ERROR_REPLAY_B2B =                   233 # C2C Link Back to Back Replay Error Counter
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_ERROR_INTR, __pyx_mstate_global->__pyx_int_231) < (0)) __PYX_ERR(0, 1080, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1081
 *     DEV_GET_GPU_RECOVERY_ACTION =                     230 # GPU Recovery action - None/Reset/Reboot/Drain P2P/Drain and Reset
 *     DEV_C2C_LINK_ERROR_INTR =                         231 # C2C Link CRC Error Counter
 *     DEV_C2C_LINK_ERROR_REPLAY =                       232 # C2C Link Replay Error Counter             # <<<<<<<<<<<<<<
 *     DEV_C2C_LINK_ERROR_REPLAY_B2B =                   233 # C2C Link Back to Back Replay Error Counter
 *     DEV_C2C_LINK_POWER_STATE =                        234 # C2C Link Power state. See NVML_C2C_POWER_STATE_*
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_ERROR_REPLAY, __pyx_mstate_global->__pyx_int_232) < (0)) __PYX_ERR(0, 1081, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1082
 *     DEV_C2C_LINK_ERROR_INTR =                         231 # C2C Link CRC Error Counter
 *     DEV_C2C_LINK_ERROR_REPLAY =                       232 # C2C Link Replay Error Counter
 *     DEV_C2C_LINK_ERROR_REPLAY_B2B =                   233 # C2C Link Back to Back Replay Error Counter             # <<<<<<<<<<<<<<
 *     DEV_C2C_LINK_POWER_STATE =                        234 # C2C Link Power state. See NVML_C2C_POWER_STATE_*
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_ERROR_REPLAY_B2B, __pyx_mstate_global->__pyx_int_233) < (0)) __PYX_ERR(0, 1082, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1083
 *     DEV_C2C_LINK_ERROR_REPLAY =                       232 # C2C Link Replay Error Counter
 *     DEV_C2C_LINK_ERROR_REPLAY_B2B =                   233 # C2C Link Back to Back Replay Error Counter
 *     DEV_C2C_LINK_POWER_STATE =                        234 # C2C Link Power state. See NVML_C2C_POWER_STATE_*             # <<<<<<<<<<<<<<
 * 
 *     # NVLink counter field id 235-250
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_C2C_LINK_POWER_STATE, __pyx_mstate_global->__pyx_int_234) < (0)) __PYX_ERR(0, 1083, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1086
 * 
 *     # NVLink counter field id 235-250
 *     DEV_NVLINK_COUNT_FEC_HISTORY_0 =                  235 # Count of symbol errors that are corrected - bin 0             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_1 =                  236 # Count of symbol errors that are corrected - bin 1
 *     DEV_NVLINK_COUNT_FEC_HISTORY_2 =                  237 # Count of symbol errors that are corrected - bin 2
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_0, __pyx_mstate_global->__pyx_int_235) < (0)) __PYX_ERR(0, 1086, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1087
 *     # NVLink counter field id 235-250
 *     DEV_NVLINK_COUNT_FEC_HISTORY_0 =                  235 # Count of symbol errors that are corrected - bin 0
 *     DEV_NVLINK_COUNT_FEC_HISTORY_1 =                  236 # Count of symbol errors that are corrected - bin 1             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_2 =                  237 # Count of symbol errors that are corrected - bin 2
 *     DEV_NVLINK_COUNT_FEC_HISTORY_3 =                  238 # Count of symbol errors that are corrected - bin 3
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_1, __pyx_mstate_global->__pyx_int_236) < (0)) __PYX_ERR(0, 1087, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1088
 *     DEV_NVLINK_COUNT_FEC_HISTORY_0 =                  235 # Count of symbol errors that are corrected - bin 0
 *     DEV_NVLINK_COUNT_FEC_HISTORY_1 =                  236 # Count of symbol errors that are corrected - bin 1
 *     DEV_NVLINK_COUNT_FEC_HISTORY_2 =                  237 # Count of symbol errors that are corrected - bin 2             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_3 =                  238 # Count of symbol errors that are corrected - bin 3
 *     DEV_NVLINK_COUNT_FEC_HISTORY_4 =                  239 # Count of symbol errors that are corrected - bin 4
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_2, __pyx_mstate_global->__pyx_int_237) < (0)) __PYX_ERR(0, 1088, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1089
 *     DEV_NVLINK_COUNT_FEC_HISTORY_1 =                  236 # Count of symbol errors that are corrected - bin 1
 *     DEV_NVLINK_COUNT_FEC_HISTORY_2 =                  237 # Count of symbol errors that are corrected - bin 2
 *     DEV_NVLINK_COUNT_FEC_HISTORY_3 =                  238 # Count of symbol errors that are corrected - bin 3             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_4 =                  239 # Count of symbol errors that are corrected - bin 4
 *     DEV_NVLINK_COUNT_FEC_HISTORY_5 =                  240 # Count of symbol errors that are corrected - bin 5
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_3, __pyx_mstate_global->__pyx_int_238) < (0)) __PYX_ERR(0, 1089, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1090
 *     DEV_NVLINK_COUNT_FEC_HISTORY_2 =                  237 # Count of symbol errors that are corrected - bin 2
 *     DEV_NVLINK_COUNT_FEC_HISTORY_3 =                  238 # Count of symbol errors that are corrected - bin 3
 *     DEV_NVLINK_COUNT_FEC_HISTORY_4 =                  239 # Count of symbol errors that are corrected - bin 4             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_5 =                  240 # Count of symbol errors that are corrected - bin 5
 *     DEV_NVLINK_COUNT_FEC_HISTORY_6 =                  241 # Count of symbol errors that are corrected - bin 6
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_4, __pyx_mstate_global->__pyx_int_239) < (0)) __PYX_ERR(0, 1090, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1091
 *     DEV_NVLINK_COUNT_FEC_HISTORY_3 =                  238 # Count of symbol errors that are corrected - bin 3
 *     DEV_NVLINK_COUNT_FEC_HISTORY_4 =                  239 # Count of symbol errors that are corrected - bin 4
 *     DEV_NVLINK_COUNT_FEC_HISTORY_5 =                  240 # Count of symbol errors that are corrected - bin 5             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_6 =                  241 # Count of symbol errors that are corrected - bin 6
 *     DEV_NVLINK_COUNT_FEC_HISTORY_7 =                  242 # Count of symbol errors that are corrected - bin 7
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_5, __pyx_mstate_global->__pyx_int_240) < (0)) __PYX_ERR(0, 1091, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1092
 *     DEV_NVLINK_COUNT_FEC_HISTORY_4 =                  239 # Count of symbol errors that are corrected - bin 4
 *     DEV_NVLINK_COUNT_FEC_HISTORY_5 =                  240 # Count of symbol errors that are corrected - bin 5
 *     DEV_NVLINK_COUNT_FEC_HISTORY_6 =                  241 # Count of symbol errors that are corrected - bin 6             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_7 =                  242 # Count of symbol errors that are corrected - bin 7
 *     DEV_NVLINK_COUNT_FEC_HISTORY_8 =                  243 # Count of symbol errors that are corrected - bin 8
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_6, __pyx_mstate_global->__pyx_int_241) < (0)) __PYX_ERR(0, 1092, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1093
 *     DEV_NVLINK_COUNT_FEC_HISTORY_5 =                  240 # Count of symbol errors that are corrected - bin 5
 *     DEV_NVLINK_COUNT_FEC_HISTORY_6 =                  241 # Count of symbol errors that are corrected - bin 6
 *     DEV_NVLINK_COUNT_FEC_HISTORY_7 =                  242 # Count of symbol errors that are corrected - bin 7             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_8 =                  243 # Count of symbol errors that are corrected - bin 8
 *     DEV_NVLINK_COUNT_FEC_HISTORY_9 =                  244 # Count of symbol errors that are corrected - bin 9
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_7, __pyx_mstate_global->__pyx_int_242) < (0)) __PYX_ERR(0, 1093, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1094
 *     DEV_NVLINK_COUNT_FEC_HISTORY_6 =                  241 # Count of symbol errors that are corrected - bin 6
 *     DEV_NVLINK_COUNT_FEC_HISTORY_7 =                  242 # Count of symbol errors that are corrected - bin 7
 *     DEV_NVLINK_COUNT_FEC_HISTORY_8 =                  243 # Count of symbol errors that are corrected - bin 8             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_9 =                  244 # Count of symbol errors that are corrected - bin 9
 *     DEV_NVLINK_COUNT_FEC_HISTORY_10 =                 245 # Count of symbol errors that are corrected - bin 10
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_8, __pyx_mstate_global->__pyx_int_243) < (0)) __PYX_ERR(0, 1094, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1095
 *     DEV_NVLINK_COUNT_FEC_HISTORY_7 =                  242 # Count of symbol errors that are corrected - bin 7
 *     DEV_NVLINK_COUNT_FEC_HISTORY_8 =                  243 # Count of symbol errors that are corrected - bin 8
 *     DEV_NVLINK_COUNT_FEC_HISTORY_9 =                  244 # Count of symbol errors that are corrected - bin 9             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_10 =                 245 # Count of symbol errors that are corrected - bin 10
 *     DEV_NVLINK_COUNT_FEC_HISTORY_11 =                 246 # Count of symbol errors that are corrected - bin 11
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_9, __pyx_mstate_global->__pyx_int_244) < (0)) __PYX_ERR(0, 1095, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1096
 *     DEV_NVLINK_COUNT_FEC_HISTORY_8 =                  243 # Count of symbol errors that are corrected - bin 8
 *     DEV_NVLINK_COUNT_FEC_HISTORY_9 =                  244 # Count of symbol errors that are corrected - bin 9
 *     DEV_NVLINK_COUNT_FEC_HISTORY_10 =                 245 # Count of symbol errors that are corrected - bin 10             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_11 =                 246 # Count of symbol errors that are corrected - bin 11
 *     DEV_NVLINK_COUNT_FEC_HISTORY_12 =                 247 # Count of symbol errors that are corrected - bin 12
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_10, __pyx_mstate_global->__pyx_int_245) < (0)) __PYX_ERR(0, 1096, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1097
 *     DEV_NVLINK_COUNT_FEC_HISTORY_9 =                  244 # Count of symbol errors that are corrected - bin 9
 *     DEV_NVLINK_COUNT_FEC_HISTORY_10 =                 245 # Count of symbol errors that are corrected - bin 10
 *     DEV_NVLINK_COUNT_FEC_HISTORY_11 =                 246 # Count of symbol errors that are corrected - bin 11             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_12 =                 247 # Count of symbol errors that are corrected - bin 12
 *     DEV_NVLINK_COUNT_FEC_HISTORY_13 =                 248 # Count of symbol errors that are corrected - bin 13
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_11, __pyx_mstate_global->__pyx_int_246) < (0)) __PYX_ERR(0, 1097, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1098
 *     DEV_NVLINK_COUNT_FEC_HISTORY_10 =                 245 # Count of symbol errors that are corrected - bin 10
 *     DEV_NVLINK_COUNT_FEC_HISTORY_11 =                 246 # Count of symbol errors that are corrected - bin 11
 *     DEV_NVLINK_COUNT_FEC_HISTORY_12 =                 247 # Count of symbol errors that are corrected - bin 12             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_13 =                 248 # Count of symbol errors that are corrected - bin 13
 *     DEV_NVLINK_COUNT_FEC_HISTORY_14 =                 249 # Count of symbol errors that are corrected - bin 14
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_12, __pyx_mstate_global->__pyx_int_247) < (0)) __PYX_ERR(0, 1098, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1099
 *     DEV_NVLINK_COUNT_FEC_HISTORY_11 =                 246 # Count of symbol errors that are corrected - bin 11
 *     DEV_NVLINK_COUNT_FEC_HISTORY_12 =                 247 # Count of symbol errors that are corrected - bin 12
 *     DEV_NVLINK_COUNT_FEC_HISTORY_13 =                 248 # Count of symbol errors that are corrected - bin 13             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_14 =                 249 # Count of symbol errors that are corrected - bin 14
 *     DEV_NVLINK_COUNT_FEC_HISTORY_15 =                 250 # Count of symbol errors that are corrected - bin 15
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_13, __pyx_mstate_global->__pyx_int_248) < (0)) __PYX_ERR(0, 1099, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1100
 *     DEV_NVLINK_COUNT_FEC_HISTORY_12 =                 247 # Count of symbol errors that are corrected - bin 12
 *     DEV_NVLINK_COUNT_FEC_HISTORY_13 =                 248 # Count of symbol errors that are corrected - bin 13
 *     DEV_NVLINK_COUNT_FEC_HISTORY_14 =                 249 # Count of symbol errors that are corrected - bin 14             # <<<<<<<<<<<<<<
 *     DEV_NVLINK_COUNT_FEC_HISTORY_15 =                 250 # Count of symbol errors that are corrected - bin 15
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_14, __pyx_mstate_global->__pyx_int_249) < (0)) __PYX_ERR(0, 1100, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1101
 *     DEV_NVLINK_COUNT_FEC_HISTORY_13 =                 248 # Count of symbol errors that are corrected - bin 13
 *     DEV_NVLINK_COUNT_FEC_HISTORY_14 =                 249 # Count of symbol errors that are corrected - bin 14
 *     DEV_NVLINK_COUNT_FEC_HISTORY_15 =                 250 # Count of symbol errors that are corrected - bin 15             # <<<<<<<<<<<<<<
 * 
 *     # Power Smoothing
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_NVLINK_COUNT_FEC_HISTORY_15, __pyx_mstate_global->__pyx_int_250) < (0)) __PYX_ERR(0, 1101, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1104
 * 
 *     # Power Smoothing
 *     PWR_SMOOTHING_ENABLED =                                  251 # Enablement (0/DISABLED or 1/ENABLED)             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_PRIV_LVL =                                 252 # Current privilege level
 *     PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED =                    253 # Immediate ramp down enablement (0/DISABLED or 1/ENABLED)
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_ENABLED, __pyx_mstate_global->__pyx_int_251) < (0)) __PYX_ERR(0, 1104, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1105
 *     # Power Smoothing
 *     PWR_SMOOTHING_ENABLED =                                  251 # Enablement (0/DISABLED or 1/ENABLED)
 *     PWR_SMOOTHING_PRIV_LVL =                                 252 # Current privilege level             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED =                    253 # Immediate ramp down enablement (0/DISABLED or 1/ENABLED)
 *     PWR_SMOOTHING_APPLIED_TMP_CEIL =                         254 # Applied TMP ceiling value in Watts
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_PRIV_LVL, __pyx_mstate_global->__pyx_int_252) < (0)) __PYX_ERR(0, 1105, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1106
 *     PWR_SMOOTHING_ENABLED =                                  251 # Enablement (0/DISABLED or 1/ENABLED)
 *     PWR_SMOOTHING_PRIV_LVL =                                 252 # Current privilege level
 *     PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED =                    253 # Immediate ramp down enablement (0/DISABLED or 1/ENABLED)             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_APPLIED_TMP_CEIL =                         254 # Applied TMP ceiling value in Watts
 *     PWR_SMOOTHING_APPLIED_TMP_FLOOR =                        255 # Applied TMP floor value in Watts
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_IMM_RAMP_DOWN_ENAB, __pyx_mstate_global->__pyx_int_253) < (0)) __PYX_ERR(0, 1106, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1107
 *     PWR_SMOOTHING_PRIV_LVL =                                 252 # Current privilege level
 *     PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED =                    253 # Immediate ramp down enablement (0/DISABLED or 1/ENABLED)
 *     PWR_SMOOTHING_APPLIED_TMP_CEIL =                         254 # Applied TMP ceiling value in Watts             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_APPLIED_TMP_FLOOR =                        255 # Applied TMP floor value in Watts
 *     PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING =            256 # Max % TMP Floor value
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_APPLIED_TMP_CEIL, __pyx_mstate_global->__pyx_int_254) < (0)) __PYX_ERR(0, 1107, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1108
 *     PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED =                    253 # Immediate ramp down enablement (0/DISABLED or 1/ENABLED)
 *     PWR_SMOOTHING_APPLIED_TMP_CEIL =                         254 # Applied TMP ceiling value in Watts
 *     PWR_SMOOTHING_APPLIED_TMP_FLOOR =                        255 # Applied TMP floor value in Watts             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING =            256 # Max % TMP Floor value
 *     PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING =            257 # Min % TMP Floor value
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_APPLIED_TMP_FLOOR, __pyx_mstate_global->__pyx_int_255) < (0)) __PYX_ERR(0, 1108, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1109
 *     PWR_SMOOTHING_APPLIED_TMP_CEIL =                         254 # Applied TMP ceiling value in Watts
 *     PWR_SMOOTHING_APPLIED_TMP_FLOOR =                        255 # Applied TMP floor value in Watts
 *     PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING =            256 # Max % TMP Floor value             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING =            257 # Min % TMP Floor value
 *     PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING =  258 # HW Circuitry % lifetime remaining
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_MAX_PERCENT_TMP_FL, __pyx_mstate_global->__pyx_int_256) < (0)) __PYX_ERR(0, 1109, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1110
 *     PWR_SMOOTHING_APPLIED_TMP_FLOOR =                        255 # Applied TMP floor value in Watts
 *     PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING =            256 # Max % TMP Floor value
 *     PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING =            257 # Min % TMP Floor value             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING =  258 # HW Circuitry % lifetime remaining
 *     PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES =                  259 # Max number of preset profiles
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_MIN_PERCENT_TMP_FL, __pyx_mstate_global->__pyx_int_257) < (0)) __PYX_ERR(0, 1110, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1111
 *     PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING =            256 # Max % TMP Floor value
 *     PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING =            257 # Min % TMP Floor value
 *     PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING =  258 # HW Circuitry % lifetime remaining             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES =                  259 # Max number of preset profiles
 *     PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR =                260 # % TMP floor for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_HW_CIRCUITRY_PERCE, __pyx_mstate_global->__pyx_int_258) < (0)) __PYX_ERR(0, 1111, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1112
 *     PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING =            257 # Min % TMP Floor value
 *     PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING =  258 # HW Circuitry % lifetime remaining
 *     PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES =                  259 # Max number of preset profiles             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR =                260 # % TMP floor for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_UP_RATE =                     261 # Ramp up rate in mW/s for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_MAX_NUM_PRESET_PRO, __pyx_mstate_global->__pyx_int_259) < (0)) __PYX_ERR(0, 1112, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1113
 *     PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING =  258 # HW Circuitry % lifetime remaining
 *     PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES =                  259 # Max number of preset profiles
 *     PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR =                260 # % TMP floor for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_PROFILE_RAMP_UP_RATE =                     261 # Ramp up rate in mW/s for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE =                   262 # Ramp down rate in mW/s for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_PROFILE_PERCENT_TM, __pyx_mstate_global->__pyx_int_260) < (0)) __PYX_ERR(0, 1113, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1114
 *     PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES =                  259 # Max number of preset profiles
 *     PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR =                260 # % TMP floor for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_UP_RATE =                     261 # Ramp up rate in mW/s for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE =                   262 # Ramp down rate in mW/s for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL =               263 # Ramp down hysteresis value in ms for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_PROFILE_RAMP_UP_RA, __pyx_mstate_global->__pyx_int_261) < (0)) __PYX_ERR(0, 1114, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1115
 *     PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR =                260 # % TMP floor for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_UP_RATE =                     261 # Ramp up rate in mW/s for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE =                   262 # Ramp down rate in mW/s for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL =               263 # Ramp down hysteresis value in ms for a given profile
 *     PWR_SMOOTHING_ACTIVE_PRESET_PROFILE =                    264 # Active preset profile number
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_PROFILE_RAMP_DOWN, __pyx_mstate_global->__pyx_int_262) < (0)) __PYX_ERR(0, 1115, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1116
 *     PWR_SMOOTHING_PROFILE_RAMP_UP_RATE =                     261 # Ramp up rate in mW/s for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE =                   262 # Ramp down rate in mW/s for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL =               263 # Ramp down hysteresis value in ms for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_ACTIVE_PRESET_PROFILE =                    264 # Active preset profile number
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR =         265 # % TMP floor for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_PROFILE_RAMP_DOWN_2, __pyx_mstate_global->__pyx_int_263) < (0)) __PYX_ERR(0, 1116, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1117
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE =                   262 # Ramp down rate in mW/s for a given profile
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL =               263 # Ramp down hysteresis value in ms for a given profile
 *     PWR_SMOOTHING_ACTIVE_PRESET_PROFILE =                    264 # Active preset profile number             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR =         265 # % TMP floor for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE =              266 # Ramp up rate in mW/s for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_ACTIVE_PRESET_PROF, __pyx_mstate_global->__pyx_int_264) < (0)) __PYX_ERR(0, 1117, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1118
 *     PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL =               263 # Ramp down hysteresis value in ms for a given profile
 *     PWR_SMOOTHING_ACTIVE_PRESET_PROFILE =                    264 # Active preset profile number
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR =         265 # % TMP floor for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE =              266 # Ramp up rate in mW/s for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE =            267 # Ramp down rate in mW/s for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_PER, __pyx_mstate_global->__pyx_int_265) < (0)) __PYX_ERR(0, 1118, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1119
 *     PWR_SMOOTHING_ACTIVE_PRESET_PROFILE =                    264 # Active preset profile number
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR =         265 # % TMP floor for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE =              266 # Ramp up rate in mW/s for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE =            267 # Ramp down rate in mW/s for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VAL =        268 # Ramp down hysteresis value in ms for a given profile
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_RAM, __pyx_mstate_global->__pyx_int_266) < (0)) __PYX_ERR(0, 1119, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1120
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR =         265 # % TMP floor for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE =              266 # Ramp up rate in mW/s for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE =            267 # Ramp down rate in mW/s for a given profile             # <<<<<<<<<<<<<<
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VAL =        268 # Ramp down hysteresis value in ms for a given profile
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_RAM_2, __pyx_mstate_global->__pyx_int_267) < (0)) __PYX_ERR(0, 1120, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1121
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE =              266 # Ramp up rate in mW/s for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE =            267 # Ramp down rate in mW/s for a given profile
 *     PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VAL =        268 # Ramp down hysteresis value in ms for a given profile             # <<<<<<<<<<<<<<
 * 
 *     # Field values for Clock Throttle Reason Counters
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_PWR_SMOOTHING_ADMIN_OVERRIDE_RAM_3, __pyx_mstate_global->__pyx_int_268) < (0)) __PYX_ERR(0, 1121, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1124
 * 
 *     # Field values for Clock Throttle Reason Counters
 *     DEV_CLOCKS_EVENT_REASON_SW_POWER_CAP =            DEV_PERF_POLICY_POWER      # Throttling to not exceed currently set power limits in ns             # <<<<<<<<<<<<<<
 *     DEV_CLOCKS_EVENT_REASON_SYNC_BOOST =              DEV_PERF_POLICY_SYNC_BOOST # Throttling to match minimum possible clock across Sync Boost Group in ns
 *     DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN =       269 # Throttling to ensure ((GPU temp < GPU Max Operating Temp) && (Memory Temp < Memory Max Operating Temp)) in ns
*/
  __pyx_t_11 = PyObject_GetItem(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_POWER);
  if (unlikely(!__pyx_t_11)) {
    PyErr_Clear();
    __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_POWER);
  }
  if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_CLOCKS_EVENT_REASON_SW_POWER, __pyx_t_11) < (0)) __PYX_ERR(0, 1124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":1125
 *     # Field values for Clock Throttle Reason Counters
 *     DEV_CLOCKS_EVENT_REASON_SW_POWER_CAP =            DEV_PERF_POLICY_POWER      # Throttling to not exceed currently set power limits in ns
 *     DEV_CLOCKS_EVENT_REASON_SYNC_BOOST =              DEV_PERF_POLICY_SYNC_BOOST # Throttling to match minimum possible clock across Sync Boost Group in ns             # <<<<<<<<<<<<<<
 *     DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN =       269 # Throttling to ensure ((GPU temp < GPU Max Operating Temp) && (Memory Temp < Memory Max Operating Temp)) in ns
 *     DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN =       270 # Throttling due to temperature being too high (reducing core clocks by a factor of 2 or more) in ns
*/
  __pyx_t_11 = PyObject_GetItem(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_SYNC_BOOST);
  if (unlikely(!__pyx_t_11)) {
    PyErr_Clear();
    __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_DEV_PERF_POLICY_SYNC_BOOST);
  }
  if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_CLOCKS_EVENT_REASON_SYNC_BOO, __pyx_t_11) < (0)) __PYX_ERR(0, 1125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;

  /* "cuda/bindings/_nvml.pyx":1126
 *     DEV_CLOCKS_EVENT_REASON_SW_POWER_CAP =            DEV_PERF_POLICY_POWER      # Throttling to not exceed currently set power limits in ns
 *     DEV_CLOCKS_EVENT_REASON_SYNC_BOOST =              DEV_PERF_POLICY_SYNC_BOOST # Throttling to match minimum possible clock across Sync Boost Group in ns
 *     DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN =       269 # Throttling to ensure ((GPU temp < GPU Max Operating Temp) && (Memory Temp < Memory Max Operating Temp)) in ns             # <<<<<<<<<<<<<<
 *     DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN =       270 # Throttling due to temperature being too high (reducing core clocks by a factor of 2 or more) in ns
 *     DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN = 271 # Throttling due to external power brake assertion trigger (reducing core clocks by a factor of 2 or more) in ns
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_CLOCKS_EVENT_REASON_SW_THERM, __pyx_mstate_global->__pyx_int_269) < (0)) __PYX_ERR(0, 1126, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1127
 *     DEV_CLOCKS_EVENT_REASON_SYNC_BOOST =              DEV_PERF_POLICY_SYNC_BOOST # Throttling to match minimum possible clock across Sync Boost Group in ns
 *     DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN =       269 # Throttling to ensure ((GPU temp < GPU Max Operating Temp) && (Memory Temp < Memory Max Operating Temp)) in ns
 *     DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN =       270 # Throttling due to temperature being too high (reducing core clocks by a factor of 2 or more) in ns             # <<<<<<<<<<<<<<
 *     DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN = 271 # Throttling due to external power brake assertion trigger (reducing core clocks by a factor of 2 or more) in ns
 *     DEV_POWER_SYNC_BALANCING_FREQ =                   272 # Accumulated frequency of the GPU to be used for averaging
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_CLOCKS_EVENT_REASON_HW_THERM, __pyx_mstate_global->__pyx_int_270) < (0)) __PYX_ERR(0, 1127, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1128
 *     DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN =       269 # Throttling to ensure ((GPU temp < GPU Max Operating Temp) && (Memory Temp < Memory Max Operating Temp)) in ns
 *     DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN =       270 # Throttling due to temperature being too high (reducing core clocks by a factor of 2 or more) in ns
 *     DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN = 271 # Throttling due to external power brake assertion trigger (reducing core clocks by a factor of 2 or more) in ns             # <<<<<<<<<<<<<<
 *     DEV_POWER_SYNC_BALANCING_FREQ =                   272 # Accumulated frequency of the GPU to be used for averaging
 *     DEV_POWER_SYNC_BALANCING_AF =                     273 # Accumulated activity factor of the GPU to be used for averaging
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_CLOCKS_EVENT_REASON_HW_POWER, __pyx_mstate_global->__pyx_int_271) < (0)) __PYX_ERR(0, 1128, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1129
 *     DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN =       270 # Throttling due to temperature being too high (reducing core clocks by a factor of 2 or more) in ns
 *     DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN = 271 # Throttling due to external power brake assertion trigger (reducing core clocks by a factor of 2 or more) in ns
 *     DEV_POWER_SYNC_BALANCING_FREQ =                   272 # Accumulated frequency of the GPU to be used for averaging             # <<<<<<<<<<<<<<
 *     DEV_POWER_SYNC_BALANCING_AF =                     273 # Accumulated activity factor of the GPU to be used for averaging
 *     MAX =                                             274 # One greater than the largest field ID defined above
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_SYNC_BALANCING_FREQ, __pyx_mstate_global->__pyx_int_272) < (0)) __PYX_ERR(0, 1129, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1130
 *     DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN = 271 # Throttling due to external power brake assertion trigger (reducing core clocks by a factor of 2 or more) in ns
 *     DEV_POWER_SYNC_BALANCING_FREQ =                   272 # Accumulated frequency of the GPU to be used for averaging
 *     DEV_POWER_SYNC_BALANCING_AF =                     273 # Accumulated activity factor of the GPU to be used for averaging             # <<<<<<<<<<<<<<
 *     MAX =                                             274 # One greater than the largest field ID defined above
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DEV_POWER_SYNC_BALANCING_AF, __pyx_mstate_global->__pyx_int_273) < (0)) __PYX_ERR(0, 1130, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1131
 *     DEV_POWER_SYNC_BALANCING_FREQ =                   272 # Accumulated frequency of the GPU to be used for averaging
 *     DEV_POWER_SYNC_BALANCING_AF =                     273 # Accumulated activity factor of the GPU to be used for averaging
 *     MAX =                                             274 # One greater than the largest field ID defined above             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_MAX, __pyx_mstate_global->__pyx_int_274) < (0)) __PYX_ERR(0, 1131, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":763
 * 
 * 
 * class FI(_IntEnum):             # <<<<<<<<<<<<<<
 *     DEV_ECC_CURRENT =          1   # Current ECC mode. 1=Active. 0=Inactive
 *     DEV_ECC_PENDING =          2   # Pending ECC mode. 1=Active. 0=Inactive
*/
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_FI, __pyx_t_4, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_FI, __pyx_t_11) < (0)) __PYX_ERR(0, 763, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1134
 * 
 * 
 * NVLINK_MAX_LINKS = 18             # <<<<<<<<<<<<<<
 * 
 * 
*/
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NVLINK_MAX_LINKS, __pyx_mstate_global->__pyx_int_18) < (0)) __PYX_ERR(0, 1134, __pyx_L1_error)

  /* "cuda/bindings/_nvml.pyx":1142
 * 
 * 
 * class NvmlError(Exception):             # <<<<<<<<<<<<<<
 *     def __init__(self, status):
 *         self.status = status
*/
  __pyx_t_4 = PyTuple_Pack(1, ((PyObject *)(((PyTypeObject*)PyExc_Exception)))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError, __pyx_mstate_global->__pyx_n_u_NvmlError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1142, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1143
 * 
 * class NvmlError(Exception):
 *     def __init__(self, status):             # <<<<<<<<<<<<<<
 *         self.status = status
 *         s = error_string(status)
*/
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9NvmlError_1__init__, 0, __pyx_mstate_global->__pyx_n_u_NvmlError___init, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_init, __pyx_t_4) < (0)) __PYX_ERR(0, 1143, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1148
 *         super(NvmlError, self).__init__(s)
 * 
 *     def __reduce__(self):             # <<<<<<<<<<<<<<
 *         return (type(self), (self.status,))
 * 
*/
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9NvmlError_3__reduce__, 0, __pyx_mstate_global->__pyx_n_u_NvmlError___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[1])); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (__Pyx_SetNameInClass(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_reduce, __pyx_t_4) < (0)) __PYX_ERR(0, 1148, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":1142
 * 
 * 
 * class NvmlError(Exception):             # <<<<<<<<<<<<<<
 *     def __init__(self, status):
 *         self.status = status
*/
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NvmlError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NvmlError, __pyx_t_4) < (0)) __PYX_ERR(0, 1142, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1152
 * 
 * 
 * class UninitializedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_UninitializedError, __pyx_mstate_global->__pyx_n_u_UninitializedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1152, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_UninitializedError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_UninitializedError, __pyx_t_10) < (0)) __PYX_ERR(0, 1152, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1155
 *     pass
 * 
 * class InvalidArgumentError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_InvalidArgumentError, __pyx_mstate_global->__pyx_n_u_InvalidArgumentError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1155, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_InvalidArgumentError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InvalidArgumentError, __pyx_t_11) < (0)) __PYX_ERR(0, 1155, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1158
 *     pass
 * 
 * class NotSupportedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_NotSupportedError, __pyx_mstate_global->__pyx_n_u_NotSupportedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1158, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NotSupportedError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NotSupportedError, __pyx_t_4) < (0)) __PYX_ERR(0, 1158, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1161
 *     pass
 * 
 * class NoPermissionError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_NoPermissionError, __pyx_mstate_global->__pyx_n_u_NoPermissionError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1161, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_NoPermissionError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NoPermissionError, __pyx_t_10) < (0)) __PYX_ERR(0, 1161, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1164
 *     pass
 * 
 * class AlreadyInitializedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_AlreadyInitializedError, __pyx_mstate_global->__pyx_n_u_AlreadyInitializedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1164, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_AlreadyInitializedError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_AlreadyInitializedError, __pyx_t_11) < (0)) __PYX_ERR(0, 1164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1167
 *     pass
 * 
 * class NotFoundError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_NotFoundError, __pyx_mstate_global->__pyx_n_u_NotFoundError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1167, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NotFoundError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NotFoundError, __pyx_t_4) < (0)) __PYX_ERR(0, 1167, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1170
 *     pass
 * 
 * class InsufficientSizeError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_InsufficientSizeError, __pyx_mstate_global->__pyx_n_u_InsufficientSizeError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1170, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_InsufficientSizeError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InsufficientSizeError, __pyx_t_10) < (0)) __PYX_ERR(0, 1170, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1173
 *     pass
 * 
 * class InsufficientPowerError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_InsufficientPowerError, __pyx_mstate_global->__pyx_n_u_InsufficientPowerError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1173, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_InsufficientPowerError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InsufficientPowerError, __pyx_t_11) < (0)) __PYX_ERR(0, 1173, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1176
 *     pass
 * 
 * class DriverNotLoadedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_DriverNotLoadedError, __pyx_mstate_global->__pyx_n_u_DriverNotLoadedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1176, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_DriverNotLoadedError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DriverNotLoadedError, __pyx_t_4) < (0)) __PYX_ERR(0, 1176, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1179
 *     pass
 * 
 * class TimeoutError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_TimeoutError, __pyx_mstate_global->__pyx_n_u_TimeoutError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1179, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_TimeoutError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_TimeoutError, __pyx_t_10) < (0)) __PYX_ERR(0, 1179, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1182
 *     pass
 * 
 * class IrqIssueError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_IrqIssueError, __pyx_mstate_global->__pyx_n_u_IrqIssueError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1182, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_IrqIssueError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_IrqIssueError, __pyx_t_11) < (0)) __PYX_ERR(0, 1182, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1185
 *     pass
 * 
 * class LibraryNotFoundError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_LibraryNotFoundError, __pyx_mstate_global->__pyx_n_u_LibraryNotFoundError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1185, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_LibraryNotFoundError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_LibraryNotFoundError, __pyx_t_4) < (0)) __PYX_ERR(0, 1185, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1188
 *     pass
 * 
 * class FunctionNotFoundError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_FunctionNotFoundError, __pyx_mstate_global->__pyx_n_u_FunctionNotFoundError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1188, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_FunctionNotFoundError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_FunctionNotFoundError, __pyx_t_10) < (0)) __PYX_ERR(0, 1188, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1191
 *     pass
 * 
 * class CorruptedInforomError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_CorruptedInforomError, __pyx_mstate_global->__pyx_n_u_CorruptedInforomError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1191, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_CorruptedInforomError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_CorruptedInforomError, __pyx_t_11) < (0)) __PYX_ERR(0, 1191, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1194
 *     pass
 * 
 * class GpuIsLostError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_GpuIsLostError, __pyx_mstate_global->__pyx_n_u_GpuIsLostError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1194, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_GpuIsLostError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuIsLostError, __pyx_t_4) < (0)) __PYX_ERR(0, 1194, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1197
 *     pass
 * 
 * class ResetRequiredError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_ResetRequiredError, __pyx_mstate_global->__pyx_n_u_ResetRequiredError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1197, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ResetRequiredError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ResetRequiredError, __pyx_t_10) < (0)) __PYX_ERR(0, 1197, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1200
 *     pass
 * 
 * class OperatingSystemError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_OperatingSystemError, __pyx_mstate_global->__pyx_n_u_OperatingSystemError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1200, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_OperatingSystemError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_OperatingSystemError, __pyx_t_11) < (0)) __PYX_ERR(0, 1200, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1203
 *     pass
 * 
 * class LibRmVersionMismatchError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_LibRmVersionMismatchError, __pyx_mstate_global->__pyx_n_u_LibRmVersionMismatchError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1203, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_LibRmVersionMismatchError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_LibRmVersionMismatchError, __pyx_t_4) < (0)) __PYX_ERR(0, 1203, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1206
 *     pass
 * 
 * class InUseError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_InUseError, __pyx_mstate_global->__pyx_n_u_InUseError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1206, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_InUseError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InUseError, __pyx_t_10) < (0)) __PYX_ERR(0, 1206, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1209
 *     pass
 * 
 * class MemoryError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_MemoryError, __pyx_mstate_global->__pyx_n_u_MemoryError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1209, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_MemoryError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_MemoryError, __pyx_t_11) < (0)) __PYX_ERR(0, 1209, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1212
 *     pass
 * 
 * class NoDataError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_NoDataError, __pyx_mstate_global->__pyx_n_u_NoDataError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1212, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NoDataError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NoDataError, __pyx_t_4) < (0)) __PYX_ERR(0, 1212, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1215
 *     pass
 * 
 * class VgpuEccNotSupportedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_VgpuEccNotSupportedError, __pyx_mstate_global->__pyx_n_u_VgpuEccNotSupportedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1215, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_VgpuEccNotSupportedError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_VgpuEccNotSupportedError, __pyx_t_10) < (0)) __PYX_ERR(0, 1215, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1218
 *     pass
 * 
 * class InsufficientResourcesError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_InsufficientResourcesError, __pyx_mstate_global->__pyx_n_u_InsufficientResourcesError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1218, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_InsufficientResourcesError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InsufficientResourcesError, __pyx_t_11) < (0)) __PYX_ERR(0, 1218, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1221
 *     pass
 * 
 * class FreqNotSupportedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_FreqNotSupportedError, __pyx_mstate_global->__pyx_n_u_FreqNotSupportedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1221, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_FreqNotSupportedError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_FreqNotSupportedError, __pyx_t_4) < (0)) __PYX_ERR(0, 1221, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1224
 *     pass
 * 
 * class ArgumentVersionMismatchError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_ArgumentVersionMismatchError, __pyx_mstate_global->__pyx_n_u_ArgumentVersionMismatchError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1224, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ArgumentVersionMismatchError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ArgumentVersionMismatchError, __pyx_t_10) < (0)) __PYX_ERR(0, 1224, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1227
 *     pass
 * 
 * class DeprecatedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_DeprecatedError, __pyx_mstate_global->__pyx_n_u_DeprecatedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1227, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_DeprecatedError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_DeprecatedError, __pyx_t_11) < (0)) __PYX_ERR(0, 1227, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1230
 *     pass
 * 
 * class NotReadyError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_NotReadyError, __pyx_mstate_global->__pyx_n_u_NotReadyError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1230, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_NotReadyError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NotReadyError, __pyx_t_4) < (0)) __PYX_ERR(0, 1230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1233
 *     pass
 * 
 * class GpuNotFoundError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_GpuNotFoundError, __pyx_mstate_global->__pyx_n_u_GpuNotFoundError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1233, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_GpuNotFoundError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_GpuNotFoundError, __pyx_t_10) < (0)) __PYX_ERR(0, 1233, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1236
 *     pass
 * 
 * class InvalidStateError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_4, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_InvalidStateError, __pyx_mstate_global->__pyx_n_u_InvalidStateError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  if (__pyx_t_5 != __pyx_t_11) {
    if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_11) < 0))) __PYX_ERR(0, 1236, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __pyx_t_11 = __Pyx_Py3ClassCreate(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_InvalidStateError, __pyx_t_5, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_11);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_InvalidStateError, __pyx_t_11) < (0)) __PYX_ERR(0, 1236, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1239
 *     pass
 * 
 * class ResetTypeNotSupportedError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_11 = __Pyx_Py3MetaclassPrepare(__pyx_t_10, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_ResetTypeNotSupportedError, __pyx_mstate_global->__pyx_n_u_ResetTypeNotSupportedError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  if (__pyx_t_5 != __pyx_t_4) {
    if (unlikely((PyDict_SetItemString(__pyx_t_11, "__orig_bases__", __pyx_t_4) < 0))) __PYX_ERR(0, 1239, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_ResetTypeNotSupportedError, __pyx_t_5, __pyx_t_11, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ResetTypeNotSupportedError, __pyx_t_4) < (0)) __PYX_ERR(0, 1239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1242
 *     pass
 * 
 * class UnknownError(NvmlError):             # <<<<<<<<<<<<<<
 *     pass
 * 
*/
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_NvmlError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_10 = PyTuple_Pack(1, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PEP560_update_bases(__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_11 = __Pyx_CalculateMetaclass(NULL, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_4 = __Pyx_Py3MetaclassPrepare(__pyx_t_11, __pyx_t_5, __pyx_mstate_global->__pyx_n_u_UnknownError, __pyx_mstate_global->__pyx_n_u_UnknownError, (PyObject *) NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, (PyObject *) NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (__pyx_t_5 != __pyx_t_10) {
    if (unlikely((PyDict_SetItemString(__pyx_t_4, "__orig_bases__", __pyx_t_10) < 0))) __PYX_ERR(0, 1242, __pyx_L1_error)
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_UnknownError, __pyx_t_5, __pyx_t_4, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_10);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_UnknownError, __pyx_t_10) < (0)) __PYX_ERR(0, 1242, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1313
 * 
 * 
 * @cython.profile(False)             # <<<<<<<<<<<<<<
 * cpdef int check_status(int status) except 1 nogil:
 *     if status != 0:
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_1check_status, 0, __pyx_mstate_global->__pyx_n_u_check_status, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[2])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1313, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_check_status, __pyx_t_5) < (0)) __PYX_ERR(0, 1313, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1321
 * 
 * 
 * @cython.profile(False)             # <<<<<<<<<<<<<<
 * cpdef int check_status_size(int status) except 1 nogil:
 *     if status == nvmlReturn_t.NVML_ERROR_INSUFFICIENT_SIZE:
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_3check_status_size, 0, __pyx_mstate_global->__pyx_n_u_check_status_size, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[3])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1321, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_check_status_size, __pyx_t_5) < (0)) __PYX_ERR(0, 1321, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1352
 *     })
 * 
 * pci_info_ext_v1_dtype = _get_pci_info_ext_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class PciInfoExt_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_pci_info_ext_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1352, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pci_info_ext_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 1352, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1517
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PciInfoExt_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfoExt_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[4])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1517, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1517, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1517, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1526
 *         return __from_data(data, "pci_info_ext_v1_dtype", pci_info_ext_v1_dtype, PciInfoExt_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfoExt_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfoExt_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[5])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 1526, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1526, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfoExt_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 1526, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfoExt_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[6])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13PciInfoExt_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfoExt_v1___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[7])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1570
 *     })
 * 
 * pci_info_dtype = _get_pci_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class PciInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_pci_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1570, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pci_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 1570, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1717
 *         memcpy(<void *>(self._ptr[0].busId), <void *>ptr, 32)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PciInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[8])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1726
 *         return __from_data(data, "pci_info_dtype", pci_info_dtype, PciInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[9])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 1726, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1726, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PciInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 1726, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[10])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PciInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PciInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[11])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1765
 *     })
 * 
 * utilization_dtype = _get_utilization_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class Utilization:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_utilization_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_utilization_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 1765, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1849
 *         self._ptr[0].memory = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Utilization instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Utilization_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[12])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1849, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1858
 *         return __from_data(data, "utilization_dtype", utilization_dtype, Utilization)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Utilization instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Utilization_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[13])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1858, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 1858, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1858, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1858, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Utilization, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 1858, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Utilization___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[14])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11Utilization_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Utilization___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[15])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1898
 *     })
 * 
 * memory_dtype = _get_memory_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class Memory:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_memory_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1898, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_memory_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 1898, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1993
 *         self._ptr[0].used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Memory instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Memory_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[16])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1993, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1993, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1993, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1993, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 1993, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2002
 *         return __from_data(data, "memory_dtype", memory_dtype, Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Memory_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[17])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2002, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2002, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2002, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Memory_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[18])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Memory_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[19])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2044
 *     })
 * 
 * memory_v2_dtype = _get_memory_v2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class Memory_v2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_memory_v2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_memory_v2_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 2044, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2150
 *         self._ptr[0].used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Memory_v2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory_v2_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[20])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2150, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2159
 *         return __from_data(data, "memory_v2_dtype", memory_v2_dtype, Memory_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Memory_v2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory_v2_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[21])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2159, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Memory_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2159, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory_v2___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[22])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9Memory_v2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Memory_v2___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[23])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2199
 *     })
 * 
 * ba_r1memory_dtype = _get_ba_r1memory_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class BAR1Memory:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_ba_r1memory_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ba_r1memory_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 2199, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2294
 *         self._ptr[0].bar1Used = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BAR1Memory instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BAR1Memory_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[24])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2294, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2294, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2294, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2303
 *         return __from_data(data, "ba_r1memory_dtype", ba_r1memory_dtype, BAR1Memory)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BAR1Memory instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BAR1Memory_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[25])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2303, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2303, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BAR1Memory, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2303, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BAR1Memory___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[26])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10BAR1Memory_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BAR1Memory___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[27])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2344
 *     })
 * 
 * process_info_dtype = _get_process_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ProcessInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_process_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2344, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_process_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 2344, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2461
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[28])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2461, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2461, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2461, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2461, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2479
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[29])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2479, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2479, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2479, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[30])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessInfo__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11ProcessInfo_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[31])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessInfo, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2515
 *     })
 * 
 * process_detail_v1_dtype = _get_process_detail_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ProcessDetail_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_process_detail_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_process_detail_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 2515, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2643
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessDetail_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetail_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[32])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2643, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2643, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2643, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2661
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessDetail_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetail_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[33])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2661, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2661, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2661, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetail_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[34])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessDetail_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessDetail_v1__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16ProcessDetail_v1_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetail_v1___setstate_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[35])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetail_v1, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2701
 *     })
 * 
 * device_attributes_dtype = _get_device_attributes_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class DeviceAttributes:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_device_attributes_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_attributes_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 2701, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2862
 *         self._ptr[0].memorySizeMB = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceAttributes instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAttributes_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[36])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2862, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2862, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2862, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2862, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2862, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2871
 *         return __from_data(data, "device_attributes_dtype", device_attributes_dtype, DeviceAttributes)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAttributes instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAttributes_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[37])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2871, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2871, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2871, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2871, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAttributes, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2871, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAttributes___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[38])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16DeviceAttributes_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAttributes___setstate_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[39])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2909
 *     })
 * 
 * c2c_mode_info_v1_dtype = _get_c2c_mode_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class C2cModeInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_c2c_mode_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2909, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_c2c_mode_info_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 2909, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2982
 *         self._ptr[0].isC2cEnabled = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an C2cModeInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_C2cModeInfo_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[40])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2982, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2982, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2982, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 2982, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":2991
 *         return __from_data(data, "c2c_mode_info_v1_dtype", c2c_mode_info_v1_dtype, C2cModeInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an C2cModeInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_C2cModeInfo_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[41])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2991, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 2991, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2991, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_C2cModeInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 2991, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_C2cModeInfo_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[42])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14C2cModeInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_C2cModeInfo_v1___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[43])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3033
 *     })
 * 
 * row_remapper_histogram_values_dtype = _get_row_remapper_histogram_values_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class RowRemapperHistogramValues:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_row_remapper_histogram_values_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3033, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_row_remapper_histogram_values_dt, __pyx_t_5) < (0)) __PYX_ERR(0, 3033, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3150
 *         self._ptr[0].none = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an RowRemapperHistogramValues instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RowRemapperHistogramValues_from, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[44])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3150, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3159
 *         return __from_data(data, "row_remapper_histogram_values_dtype", row_remapper_histogram_values_dtype, RowRemapperHistogramValues)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RowRemapperHistogramValues instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RowRemapperHistogramValues_from_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[45])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3159, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 3159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3159, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RowRemapperHistogramValues, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3159, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RowRemapperHistogramValues___red, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[46])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26RowRemapperHistogramValues_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RowRemapperHistogramValues___set, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[47])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3198
 *     })
 * 
 * bridge_chip_info_dtype = _get_bridge_chip_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class BridgeChipInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_bridge_chip_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_bridge_chip_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 3198, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3293
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BridgeChipInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[48])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3293, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3293, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_4, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3293, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_4};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3293, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3293, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3311
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an BridgeChipInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[49])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3311, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3311, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_4 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 3311, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3311, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3311, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[50])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_BridgeChipInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_BridgeChipInfo__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14BridgeChipInfo_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[51])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipInfo, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3332
 * 
 * 
 * value_dtype = _numpy.dtype((             # <<<<<<<<<<<<<<
 *     _numpy.dtype((_numpy.void, sizeof(nvmlValue_t))),
 *     {
*/
  __pyx_t_11 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3332, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;

  /* "cuda/bindings/_nvml.pyx":3333
 * 
 * value_dtype = _numpy.dtype((
 *     _numpy.dtype((_numpy.void, sizeof(nvmlValue_t))),             # <<<<<<<<<<<<<<
 *     {
 *         "d_val": (_numpy.float64, 0),
*/
  __pyx_t_12 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_13, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_13, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_mstate_global->__pyx_n_u_void); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
  __pyx_t_13 = __Pyx_PyLong_FromSize_t((sizeof(nvmlValue_t))); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_15, __pyx_t_13); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_12, __pyx_t_16};
    __pyx_t_4 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_14, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
    if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3333, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
  }

  /* "cuda/bindings/_nvml.pyx":3335
 *     _numpy.dtype((_numpy.void, sizeof(nvmlValue_t))),
 *     {
 *         "d_val": (_numpy.float64, 0),             # <<<<<<<<<<<<<<
 *         "si_val": (_numpy.int32, 0),
 *         "ui_val": (_numpy.uint32, 0),
*/
  __pyx_t_14 = __Pyx_PyDict_NewPresized(7); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_float64); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_d_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3336
 *     {
 *         "d_val": (_numpy.float64, 0),
 *         "si_val": (_numpy.int32, 0),             # <<<<<<<<<<<<<<
 *         "ui_val": (_numpy.uint32, 0),
 *         "ul_val": (_numpy.uint32, 0),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_int32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3336, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_si_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3337
 *         "d_val": (_numpy.float64, 0),
 *         "si_val": (_numpy.int32, 0),
 *         "ui_val": (_numpy.uint32, 0),             # <<<<<<<<<<<<<<
 *         "ul_val": (_numpy.uint32, 0),
 *         "ull_val": (_numpy.uint64, 0),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_ui_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3338
 *         "si_val": (_numpy.int32, 0),
 *         "ui_val": (_numpy.uint32, 0),
 *         "ul_val": (_numpy.uint32, 0),             # <<<<<<<<<<<<<<
 *         "ull_val": (_numpy.uint64, 0),
 *         "sll_val": (_numpy.int64, 0),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_uint32); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_ul_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3339
 *         "ui_val": (_numpy.uint32, 0),
 *         "ul_val": (_numpy.uint32, 0),
 *         "ull_val": (_numpy.uint64, 0),             # <<<<<<<<<<<<<<
 *         "sll_val": (_numpy.int64, 0),
 *         "us_val": (_numpy.uint16, 0),
*/
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_uint64); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3339, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_ull_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3340
 *         "ul_val": (_numpy.uint32, 0),
 *         "ull_val": (_numpy.uint64, 0),
 *         "sll_val": (_numpy.int64, 0),             # <<<<<<<<<<<<<<
 *         "us_val": (_numpy.uint16, 0),
 *     }
*/
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3340, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_int64); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3340, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3340, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_sll_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3341
 *         "ull_val": (_numpy.uint64, 0),
 *         "sll_val": (_numpy.int64, 0),
 *         "us_val": (_numpy.uint16, 0),             # <<<<<<<<<<<<<<
 *     }
 *     ))
*/
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_uint16); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 3341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3341, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_us_val, __pyx_t_16) < (0)) __PYX_ERR(0, 3335, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":3333
 * 
 * value_dtype = _numpy.dtype((
 *     _numpy.dtype((_numpy.void, sizeof(nvmlValue_t))),             # <<<<<<<<<<<<<<
 *     {
 *         "d_val": (_numpy.float64, 0),
*/
  __pyx_t_16 = PyTuple_Pack(2, __pyx_t_4, __pyx_t_14); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_10, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3332, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_value_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 3332, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3483
 *         self._ptr[0].usVal = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Value instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_5Value_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Value_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[52])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3483, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3483, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3483, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3492
 *         return __from_data(data, "value_dtype", value_dtype, Value)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Value instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_5Value_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Value_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[53])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3492, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3492, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3492, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Value, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3492, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_5Value_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Value___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[54])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_5Value_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Value___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[55])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3534
 *     })
 * 
 * _py_anon_pod0_dtype = _get__py_anon_pod0_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class _py_anon_pod0:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod0_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3534, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_py_anon_pod0_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 3534, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3651
 *         self._ptr[0].target = <nvmlThermalTarget_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod0 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod0_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[56])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3651, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3651, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3651, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3660
 *         return __from_data(data, "_py_anon_pod0_dtype", _py_anon_pod0_dtype, _py_anon_pod0)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod0 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod0_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[57])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3660, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3660, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod0, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3660, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod0___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[58])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod0_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod0___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[59])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3701
 *     })
 * 
 * cooler_info_v1_dtype = _get_cooler_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class CoolerInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_cooler_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_cooler_info_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 3701, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3807
 *         self._ptr[0].target = <nvmlCoolerTarget_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an CoolerInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_CoolerInfo_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[60])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3807, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3807, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3807, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3807, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3816
 *         return __from_data(data, "cooler_info_v1_dtype", cooler_info_v1_dtype, CoolerInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *  *         """Create a CoolerInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_CoolerInfo_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[61])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3816, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3816, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_CoolerInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3816, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_CoolerInfo_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[62])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13CoolerInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_CoolerInfo_v1___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[63])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3855
 *     })
 * 
 * margin_temperature_v1_dtype = _get_margin_temperature_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class MarginTemperature_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_margin_temperature_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3855, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_margin_temperature_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 3855, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3939
 *         self._ptr[0].marginTemperature = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *  *         """Create a MarginTemperature_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_MarginTemperature_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[64])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3939, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 3939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3939, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 3939, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3948
 *         return __from_data(data, "margin_temperature_v1_dtype", margin_temperature_v1_dtype, MarginTemperature_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *  *         """Create a MarginTemperature_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_MarginTemperature_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[65])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3948, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3948, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_MarginTemperature_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 3948, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_MarginTemperature_v1___reduce_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[66])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20MarginTemperature_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_MarginTemperature_v1___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[67])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":3987
 *     })
 * 
 * clk_mon_fault_info_dtype = _get_clk_mon_fault_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ClkMonFaultInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_clk_mon_fault_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3987, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_clk_mon_fault_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 3987, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4082
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *  *         """Create a ClkMonFaultInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonFaultInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[68])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4082, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *  *         """Create a ClkMonFaultInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonFaultInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[69])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4100, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonFaultInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[70])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ClkMonFaultInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ClkMonFaultInfo__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15ClkMonFaultInfo_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonFaultInfo___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[71])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonFaultInfo, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4137
 *     })
 * 
 * clock_offset_v1_dtype = _get_clock_offset_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ClockOffset_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_clock_offset_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_clock_offset_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 4137, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4265
 *         self._ptr[0].maxClockOffsetMHz = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *  *         """Create a ClockOffset_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClockOffset_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[72])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4265, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4265, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4265, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4265, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4265, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4274
 *         return __from_data(data, "clock_offset_v1_dtype", clock_offset_v1_dtype, ClockOffset_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *  *         """Create a ClockOffset_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClockOffset_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[73])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4274, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4274, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClockOffset_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4274, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClockOffset_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[74])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14ClockOffset_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClockOffset_v1___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[75])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4314
 *     })
 * 
 * fan_speed_info_v1_dtype = _get_fan_speed_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class FanSpeedInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_fan_speed_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4314, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_fan_speed_info_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 4314, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4409
 *         self._ptr[0].speed = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *  *         """Create a FanSpeedInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FanSpeedInfo_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[76])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4409, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4409, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4409, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4409, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4409, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4418
 *         return __from_data(data, "fan_speed_info_v1_dtype", fan_speed_info_v1_dtype, FanSpeedInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *  *         """Create a FanSpeedInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FanSpeedInfo_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[77])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4418, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4418, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FanSpeedInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4418, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FanSpeedInfo_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[78])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15FanSpeedInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FanSpeedInfo_v1___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[79])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4457
 *     })
 * 
 * device_perf_modes_v1_dtype = _get_device_perf_modes_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class DevicePerfModes_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_device_perf_modes_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4457, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_perf_modes_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 4457, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4545
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DevicePerfModes_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePerfModes_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[80])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4545, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4545, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4545, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4545, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4545, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4554
 *         return __from_data(data, "device_perf_modes_v1_dtype", device_perf_modes_v1_dtype, DevicePerfModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePerfModes_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePerfModes_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[81])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4554, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4554, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4554, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4554, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePerfModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4554, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePerfModes_v1___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[82])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18DevicePerfModes_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePerfModes_v1___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[83])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4593
 *     })
 * 
 * device_current_clock_freqs_v1_dtype = _get_device_current_clock_freqs_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class DeviceCurrentClockFreqs_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_device_current_clock_freqs_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4593, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_current_clock_freqs_v1_dt, __pyx_t_5) < (0)) __PYX_ERR(0, 4593, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4681
 *         memcpy(<void *>(self._ptr[0].str), <void *>ptr, 2048)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCurrentClockFreqs_v1_from, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[84])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4681, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4681, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4681, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4690
 *         return __from_data(data, "device_current_clock_freqs_v1_dtype", device_current_clock_freqs_v1_dtype, DeviceCurrentClockFreqs_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCurrentClockFreqs_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCurrentClockFreqs_v1_from_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[85])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4690, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCurrentClockFreqs_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCurrentClockFreqs_v1___red, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[86])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_26DeviceCurrentClockFreqs_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCurrentClockFreqs_v1___set, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[87])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4733
 *     })
 * 
 * process_utilization_sample_dtype = _get_process_utilization_sample_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ProcessUtilizationSample:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_process_utilization_sample_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_process_utilization_sample_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 4733, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4872
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessUtilizationSample instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationSample_from_da, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[88])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4872, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4872, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4872, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4872, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 4872, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4890
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationSample instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationSample_from_pt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[89])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4890, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4890, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 4890, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationSample___reduc, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[90])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationSample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessUtilizationSample__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ProcessUtilizationSample_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationSample___setst, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[91])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationSample, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":4929
 *     })
 * 
 * process_utilization_info_v1_dtype = _get_process_utilization_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ProcessUtilizationInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_process_utilization_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4929, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_process_utilization_info_v1_dtyp, __pyx_t_5) < (0)) __PYX_ERR(0, 4929, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5090
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationInfo_v1_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[92])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5090, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5090, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5090, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5108
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessUtilizationInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationInfo_v1_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[93])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5108, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5108, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5108, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5108, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5108, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationInfo_v1___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[94])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ProcessUtilizationInfo_v1__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ProcessUtilizationInfo_v1_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessUtilizationInfo_v1___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[95])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5152
 *     })
 * 
 * ecc_sram_error_status_v1_dtype = _get_ecc_sram_error_status_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class EccSramErrorStatus_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_error_status_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5152, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ecc_sram_error_status_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 5152, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5357
 *         self._ptr[0].bThresholdExceeded = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramErrorStatus_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[96])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5357, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5357, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5357, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5357, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5366
 *         return __from_data(data, "ecc_sram_error_status_v1_dtype", ecc_sram_error_status_v1_dtype, EccSramErrorStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramErrorStatus_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramErrorStatus_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[97])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5366, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramErrorStatus_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5366, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramErrorStatus_v1___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[98])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21EccSramErrorStatus_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramErrorStatus_v1___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[99])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5411
 *     })
 * 
 * platform_info_v2_dtype = _get_platform_info_v2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class PlatformInfo_v2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_platform_info_v2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5411, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_platform_info_v2_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 5411, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5569
 *         self._ptr[0].moduleId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PlatformInfo_v2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PlatformInfo_v2_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[100])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5569, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5569, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5569, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5569, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5578
 *         return __from_data(data, "platform_info_v2_dtype", platform_info_v2_dtype, PlatformInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PlatformInfo_v2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PlatformInfo_v2_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[101])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5578, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5578, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PlatformInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5578, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PlatformInfo_v2___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[102])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15PlatformInfo_v2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PlatformInfo_v2___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[103])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5619
 *     })
 * 
 * _py_anon_pod1_dtype = _get__py_anon_pod1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class _py_anon_pod1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5619, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_py_anon_pod1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 5619, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5725
 *         self._ptr[0].decThreshold = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[104])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5725, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5725, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5725, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5725, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5734
 *         return __from_data(data, "_py_anon_pod1_dtype", _py_anon_pod1_dtype, _py_anon_pod1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[105])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5734, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5734, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5734, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5734, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[106])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod1___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[107])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5773
 *     })
 * 
 * vgpu_heterogeneous_mode_v1_dtype = _get_vgpu_heterogeneous_mode_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuHeterogeneousMode_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_heterogeneous_mode_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_heterogeneous_mode_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 5773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5857
 *         self._ptr[0].mode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuHeterogeneousMode_v1_from_da, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[108])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5857, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5857, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5857, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5857, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5857, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5866
 *         return __from_data(data, "vgpu_heterogeneous_mode_v1_dtype", vgpu_heterogeneous_mode_v1_dtype, VgpuHeterogeneousMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuHeterogeneousMode_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuHeterogeneousMode_v1_from_pt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[109])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5866, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5866, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5866, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5866, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuHeterogeneousMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5866, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuHeterogeneousMode_v1___reduc, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[110])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24VgpuHeterogeneousMode_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuHeterogeneousMode_v1___setst, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[111])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5905
 *     })
 * 
 * vgpu_placement_id_v1_dtype = _get_vgpu_placement_id_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuPlacementId_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_placement_id_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_placement_id_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 5905, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5989
 *         self._ptr[0].placementId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementId_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[112])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5989, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5989, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 5989, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5989, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 5989, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":5998
 *         return __from_data(data, "vgpu_placement_id_v1_dtype", vgpu_placement_id_v1_dtype, VgpuPlacementId_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementId_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementId_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[113])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 5998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 5998, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementId_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 5998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementId_v1___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[114])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18VgpuPlacementId_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementId_v1___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[115])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6040
 *     })
 * 
 * vgpu_placement_list_v2_dtype = _get_vgpu_placement_list_v2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuPlacementList_v2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_placement_list_v2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6040, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_placement_list_v2_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 6040, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6156
 *         self._ptr[0].mode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementList_v2_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[116])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6156, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6156, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6156, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6165
 *         return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPlacementList_v2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementList_v2_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[117])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6165, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6165, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPlacementList_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6165, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementList_v2___reduce_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[118])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20VgpuPlacementList_v2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPlacementList_v2___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[119])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6205
 *     })
 * 
 * vgpu_type_bar1info_v1_dtype = _get_vgpu_type_bar1info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuTypeBar1Info_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_bar1info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6205, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_bar1info_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 6205, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6289
 *         self._ptr[0].bar1Size = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeBar1Info_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[120])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6289, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6289, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6289, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6289, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6298
 *         return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeBar1Info_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[121])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6298, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6298, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeBar1Info_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6298, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeBar1Info_v1___reduce_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[122])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuTypeBar1Info_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeBar1Info_v1___setstate_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[123])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6345
 *     })
 * 
 * vgpu_process_utilization_info_v1_dtype = _get_vgpu_process_utilization_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuProcessUtilizationInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_process_utilization_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6345, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_process_utilization_info_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 6345, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6526
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessUtilizationInfo_v1_fr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[124])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6526, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6526, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6526, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6544
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuProcessUtilizationInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessUtilizationInfo_v1_fr_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[125])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6544, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6544, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6544, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6544, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6544, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessUtilizationInfo_v1_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[126])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuProcessUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuProcessUtilizationInfo_v1__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuProcessUtilizationInfo_v1_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessUtilizationInfo_v1_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[127])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6577
 *     })
 * 
 * vgpu_runtime_state_v1_dtype = _get_vgpu_runtime_state_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuRuntimeState_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_runtime_state_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_runtime_state_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 6577, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6661
 *         self._ptr[0].size = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuRuntimeState_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[128])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6661, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6661, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6661, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6661, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6670
 *         return __from_data(data, "vgpu_runtime_state_v1_dtype", vgpu_runtime_state_v1_dtype, VgpuRuntimeState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuRuntimeState_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuRuntimeState_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[129])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6670, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6670, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuRuntimeState_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6670, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuRuntimeState_v1___reduce_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[130])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuRuntimeState_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuRuntimeState_v1___setstate_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[131])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6709
 *     })
 * 
 * _py_anon_pod2_dtype = _get__py_anon_pod2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class _py_anon_pod2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6709, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_py_anon_pod2_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 6709, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6793
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod2_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[132])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6793, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6793, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6793, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6793, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6793, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6802
 *         return __from_data(data, "_py_anon_pod2_dtype", _py_anon_pod2_dtype, _py_anon_pod2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod2_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[133])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6802, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6802, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6802, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6802, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6802, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod2___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[134])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod2___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[135])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6840
 *     })
 * 
 * _py_anon_pod3_dtype = _get__py_anon_pod3_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class _py_anon_pod3:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod3_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6840, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_py_anon_pod3_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 6840, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6913
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod3 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod3_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[136])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6913, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6913, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 6913, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6913, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 6913, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6922
 *         return __from_data(data, "_py_anon_pod3_dtype", _py_anon_pod3_dtype, _py_anon_pod3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod3 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod3_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[137])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6922, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6922, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 6922, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6922, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 6922, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod3___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[138])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod3_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod3___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[139])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":6965
 *     })
 * 
 * vgpu_scheduler_log_entry_dtype = _get_vgpu_scheduler_log_entry_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerLogEntry:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_entry_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 6965, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_entry_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 6965, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7104
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogEntry_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[140])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7104, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 7104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7104, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7104, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7122
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuSchedulerLogEntry instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogEntry_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[141])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7122, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7122, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7122, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogEntry___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[142])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuSchedulerLogEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuSchedulerLogEntry__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerLogEntry_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogEntry___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[143])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogEntry, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7155
 *     })
 * 
 * _py_anon_pod4_dtype = _get__py_anon_pod4_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class _py_anon_pod4:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod4_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_py_anon_pod4_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 7155, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7239
 *         self._ptr[0].frequency = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod4 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod4_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[144])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 7239, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7239, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7239, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7248
 *         return __from_data(data, "_py_anon_pod4_dtype", _py_anon_pod4_dtype, _py_anon_pod4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod4 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod4_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[145])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod4, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod4___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[146])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod4_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod4___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[147])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7286
 *     })
 * 
 * _py_anon_pod5_dtype = _get__py_anon_pod5_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class _py_anon_pod5:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get__py_anon_pod5_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_py_anon_pod5_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 7286, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7359
 *         self._ptr[0].timeslice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an _py_anon_pod5 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod5_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[148])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7359, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7359, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 7359, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7359, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7359, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7368
 *         return __from_data(data, "_py_anon_pod5_dtype", _py_anon_pod5_dtype, _py_anon_pod5)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an _py_anon_pod5 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod5_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[149])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7368, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7368, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml__py_anon_pod5, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7368, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod5___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[150])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13_py_anon_pod5_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_py_anon_pod5___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[151])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7413
 *     })
 * 
 * vgpu_scheduler_capabilities_dtype = _get_vgpu_scheduler_capabilities_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerCapabilities:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_capabilities_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7413, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_capabilities_dtyp, __pyx_t_5) < (0)) __PYX_ERR(0, 7413, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7567
 *         self._ptr[0].minAvgFactorForARR = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerCapabilities_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[152])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 7567, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7567, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7567, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7576
 *         return __from_data(data, "vgpu_scheduler_capabilities_dtype", vgpu_scheduler_capabilities_dtype, VgpuSchedulerCapabilities)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerCapabilities instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerCapabilities_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[153])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7576, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7576, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerCapabilities, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7576, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerCapabilities___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[154])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerCapabilities_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerCapabilities___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[155])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7620
 *     })
 * 
 * vgpu_license_expiry_dtype = _get_vgpu_license_expiry_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuLicenseExpiry:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_license_expiry_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_license_expiry_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 7620, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7759
 *         self._ptr[0].status = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuLicenseExpiry instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseExpiry_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[156])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7759, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7759, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 7759, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7759, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7759, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7768
 *         return __from_data(data, "vgpu_license_expiry_dtype", vgpu_license_expiry_dtype, VgpuLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseExpiry instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseExpiry_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[157])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7768, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7768, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7768, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseExpiry___reduce_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[158])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuLicenseExpiry_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseExpiry___setstate_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[159])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7812
 *     })
 * 
 * grid_license_expiry_dtype = _get_grid_license_expiry_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GridLicenseExpiry:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_grid_license_expiry_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7812, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_grid_license_expiry_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 7812, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7951
 *         self._ptr[0].status = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicenseExpiry instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicenseExpiry_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[160])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7951, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 7951, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7951, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 7951, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":7960
 *         return __from_data(data, "grid_license_expiry_dtype", grid_license_expiry_dtype, GridLicenseExpiry)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicenseExpiry instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicenseExpiry_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[161])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7960, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7960, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 7960, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 7960, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicenseExpiry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 7960, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicenseExpiry___reduce_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[162])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17GridLicenseExpiry_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicenseExpiry___setstate_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[163])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8000
 *     })
 * 
 * vgpu_type_id_info_v1_dtype = _get_vgpu_type_id_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuTypeIdInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_id_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8000, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_id_info_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 8000, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8084
 *         self._ptr[0].vgpuTypeIds = <nvmlVgpuTypeId_t*><intptr_t>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeIdInfo_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[164])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8084, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8084, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8084, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8084, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8084, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8093
 *         return __from_data(data, "vgpu_type_id_info_v1_dtype", vgpu_type_id_info_v1_dtype, VgpuTypeIdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeIdInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeIdInfo_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[165])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8093, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8093, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8093, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeIdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8093, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeIdInfo_v1___reduce_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[166])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17VgpuTypeIdInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeIdInfo_v1___setstate_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[167])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8133
 *     })
 * 
 * vgpu_type_max_instance_v1_dtype = _get_vgpu_type_max_instance_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuTypeMaxInstance_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_type_max_instance_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_max_instance_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 8133, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8228
 *         self._ptr[0].maxInstancePerGI = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeMaxInstance_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[168])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8228, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8228, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8228, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8237
 *         return __from_data(data, "vgpu_type_max_instance_v1_dtype", vgpu_type_max_instance_v1_dtype, VgpuTypeMaxInstance_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuTypeMaxInstance_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeMaxInstance_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[169])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8237, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8237, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuTypeMaxInstance_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8237, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeMaxInstance_v1___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[170])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuTypeMaxInstance_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuTypeMaxInstance_v1___setstat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[171])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8277
 *     })
 * 
 * active_vgpu_instance_info_v1_dtype = _get_active_vgpu_instance_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ActiveVgpuInstanceInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_active_vgpu_instance_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8277, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_active_vgpu_instance_info_v1_dty, __pyx_t_5) < (0)) __PYX_ERR(0, 8277, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8361
 *         self._ptr[0].vgpuInstances = <nvmlVgpuInstance_t*><intptr_t>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ActiveVgpuInstanceInfo_v1_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[172])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8361, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8361, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8361, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8361, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8361, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8370
 *         return __from_data(data, "active_vgpu_instance_info_v1_dtype", active_vgpu_instance_info_v1_dtype, ActiveVgpuInstanceInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ActiveVgpuInstanceInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ActiveVgpuInstanceInfo_v1_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[173])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8370, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8370, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ActiveVgpuInstanceInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8370, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ActiveVgpuInstanceInfo_v1___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[174])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ActiveVgpuInstanceInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ActiveVgpuInstanceInfo_v1___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[175])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8412
 *     })
 * 
 * vgpu_creatable_placement_info_v1_dtype = _get_vgpu_creatable_placement_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuCreatablePlacementInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_creatable_placement_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8412, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_creatable_placement_info_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 8412, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8528
 *         self._refs["placement_ids"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuCreatablePlacementInfo_v1_fr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[176])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8528, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8528, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8528, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8528, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8528, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8537
 *         return __from_data(data, "vgpu_creatable_placement_info_v1_dtype", vgpu_creatable_placement_info_v1_dtype, VgpuCreatablePlacementInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuCreatablePlacementInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuCreatablePlacementInfo_v1_fr_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[177])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8537, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8537, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8537, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuCreatablePlacementInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8537, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuCreatablePlacementInfo_v1_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[178])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29VgpuCreatablePlacementInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuCreatablePlacementInfo_v1_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[179])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8577
 *     })
 * 
 * hwbc_entry_dtype = _get_hwbc_entry_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class HwbcEntry:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_hwbc_entry_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8577, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_hwbc_entry_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 8577, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8670
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an HwbcEntry instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_HwbcEntry_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[180])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8670, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8670, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8670, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8670, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8688
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an HwbcEntry instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_HwbcEntry_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[181])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8688, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8688, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8688, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_HwbcEntry___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[182])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_HwbcEntry, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_HwbcEntry__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9HwbcEntry_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_HwbcEntry___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[183])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_HwbcEntry, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8721
 *     })
 * 
 * led_state_dtype = _get_led_state_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class LedState:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_led_state_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8721, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_led_state_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 8721, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8809
 *         self._ptr[0].color = <nvmlLedColor_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an LedState instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8LedState_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_LedState_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[184])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8809, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8809, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8809, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8809, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8809, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8818
 *         return __from_data(data, "led_state_dtype", led_state_dtype, LedState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an LedState instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8LedState_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_LedState_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[185])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8818, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8818, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8818, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8818, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_LedState, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8818, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8LedState_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_LedState___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[186])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8LedState_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_LedState___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[187])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8859
 *     })
 * 
 * unit_info_dtype = _get_unit_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class UnitInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_unit_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8859, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 8859, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8981
 *         memcpy(<void *>(self._ptr[0].firmwareVersion), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[188])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8981, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 8981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8981, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 8981, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":8990
 *         return __from_data(data, "unit_info_dtype", unit_info_dtype, UnitInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[189])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 8990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 8990, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 8990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[190])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8UnitInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[191])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9031
 *     })
 * 
 * psu_info_dtype = _get_psu_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class PSUInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_psu_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9031, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_psu_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 9031, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9141
 *         self._ptr[0].power = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an PSUInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PSUInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[192])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9141, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9141, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 9141, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9141, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9141, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9150
 *         return __from_data(data, "psu_info_dtype", psu_info_dtype, PSUInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PSUInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PSUInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[193])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9150, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9150, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_PSUInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9150, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PSUInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[194])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7PSUInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_PSUInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[195])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9189
 *     })
 * 
 * unit_fan_info_dtype = _get_unit_fan_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class UnitFanInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_unit_fan_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_fan_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 9189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9284
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitFanInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[196])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9284, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9284, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 9284, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9284, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9284, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9302
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an UnitFanInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[197])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9302, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9302, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9302, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9302, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[198])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_UnitFanInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_UnitFanInfo__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11UnitFanInfo_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[199])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanInfo, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9338
 *     })
 * 
 * event_data_dtype = _get_event_data_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class EventData:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_event_data_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_event_data_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 9338, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9455
 *         self._ptr[0].computeInstanceId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EventData instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9EventData_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EventData_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[200])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9455, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 9455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9455, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9455, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9464
 *         return __from_data(data, "event_data_dtype", event_data_dtype, EventData)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EventData instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9EventData_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EventData_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[201])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9464, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9464, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EventData, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9464, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9EventData_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EventData___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[202])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9EventData_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EventData___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[203])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9508
 *     })
 * 
 * accounting_stats_dtype = _get_accounting_stats_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class AccountingStats:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_accounting_stats_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_accounting_stats_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 9508, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9636
 *         self._ptr[0].isRunning = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an AccountingStats instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_AccountingStats_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[204])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9636, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 9636, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9636, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9636, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9645
 *         return __from_data(data, "accounting_stats_dtype", accounting_stats_dtype, AccountingStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an AccountingStats instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_AccountingStats_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[205])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9645, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9645, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9645, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9645, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_AccountingStats, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9645, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_AccountingStats___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[206])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15AccountingStats_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_AccountingStats___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[207])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9690
 *     })
 * 
 * encoder_session_info_dtype = _get_encoder_session_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class EncoderSessionInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_encoder_session_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9690, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_encoder_session_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 9690, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9851
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EncoderSessionInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EncoderSessionInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[208])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9851, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9851, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 9851, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9851, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9851, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9869
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EncoderSessionInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EncoderSessionInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[209])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9869, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9869, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 9869, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9869, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 9869, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EncoderSessionInfo___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[210])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_EncoderSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_EncoderSessionInfo__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18EncoderSessionInfo_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EncoderSessionInfo___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[211])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EncoderSessionInfo, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9903
 *     })
 * 
 * fbc_stats_dtype = _get_fbc_stats_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class FBCStats:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_fbc_stats_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9903, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_fbc_stats_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 9903, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":9998
 *         self._ptr[0].averageLatency = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FBCStats instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCStats_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[212])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 9998, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 9998, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 9998, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10007
 *         return __from_data(data, "fbc_stats_dtype", fbc_stats_dtype, FBCStats)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an FBCStats instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCStats_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[213])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10007, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10007, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCStats, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10007, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCStats___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[214])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_8FBCStats_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCStats___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[215])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10056
 *     })
 * 
 * fbc_session_info_dtype = _get_fbc_session_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class FBCSessionInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_fbc_session_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10056, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_fbc_session_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 10056, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10261
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FBCSessionInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCSessionInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[216])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10261, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10261, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10261, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10261, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10279
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FBCSessionInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCSessionInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[217])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10279, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10279, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10279, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCSessionInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[218])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_FBCSessionInfo, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_FBCSessionInfo__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_14FBCSessionInfo_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FBCSessionInfo___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[219])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FBCSessionInfo, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10312
 *     })
 * 
 * conf_compute_system_caps_dtype = _get_conf_compute_system_caps_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ConfComputeSystemCaps:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_system_caps_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10312, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_conf_compute_system_caps_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 10312, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10396
 *         self._ptr[0].gpusCaps = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeSystemCaps instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemCaps_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[220])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10396, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10405
 *         return __from_data(data, "conf_compute_system_caps_dtype", conf_compute_system_caps_dtype, ConfComputeSystemCaps)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemCaps instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemCaps_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[221])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10405, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10405, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemCaps, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10405, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemCaps___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[222])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21ConfComputeSystemCaps_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemCaps___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[223])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10445
 *     })
 * 
 * conf_compute_system_state_dtype = _get_conf_compute_system_state_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ConfComputeSystemState:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_system_state_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_conf_compute_system_state_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 10445, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10540
 *         self._ptr[0].devToolsMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeSystemState instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemState_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[224])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10540, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10540, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10540, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10549
 *         return __from_data(data, "conf_compute_system_state_dtype", conf_compute_system_state_dtype, ConfComputeSystemState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeSystemState instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemState_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[225])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10549, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10549, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeSystemState, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10549, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemState___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[226])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeSystemState_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeSystemState___setstat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[227])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10591
 *     })
 * 
 * system_conf_compute_settings_v1_dtype = _get_system_conf_compute_settings_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class SystemConfComputeSettings_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_system_conf_compute_settings_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_conf_compute_settings_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 10591, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10708
 *         self._ptr[0].multiGpuMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_SystemConfComputeSettings_v1_fro, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[228])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10708, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10708, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10708, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10708, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10717
 *         return __from_data(data, "system_conf_compute_settings_v1_dtype", system_conf_compute_settings_v1_dtype, SystemConfComputeSettings_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an SystemConfComputeSettings_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_SystemConfComputeSettings_v1_fro_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[229])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_SystemConfComputeSettings_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_SystemConfComputeSettings_v1___r, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[230])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_28SystemConfComputeSettings_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_SystemConfComputeSettings_v1___s, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[231])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10756
 *     })
 * 
 * conf_compute_mem_size_info_dtype = _get_conf_compute_mem_size_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ConfComputeMemSizeInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_mem_size_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_conf_compute_mem_size_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 10756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10840
 *         self._ptr[0].unprotectedMemSizeKib = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeMemSizeInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[232])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10840, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10840, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 10840, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10840, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 10840, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10849
 *         return __from_data(data, "conf_compute_mem_size_info_dtype", conf_compute_mem_size_info_dtype, ConfComputeMemSizeInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeMemSizeInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeMemSizeInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[233])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 10849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10849, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeMemSizeInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 10849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeMemSizeInfo___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[234])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22ConfComputeMemSizeInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeMemSizeInfo___setstat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[235])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":10890
 *     })
 * 
 * conf_compute_gpu_certificate_dtype = _get_conf_compute_gpu_certificate_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ConfComputeGpuCertificate:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_gpu_certificate_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 10890, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_conf_compute_gpu_certificate_dty, __pyx_t_5) < (0)) __PYX_ERR(0, 10890, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11004
 *         memcpy(<void *>(&(self._ptr[0].attestationCertChain)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuCertificate_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[236])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11004, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11004, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11004, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11004, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11004, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11013
 *         return __from_data(data, "conf_compute_gpu_certificate_dtype", conf_compute_gpu_certificate_dtype, ConfComputeGpuCertificate)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuCertificate instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuCertificate_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[237])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11013, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11013, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11013, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11013, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuCertificate, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11013, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuCertificate___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[238])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25ConfComputeGpuCertificate_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuCertificate___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[239])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11056
 *     })
 * 
 * conf_compute_gpu_attestation_report_dtype = _get_conf_compute_gpu_attestation_report_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ConfComputeGpuAttestationReport:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_gpu_attestation_report_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11056, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_conf_compute_gpu_attestation_rep, __pyx_t_5) < (0)) __PYX_ERR(0, 11056, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11196
 *         memcpy(<void *>(&(self._ptr[0].cecAttestationReport)), <void *>(arr.data), sizeof(unsigned char) * len(val))
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuAttestationReport_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[240])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11196, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11205
 *         return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuAttestationReport_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[241])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11205, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11205, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11205, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11205, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGpuAttestationReport, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11205, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuAttestationReport_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[242])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31ConfComputeGpuAttestationReport_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGpuAttestationReport_6, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[243])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11244
 *     })
 * 
 * conf_compute_get_key_rotation_threshold_info_v1_dtype = _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ConfComputeGetKeyRotationThresholdInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11244, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_conf_compute_get_key_rotation_th, __pyx_t_5) < (0)) __PYX_ERR(0, 11244, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11328
 *         self._ptr[0].attackerAdvantage = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGetKeyRotationThresho_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[244])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11328, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11328, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11328, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11337
 *         return __from_data(data, "conf_compute_get_key_rotation_threshold_info_v1_dtype", conf_compute_get_key_rotation_threshold_info_v1_dtype, ConfComputeGetKeyRotationThresholdInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGetKeyRotationThresho_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[245])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11337, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11337, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11337, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ConfComputeGetKeyRotationThresholdInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11337, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGetKeyRotationThresho_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[246])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_41ConfComputeGetKeyRotationThresholdInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ConfComputeGetKeyRotationThresho_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[247])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11377
 *     })
 * 
 * nvlink_supported_bw_modes_v1_dtype = _get_nvlink_supported_bw_modes_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class NvlinkSupportedBwModes_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_nvlink_supported_bw_modes_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11377, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_nvlink_supported_bw_modes_v1_dty, __pyx_t_5) < (0)) __PYX_ERR(0, 11377, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11476
 *         self._ptr[0].totalBwModes = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSupportedBwModes_v1_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[248])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11476, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11476, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11476, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11485
 *         return __from_data(data, "nvlink_supported_bw_modes_v1_dtype", nvlink_supported_bw_modes_v1_dtype, NvlinkSupportedBwModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSupportedBwModes_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSupportedBwModes_v1_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[249])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11485, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11485, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11485, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSupportedBwModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11485, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSupportedBwModes_v1___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[250])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25NvlinkSupportedBwModes_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSupportedBwModes_v1___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[251])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11525
 *     })
 * 
 * nvlink_get_bw_mode_v1_dtype = _get_nvlink_get_bw_mode_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class NvlinkGetBwMode_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_nvlink_get_bw_mode_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11525, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_nvlink_get_bw_mode_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 11525, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11620
 *         self._ptr[0].bwMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkGetBwMode_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[252])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11620, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11620, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11620, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11629
 *         return __from_data(data, "nvlink_get_bw_mode_v1_dtype", nvlink_get_bw_mode_v1_dtype, NvlinkGetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkGetBwMode_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkGetBwMode_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[253])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11629, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkGetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkGetBwMode_v1___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[254])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkGetBwMode_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkGetBwMode_v1___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[255])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11669
 *     })
 * 
 * nvlink_set_bw_mode_v1_dtype = _get_nvlink_set_bw_mode_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class NvlinkSetBwMode_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_nvlink_set_bw_mode_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_nvlink_set_bw_mode_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 11669, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11764
 *         self._ptr[0].bwMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSetBwMode_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[256])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11764, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11764, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11764, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11764, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11773
 *         return __from_data(data, "nvlink_set_bw_mode_v1_dtype", nvlink_set_bw_mode_v1_dtype, NvlinkSetBwMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkSetBwMode_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSetBwMode_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[257])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11773, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkSetBwMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSetBwMode_v1___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[258])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkSetBwMode_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkSetBwMode_v1___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[259])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11812
 *     })
 * 
 * vgpu_version_dtype = _get_vgpu_version_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuVersion:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_version_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11812, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_version_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 11812, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11896
 *         self._ptr[0].maxVersion = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuVersion instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuVersion_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[260])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11896, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 11896, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11896, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 11896, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11905
 *         return __from_data(data, "vgpu_version_dtype", vgpu_version_dtype, VgpuVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuVersion instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuVersion_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[261])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11905, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 11905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11905, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuVersion, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 11905, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuVersion___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[262])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11VgpuVersion_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuVersion___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[263])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":11952
 *     })
 * 
 * vgpu_metadata_dtype = _get_vgpu_metadata_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuMetadata:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_metadata_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 11952, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_metadata_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 11952, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12125
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuMetadata instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuMetadata_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[264])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12125, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12134
 *         return __from_data(data, "vgpu_metadata_dtype", vgpu_metadata_dtype, VgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuMetadata instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuMetadata_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[265])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12134, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12134, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12134, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuMetadata___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[266])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12VgpuMetadata_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuMetadata___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[267])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12173
 *     })
 * 
 * vgpu_pgpu_compatibility_dtype = _get_vgpu_pgpu_compatibility_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuPgpuCompatibility:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_pgpu_compatibility_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_pgpu_compatibility_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 12173, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12257
 *         self._ptr[0].compatibilityLimitCode = <nvmlVgpuPgpuCompatibilityLimitCode_t><int>val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibility_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[268])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12257, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12266
 *         return __from_data(data, "vgpu_pgpu_compatibility_dtype", vgpu_pgpu_compatibility_dtype, VgpuPgpuCompatibility)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuCompatibility instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibility_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[269])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12266, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuCompatibility, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibility___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[270])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuPgpuCompatibility_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuCompatibility___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[271])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12305
 *     })
 * 
 * gpu_instance_placement_dtype = _get_gpu_instance_placement_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuInstancePlacement:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_placement_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12305, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_placement_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 12305, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12400
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstancePlacement instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstancePlacement_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[272])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12400, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12400, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12400, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12400, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12418
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GpuInstancePlacement instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstancePlacement_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[273])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12418, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12418, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12418, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12418, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstancePlacement___reduce_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[274])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_GpuInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_GpuInstancePlacement__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20GpuInstancePlacement_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstancePlacement___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[275])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstancePlacement, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12462
 *     })
 * 
 * gpu_instance_profile_info_v2_dtype = _get_gpu_instance_profile_info_v2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuInstanceProfileInfo_v2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_profile_info_v2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12462, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_profile_info_v2_dty, __pyx_t_5) < (0)) __PYX_ERR(0, 12462, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12671
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v2_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[276])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12671, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12671, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12671, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12671, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12671, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12680
 *         return __from_data(data, "gpu_instance_profile_info_v2_dtype", gpu_instance_profile_info_v2_dtype, GpuInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v2_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[277])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12680, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12680, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12680, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v2___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[278])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v2___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[279])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12730
 *     })
 * 
 * gpu_instance_profile_info_v3_dtype = _get_gpu_instance_profile_info_v3_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuInstanceProfileInfo_v3:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_profile_info_v3_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_profile_info_v3_dty, __pyx_t_5) < (0)) __PYX_ERR(0, 12730, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12939
 *         self._ptr[0].capabilities = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v3_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[280])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12939, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 12939, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12939, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 12939, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12948
 *         return __from_data(data, "gpu_instance_profile_info_v3_dtype", gpu_instance_profile_info_v3_dtype, GpuInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceProfileInfo_v3 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v3_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[281])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12948, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12948, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 12948, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v3___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[282])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25GpuInstanceProfileInfo_v3_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceProfileInfo_v3___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[283])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":12987
 *     })
 * 
 * compute_instance_placement_dtype = _get_compute_instance_placement_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ComputeInstancePlacement:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_placement_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12987, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_compute_instance_placement_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 12987, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13082
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstancePlacement instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstancePlacement_from_da, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[284])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13082, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13100
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ComputeInstancePlacement instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstancePlacement_from_pt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[285])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13100, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstancePlacement___reduc, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[286])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_ComputeInstancePlacement, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_ComputeInstancePlacement__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24ComputeInstancePlacement_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstancePlacement___setst, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[287])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstancePlacement, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13142
 *     })
 * 
 * compute_instance_profile_info_v2_dtype = _get_compute_instance_profile_info_v2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ComputeInstanceProfileInfo_v2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_profile_info_v2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13142, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_compute_instance_profile_info_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 13142, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13329
 *         memcpy(<void *>(self._ptr[0].name), <void *>ptr, 96)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v2_fr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[288])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13329, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13329, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13329, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13338
 *         return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v2_fr_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[289])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13338, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13338, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13338, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13338, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v2_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[290])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v2_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[291])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13387
 *     })
 * 
 * compute_instance_profile_info_v3_dtype = _get_compute_instance_profile_info_v3_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ComputeInstanceProfileInfo_v3:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_profile_info_v3_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13387, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_compute_instance_profile_info_v3, __pyx_t_5) < (0)) __PYX_ERR(0, 13387, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13585
 *         self._ptr[0].capabilities = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v3_fr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[292])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13585, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13585, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13585, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13585, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13594
 *         return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v3_fr_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[293])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13594, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13594, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13594, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13594, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceProfileInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13594, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v3_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[294])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29ComputeInstanceProfileInfo_v3_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceProfileInfo_v3_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[295])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13633
 *     })
 * 
 * gpm_support_dtype = _get_gpm_support_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpmSupport:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpm_support_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13633, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpm_support_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 13633, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13717
 *         self._ptr[0].isSupportedDevice = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpmSupport instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpmSupport_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[296])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13717, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13717, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13717, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13726
 *         return __from_data(data, "gpm_support_dtype", gpm_support_dtype, GpmSupport)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpmSupport instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpmSupport_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[297])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13726, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13726, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13726, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpmSupport, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13726, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpmSupport___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[298])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10GpmSupport_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpmSupport___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[299])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13765
 *     })
 * 
 * device_capabilities_v1_dtype = _get_device_capabilities_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class DeviceCapabilities_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_device_capabilities_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13765, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_capabilities_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 13765, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13849
 *         self._ptr[0].capMask = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCapabilities_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[300])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13849, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13858
 *         return __from_data(data, "device_capabilities_v1_dtype", device_capabilities_v1_dtype, DeviceCapabilities_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceCapabilities_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCapabilities_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[301])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13858, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13858, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13858, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13858, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceCapabilities_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13858, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCapabilities_v1___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[302])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21DeviceCapabilities_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceCapabilities_v1___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[303])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13897
 *     })
 * 
 * device_addressing_mode_v1_dtype = _get_device_addressing_mode_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class DeviceAddressingMode_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_device_addressing_mode_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13897, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_addressing_mode_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 13897, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13981
 *         self._ptr[0].value = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAddressingMode_v1_from_dat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[304])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13981, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 13981, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13981, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 13981, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":13990
 *         return __from_data(data, "device_addressing_mode_v1_dtype", device_addressing_mode_v1_dtype, DeviceAddressingMode_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DeviceAddressingMode_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAddressingMode_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[305])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13990, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DeviceAddressingMode_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 13990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAddressingMode_v1___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[306])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23DeviceAddressingMode_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DeviceAddressingMode_v1___setsta, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[307])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14030
 *     })
 * 
 * repair_status_v1_dtype = _get_repair_status_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class RepairStatus_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_repair_status_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_repair_status_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 14030, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14125
 *         self._ptr[0].bTpcRepairPending = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an RepairStatus_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RepairStatus_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[308])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 14125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14125, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14134
 *         return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an RepairStatus_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RepairStatus_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[309])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14134, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14134, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_RepairStatus_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14134, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RepairStatus_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[310])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15RepairStatus_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_RepairStatus_v1___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[311])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14173
 *     })
 * 
 * pdi_v1_dtype = _get_pdi_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class Pdi_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_pdi_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14173, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pdi_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 14173, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14257
 *         self._ptr[0].value = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Pdi_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Pdi_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[312])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 14257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14257, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14266
 *         return __from_data(data, "pdi_v1_dtype", pdi_v1_dtype, Pdi_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an Pdi_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Pdi_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[313])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14266, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Pdi_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Pdi_v1___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[314])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Pdi_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Pdi_v1___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[315])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14306
 *     })
 * 
 * device_power_mizer_modes_v1_dtype = _get_device_power_mizer_modes_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class DevicePowerMizerModes_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_device_power_mizer_modes_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14306, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_power_mizer_modes_v1_dtyp, __pyx_t_5) < (0)) __PYX_ERR(0, 14306, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14401
 *         self._ptr[0].supportedPowerMizerModes = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePowerMizerModes_v1_from_da, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[316])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14401, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 14401, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14401, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14401, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14410
 *         return __from_data(data, "device_power_mizer_modes_v1_dtype", device_power_mizer_modes_v1_dtype, DevicePowerMizerModes_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an DevicePowerMizerModes_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePowerMizerModes_v1_from_pt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[317])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14410, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14410, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14410, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14410, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_DevicePowerMizerModes_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14410, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePowerMizerModes_v1___reduc, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[318])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_24DevicePowerMizerModes_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_DevicePowerMizerModes_v1___setst, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[319])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14454
 *     })
 * 
 * ecc_sram_unique_uncorrected_error_entry_v1_dtype = _get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class EccSramUniqueUncorrectedErrorEntry_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_unique_uncorrected_error_entry_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14454, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro, __pyx_t_5) < (0)) __PYX_ERR(0, 14454, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14604
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorEnt_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[320])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14604, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 14604, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14604, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14604, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14622
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an EccSramUniqueUncorrectedErrorEntry_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorEnt_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[321])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14622, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14622, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14622, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorEnt_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[322])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_37EccSramUniqueUncorrectedErrorEntry_v1_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorEnt_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[323])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorEntry_v1, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14660
 *     })
 * 
 * gpu_fabric_info_v3_dtype = _get_gpu_fabric_info_v3_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuFabricInfo_v3:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_fabric_info_v3_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_fabric_info_v3_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 14660, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14803
 *         self._ptr[0].healthSummary = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuFabricInfo_v3_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[324])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 14803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14803, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14812
 *         return __from_data(data, "gpu_fabric_info_v3_dtype", gpu_fabric_info_v3_dtype, GpuFabricInfo_v3)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuFabricInfo_v3 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuFabricInfo_v3_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[325])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14812, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14812, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14812, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14812, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuFabricInfo_v3, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14812, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuFabricInfo_v3___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[326])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16GpuFabricInfo_v3_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuFabricInfo_v3___setstate_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[327])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14853
 *     })
 * 
 * nvlink_firmware_version_dtype = _get_nvlink_firmware_version_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class NvlinkFirmwareVersion:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_nvlink_firmware_version_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14853, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_version_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 14853, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14959
 *         self._ptr[0].subMinor = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareVersion_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[328])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14959, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 14959, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14959, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 14959, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":14968
 *         return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareVersion instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareVersion_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[329])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14968, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 14968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14968, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareVersion, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 14968, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareVersion___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[330])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21NvlinkFirmwareVersion_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareVersion___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[331])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15007
 *     })
 * 
 * excluded_device_info_dtype = _get_excluded_device_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ExcludedDeviceInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_excluded_device_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15007, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_excluded_device_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 15007, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15096
 *         memcpy(<void *>(self._ptr[0].uuid), <void *>ptr, 80)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ExcludedDeviceInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ExcludedDeviceInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[332])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15096, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 15096, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15096, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15096, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15105
 *         return __from_data(data, "excluded_device_info_dtype", excluded_device_info_dtype, ExcludedDeviceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ExcludedDeviceInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ExcludedDeviceInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[333])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15105, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15105, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ExcludedDeviceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15105, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ExcludedDeviceInfo___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[334])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18ExcludedDeviceInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ExcludedDeviceInfo___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[335])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15146
 *     })
 * 
 * process_detail_list_v1_dtype = _get_process_detail_list_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ProcessDetailList_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_process_detail_list_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_process_detail_list_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 15146, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15248
 *         self._refs["proc_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessDetailList_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetailList_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[336])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 15248, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15248, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15257
 *         return __from_data(data, "process_detail_list_v1_dtype", process_detail_list_v1_dtype, ProcessDetailList_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessDetailList_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetailList_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[337])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15257, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15257, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessDetailList_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15257, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetailList_v1___reduce_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[338])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_20ProcessDetailList_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessDetailList_v1___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[339])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15297
 *     })
 * 
 * bridge_chip_hierarchy_dtype = _get_bridge_chip_hierarchy_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class BridgeChipHierarchy:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_bridge_chip_hierarchy_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_bridge_chip_hierarchy_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 15297, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15384
 *         self._ptr[0].bridgeCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an BridgeChipHierarchy instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipHierarchy_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[340])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15384, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15384, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 15384, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15384, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15384, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15393
 *         return __from_data(data, "bridge_chip_hierarchy_dtype", bridge_chip_hierarchy_dtype, BridgeChipHierarchy)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an BridgeChipHierarchy instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipHierarchy_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[341])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15393, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_BridgeChipHierarchy, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipHierarchy___reduce_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[342])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19BridgeChipHierarchy_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_BridgeChipHierarchy___setstate_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[343])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15432
 *     })
 * 
 * sample_dtype = _get_sample_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class Sample:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_sample_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15432, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_sample_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 15432, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15525
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an Sample instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Sample_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Sample_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[344])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15525, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15525, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 15525, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15525, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15525, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15543
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an Sample instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Sample_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Sample_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[345])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15543, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15543, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15543, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15543, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15543, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Sample_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Sample___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[346])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_Sample, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_Sample__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_6Sample_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_Sample___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[347])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_Sample, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15582
 *     })
 * 
 * vgpu_instance_utilization_info_v1_dtype = _get_vgpu_instance_utilization_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuInstanceUtilizationInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_instance_utilization_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_utilization_info_v, __pyx_t_5) < (0)) __PYX_ERR(0, 15582, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15731
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstanceUtilizationInfo_v1_f, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[348])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15731, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 15731, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15731, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15731, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15749
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an VgpuInstanceUtilizationInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstanceUtilizationInfo_v1_f_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[349])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15749, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15749, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15749, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstanceUtilizationInfo_v1_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[350])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_VgpuInstanceUtilizationInfo_v1, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_VgpuInstanceUtilizationInfo_v1__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_30VgpuInstanceUtilizationInfo_v1_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstanceUtilizationInfo_v1_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[351])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstanceUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15787
 *     })
 * 
 * field_value_dtype = _get_field_value_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class FieldValue:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_field_value_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_field_value_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 15787, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15935
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an FieldValue instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FieldValue_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[352])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15935, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 15935, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15935, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 15935, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15953
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an FieldValue instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FieldValue_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[353])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15953, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 15953, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15953, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 15953, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FieldValue___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[354])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_FieldValue, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_FieldValue__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_10FieldValue_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_FieldValue___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[355])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_FieldValue, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":15986
 *     })
 * 
 * gpu_thermal_settings_dtype = _get_gpu_thermal_settings_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuThermalSettings:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_thermal_settings_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15986, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_thermal_settings_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 15986, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16073
 *         self._ptr[0].count = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuThermalSettings instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuThermalSettings_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[356])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16073, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16073, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16073, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16073, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16073, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16082
 *         return __from_data(data, "gpu_thermal_settings_dtype", gpu_thermal_settings_dtype, GpuThermalSettings)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuThermalSettings instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuThermalSettings_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[357])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16082, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16082, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuThermalSettings, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16082, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuThermalSettings___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[358])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18GpuThermalSettings_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuThermalSettings___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[359])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16122
 *     })
 * 
 * clk_mon_status_dtype = _get_clk_mon_status_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ClkMonStatus:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_clk_mon_status_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_clk_mon_status_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 16122, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16220
 *         self._ptr[0].clkMonListSize = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ClkMonStatus instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonStatus_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[360])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16220, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16220, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16220, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16229
 *         return __from_data(data, "clk_mon_status_dtype", clk_mon_status_dtype, ClkMonStatus)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ClkMonStatus instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonStatus_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[361])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16229, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16229, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ClkMonStatus, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16229, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonStatus___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[362])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_12ClkMonStatus_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ClkMonStatus___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[363])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16270
 *     })
 * 
 * processes_utilization_info_v1_dtype = _get_processes_utilization_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ProcessesUtilizationInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_processes_utilization_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16270, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_processes_utilization_info_v1_dt, __pyx_t_5) < (0)) __PYX_ERR(0, 16270, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16372
 *         self._refs["proc_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessesUtilizationInfo_v1_from, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[364])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16372, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16372, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16372, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16372, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16372, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16381
 *         return __from_data(data, "processes_utilization_info_v1_dtype", processes_utilization_info_v1_dtype, ProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ProcessesUtilizationInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessesUtilizationInfo_v1_from_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[365])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16381, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16381, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16381, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessesUtilizationInfo_v1___re, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[366])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_27ProcessesUtilizationInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ProcessesUtilizationInfo_v1___se, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[367])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16421
 *     })
 * 
 * gpu_dynamic_pstates_info_dtype = _get_gpu_dynamic_pstates_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuDynamicPstatesInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_dynamic_pstates_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16421, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_dynamic_pstates_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 16421, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16508
 *         self._ptr[0].flags = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuDynamicPstatesInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[368])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16508, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16508, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16508, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16508, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16517
 *         return __from_data(data, "gpu_dynamic_pstates_info_dtype", gpu_dynamic_pstates_info_dtype, GpuDynamicPstatesInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuDynamicPstatesInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuDynamicPstatesInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[369])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16517, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16517, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16517, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuDynamicPstatesInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16517, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuDynamicPstatesInfo___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[370])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GpuDynamicPstatesInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuDynamicPstatesInfo___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[371])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16558
 *     })
 * 
 * vgpu_processes_utilization_info_v1_dtype = _get_vgpu_processes_utilization_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuProcessesUtilizationInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_processes_utilization_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_processes_utilization_info, __pyx_t_5) < (0)) __PYX_ERR(0, 16558, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16660
 *         self._refs["vgpu_proc_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessesUtilizationInfo_v1_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[372])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16660, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_16, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16660, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_16};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16660, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16660, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16669
 *         return __from_data(data, "vgpu_processes_utilization_info_v1_dtype", vgpu_processes_utilization_info_v1_dtype, VgpuProcessesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuProcessesUtilizationInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessesUtilizationInfo_v1_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[373])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16669, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_16 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16669, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_16, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16669, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuProcessesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16669, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessesUtilizationInfo_v1_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[374])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuProcessesUtilizationInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuProcessesUtilizationInfo_v1_6, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[375])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16697
 * 
 * 
 * vgpu_scheduler_params_dtype = _numpy.dtype((             # <<<<<<<<<<<<<<
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerParams_t))),
 *     {
*/
  __pyx_t_10 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16697, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 16697, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;

  /* "cuda/bindings/_nvml.pyx":16698
 * 
 * vgpu_scheduler_params_dtype = _numpy.dtype((
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerParams_t))),             # <<<<<<<<<<<<<<
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod2_dtype, 0),
*/
  __pyx_t_14 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_void); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerParams_t))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_15 = PyTuple_Pack(2, __pyx_t_13, __pyx_t_4); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_14, __pyx_t_15};
    __pyx_t_16 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_12, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
    __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
    __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
    if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16698, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_16);
  }

  /* "cuda/bindings/_nvml.pyx":16700
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerParams_t))),
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod2_dtype, 0),             # <<<<<<<<<<<<<<
 *         "vgpu_sched_data": (_py_anon_pod3_dtype, 0),
 *     }
*/
  __pyx_t_12 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 16700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_mstate_global->__pyx_n_u_py_anon_pod2_dtype); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __pyx_t_14 = PyTuple_Pack(2, __pyx_t_15, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 16700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
  if (PyDict_SetItem(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_vgpu_sched_data_with_arr, __pyx_t_14) < (0)) __PYX_ERR(0, 16700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;

  /* "cuda/bindings/_nvml.pyx":16701
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod2_dtype, 0),
 *         "vgpu_sched_data": (_py_anon_pod3_dtype, 0),             # <<<<<<<<<<<<<<
 *     }
 *     ))
*/
  __Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_py_anon_pod3_dtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 16701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_15 = PyTuple_Pack(2, __pyx_t_14, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  if (PyDict_SetItem(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_vgpu_sched_data, __pyx_t_15) < (0)) __PYX_ERR(0, 16700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;

  /* "cuda/bindings/_nvml.pyx":16698
 * 
 * vgpu_scheduler_params_dtype = _numpy.dtype((
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerParams_t))),             # <<<<<<<<<<<<<<
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod2_dtype, 0),
*/
  __pyx_t_15 = PyTuple_Pack(2, __pyx_t_16, __pyx_t_12); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_15};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_11, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16697, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_params_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 16697, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16790
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod3) * 1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerParams instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerParams_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[376])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16790, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16790, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_11 = NULL;
  __Pyx_GetNameInClass(__pyx_t_15, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16790, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_15};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16790, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16790, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16799
 *         return __from_data(data, "vgpu_scheduler_params_dtype", vgpu_scheduler_params_dtype, VgpuSchedulerParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerParams instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerParams_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[377])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16799, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_15 = NULL;
  __Pyx_GetNameInClass(__pyx_t_11, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 16799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_15, __pyx_t_11};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16799, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerParams, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16799, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerParams___reduce_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[378])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19VgpuSchedulerParams_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerParams___setstate_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[379])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16826
 * 
 * 
 * vgpu_scheduler_set_params_dtype = _numpy.dtype((             # <<<<<<<<<<<<<<
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerSetParams_t))),
 *     {
*/
  __pyx_t_11 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16826, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_15);
  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_15, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16826, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;

  /* "cuda/bindings/_nvml.pyx":16827
 * 
 * vgpu_scheduler_set_params_dtype = _numpy.dtype((
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerSetParams_t))),             # <<<<<<<<<<<<<<
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod4_dtype, 0),
*/
  __pyx_t_12 = NULL;
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_dtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_numpy); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_mstate_global->__pyx_n_u_void); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_16 = __Pyx_PyLong_FromSize_t((sizeof(nvmlVgpuSchedulerSetParams_t))); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_13 = PyTuple_Pack(2, __pyx_t_4, __pyx_t_16); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_12, __pyx_t_13};
    __pyx_t_15 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_14, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
    if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 16827, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_15);
  }

  /* "cuda/bindings/_nvml.pyx":16829
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerSetParams_t))),
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod4_dtype, 0),             # <<<<<<<<<<<<<<
 *         "vgpu_sched_data": (_py_anon_pod5_dtype, 0),
 *     }
*/
  __pyx_t_14 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 16829, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_GetModuleGlobalName(__pyx_t_13, __pyx_mstate_global->__pyx_n_u_py_anon_pod4_dtype); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16829, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_12 = PyTuple_Pack(2, __pyx_t_13, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 16829, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_vgpu_sched_data_with_arr, __pyx_t_12) < (0)) __PYX_ERR(0, 16829, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;

  /* "cuda/bindings/_nvml.pyx":16830
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod4_dtype, 0),
 *         "vgpu_sched_data": (_py_anon_pod5_dtype, 0),             # <<<<<<<<<<<<<<
 *     }
 *     ))
*/
  __Pyx_GetModuleGlobalName(__pyx_t_12, __pyx_mstate_global->__pyx_n_u_py_anon_pod5_dtype); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 16830, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_12);
  __pyx_t_13 = PyTuple_Pack(2, __pyx_t_12, __pyx_mstate_global->__pyx_int_0); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16830, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
  if (PyDict_SetItem(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_vgpu_sched_data, __pyx_t_13) < (0)) __PYX_ERR(0, 16829, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;

  /* "cuda/bindings/_nvml.pyx":16827
 * 
 * vgpu_scheduler_set_params_dtype = _numpy.dtype((
 *     _numpy.dtype((_numpy.void, sizeof(nvmlVgpuSchedulerSetParams_t))),             # <<<<<<<<<<<<<<
 *     {
 *         "vgpu_sched_data_with_arr": (_py_anon_pod4_dtype, 0),
*/
  __pyx_t_13 = PyTuple_Pack(2, __pyx_t_15, __pyx_t_14); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_10, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16826, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_set_params_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 16826, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16919
 *         memcpy(<void *>&(self._ptr[0].vgpuSchedData), <void *>(val_._get_ptr()), sizeof(_anon_pod5) * 1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerSetParams_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[380])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16919, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16919, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16919, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16919, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 16919, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16928
 *         return __from_data(data, "vgpu_scheduler_set_params_dtype", vgpu_scheduler_set_params_dtype, VgpuSchedulerSetParams)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerSetParams instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerSetParams_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[381])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16928, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 16928, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16928, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerSetParams, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 16928, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerSetParams___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[382])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22VgpuSchedulerSetParams_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerSetParams___setstat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[383])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":16968
 *     })
 * 
 * vgpu_license_info_dtype = _get_vgpu_license_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuLicenseInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_license_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16968, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_license_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 16968, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17064
 *         self._ptr[0].currentState = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuLicenseInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[384])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17064, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17064, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17064, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17064, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17064, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17073
 *         return __from_data(data, "vgpu_license_info_dtype", vgpu_license_info_dtype, VgpuLicenseInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuLicenseInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[385])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17073, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17073, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17073, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17073, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuLicenseInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17073, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[386])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15VgpuLicenseInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuLicenseInfo___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[387])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17116
 *     })
 * 
 * grid_licensable_feature_dtype = _get_grid_licensable_feature_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GridLicensableFeature:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_grid_licensable_feature_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17116, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_grid_licensable_feature_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 17116, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17249
 *         self._data[key] = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicensableFeature instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_15from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeature_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[388])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17249, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17249, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17249, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17267
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an GridLicensableFeature instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_17from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeature_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[389])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[12]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17267, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17267, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17267, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     cdef tuple state
 *     cdef object _dict
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_19__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeature___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[390])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_GridLicensableFeature, (type(self), 0xa75e18a, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_GridLicensableFeature__set_state(self, __pyx_state)
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21GridLicensableFeature_21__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeature___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[391])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeature, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17300
 *     })
 * 
 * unit_fan_speeds_dtype = _get_unit_fan_speeds_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class UnitFanSpeeds:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_unit_fan_speeds_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17300, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_fan_speeds_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 17300, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17387
 *         self._ptr[0].count = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an UnitFanSpeeds instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanSpeeds_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[392])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17387, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17387, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17387, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17387, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17387, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17396
 *         return __from_data(data, "unit_fan_speeds_dtype", unit_fan_speeds_dtype, UnitFanSpeeds)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an UnitFanSpeeds instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanSpeeds_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[393])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17396, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_UnitFanSpeeds, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17396, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanSpeeds___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[394])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13UnitFanSpeeds_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_UnitFanSpeeds___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[395])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17441
 *     })
 * 
 * vgpu_pgpu_metadata_dtype = _get_vgpu_pgpu_metadata_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuPgpuMetadata:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_pgpu_metadata_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17441, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_pgpu_metadata_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 17441, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17589
 *         memcpy(<void *>(self._ptr[0].opaqueData), <void *>ptr, 4)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuPgpuMetadata instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuMetadata_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[396])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17589, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17589, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17589, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17589, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17589, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17598
 *         return __from_data(data, "vgpu_pgpu_metadata_dtype", vgpu_pgpu_metadata_dtype, VgpuPgpuMetadata)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuPgpuMetadata instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuMetadata_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[397])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17598, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17598, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17598, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuPgpuMetadata, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17598, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuMetadata___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[398])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuPgpuMetadata_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuPgpuMetadata___setstate_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[399])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17639
 *     })
 * 
 * gpu_instance_info_dtype = _get_gpu_instance_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GpuInstanceInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_gpu_instance_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17639, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 17639, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17746
 *         self._ptr[0].profileId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GpuInstanceInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[400])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17746, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17746, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17746, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17746, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17746, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17755
 *         return __from_data(data, "gpu_instance_info_dtype", gpu_instance_info_dtype, GpuInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GpuInstanceInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[401])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17755, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17755, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17755, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GpuInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17755, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceInfo___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[402])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15GpuInstanceInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GpuInstanceInfo___setstate_cytho, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[403])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17797
 *     })
 * 
 * compute_instance_info_dtype = _get_compute_instance_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class ComputeInstanceInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_compute_instance_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17797, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_compute_instance_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 17797, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17915
 *         self._ptr[0].profileId = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an ComputeInstanceInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[404])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17915, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 17915, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17915, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 17915, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17924
 *         return __from_data(data, "compute_instance_info_dtype", compute_instance_info_dtype, ComputeInstanceInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an ComputeInstanceInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[405])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17924, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17924, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17924, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17924, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_ComputeInstanceInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 17924, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceInfo___reduce_cyt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[406])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19ComputeInstanceInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_ComputeInstanceInfo___setstate_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[407])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":17964
 *     })
 * 
 * ecc_sram_unique_uncorrected_error_counts_v1_dtype = _get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class EccSramUniqueUncorrectedErrorCounts_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_ecc_sram_unique_uncorrected_error_counts_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17964, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_ecc_sram_unique_uncorrected_erro_2, __pyx_t_5) < (0)) __PYX_ERR(0, 17964, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18055
 *         self._refs["entries"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorCou_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[408])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18055, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18055, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18055, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18055, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18064
 *         return __from_data(data, "ecc_sram_unique_uncorrected_error_counts_v1_dtype", ecc_sram_unique_uncorrected_error_counts_v1_dtype, EccSramUniqueUncorrectedErrorCounts_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an EccSramUniqueUncorrectedErrorCounts_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorCou_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[409])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18064, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18064, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18064, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18064, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_EccSramUniqueUncorrectedErrorCounts_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18064, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorCou_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[410])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_38EccSramUniqueUncorrectedErrorCounts_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_EccSramUniqueUncorrectedErrorCou_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[411])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18104
 *     })
 * 
 * nvlink_firmware_info_dtype = _get_nvlink_firmware_info_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class NvlinkFirmwareInfo:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_nvlink_firmware_info_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_nvlink_firmware_info_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 18104, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18191
 *         self._ptr[0].numValidEntries = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareInfo_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[412])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18191, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18191, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18191, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18191, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18200
 *         return __from_data(data, "nvlink_firmware_info_dtype", nvlink_firmware_info_dtype, NvlinkFirmwareInfo)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvlinkFirmwareInfo instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareInfo_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[413])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18200, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18200, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18200, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvlinkFirmwareInfo, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18200, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareInfo___reduce_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[414])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_18NvlinkFirmwareInfo_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvlinkFirmwareInfo___setstate_cy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[415])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18242
 *     })
 * 
 * vgpu_instances_utilization_info_v1_dtype = _get_vgpu_instances_utilization_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuInstancesUtilizationInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_instances_utilization_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instances_utilization_info, __pyx_t_5) < (0)) __PYX_ERR(0, 18242, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18355
 *         self._refs["vgpu_util_array"] = arr
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstancesUtilizationInfo_v1_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[416])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18355, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18355, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18355, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18364
 *         return __from_data(data, "vgpu_instances_utilization_info_v1_dtype", vgpu_instances_utilization_info_v1_dtype, VgpuInstancesUtilizationInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuInstancesUtilizationInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstancesUtilizationInfo_v1_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[417])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18364, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18364, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuInstancesUtilizationInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18364, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstancesUtilizationInfo_v1_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[418])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31VgpuInstancesUtilizationInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuInstancesUtilizationInfo_v1_6, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[419])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18408
 *     })
 * 
 * vgpu_scheduler_log_dtype = _get_vgpu_scheduler_log_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerLog:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 18408, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18540
 *         self._ptr[0].entriesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLog instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLog_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[420])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18540, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18540, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18540, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18549
 *         return __from_data(data, "vgpu_scheduler_log_dtype", vgpu_scheduler_log_dtype, VgpuSchedulerLog)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLog instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLog_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[421])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18549, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18549, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLog, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18549, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLog___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[422])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_16VgpuSchedulerLog_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLog___setstate_cyth, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[423])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18589
 *     })
 * 
 * vgpu_scheduler_get_state_dtype = _get_vgpu_scheduler_get_state_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerGetState:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_get_state_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18589, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_get_state_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 18589, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18685
 *         self._ptr[0].arrMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerGetState instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerGetState_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[424])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18685, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18694
 *         return __from_data(data, "vgpu_scheduler_get_state_dtype", vgpu_scheduler_get_state_dtype, VgpuSchedulerGetState)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerGetState instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerGetState_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[425])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18694, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18694, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18694, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18694, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerGetState, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18694, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerGetState___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[426])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerGetState_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerGetState___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[427])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18736
 *     })
 * 
 * vgpu_scheduler_state_info_v1_dtype = _get_vgpu_scheduler_state_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerStateInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_state_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_state_info_v1_dty, __pyx_t_5) < (0)) __PYX_ERR(0, 18736, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18854
 *         self._ptr[0].arrMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerStateInfo_v1_from_d, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[428])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18854, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18854, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 18854, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18854, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 18854, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18863
 *         return __from_data(data, "vgpu_scheduler_state_info_v1_dtype", vgpu_scheduler_state_info_v1_dtype, VgpuSchedulerStateInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerStateInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerStateInfo_v1_from_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[429])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18863, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18863, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18863, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerStateInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 18863, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerStateInfo_v1___redu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[430])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25VgpuSchedulerStateInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerStateInfo_v1___sets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[431])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":18907
 *     })
 * 
 * vgpu_scheduler_log_info_v1_dtype = _get_vgpu_scheduler_log_info_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerLogInfo_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_log_info_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18907, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_log_info_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 18907, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19050
 *         self._ptr[0].entriesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogInfo_v1_from_dat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[432])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19050, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19050, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 19050, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19050, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19050, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19059
 *         return __from_data(data, "vgpu_scheduler_log_info_v1_dtype", vgpu_scheduler_log_info_v1_dtype, VgpuSchedulerLogInfo_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerLogInfo_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogInfo_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[433])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19059, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19059, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 19059, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19059, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerLogInfo_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19059, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogInfo_v1___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[434])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23VgpuSchedulerLogInfo_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerLogInfo_v1___setsta, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[435])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19101
 *     })
 * 
 * vgpu_scheduler_state_v1_dtype = _get_vgpu_scheduler_state_v1_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class VgpuSchedulerState_v1:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_vgpu_scheduler_state_v1_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19101, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_scheduler_state_v1_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 19101, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19219
 *         self._ptr[0].enableARRMode = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerState_v1_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[436])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19219, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19219, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 19219, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19219, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19219, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19228
 *         return __from_data(data, "vgpu_scheduler_state_v1_dtype", vgpu_scheduler_state_v1_dtype, VgpuSchedulerState_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an VgpuSchedulerState_v1 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerState_v1_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[437])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19228, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 19228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19228, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_VgpuSchedulerState_v1, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19228, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerState_v1___reduce_c, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[438])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21VgpuSchedulerState_v1_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_VgpuSchedulerState_v1___setstate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[439])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19268
 *     })
 * 
 * grid_licensable_features_dtype = _get_grid_licensable_features_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class GridLicensableFeatures:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_grid_licensable_features_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_grid_licensable_features_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 19268, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19366
 *         self._ptr[0].licensableFeaturesCount = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an GridLicensableFeatures instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeatures_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[440])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19366, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 19366, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19366, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19366, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19375
 *         return __from_data(data, "grid_licensable_features_dtype", grid_licensable_features_dtype, GridLicensableFeatures)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an GridLicensableFeatures instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeatures_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[441])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19375, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 19375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19375, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_GridLicensableFeatures, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19375, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeatures___reduce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[442])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_22GridLicensableFeatures_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_GridLicensableFeatures___setstat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[443])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19415
 *     })
 * 
 * nv_link_info_v2_dtype = _get_nv_link_info_v2_dtype_offsets()             # <<<<<<<<<<<<<<
 * 
 * cdef class NvLinkInfo_v2:
*/
  __pyx_t_5 = __pyx_f_4cuda_8bindings_5_nvml__get_nv_link_info_v2_dtype_offsets(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19415, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_nv_link_info_v2_dtype, __pyx_t_5) < (0)) __PYX_ERR(0, 19415, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19511
 *         self._ptr[0].isNvleEnabled = val
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_data(data):
 *         """Create an NvLinkInfo_v2 instance wrapping the given NumPy array.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_13from_data, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvLinkInfo_v2_from_data, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[444])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19511, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_10 = NULL;
  __Pyx_GetNameInClass(__pyx_t_13, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 19511, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_13);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_13};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19511, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_mstate_global->__pyx_n_u_from_data, __pyx_t_5) < (0)) __PYX_ERR(0, 19511, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19520
 *         return __from_data(data, "nv_link_info_v2_dtype", nv_link_info_v2_dtype, NvLinkInfo_v2)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an NvLinkInfo_v2 instance wrapping the given pointer.
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_15from_ptr, __Pyx_CYFUNCTION_STATICMETHOD | __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvLinkInfo_v2_from_ptr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[445])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19520, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_mstate_global->__pyx_tuple[11]);
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19520, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_13 = NULL;
  __Pyx_GetNameInClass(__pyx_t_10, (PyObject*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 19520, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  __pyx_t_6 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_13, __pyx_t_10};
    __pyx_t_5 = __Pyx_PyObject_FastCall((PyObject*)__pyx_builtin_staticmethod, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19520, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
  }
  if (__Pyx_SetItemOnTypeDict(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_5_nvml_NvLinkInfo_v2, __pyx_mstate_global->__pyx_n_u_from_ptr, __pyx_t_5) < (0)) __PYX_ERR(0, 19520, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_17__reduce_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvLinkInfo_v2___reduce_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[446])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_reduce_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "self._ptr cannot be converted to a Python object for pickling"
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13NvLinkInfo_v2_19__setstate_cython__, __Pyx_CYFUNCTION_CCLASS, __pyx_mstate_global->__pyx_n_u_NvLinkInfo_v2___setstate_cython, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[447])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_setstate_cython, __pyx_t_5) < (0)) __PYX_ERR(1, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19548
 * 
 * 
 * cpdef init_v2():             # <<<<<<<<<<<<<<
 *     """Initialize NVML, but don't initialize any GPUs yet.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_5init_v2, 0, __pyx_mstate_global->__pyx_n_u_init_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[448])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19548, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_init_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 19548, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19558
 * 
 * 
 * cpdef init_with_flags(unsigned int flags):             # <<<<<<<<<<<<<<
 *     """nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values modifying the behaviour of nvmlInit(). Other than the "flags" parameter it is completely similar to ``nvmlInit_v2``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_7init_with_flags, 0, __pyx_mstate_global->__pyx_n_u_init_with_flags, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[449])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19558, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_init_with_flags, __pyx_t_5) < (0)) __PYX_ERR(0, 19558, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19571
 * 
 * 
 * cpdef shutdown():             # <<<<<<<<<<<<<<
 *     """Shut down NVML by releasing all GPU resources previously allocated with :func:`init_v2`.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_9shutdown, 0, __pyx_mstate_global->__pyx_n_u_shutdown, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[450])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19571, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_shutdown, __pyx_t_5) < (0)) __PYX_ERR(0, 19571, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19581
 * 
 * 
 * cpdef str error_string(int result):             # <<<<<<<<<<<<<<
 *     """Helper method for converting NVML error codes into readable strings.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_11error_string, 0, __pyx_mstate_global->__pyx_n_u_error_string, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[451])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_error_string, __pyx_t_5) < (0)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19594
 * 
 * 
 * cpdef str system_get_driver_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the system's graphics driver.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_13system_get_driver_version, 0, __pyx_mstate_global->__pyx_n_u_system_get_driver_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[452])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19594, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_driver_version, __pyx_t_5) < (0)) __PYX_ERR(0, 19594, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19607
 * 
 * 
 * cpdef str system_get_nvml_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the NVML library.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_15system_get_nvml_version, 0, __pyx_mstate_global->__pyx_n_u_system_get_nvml_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[453])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_nvml_version, __pyx_t_5) < (0)) __PYX_ERR(0, 19607, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19620
 * 
 * 
 * cpdef int system_get_cuda_driver_version() except *:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the CUDA driver.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_17system_get_cuda_driver_version, 0, __pyx_mstate_global->__pyx_n_u_system_get_cuda_driver_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[454])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19620, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_cuda_driver_version, __pyx_t_5) < (0)) __PYX_ERR(0, 19620, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19635
 * 
 * 
 * cpdef int system_get_cuda_driver_version_v2() except 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the CUDA driver from the shared library.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_19system_get_cuda_driver_version_v2, 0, __pyx_mstate_global->__pyx_n_u_system_get_cuda_driver_version_v, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[455])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19635, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_cuda_driver_version_v, __pyx_t_5) < (0)) __PYX_ERR(0, 19635, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19650
 * 
 * 
 * cpdef str system_get_process_name(unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Gets name of the process with provided process id.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_21system_get_process_name, 0, __pyx_mstate_global->__pyx_n_u_system_get_process_name, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[456])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19650, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_process_name, __pyx_t_5) < (0)) __PYX_ERR(0, 19650, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19666
 * 
 * 
 * cpdef object system_get_hic_version():             # <<<<<<<<<<<<<<
 *     """Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_23system_get_hic_version, 0, __pyx_mstate_global->__pyx_n_u_system_get_hic_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[457])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_hic_version, __pyx_t_5) < (0)) __PYX_ERR(0, 19666, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19685
 * 
 * 
 * cpdef unsigned int unit_get_count() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of units in the system.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_25unit_get_count, 0, __pyx_mstate_global->__pyx_n_u_unit_get_count, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[458])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19685, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_count, __pyx_t_5) < (0)) __PYX_ERR(0, 19685, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19700
 * 
 * 
 * cpdef intptr_t unit_get_handle_by_index(unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular unit, based on its ind_ex.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_27unit_get_handle_by_index, 0, __pyx_mstate_global->__pyx_n_u_unit_get_handle_by_index, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[459])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_handle_by_index, __pyx_t_5) < (0)) __PYX_ERR(0, 19700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19718
 * 
 * 
 * cpdef object unit_get_unit_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the static information associated with a unit.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_29unit_get_unit_info, 0, __pyx_mstate_global->__pyx_n_u_unit_get_unit_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[460])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19718, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_unit_info, __pyx_t_5) < (0)) __PYX_ERR(0, 19718, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19737
 * 
 * 
 * cpdef object unit_get_led_state(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the LED state associated with this unit.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_31unit_get_led_state, 0, __pyx_mstate_global->__pyx_n_u_unit_get_led_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[461])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19737, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_led_state, __pyx_t_5) < (0)) __PYX_ERR(0, 19737, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19756
 * 
 * 
 * cpdef object unit_get_psu_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the PSU stats for the unit.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_33unit_get_psu_info, 0, __pyx_mstate_global->__pyx_n_u_unit_get_psu_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[462])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19756, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_psu_info, __pyx_t_5) < (0)) __PYX_ERR(0, 19756, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19775
 * 
 * 
 * cpdef unsigned int unit_get_temperature(intptr_t unit, unsigned int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the temperature readings for the unit, in degrees C.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_35unit_get_temperature, 0, __pyx_mstate_global->__pyx_n_u_unit_get_temperature, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[463])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19775, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_temperature, __pyx_t_5) < (0)) __PYX_ERR(0, 19775, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19794
 * 
 * 
 * cpdef object unit_get_fan_speed_info(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the fan speed readings for the unit.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_37unit_get_fan_speed_info, 0, __pyx_mstate_global->__pyx_n_u_unit_get_fan_speed_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[464])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19794, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_fan_speed_info, __pyx_t_5) < (0)) __PYX_ERR(0, 19794, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19813
 * 
 * 
 * cpdef unsigned int device_get_count_v2() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of compute devices in the system. A compute device is a single GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_39device_get_count_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_count_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[465])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19813, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_count_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 19813, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19828
 * 
 * 
 * cpdef object device_get_attributes_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get attributes (engine counts etc.) for the given NVML device handle.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_41device_get_attributes_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_attributes_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[466])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19828, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_attributes_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 19828, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19847
 * 
 * 
 * cpdef intptr_t device_get_handle_by_index_v2(unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its ind_ex.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_43device_get_handle_by_index_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_index_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[467])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19847, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_index_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 19847, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19865
 * 
 * 
 * cpdef intptr_t device_get_handle_by_serial(serial) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its board serial number.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_45device_get_handle_by_serial, 0, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_serial, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[468])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19865, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_serial, __pyx_t_5) < (0)) __PYX_ERR(0, 19865, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19887
 * 
 * 
 * cpdef intptr_t device_get_handle_by_uuid(uuid) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its globally unique immutable UUID (in ASCII format) associated with each device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_47device_get_handle_by_uuid, 0, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_uuid, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[469])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19887, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_uuid, __pyx_t_5) < (0)) __PYX_ERR(0, 19887, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19909
 * 
 * 
 * cpdef intptr_t device_get_handle_by_uuidv(intptr_t uuid) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. See ``nvmlUUID_v1_t`` for more information on the UUID struct. The caller must set the appropriate version prior to calling this API.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_49device_get_handle_by_uuidv, 0, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_uuidv, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[470])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19909, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_uuidv, __pyx_t_5) < (0)) __PYX_ERR(0, 19909, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19927
 * 
 * 
 * cpdef intptr_t device_get_handle_by_pci_bus_id_v2(pci_bus_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Acquire the handle for a particular device, based on its PCI bus id.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_51device_get_handle_by_pci_bus_id_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_pci_bus_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[471])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19927, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_handle_by_pci_bus_id, __pyx_t_5) < (0)) __PYX_ERR(0, 19927, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19949
 * 
 * 
 * cpdef str device_get_name(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the name of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_53device_get_name, 0, __pyx_mstate_global->__pyx_n_u_device_get_name, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[472])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_name, __pyx_t_5) < (0)) __PYX_ERR(0, 19949, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19965
 * 
 * 
 * cpdef int device_get_brand(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the brand of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_55device_get_brand, 0, __pyx_mstate_global->__pyx_n_u_device_get_brand, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[473])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19965, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_brand, __pyx_t_5) < (0)) __PYX_ERR(0, 19965, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":19983
 * 
 * 
 * cpdef unsigned int device_get_index(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the NVML index of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_57device_get_index, 0, __pyx_mstate_global->__pyx_n_u_device_get_index, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[474])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19983, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_index, __pyx_t_5) < (0)) __PYX_ERR(0, 19983, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20001
 * 
 * 
 * cpdef str device_get_serial(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the globally unique board serial number associated with this device's board.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_59device_get_serial, 0, __pyx_mstate_global->__pyx_n_u_device_get_serial, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[475])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20001, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_serial, __pyx_t_5) < (0)) __PYX_ERR(0, 20001, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20017
 * 
 * 
 * cpdef unsigned int device_get_module_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get a unique identifier for the device module on the baseboard.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_61device_get_module_id, 0, __pyx_mstate_global->__pyx_n_u_device_get_module_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[476])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_module_id, __pyx_t_5) < (0)) __PYX_ERR(0, 20017, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20035
 * 
 * 
 * cpdef object device_get_c2c_mode_info_v(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the Device's C2C Mode information.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_63device_get_c2c_mode_info_v, 0, __pyx_mstate_global->__pyx_n_u_device_get_c2c_mode_info_v, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[477])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20035, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_c2c_mode_info_v, __pyx_t_5) < (0)) __PYX_ERR(0, 20035, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20054
 * 
 * 
 * cpdef object device_get_memory_affinity(intptr_t device, unsigned int node_set_size, unsigned int scope):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to node_set_size) of bitmasks with the ideal memory affinity within node or socket for the device. For example, if NUMA node 0, 1 are ideal within the socket for the device and node_set_size == 1, result[0] = 0x3.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_65device_get_memory_affinity, 0, __pyx_mstate_global->__pyx_n_u_device_get_memory_affinity, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[478])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20054, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_memory_affinity, __pyx_t_5) < (0)) __PYX_ERR(0, 20054, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20074
 * 
 * 
 * cpdef object device_get_cpu_affinity_within_scope(intptr_t device, unsigned int cpu_set_size, unsigned int scope):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity within node or socket for the device. For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_67device_get_cpu_affinity_within_scope, 0, __pyx_mstate_global->__pyx_n_u_device_get_cpu_affinity_within_s, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[479])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_cpu_affinity_within_s, __pyx_t_5) < (0)) __PYX_ERR(0, 20074, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20094
 * 
 * 
 * cpdef object device_get_cpu_affinity(intptr_t device, unsigned int cpu_set_size):             # <<<<<<<<<<<<<<
 *     """Retrieves an array of unsigned ints (sized to cpu_set_size) of bitmasks with the ideal CPU affinity for the device For example, if processors 0, 1, 32, and 33 are ideal for the device and cpu_set_size == 2, result[0] = 0x3, result[1] = 0x3 This is equivalent to calling ``nvmlDeviceGetCpuAffinityWithinScope`` with ``NVML_AFFINITY_SCOPE_NODE``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_69device_get_cpu_affinity, 0, __pyx_mstate_global->__pyx_n_u_device_get_cpu_affinity, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[480])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20094, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_cpu_affinity, __pyx_t_5) < (0)) __PYX_ERR(0, 20094, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20113
 * 
 * 
 * cpdef device_set_cpu_affinity(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Sets the ideal affinity for the calling thread and device using the guidelines given in :func:`device_get_cpu_affinity`. Note, this is a change as of version 8.0. Older versions set the affinity for a calling process and all children. Currently supports up to 1024 processors.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_71device_set_cpu_affinity, 0, __pyx_mstate_global->__pyx_n_u_device_set_cpu_affinity, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[481])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20113, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_cpu_affinity, __pyx_t_5) < (0)) __PYX_ERR(0, 20113, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20126
 * 
 * 
 * cpdef device_clear_cpu_affinity(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Clear all affinity bindings for the calling thread. Note, this is a change as of version 8.0 as older versions cleared the affinity for a calling process and all children.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_73device_clear_cpu_affinity, 0, __pyx_mstate_global->__pyx_n_u_device_clear_cpu_affinity, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[482])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_clear_cpu_affinity, __pyx_t_5) < (0)) __PYX_ERR(0, 20126, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20139
 * 
 * 
 * cpdef unsigned int device_get_numa_node_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the NUMA node of the given GPU device. This only applies to platforms where the GPUs are NUMA nodes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_75device_get_numa_node_id, 0, __pyx_mstate_global->__pyx_n_u_device_get_numa_node_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[483])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_numa_node_id, __pyx_t_5) < (0)) __PYX_ERR(0, 20139, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20157
 * 
 * 
 * cpdef int device_get_topology_common_ancestor(intptr_t device1, intptr_t device2) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the common ancestor for two devices For all products. Supported on Linux only.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_77device_get_topology_common_ancestor, 0, __pyx_mstate_global->__pyx_n_u_device_get_topology_common_ances, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[484])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20157, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_topology_common_ances, __pyx_t_5) < (0)) __PYX_ERR(0, 20157, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20176
 * 
 * 
 * cpdef int device_get_p2p_status(intptr_t device1, intptr_t device2, int p2p_ind_ex) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the status for a given p2p capability index between a given pair of GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_79device_get_p2p_status, 0, __pyx_mstate_global->__pyx_n_u_device_get_p2p_status, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[485])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_p2p_status, __pyx_t_5) < (0)) __PYX_ERR(0, 20176, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20196
 * 
 * 
 * cpdef str device_get_uuid(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, that augments the immutable, board serial identifier.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_81device_get_uuid, 0, __pyx_mstate_global->__pyx_n_u_device_get_uuid, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[486])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20196, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_uuid, __pyx_t_5) < (0)) __PYX_ERR(0, 20196, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20212
 * 
 * 
 * cpdef unsigned int device_get_minor_number(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for each GPU will have the form /dev/nvidia[minor number].
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_83device_get_minor_number, 0, __pyx_mstate_global->__pyx_n_u_device_get_minor_number, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[487])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_minor_number, __pyx_t_5) < (0)) __PYX_ERR(0, 20212, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20230
 * 
 * 
 * cpdef str device_get_board_part_number(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the the device board part number which is programmed into the board's InfoROM.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_85device_get_board_part_number, 0, __pyx_mstate_global->__pyx_n_u_device_get_board_part_number, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[488])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20230, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_board_part_number, __pyx_t_5) < (0)) __PYX_ERR(0, 20230, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20246
 * 
 * 
 * cpdef str device_get_inforom_version(intptr_t device, int object):             # <<<<<<<<<<<<<<
 *     """Retrieves the version information for the device's infoROM object.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_87device_get_inforom_version, 0, __pyx_mstate_global->__pyx_n_u_device_get_inforom_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[489])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20246, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_inforom_version, __pyx_t_5) < (0)) __PYX_ERR(0, 20246, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20263
 * 
 * 
 * cpdef str device_get_inforom_image_version(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the global infoROM image version.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_89device_get_inforom_image_version, 0, __pyx_mstate_global->__pyx_n_u_device_get_inforom_image_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[490])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20263, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_inforom_image_version, __pyx_t_5) < (0)) __PYX_ERR(0, 20263, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20279
 * 
 * 
 * cpdef unsigned int device_get_inforom_configuration_checksum(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the checksum of the configuration stored in the device's infoROM.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_91device_get_inforom_configuration_checksum, 0, __pyx_mstate_global->__pyx_n_u_device_get_inforom_configuration, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[491])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20279, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_inforom_configuration, __pyx_t_5) < (0)) __PYX_ERR(0, 20279, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20297
 * 
 * 
 * cpdef device_validate_inforom(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Reads the infoROM from the flash and verifies the checksums.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_93device_validate_inforom, 0, __pyx_mstate_global->__pyx_n_u_device_validate_inforom, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[492])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20297, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_validate_inforom, __pyx_t_5) < (0)) __PYX_ERR(0, 20297, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20310
 * 
 * 
 * cpdef unsigned long device_get_last_bbx_flush_time(intptr_t device, intptr_t timestamp) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_95device_get_last_bbx_flush_time, 0, __pyx_mstate_global->__pyx_n_u_device_get_last_bbx_flush_time, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[493])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20310, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_last_bbx_flush_time, __pyx_t_5) < (0)) __PYX_ERR(0, 20310, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20329
 * 
 * 
 * cpdef int device_get_display_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the display mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_97device_get_display_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_display_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[494])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_display_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 20329, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20347
 * 
 * 
 * cpdef int device_get_display_active(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the display active state for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_99device_get_display_active, 0, __pyx_mstate_global->__pyx_n_u_device_get_display_active, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[495])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20347, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_display_active, __pyx_t_5) < (0)) __PYX_ERR(0, 20347, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20365
 * 
 * 
 * cpdef int device_get_persistence_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the persistence mode associated with this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_101device_get_persistence_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_persistence_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[496])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20365, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_persistence_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 20365, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20383
 * 
 * 
 * cpdef object device_get_pci_info_ext(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves PCI attributes of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_103device_get_pci_info_ext, 0, __pyx_mstate_global->__pyx_n_u_device_get_pci_info_ext, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[497])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20383, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pci_info_ext, __pyx_t_5) < (0)) __PYX_ERR(0, 20383, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20403
 * 
 * 
 * cpdef object device_get_pci_info_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI attributes of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_105device_get_pci_info_v3, 0, __pyx_mstate_global->__pyx_n_u_device_get_pci_info_v3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[498])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pci_info_v3, __pyx_t_5) < (0)) __PYX_ERR(0, 20403, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20422
 * 
 * 
 * cpdef unsigned int device_get_max_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link generation possible with this device and system.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_107device_get_max_pcie_link_generation, 0, __pyx_mstate_global->__pyx_n_u_device_get_max_pcie_link_generat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[499])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20422, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_max_pcie_link_generat, __pyx_t_5) < (0)) __PYX_ERR(0, 20422, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20440
 * 
 * 
 * cpdef unsigned int device_get_gpu_max_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link generation supported by this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_109device_get_gpu_max_pcie_link_generation, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_max_pcie_link_gen, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[500])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20440, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_max_pcie_link_gen, __pyx_t_5) < (0)) __PYX_ERR(0, 20440, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20458
 * 
 * 
 * cpdef unsigned int device_get_max_pcie_link_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum PCIe link width possible with this device and system.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_111device_get_max_pcie_link_width, 0, __pyx_mstate_global->__pyx_n_u_device_get_max_pcie_link_width, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[501])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20458, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_max_pcie_link_width, __pyx_t_5) < (0)) __PYX_ERR(0, 20458, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20476
 * 
 * 
 * cpdef unsigned int device_get_curr_pcie_link_generation(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current PCIe link generation.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_113device_get_curr_pcie_link_generation, 0, __pyx_mstate_global->__pyx_n_u_device_get_curr_pcie_link_genera, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[502])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20476, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_curr_pcie_link_genera, __pyx_t_5) < (0)) __PYX_ERR(0, 20476, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20494
 * 
 * 
 * cpdef unsigned int device_get_curr_pcie_link_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current PCIe link width.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_115device_get_curr_pcie_link_width, 0, __pyx_mstate_global->__pyx_n_u_device_get_curr_pcie_link_width, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[503])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20494, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_curr_pcie_link_width, __pyx_t_5) < (0)) __PYX_ERR(0, 20494, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20512
 * 
 * 
 * cpdef unsigned int device_get_pcie_throughput(intptr_t device, int counter) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve PCIe utilization information. This function is querying a byte counter over a 20ms interval and thus is the PCIe throughput over that interval.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_117device_get_pcie_throughput, 0, __pyx_mstate_global->__pyx_n_u_device_get_pcie_throughput, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[504])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20512, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pcie_throughput, __pyx_t_5) < (0)) __PYX_ERR(0, 20512, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20531
 * 
 * 
 * cpdef unsigned int device_get_pcie_replay_counter(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the PCIe replay counter.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_119device_get_pcie_replay_counter, 0, __pyx_mstate_global->__pyx_n_u_device_get_pcie_replay_counter, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[505])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20531, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pcie_replay_counter, __pyx_t_5) < (0)) __PYX_ERR(0, 20531, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20549
 * 
 * 
 * cpdef unsigned int device_get_clock_info(intptr_t device, int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current clock speeds for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_121device_get_clock_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_clock_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[506])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20549, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_clock_info, __pyx_t_5) < (0)) __PYX_ERR(0, 20549, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20568
 * 
 * 
 * cpdef unsigned int device_get_max_clock_info(intptr_t device, int type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the maximum clock speeds for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_123device_get_max_clock_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_max_clock_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[507])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_max_clock_info, __pyx_t_5) < (0)) __PYX_ERR(0, 20568, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20587
 * 
 * 
 * cpdef int device_get_gpc_clk_vf_offset(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPCCLK VF offset value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_125device_get_gpc_clk_vf_offset, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpc_clk_vf_offset, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[508])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpc_clk_vf_offset, __pyx_t_5) < (0)) __PYX_ERR(0, 20587, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20605
 * 
 * 
 * cpdef unsigned int device_get_clock(intptr_t device, int clock_type, int clock_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the clock speed for the clock specified by the clock type and clock ID.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_127device_get_clock, 0, __pyx_mstate_global->__pyx_n_u_device_get_clock, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[509])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_clock, __pyx_t_5) < (0)) __PYX_ERR(0, 20605, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20625
 * 
 * 
 * cpdef unsigned int device_get_max_customer_boost_clock(intptr_t device, int clock_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the customer defined maximum boost clock speed specified by the given clock type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_129device_get_max_customer_boost_clock, 0, __pyx_mstate_global->__pyx_n_u_device_get_max_customer_boost_cl, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[510])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20625, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_max_customer_boost_cl, __pyx_t_5) < (0)) __PYX_ERR(0, 20625, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20644
 * 
 * 
 * cpdef object device_get_supported_memory_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the list of possible memory clocks that can be used as an argument for ``nvmlDeviceSetMemoryLockedClocks``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_131device_get_supported_memory_clocks, 0, __pyx_mstate_global->__pyx_n_u_device_get_supported_memory_cloc, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[511])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_supported_memory_cloc, __pyx_t_5) < (0)) __PYX_ERR(0, 20644, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20666
 * 
 * 
 * cpdef object device_get_supported_graphics_clocks(intptr_t device, unsigned int memory_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Retrieves the list of possible graphics clocks that can be used as an argument for ``nvmlDeviceSetGpuLockedClocks``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_133device_get_supported_graphics_clocks, 0, __pyx_mstate_global->__pyx_n_u_device_get_supported_graphics_cl, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[512])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_supported_graphics_cl, __pyx_t_5) < (0)) __PYX_ERR(0, 20666, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20689
 * 
 * 
 * cpdef tuple device_get_auto_boosted_clocks_enabled(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the current state of Auto Boosted clocks on a device and store it in ``isEnabled``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_135device_get_auto_boosted_clocks_enabled, 0, __pyx_mstate_global->__pyx_n_u_device_get_auto_boosted_clocks_e, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[513])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_auto_boosted_clocks_e, __pyx_t_5) < (0)) __PYX_ERR(0, 20689, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20711
 * 
 * 
 * cpdef unsigned int device_get_fan_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed of the device's fan.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_137device_get_fan_speed, 0, __pyx_mstate_global->__pyx_n_u_device_get_fan_speed, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[514])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20711, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_fan_speed, __pyx_t_5) < (0)) __PYX_ERR(0, 20711, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20729
 * 
 * 
 * cpdef unsigned int device_get_fan_speed_v2(intptr_t device, unsigned int fan) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed of the device's specified fan.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_139device_get_fan_speed_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_fan_speed_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[515])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20729, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_fan_speed_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 20729, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20748
 * 
 * 
 * cpdef object device_get_fan_speed_rpm(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_141device_get_fan_speed_rpm, 0, __pyx_mstate_global->__pyx_n_u_device_get_fan_speed_rpm, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[516])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20748, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_fan_speed_rpm, __pyx_t_5) < (0)) __PYX_ERR(0, 20748, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20768
 * 
 * 
 * cpdef unsigned int device_get_target_fan_speed(intptr_t device, unsigned int fan) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the intended target speed of the device's specified fan.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_143device_get_target_fan_speed, 0, __pyx_mstate_global->__pyx_n_u_device_get_target_fan_speed, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[517])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_target_fan_speed, __pyx_t_5) < (0)) __PYX_ERR(0, 20768, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20787
 * 
 * 
 * cpdef tuple device_get_min_max_fan_speed(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the min and max fan speed that user can set for the GPU fan.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_145device_get_min_max_fan_speed, 0, __pyx_mstate_global->__pyx_n_u_device_get_min_max_fan_speed, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[518])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_min_max_fan_speed, __pyx_t_5) < (0)) __PYX_ERR(0, 20787, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20809
 * 
 * 
 * cpdef unsigned int device_get_fan_control_policy_v2(intptr_t device, unsigned int fan) except *:             # <<<<<<<<<<<<<<
 *     """Gets current fan control policy.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_147device_get_fan_control_policy_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_fan_control_policy_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[519])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20809, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_fan_control_policy_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 20809, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20828
 * 
 * 
 * cpdef unsigned int device_get_num_fans(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of fans on the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_149device_get_num_fans, 0, __pyx_mstate_global->__pyx_n_u_device_get_num_fans, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[520])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20828, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_num_fans, __pyx_t_5) < (0)) __PYX_ERR(0, 20828, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20846
 * 
 * 
 * cpdef object device_get_cooler_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the cooler's information. Returns a cooler's control signal characteristics. The possible types are restricted, Variable and Toggle. See ``nvmlCoolerControl_t`` for details on available signal types. Returns objects that cooler cools. Targets may be GPU, Memory, Power Supply or All of these. See ``nvmlCoolerTarget_t`` for details on available targets.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_151device_get_cooler_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_cooler_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[521])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20846, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_cooler_info, __pyx_t_5) < (0)) __PYX_ERR(0, 20846, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20866
 * 
 * 
 * cpdef unsigned int device_get_temperature_threshold(intptr_t device, int threshold_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_153device_get_temperature_threshold, 0, __pyx_mstate_global->__pyx_n_u_device_get_temperature_threshold, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[522])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20866, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_temperature_threshold, __pyx_t_5) < (0)) __PYX_ERR(0, 20866, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20885
 * 
 * 
 * cpdef object device_get_margin_temperature(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the thermal margin temperature (distance to nearest slowdown threshold).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_155device_get_margin_temperature, 0, __pyx_mstate_global->__pyx_n_u_device_get_margin_temperature, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[523])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20885, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_margin_temperature, __pyx_t_5) < (0)) __PYX_ERR(0, 20885, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20905
 * 
 * 
 * cpdef object device_get_thermal_settings(intptr_t device, unsigned int sensor_ind_ex):             # <<<<<<<<<<<<<<
 *     """Used to execute a list of thermal system instructions.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_157device_get_thermal_settings, 0, __pyx_mstate_global->__pyx_n_u_device_get_thermal_settings, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[524])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20905, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_thermal_settings, __pyx_t_5) < (0)) __PYX_ERR(0, 20905, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20925
 * 
 * 
 * cpdef int device_get_performance_state(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the current performance state for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_159device_get_performance_state, 0, __pyx_mstate_global->__pyx_n_u_device_get_performance_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[525])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20925, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_performance_state, __pyx_t_5) < (0)) __PYX_ERR(0, 20925, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20943
 * 
 * 
 * cpdef unsigned long long device_get_current_clocks_event_reasons(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves current clocks event reasons.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_161device_get_current_clocks_event_reasons, 0, __pyx_mstate_global->__pyx_n_u_device_get_current_clocks_event, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[526])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_current_clocks_event, __pyx_t_5) < (0)) __PYX_ERR(0, 20943, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20961
 * 
 * 
 * cpdef unsigned long long device_get_supported_clocks_event_reasons(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves bitmask of supported clocks event reasons that can be returned by ``nvmlDeviceGetCurrentClocksEventReasons``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_163device_get_supported_clocks_event_reasons, 0, __pyx_mstate_global->__pyx_n_u_device_get_supported_clocks_even, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[527])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20961, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_supported_clocks_even, __pyx_t_5) < (0)) __PYX_ERR(0, 20961, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20979
 * 
 * 
 * cpdef int device_get_power_state(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Deprecated: Use ``nvmlDeviceGetPerformanceState``. This function exposes an incorrect generalization.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_165device_get_power_state, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[528])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20979, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_state, __pyx_t_5) < (0)) __PYX_ERR(0, 20979, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":20997
 * 
 * 
 * cpdef object device_get_dynamic_pstates_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve performance monitor samples from the associated subdevice.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_167device_get_dynamic_pstates_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_dynamic_pstates_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[529])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20997, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_dynamic_pstates_info, __pyx_t_5) < (0)) __PYX_ERR(0, 20997, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21016
 * 
 * 
 * cpdef int device_get_mem_clk_vf_offset(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the MemClk (Memory Clock) VF offset value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_169device_get_mem_clk_vf_offset, 0, __pyx_mstate_global->__pyx_n_u_device_get_mem_clk_vf_offset, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[530])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21016, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_mem_clk_vf_offset, __pyx_t_5) < (0)) __PYX_ERR(0, 21016, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21034
 * 
 * 
 * cpdef tuple device_get_min_max_clock_of_p_state(intptr_t device, int type, int pstate):             # <<<<<<<<<<<<<<
 *     """Retrieve min and max clocks of some clock domain for a given PState.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_171device_get_min_max_clock_of_p_state, 0, __pyx_mstate_global->__pyx_n_u_device_get_min_max_clock_of_p_st, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[531])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21034, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_min_max_clock_of_p_st, __pyx_t_5) < (0)) __PYX_ERR(0, 21034, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21058
 * 
 * 
 * cpdef tuple device_get_gpc_clk_min_max_vf_offset(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the GPCCLK min max VF offset value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_173device_get_gpc_clk_min_max_vf_offset, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpc_clk_min_max_vf_of, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[532])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpc_clk_min_max_vf_of, __pyx_t_5) < (0)) __PYX_ERR(0, 21058, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21080
 * 
 * 
 * cpdef tuple device_get_mem_clk_min_max_vf_offset(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the MemClk (Memory Clock) min max VF offset value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_175device_get_mem_clk_min_max_vf_offset, 0, __pyx_mstate_global->__pyx_n_u_device_get_mem_clk_min_max_vf_of, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[533])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21080, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_mem_clk_min_max_vf_of, __pyx_t_5) < (0)) __PYX_ERR(0, 21080, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21102
 * 
 * 
 * cpdef object device_get_clock_offsets(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve min, max and current clock offset of some clock domain for a given PState.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_177device_get_clock_offsets, 0, __pyx_mstate_global->__pyx_n_u_device_get_clock_offsets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[534])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_clock_offsets, __pyx_t_5) < (0)) __PYX_ERR(0, 21102, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21122
 * 
 * 
 * cpdef device_set_clock_offsets(intptr_t device, intptr_t info):             # <<<<<<<<<<<<<<
 *     """Control current clock offset of some clock domain for a given PState.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_179device_set_clock_offsets, 0, __pyx_mstate_global->__pyx_n_u_device_set_clock_offsets, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[535])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_clock_offsets, __pyx_t_5) < (0)) __PYX_ERR(0, 21122, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21136
 * 
 * 
 * cpdef object device_get_performance_modes(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves a performance mode string with all the performance modes defined for this device along with their associated GPU Clock and Memory Clock values. Not all tokens will be reported on all GPUs, and additional tokens may be added in the future. For backwards compatibility we still provide nvclock and memclock; those are the same as nvclockmin and memclockmin.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_181device_get_performance_modes, 0, __pyx_mstate_global->__pyx_n_u_device_get_performance_modes, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[536])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_performance_modes, __pyx_t_5) < (0)) __PYX_ERR(0, 21136, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21156
 * 
 * 
 * cpdef object device_get_current_clock_freqs(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves a string with the associated current GPU Clock and Memory Clock values.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_183device_get_current_clock_freqs, 0, __pyx_mstate_global->__pyx_n_u_device_get_current_clock_freqs, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[537])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21156, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_current_clock_freqs, __pyx_t_5) < (0)) __PYX_ERR(0, 21156, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21176
 * 
 * 
 * cpdef unsigned int device_get_power_management_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the power management limit associated with this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_185device_get_power_management_limit, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_management_limi, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[538])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_management_limi, __pyx_t_5) < (0)) __PYX_ERR(0, 21176, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21194
 * 
 * 
 * cpdef tuple device_get_power_management_limit_constraints(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about possible values of power management limits on this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_187device_get_power_management_limit_constraints, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_management_limi_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[539])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_management_limi_2, __pyx_t_5) < (0)) __PYX_ERR(0, 21194, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21216
 * 
 * 
 * cpdef unsigned int device_get_power_management_default_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves default power management limit on this device, in milliwatts. Default power management limit is a power management limit that the device boots with.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_189device_get_power_management_default_limit, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_management_defa, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[540])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21216, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_management_defa, __pyx_t_5) < (0)) __PYX_ERR(0, 21216, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21234
 * 
 * 
 * cpdef unsigned int device_get_power_usage(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_191device_get_power_usage, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_usage, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[541])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_usage, __pyx_t_5) < (0)) __PYX_ERR(0, 21234, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21252
 * 
 * 
 * cpdef unsigned long long device_get_total_energy_consumption(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_193device_get_total_energy_consumption, 0, __pyx_mstate_global->__pyx_n_u_device_get_total_energy_consumpt, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[542])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21252, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_total_energy_consumpt, __pyx_t_5) < (0)) __PYX_ERR(0, 21252, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21270
 * 
 * 
 * cpdef unsigned int device_get_enforced_power_limit(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the effective power limit that the driver enforces after taking into account all limiters.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_195device_get_enforced_power_limit, 0, __pyx_mstate_global->__pyx_n_u_device_get_enforced_power_limit, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[543])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21270, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_enforced_power_limit, __pyx_t_5) < (0)) __PYX_ERR(0, 21270, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21288
 * 
 * 
 * cpdef tuple device_get_gpu_operation_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_197device_get_gpu_operation_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_operation_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[544])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_operation_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 21288, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21310
 * 
 * 
 * cpdef object device_get_memory_info_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the amount of used, free, reserved and total memory available on the device, in bytes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_199device_get_memory_info_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_memory_info_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[545])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21310, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_memory_info_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 21310, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21330
 * 
 * 
 * cpdef int device_get_compute_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the current compute mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_201device_get_compute_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_compute_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[546])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21330, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_compute_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 21330, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21348
 * 
 * 
 * cpdef tuple device_get_cuda_compute_capability(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the CUDA compute capability of the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_203device_get_cuda_compute_capability, 0, __pyx_mstate_global->__pyx_n_u_device_get_cuda_compute_capabili, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[547])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21348, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_cuda_compute_capabili, __pyx_t_5) < (0)) __PYX_ERR(0, 21348, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21370
 * 
 * 
 * cpdef tuple device_get_dram_encryption_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending DRAM Encryption modes for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_205device_get_dram_encryption_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_dram_encryption_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[548])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21370, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_dram_encryption_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 21370, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21392
 * 
 * 
 * cpdef device_set_dram_encryption_mode(intptr_t device, intptr_t dram_encryption):             # <<<<<<<<<<<<<<
 *     """Set the DRAM Encryption mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_207device_set_dram_encryption_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_dram_encryption_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[549])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21392, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_dram_encryption_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 21392, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21406
 * 
 * 
 * cpdef tuple device_get_ecc_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending ECC modes for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_209device_get_ecc_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_ecc_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[550])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_ecc_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 21406, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21428
 * 
 * 
 * cpdef int device_get_default_ecc_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the default ECC modes for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_211device_get_default_ecc_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_default_ecc_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[551])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21428, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_default_ecc_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 21428, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21446
 * 
 * 
 * cpdef unsigned int device_get_board_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the device boardId from 0-N. Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with :func:`device_get_multi_gpu_board` to decide if they are on the same board as well. The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will always return those values but they will always be different from each other).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_213device_get_board_id, 0, __pyx_mstate_global->__pyx_n_u_device_get_board_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[552])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21446, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_board_id, __pyx_t_5) < (0)) __PYX_ERR(0, 21446, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21464
 * 
 * 
 * cpdef unsigned int device_get_multi_gpu_board(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves whether the device is on a Multi-GPU Board Devices that are on multi-GPU boards will set ``multiGpuBool`` to a non-zero value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_215device_get_multi_gpu_board, 0, __pyx_mstate_global->__pyx_n_u_device_get_multi_gpu_board, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[553])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21464, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_multi_gpu_board, __pyx_t_5) < (0)) __PYX_ERR(0, 21464, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21482
 * 
 * 
 * cpdef unsigned long long device_get_total_ecc_errors(intptr_t device, int error_type, int counter_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the total ECC error counts for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_217device_get_total_ecc_errors, 0, __pyx_mstate_global->__pyx_n_u_device_get_total_ecc_errors, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[554])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21482, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_total_ecc_errors, __pyx_t_5) < (0)) __PYX_ERR(0, 21482, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21502
 * 
 * 
 * cpdef unsigned long long device_get_memory_error_counter(intptr_t device, int error_type, int counter_type, int location_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the requested memory error counter for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_219device_get_memory_error_counter, 0, __pyx_mstate_global->__pyx_n_u_device_get_memory_error_counter, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[555])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21502, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_memory_error_counter, __pyx_t_5) < (0)) __PYX_ERR(0, 21502, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21523
 * 
 * 
 * cpdef object device_get_utilization_rates(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization rates for the device's major subsystems.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_221device_get_utilization_rates, 0, __pyx_mstate_global->__pyx_n_u_device_get_utilization_rates, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[556])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21523, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_utilization_rates, __pyx_t_5) < (0)) __PYX_ERR(0, 21523, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21542
 * 
 * 
 * cpdef tuple device_get_encoder_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the Encoder.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_223device_get_encoder_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_encoder_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[557])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21542, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_encoder_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 21542, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21564
 * 
 * 
 * cpdef unsigned int device_get_encoder_capacity(intptr_t device, int encoder_query_type) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_225device_get_encoder_capacity, 0, __pyx_mstate_global->__pyx_n_u_device_get_encoder_capacity, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[558])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_encoder_capacity, __pyx_t_5) < (0)) __PYX_ERR(0, 21564, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21583
 * 
 * 
 * cpdef tuple device_get_encoder_stats(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current encoder statistics for a given device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_227device_get_encoder_stats, 0, __pyx_mstate_global->__pyx_n_u_device_get_encoder_stats, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[559])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_encoder_stats, __pyx_t_5) < (0)) __PYX_ERR(0, 21583, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21607
 * 
 * 
 * cpdef object device_get_encoder_sessions(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active encoder sessions on a target device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_229device_get_encoder_sessions, 0, __pyx_mstate_global->__pyx_n_u_device_get_encoder_sessions, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[560])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21607, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_encoder_sessions, __pyx_t_5) < (0)) __PYX_ERR(0, 21607, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21629
 * 
 * 
 * cpdef tuple device_get_decoder_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the Decoder.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_231device_get_decoder_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_decoder_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[561])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_decoder_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 21629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21651
 * 
 * 
 * cpdef tuple device_get_jpg_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the JPG.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_233device_get_jpg_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_jpg_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[562])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21651, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_jpg_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 21651, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21673
 * 
 * 
 * cpdef tuple device_get_ofa_utilization(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_235device_get_ofa_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_ofa_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[563])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21673, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_ofa_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 21673, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21695
 * 
 * 
 * cpdef object device_get_fbc_stats(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the active frame buffer capture sessions statistics for a given device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_237device_get_fbc_stats, 0, __pyx_mstate_global->__pyx_n_u_device_get_fbc_stats, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[564])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_fbc_stats, __pyx_t_5) < (0)) __PYX_ERR(0, 21695, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21714
 * 
 * 
 * cpdef object device_get_fbc_sessions(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active frame buffer capture sessions on a target device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_239device_get_fbc_sessions, 0, __pyx_mstate_global->__pyx_n_u_device_get_fbc_sessions, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[565])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_fbc_sessions, __pyx_t_5) < (0)) __PYX_ERR(0, 21714, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21736
 * 
 * 
 * cpdef tuple device_get_driver_model_v2(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the current and pending driver model for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_241device_get_driver_model_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_driver_model_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[566])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21736, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_driver_model_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 21736, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21758
 * 
 * 
 * cpdef str device_get_vbios_version(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get VBIOS version of the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_243device_get_vbios_version, 0, __pyx_mstate_global->__pyx_n_u_device_get_vbios_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[567])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vbios_version, __pyx_t_5) < (0)) __PYX_ERR(0, 21758, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21774
 * 
 * 
 * cpdef object device_get_bridge_chip_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Bridge Chip Information for all the bridge chips on the board.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_245device_get_bridge_chip_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_bridge_chip_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[568])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_bridge_chip_info, __pyx_t_5) < (0)) __PYX_ERR(0, 21774, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21793
 * 
 * 
 * cpdef object device_get_compute_running_processes_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get information about processes with a compute context on a device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_247device_get_compute_running_processes_v3, 0, __pyx_mstate_global->__pyx_n_u_device_get_compute_running_proce, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[569])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21793, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_compute_running_proce, __pyx_t_5) < (0)) __PYX_ERR(0, 21793, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21815
 * 
 * 
 * cpdef object device_get_mps_compute_running_processes_v3(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get information about processes with a Multi-Process Service (MPS) compute context on a device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_249device_get_mps_compute_running_processes_v3, 0, __pyx_mstate_global->__pyx_n_u_device_get_mps_compute_running_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[570])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21815, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_mps_compute_running_p, __pyx_t_5) < (0)) __PYX_ERR(0, 21815, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21837
 * 
 * 
 * cpdef int device_on_same_board(intptr_t device1, intptr_t device2) except? 0:             # <<<<<<<<<<<<<<
 *     """Check if the GPU devices are on the same physical board.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_251device_on_same_board, 0, __pyx_mstate_global->__pyx_n_u_device_on_same_board, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[571])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21837, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_on_same_board, __pyx_t_5) < (0)) __PYX_ERR(0, 21837, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21856
 * 
 * 
 * cpdef int device_get_api_restriction(intptr_t device, int api_type) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the root/admin permissions on the target API. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. If an API is restricted only root users can call that API. See ``nvmlDeviceSetAPIRestriction`` to change current permissions.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_253device_get_api_restriction, 0, __pyx_mstate_global->__pyx_n_u_device_get_api_restriction, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[572])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_api_restriction, __pyx_t_5) < (0)) __PYX_ERR(0, 21856, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21875
 * 
 * 
 * cpdef object device_get_bar1_memory_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Gets Total, Available and Used size of BAR1 memory.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_255device_get_bar1_memory_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_bar1_memory_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[573])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21875, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_bar1_memory_info, __pyx_t_5) < (0)) __PYX_ERR(0, 21875, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21894
 * 
 * 
 * cpdef unsigned int device_get_irq_num(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's interrupt number.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_257device_get_irq_num, 0, __pyx_mstate_global->__pyx_n_u_device_get_irq_num, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[574])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21894, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_irq_num, __pyx_t_5) < (0)) __PYX_ERR(0, 21894, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21912
 * 
 * 
 * cpdef unsigned int device_get_num_gpu_cores(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's core count.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_259device_get_num_gpu_cores, 0, __pyx_mstate_global->__pyx_n_u_device_get_num_gpu_cores, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[575])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21912, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_num_gpu_cores, __pyx_t_5) < (0)) __PYX_ERR(0, 21912, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21930
 * 
 * 
 * cpdef unsigned int device_get_power_source(intptr_t device) except *:             # <<<<<<<<<<<<<<
 *     """Gets the devices power source.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_261device_get_power_source, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_source, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[576])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21930, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_source, __pyx_t_5) < (0)) __PYX_ERR(0, 21930, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21948
 * 
 * 
 * cpdef unsigned int device_get_memory_bus_width(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's memory bus width.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_263device_get_memory_bus_width, 0, __pyx_mstate_global->__pyx_n_u_device_get_memory_bus_width, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[577])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21948, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_memory_bus_width, __pyx_t_5) < (0)) __PYX_ERR(0, 21948, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21966
 * 
 * 
 * cpdef unsigned int device_get_pcie_link_max_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's PCIE Max Link speed in MBPS.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_265device_get_pcie_link_max_speed, 0, __pyx_mstate_global->__pyx_n_u_device_get_pcie_link_max_speed, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[578])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21966, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pcie_link_max_speed, __pyx_t_5) < (0)) __PYX_ERR(0, 21966, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":21984
 * 
 * 
 * cpdef unsigned int device_get_pcie_speed(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's PCIe Link speed in Mbps.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_267device_get_pcie_speed, 0, __pyx_mstate_global->__pyx_n_u_device_get_pcie_speed, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[579])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pcie_speed, __pyx_t_5) < (0)) __PYX_ERR(0, 21984, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22002
 * 
 * 
 * cpdef unsigned int device_get_adaptive_clock_info_status(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Gets the device's Adaptive Clock status.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_269device_get_adaptive_clock_info_status, 0, __pyx_mstate_global->__pyx_n_u_device_get_adaptive_clock_info_s, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[580])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22002, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_adaptive_clock_info_s, __pyx_t_5) < (0)) __PYX_ERR(0, 22002, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22020
 * 
 * 
 * cpdef unsigned int device_get_bus_type(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the type of the GPU Bus (PCIe, PCI, ...).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_271device_get_bus_type, 0, __pyx_mstate_global->__pyx_n_u_device_get_bus_type, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[581])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22020, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_bus_type, __pyx_t_5) < (0)) __PYX_ERR(0, 22020, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22038
 * 
 * 
 * cpdef object device_get_gpu_fabric_info_v(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around nvmlDeviceGetGpuFabricInfo that accepts a versioned ``nvmlGpuFabricInfo_v2_t`` or later output structure.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_273device_get_gpu_fabric_info_v, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_fabric_info_v, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[582])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22038, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_fabric_info_v, __pyx_t_5) < (0)) __PYX_ERR(0, 22038, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22058
 * 
 * 
 * cpdef object system_get_conf_compute_capabilities():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System capabilities.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_275system_get_conf_compute_capabilities, 0, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_capabili, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[583])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_capabili, __pyx_t_5) < (0)) __PYX_ERR(0, 22058, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22074
 * 
 * 
 * cpdef object system_get_conf_compute_state():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System State.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_277system_get_conf_compute_state, 0, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[584])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22074, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_state, __pyx_t_5) < (0)) __PYX_ERR(0, 22074, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22090
 * 
 * 
 * cpdef object device_get_conf_compute_mem_size_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing Protected and Unprotected Memory Sizes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_279device_get_conf_compute_mem_size_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_mem_size, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[585])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22090, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_mem_size, __pyx_t_5) < (0)) __PYX_ERR(0, 22090, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22109
 * 
 * 
 * cpdef unsigned int system_get_conf_compute_gpus_ready_state() except? 0:             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPUs ready state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_281system_get_conf_compute_gpus_ready_state, 0, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_gpus_rea, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[586])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_gpus_rea, __pyx_t_5) < (0)) __PYX_ERR(0, 22109, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22124
 * 
 * 
 * cpdef object device_get_conf_compute_protected_memory_usage(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing protected memory usage.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_283device_get_conf_compute_protected_memory_usage, 0, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_protecte, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[587])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22124, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_protecte, __pyx_t_5) < (0)) __PYX_ERR(0, 22124, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22143
 * 
 * 
 * cpdef object device_get_conf_compute_gpu_certificate(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPU certificate details.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_285device_get_conf_compute_gpu_certificate, 0, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_gpu_cert, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[588])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22143, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_gpu_cert, __pyx_t_5) < (0)) __PYX_ERR(0, 22143, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22162
 * 
 * 
 * cpdef object device_get_conf_compute_gpu_attestation_report(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get Conf Computing GPU attestation report.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_287device_get_conf_compute_gpu_attestation_report, 0, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_gpu_atte, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[589])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22162, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_conf_compute_gpu_atte, __pyx_t_5) < (0)) __PYX_ERR(0, 22162, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22181
 * 
 * 
 * cpdef object system_get_conf_compute_key_rotation_threshold_info():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing key rotation threshold detail.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_289system_get_conf_compute_key_rotation_threshold_info, 0, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_key_rota, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[590])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_key_rota, __pyx_t_5) < (0)) __PYX_ERR(0, 22181, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22198
 * 
 * 
 * cpdef device_set_conf_compute_unprotected_mem_size(intptr_t device, unsigned long long size_ki_b):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing Unprotected Memory Size.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_291device_set_conf_compute_unprotected_mem_size, 0, __pyx_mstate_global->__pyx_n_u_device_set_conf_compute_unprotec, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[591])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22198, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_conf_compute_unprotec, __pyx_t_5) < (0)) __PYX_ERR(0, 22198, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22212
 * 
 * 
 * cpdef system_set_conf_compute_gpus_ready_state(unsigned int is_accepting_work):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing GPUs ready state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_293system_set_conf_compute_gpus_ready_state, 0, __pyx_mstate_global->__pyx_n_u_system_set_conf_compute_gpus_rea, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[592])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22212, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_set_conf_compute_gpus_rea, __pyx_t_5) < (0)) __PYX_ERR(0, 22212, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22225
 * 
 * 
 * cpdef system_set_conf_compute_key_rotation_threshold_info(intptr_t p_key_rotation_thr_info):             # <<<<<<<<<<<<<<
 *     """Set Conf Computing key rotation threshold.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_295system_set_conf_compute_key_rotation_threshold_info, 0, __pyx_mstate_global->__pyx_n_u_system_set_conf_compute_key_rota, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[593])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22225, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_set_conf_compute_key_rota, __pyx_t_5) < (0)) __PYX_ERR(0, 22225, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22238
 * 
 * 
 * cpdef object system_get_conf_compute_settings():             # <<<<<<<<<<<<<<
 *     """Get Conf Computing System Settings.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_297system_get_conf_compute_settings, 0, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_settings, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[594])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_conf_compute_settings, __pyx_t_5) < (0)) __PYX_ERR(0, 22238, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22255
 * 
 * 
 * cpdef char device_get_gsp_firmware_version(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve GSP firmware version.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_299device_get_gsp_firmware_version, 0, __pyx_mstate_global->__pyx_n_u_device_get_gsp_firmware_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[595])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gsp_firmware_version, __pyx_t_5) < (0)) __PYX_ERR(0, 22255, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22273
 * 
 * 
 * cpdef tuple device_get_gsp_firmware_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve GSP firmware mode.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_301device_get_gsp_firmware_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_gsp_firmware_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[596])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22273, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gsp_firmware_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22273, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22295
 * 
 * 
 * cpdef object device_get_sram_ecc_error_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get SRAM ECC error status of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_303device_get_sram_ecc_error_status, 0, __pyx_mstate_global->__pyx_n_u_device_get_sram_ecc_error_status, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[597])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22295, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_sram_ecc_error_status, __pyx_t_5) < (0)) __PYX_ERR(0, 22295, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22315
 * 
 * 
 * cpdef int device_get_accounting_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries the state of per process accounting mode.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_305device_get_accounting_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_accounting_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[598])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22315, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_accounting_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22315, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22333
 * 
 * 
 * cpdef object device_get_accounting_stats(intptr_t device, unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Queries process's accounting stats.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_307device_get_accounting_stats, 0, __pyx_mstate_global->__pyx_n_u_device_get_accounting_stats, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[599])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22333, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_accounting_stats, __pyx_t_5) < (0)) __PYX_ERR(0, 22333, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22353
 * 
 * 
 * cpdef object device_get_accounting_pids(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Queries list of processes that can be queried for accounting stats. The list of processes returned can be in running or terminated state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_309device_get_accounting_pids, 0, __pyx_mstate_global->__pyx_n_u_device_get_accounting_pids, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[600])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22353, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_accounting_pids, __pyx_t_5) < (0)) __PYX_ERR(0, 22353, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22375
 * 
 * 
 * cpdef unsigned int device_get_accounting_buffer_size(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Returns the number of processes that the circular buffer with accounting pids can hold.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_311device_get_accounting_buffer_size, 0, __pyx_mstate_global->__pyx_n_u_device_get_accounting_buffer_siz, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[601])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22375, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_accounting_buffer_siz, __pyx_t_5) < (0)) __PYX_ERR(0, 22375, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22393
 * 
 * 
 * cpdef object device_get_retired_pages(intptr_t device, int cause):             # <<<<<<<<<<<<<<
 *     """Returns the list of retired pages by source, including pages that are pending retirement The address information provided from this API is the hardware address of the page that was retired. Note that this does not match the virtual address used in CUDA, but will match the address information in Xid 63.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_313device_get_retired_pages, 0, __pyx_mstate_global->__pyx_n_u_device_get_retired_pages, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[602])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22393, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_retired_pages, __pyx_t_5) < (0)) __PYX_ERR(0, 22393, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22416
 * 
 * 
 * cpdef int device_get_retired_pages_pending_status(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Check if any pages are pending retirement and need a reboot to fully retire.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_315device_get_retired_pages_pending_status, 0, __pyx_mstate_global->__pyx_n_u_device_get_retired_pages_pending, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[603])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_retired_pages_pending, __pyx_t_5) < (0)) __PYX_ERR(0, 22416, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22434
 * 
 * 
 * cpdef tuple device_get_remapped_rows(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get number of remapped rows. The number of rows reported will be based on the cause of the remapping. isPending indicates whether or not there are pending remappings. A reset will be required to actually remap the row. failureOccurred will be set if a row remapping ever failed in the past. A pending remapping won't affect future work on the GPU since error-containment and dynamic page blacklisting will take care of that.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_317device_get_remapped_rows, 0, __pyx_mstate_global->__pyx_n_u_device_get_remapped_rows, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[604])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22434, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_remapped_rows, __pyx_t_5) < (0)) __PYX_ERR(0, 22434, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22460
 * 
 * 
 * cpdef object device_get_row_remapper_histogram(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the row remapper histogram. Returns the remap availability for each bank on the GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_319device_get_row_remapper_histogram, 0, __pyx_mstate_global->__pyx_n_u_device_get_row_remapper_histogra, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[605])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22460, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_row_remapper_histogra, __pyx_t_5) < (0)) __PYX_ERR(0, 22460, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22479
 * 
 * 
 * cpdef unsigned int device_get_architecture(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get architecture for device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_321device_get_architecture, 0, __pyx_mstate_global->__pyx_n_u_device_get_architecture, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[606])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_architecture, __pyx_t_5) < (0)) __PYX_ERR(0, 22479, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22497
 * 
 * 
 * cpdef object device_get_clk_mon_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the frequency monitor fault status for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_323device_get_clk_mon_status, 0, __pyx_mstate_global->__pyx_n_u_device_get_clk_mon_status, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[607])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_clk_mon_status, __pyx_t_5) < (0)) __PYX_ERR(0, 22497, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22516
 * 
 * 
 * cpdef object device_get_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves the current utilization and process ID.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_325device_get_process_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_process_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[608])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_process_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 22516, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22539
 * 
 * 
 * cpdef object device_get_platform_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get platform information of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_327device_get_platform_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_platform_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[609])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22539, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_platform_info, __pyx_t_5) < (0)) __PYX_ERR(0, 22539, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22559
 * 
 * 
 * cpdef unit_set_led_state(intptr_t unit, int color):             # <<<<<<<<<<<<<<
 *     """Set the LED state for the unit. The LED can be either green (0) or amber (1).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_329unit_set_led_state, 0, __pyx_mstate_global->__pyx_n_u_unit_set_led_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[610])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22559, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_set_led_state, __pyx_t_5) < (0)) __PYX_ERR(0, 22559, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22573
 * 
 * 
 * cpdef device_set_persistence_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Set the persistence mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_331device_set_persistence_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_persistence_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[611])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_persistence_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22573, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22587
 * 
 * 
 * cpdef device_set_compute_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Set the compute mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_333device_set_compute_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_compute_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[612])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_compute_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22587, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22601
 * 
 * 
 * cpdef device_set_ecc_mode(intptr_t device, int ecc):             # <<<<<<<<<<<<<<
 *     """Set the ECC mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_335device_set_ecc_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_ecc_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[613])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22601, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_ecc_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22601, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22615
 * 
 * 
 * cpdef device_clear_ecc_error_counts(intptr_t device, int counter_type):             # <<<<<<<<<<<<<<
 *     """Clear the ECC error and other memory error counts for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_337device_clear_ecc_error_counts, 0, __pyx_mstate_global->__pyx_n_u_device_clear_ecc_error_counts, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[614])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_clear_ecc_error_counts, __pyx_t_5) < (0)) __PYX_ERR(0, 22615, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22629
 * 
 * 
 * cpdef device_set_driver_model(intptr_t device, int driver_model, unsigned int flags):             # <<<<<<<<<<<<<<
 *     """Set the driver model for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_339device_set_driver_model, 0, __pyx_mstate_global->__pyx_n_u_device_set_driver_model, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[615])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22629, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_driver_model, __pyx_t_5) < (0)) __PYX_ERR(0, 22629, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22644
 * 
 * 
 * cpdef device_set_gpu_locked_clocks(intptr_t device, unsigned int min_gpu_clock_m_hz, unsigned int max_gpu_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Set clocks that device will lock to.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_341device_set_gpu_locked_clocks, 0, __pyx_mstate_global->__pyx_n_u_device_set_gpu_locked_clocks, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[616])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22644, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_gpu_locked_clocks, __pyx_t_5) < (0)) __PYX_ERR(0, 22644, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22659
 * 
 * 
 * cpdef device_reset_gpu_locked_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Resets the gpu clock to the default value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_343device_reset_gpu_locked_clocks, 0, __pyx_mstate_global->__pyx_n_u_device_reset_gpu_locked_clocks, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[617])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22659, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_reset_gpu_locked_clocks, __pyx_t_5) < (0)) __PYX_ERR(0, 22659, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22672
 * 
 * 
 * cpdef device_set_memory_locked_clocks(intptr_t device, unsigned int min_mem_clock_m_hz, unsigned int max_mem_clock_m_hz):             # <<<<<<<<<<<<<<
 *     """Set memory clocks that device will lock to.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_345device_set_memory_locked_clocks, 0, __pyx_mstate_global->__pyx_n_u_device_set_memory_locked_clocks, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[618])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22672, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_memory_locked_clocks, __pyx_t_5) < (0)) __PYX_ERR(0, 22672, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22687
 * 
 * 
 * cpdef device_reset_memory_locked_clocks(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Resets the memory clock to the default value.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_347device_reset_memory_locked_clocks, 0, __pyx_mstate_global->__pyx_n_u_device_reset_memory_locked_clock, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[619])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_reset_memory_locked_clock, __pyx_t_5) < (0)) __PYX_ERR(0, 22687, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22700
 * 
 * 
 * cpdef device_set_auto_boosted_clocks_enabled(intptr_t device, int enabled):             # <<<<<<<<<<<<<<
 *     """Try to set the current state of Auto Boosted clocks on a device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_349device_set_auto_boosted_clocks_enabled, 0, __pyx_mstate_global->__pyx_n_u_device_set_auto_boosted_clocks_e, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[620])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_auto_boosted_clocks_e, __pyx_t_5) < (0)) __PYX_ERR(0, 22700, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22714
 * 
 * 
 * cpdef device_set_default_auto_boosted_clocks_enabled(intptr_t device, int enabled, unsigned int flags):             # <<<<<<<<<<<<<<
 *     """Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will return to when no compute running processes (e.g. CUDA application which have an active context) are running.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_351device_set_default_auto_boosted_clocks_enabled, 0, __pyx_mstate_global->__pyx_n_u_device_set_default_auto_boosted, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[621])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_default_auto_boosted, __pyx_t_5) < (0)) __PYX_ERR(0, 22714, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22729
 * 
 * 
 * cpdef device_set_default_fan_speed_v2(intptr_t device, unsigned int fan):             # <<<<<<<<<<<<<<
 *     """Sets the speed of the fan control policy to default.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_353device_set_default_fan_speed_v2, 0, __pyx_mstate_global->__pyx_n_u_device_set_default_fan_speed_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[622])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22729, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_default_fan_speed_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 22729, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22743
 * 
 * 
 * cpdef device_set_fan_control_policy(intptr_t device, unsigned int fan, unsigned int policy):             # <<<<<<<<<<<<<<
 *     """Sets current fan control policy.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_355device_set_fan_control_policy, 0, __pyx_mstate_global->__pyx_n_u_device_set_fan_control_policy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[623])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22743, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_fan_control_policy, __pyx_t_5) < (0)) __PYX_ERR(0, 22743, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22758
 * 
 * 
 * cpdef device_set_temperature_threshold(intptr_t device, int threshold_type, intptr_t temp):             # <<<<<<<<<<<<<<
 *     """Sets the temperature threshold for the GPU with the specified threshold type in degrees C.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_357device_set_temperature_threshold, 0, __pyx_mstate_global->__pyx_n_u_device_set_temperature_threshold, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[624])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22758, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_temperature_threshold, __pyx_t_5) < (0)) __PYX_ERR(0, 22758, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22773
 * 
 * 
 * cpdef device_set_power_management_limit(intptr_t device, unsigned int limit):             # <<<<<<<<<<<<<<
 *     """Set new power limit of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_359device_set_power_management_limit, 0, __pyx_mstate_global->__pyx_n_u_device_set_power_management_limi, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[625])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_power_management_limi, __pyx_t_5) < (0)) __PYX_ERR(0, 22773, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22787
 * 
 * 
 * cpdef device_set_gpu_operation_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Sets new GOM. See ``nvmlGpuOperationMode_t`` for details.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_361device_set_gpu_operation_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_gpu_operation_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[626])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_gpu_operation_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22787, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22801
 * 
 * 
 * cpdef device_set_api_restriction(intptr_t device, int api_type, int is_restricted):             # <<<<<<<<<<<<<<
 *     """Changes the root/admin restructions on certain APIs. See ``nvmlRestrictedAPI_t`` for the list of supported APIs. This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See ``nvmlDeviceGetAPIRestriction`` to query the current restriction settings.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_363device_set_api_restriction, 0, __pyx_mstate_global->__pyx_n_u_device_set_api_restriction, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[627])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_api_restriction, __pyx_t_5) < (0)) __PYX_ERR(0, 22801, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22816
 * 
 * 
 * cpdef device_set_fan_speed_v2(intptr_t device, unsigned int fan, unsigned int speed):             # <<<<<<<<<<<<<<
 *     """Sets the speed of a specified fan.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_365device_set_fan_speed_v2, 0, __pyx_mstate_global->__pyx_n_u_device_set_fan_speed_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[628])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22816, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_fan_speed_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 22816, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22831
 * 
 * 
 * cpdef device_set_accounting_mode(intptr_t device, int mode):             # <<<<<<<<<<<<<<
 *     """Enables or disables per process accounting.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_367device_set_accounting_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_accounting_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[629])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22831, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_accounting_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 22831, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22845
 * 
 * 
 * cpdef device_clear_accounting_pids(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Clears accounting information about all processes that have already terminated.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_369device_clear_accounting_pids, 0, __pyx_mstate_global->__pyx_n_u_device_clear_accounting_pids, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[630])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22845, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_clear_accounting_pids, __pyx_t_5) < (0)) __PYX_ERR(0, 22845, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22858
 * 
 * 
 * cpdef device_set_power_management_limit_v2(intptr_t device, intptr_t power_value):             # <<<<<<<<<<<<<<
 *     """Set new power limit of this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_371device_set_power_management_limit_v2, 0, __pyx_mstate_global->__pyx_n_u_device_set_power_management_limi_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[631])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22858, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_power_management_limi_2, __pyx_t_5) < (0)) __PYX_ERR(0, 22858, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22872
 * 
 * 
 * cpdef int device_get_nvlink_state(intptr_t device, unsigned int link) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieves the state of the device's NvLink for the link specified.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_373device_get_nvlink_state, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[632])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22872, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_state, __pyx_t_5) < (0)) __PYX_ERR(0, 22872, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22891
 * 
 * 
 * cpdef unsigned int device_get_nvlink_version(intptr_t device, unsigned int link) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the version of the device's NvLink for the link specified.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_375device_get_nvlink_version, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[633])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_version, __pyx_t_5) < (0)) __PYX_ERR(0, 22891, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22910
 * 
 * 
 * cpdef unsigned int device_get_nvlink_capability(intptr_t device, unsigned int link, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the requested capability from the device's NvLink for the link specified Please refer to the ``nvmlNvLinkCapability_t`` structure for the specific caps that can be queried The return value should be treated as a boolean.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_377device_get_nvlink_capability, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_capability, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[634])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22910, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_capability, __pyx_t_5) < (0)) __PYX_ERR(0, 22910, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22930
 * 
 * 
 * cpdef object device_get_nvlink_remote_pci_info_v2(intptr_t device, unsigned int link):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI information for the remote node on a NvLink link Note: pciSubSystemId is not filled in this function and is indeterminate.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_379device_get_nvlink_remote_pci_info_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_remote_pci_inf, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[635])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22930, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_remote_pci_inf, __pyx_t_5) < (0)) __PYX_ERR(0, 22930, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22950
 * 
 * 
 * cpdef unsigned long long device_get_nvlink_error_counter(intptr_t device, unsigned int link, int counter) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the specified error counter value Please refer to ``nvmlNvLinkErrorCounter_t`` for error counters that are available.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_381device_get_nvlink_error_counter, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_error_counter, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[636])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22950, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_error_counter, __pyx_t_5) < (0)) __PYX_ERR(0, 22950, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22970
 * 
 * 
 * cpdef device_reset_nvlink_error_counters(intptr_t device, unsigned int link):             # <<<<<<<<<<<<<<
 *     """Resets all error counters to zero Please refer to ``nvmlNvLinkErrorCounter_t`` for the list of error counters that are reset.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_383device_reset_nvlink_error_counters, 0, __pyx_mstate_global->__pyx_n_u_device_reset_nvlink_error_counte, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[637])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22970, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_reset_nvlink_error_counte, __pyx_t_5) < (0)) __PYX_ERR(0, 22970, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":22984
 * 
 * 
 * cpdef int device_get_nvlink_remote_device_type(intptr_t device, unsigned int link) except? -1:             # <<<<<<<<<<<<<<
 *     """Get the NVLink device type of the remote device connected over the given link.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_385device_get_nvlink_remote_device_type, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_remote_device, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[638])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_remote_device, __pyx_t_5) < (0)) __PYX_ERR(0, 22984, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23003
 * 
 * 
 * cpdef device_set_nvlink_device_low_power_threshold(intptr_t device, intptr_t info):             # <<<<<<<<<<<<<<
 *     """Set NvLink Low Power Threshold for device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_387device_set_nvlink_device_low_power_threshold, 0, __pyx_mstate_global->__pyx_n_u_device_set_nvlink_device_low_pow, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[639])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23003, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_nvlink_device_low_pow, __pyx_t_5) < (0)) __PYX_ERR(0, 23003, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23017
 * 
 * 
 * cpdef system_set_nvlink_bw_mode(unsigned int nvlink_bw_mode):             # <<<<<<<<<<<<<<
 *     """Set the global nvlink bandwidth mode.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_389system_set_nvlink_bw_mode, 0, __pyx_mstate_global->__pyx_n_u_system_set_nvlink_bw_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[640])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23017, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_set_nvlink_bw_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23017, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23030
 * 
 * 
 * cpdef unsigned int system_get_nvlink_bw_mode() except? 0:             # <<<<<<<<<<<<<<
 *     """Get the global nvlink bandwidth mode.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_391system_get_nvlink_bw_mode, 0, __pyx_mstate_global->__pyx_n_u_system_get_nvlink_bw_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[641])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_nvlink_bw_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23030, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23045
 * 
 * 
 * cpdef object device_get_nvlink_supported_bw_modes(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the supported NvLink Reduced Bandwidth Modes of the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_393device_get_nvlink_supported_bw_modes, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_supported_bw_m, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[642])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23045, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_supported_bw_m, __pyx_t_5) < (0)) __PYX_ERR(0, 23045, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23065
 * 
 * 
 * cpdef object device_get_nvlink_bw_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the NvLink Reduced Bandwidth Mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_395device_get_nvlink_bw_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_bw_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[643])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_bw_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23065, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23085
 * 
 * 
 * cpdef device_set_nvlink_bw_mode(intptr_t device, intptr_t set_bw_mode):             # <<<<<<<<<<<<<<
 *     """Set the NvLink Reduced Bandwidth Mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_397device_set_nvlink_bw_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_nvlink_bw_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[644])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23085, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_nvlink_bw_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23085, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23100
 * 
 * 
 * cpdef intptr_t event_set_create() except? 0:             # <<<<<<<<<<<<<<
 *     """Create an empty set of events. Event set should be freed by ``nvmlEventSetFree``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_399event_set_create, 0, __pyx_mstate_global->__pyx_n_u_event_set_create, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[645])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23100, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_event_set_create, __pyx_t_5) < (0)) __PYX_ERR(0, 23100, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23115
 * 
 * 
 * cpdef device_register_events(intptr_t device, unsigned long long event_types, intptr_t set):             # <<<<<<<<<<<<<<
 *     """Starts recording of events on a specified devices and add the events to specified ``nvmlEventSet_t``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_401device_register_events, 0, __pyx_mstate_global->__pyx_n_u_device_register_events, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[646])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_register_events, __pyx_t_5) < (0)) __PYX_ERR(0, 23115, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23130
 * 
 * 
 * cpdef unsigned long long device_get_supported_event_types(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Returns information about events supported on device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_403device_get_supported_event_types, 0, __pyx_mstate_global->__pyx_n_u_device_get_supported_event_types, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[647])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23130, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_supported_event_types, __pyx_t_5) < (0)) __PYX_ERR(0, 23130, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23148
 * 
 * 
 * cpdef object event_set_wait_v2(intptr_t set, unsigned int timeoutms):             # <<<<<<<<<<<<<<
 *     """Waits on events and delivers events.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_405event_set_wait_v2, 0, __pyx_mstate_global->__pyx_n_u_event_set_wait_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[648])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_event_set_wait_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 23148, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23168
 * 
 * 
 * cpdef event_set_free(intptr_t set):             # <<<<<<<<<<<<<<
 *     """Releases events in the set.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_407event_set_free, 0, __pyx_mstate_global->__pyx_n_u_event_set_free, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[649])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23168, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_event_set_free, __pyx_t_5) < (0)) __PYX_ERR(0, 23168, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23181
 * 
 * 
 * cpdef system_event_set_create(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Create an empty set of system events. Event set should be freed by ``nvmlSystemEventSetFree``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_409system_event_set_create, 0, __pyx_mstate_global->__pyx_n_u_system_event_set_create, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[650])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23181, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_event_set_create, __pyx_t_5) < (0)) __PYX_ERR(0, 23181, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23194
 * 
 * 
 * cpdef system_event_set_free(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Releases system event set.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_411system_event_set_free, 0, __pyx_mstate_global->__pyx_n_u_system_event_set_free, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[651])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23194, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_event_set_free, __pyx_t_5) < (0)) __PYX_ERR(0, 23194, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23207
 * 
 * 
 * cpdef system_register_events(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Starts recording of events on system and add the events to specified ``nvmlSystemEventSet_t``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_413system_register_events, 0, __pyx_mstate_global->__pyx_n_u_system_register_events, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[652])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23207, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_register_events, __pyx_t_5) < (0)) __PYX_ERR(0, 23207, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23220
 * 
 * 
 * cpdef system_event_set_wait(intptr_t request):             # <<<<<<<<<<<<<<
 *     """Waits on system events and delivers events.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_415system_event_set_wait, 0, __pyx_mstate_global->__pyx_n_u_system_event_set_wait, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[653])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23220, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_event_set_wait, __pyx_t_5) < (0)) __PYX_ERR(0, 23220, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23233
 * 
 * 
 * cpdef device_modify_drain_state(intptr_t pci_info, int new_state):             # <<<<<<<<<<<<<<
 *     """Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before this call is made. Must be called as administrator. For Linux only.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_417device_modify_drain_state, 0, __pyx_mstate_global->__pyx_n_u_device_modify_drain_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[654])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_modify_drain_state, __pyx_t_5) < (0)) __PYX_ERR(0, 23233, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23247
 * 
 * 
 * cpdef int device_query_drain_state(intptr_t pci_info) except? -1:             # <<<<<<<<<<<<<<
 *     """Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining state. For Linux only.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_419device_query_drain_state, 0, __pyx_mstate_global->__pyx_n_u_device_query_drain_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[655])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23247, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_query_drain_state, __pyx_t_5) < (0)) __PYX_ERR(0, 23247, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23265
 * 
 * 
 * cpdef device_remove_gpu_v2(intptr_t pci_info, int gpu_state, int link_state):             # <<<<<<<<<<<<<<
 *     """This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver as long as no other processes are attached. If other processes are attached, this call will return NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the only situation where a process can still be attached after :func:`device_modify_drain_state` is called to initiate the draining state is if that process was using, and is still using, a GPU before the call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled prior to this call.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_421device_remove_gpu_v2, 0, __pyx_mstate_global->__pyx_n_u_device_remove_gpu_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[656])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23265, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_remove_gpu_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 23265, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23280
 * 
 * 
 * cpdef device_discover_gpus(intptr_t pci_info):             # <<<<<<<<<<<<<<
 *     """Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_423device_discover_gpus, 0, __pyx_mstate_global->__pyx_n_u_device_discover_gpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[657])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23280, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_discover_gpus, __pyx_t_5) < (0)) __PYX_ERR(0, 23280, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23293
 * 
 * 
 * cpdef int device_get_virtualization_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """This method is used to get the virtualization mode corresponding to the GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_425device_get_virtualization_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_virtualization_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[658])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23293, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_virtualization_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23293, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23311
 * 
 * 
 * cpdef int device_get_host_vgpu_mode(intptr_t device) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries if SR-IOV host operation is supported on a vGPU supported device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_427device_get_host_vgpu_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_host_vgpu_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[659])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23311, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_host_vgpu_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23311, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23329
 * 
 * 
 * cpdef device_set_virtualization_mode(intptr_t device, int virtual_mode):             # <<<<<<<<<<<<<<
 *     """This method is used to set the virtualization mode corresponding to the GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_429device_set_virtualization_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_virtualization_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[660])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_virtualization_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23329, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23343
 * 
 * 
 * cpdef object device_get_vgpu_heterogeneous_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the vGPU heterogeneous mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_431device_get_vgpu_heterogeneous_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_heterogeneous_mo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[661])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23343, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_heterogeneous_mo, __pyx_t_5) < (0)) __PYX_ERR(0, 23343, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23363
 * 
 * 
 * cpdef device_set_vgpu_heterogeneous_mode(intptr_t device, intptr_t p_heterogeneous_mode):             # <<<<<<<<<<<<<<
 *     """Enable or disable vGPU heterogeneous mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_433device_set_vgpu_heterogeneous_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_vgpu_heterogeneous_mo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[662])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23363, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_vgpu_heterogeneous_mo, __pyx_t_5) < (0)) __PYX_ERR(0, 23363, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23377
 * 
 * 
 * cpdef object vgpu_instance_get_placement_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the placement ID of active vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_435vgpu_instance_get_placement_id, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_placement_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[663])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23377, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_placement_id, __pyx_t_5) < (0)) __PYX_ERR(0, 23377, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23397
 * 
 * 
 * cpdef object device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Query the supported vGPU placement ID of the vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_437device_get_vgpu_type_supported_placements, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_type_supported_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[664])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23397, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_type_supported_p, __pyx_t_5) < (0)) __PYX_ERR(0, 23397, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23417
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_gsp_heap_size(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static GSP heap size of the vGPU type in bytes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_439vgpu_type_get_gsp_heap_size, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_gsp_heap_size, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[665])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23417, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_gsp_heap_size, __pyx_t_5) < (0)) __PYX_ERR(0, 23417, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23435
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_fb_reservation(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static framebuffer reservation of the vGPU type in bytes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_441vgpu_type_get_fb_reservation, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_fb_reservation, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[666])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23435, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_fb_reservation, __pyx_t_5) < (0)) __PYX_ERR(0, 23435, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23453
 * 
 * 
 * cpdef object vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the currently used runtime state size of the vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_443vgpu_instance_get_runtime_state_size, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_runtime_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[667])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_runtime_state, __pyx_t_5) < (0)) __PYX_ERR(0, 23453, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23473
 * 
 * 
 * cpdef device_set_vgpu_capabilities(intptr_t device, int capability, int state):             # <<<<<<<<<<<<<<
 *     """Set the desirable vGPU capability of a device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_445device_set_vgpu_capabilities, 0, __pyx_mstate_global->__pyx_n_u_device_set_vgpu_capabilities, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[668])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23473, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_vgpu_capabilities, __pyx_t_5) < (0)) __PYX_ERR(0, 23473, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23488
 * 
 * 
 * cpdef object device_get_grid_licensable_features_v4(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU Software licensable features.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_447device_get_grid_licensable_features_v4, 0, __pyx_mstate_global->__pyx_n_u_device_get_grid_licensable_featu, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[669])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_grid_licensable_featu, __pyx_t_5) < (0)) __PYX_ERR(0, 23488, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23507
 * 
 * 
 * cpdef unsigned int get_vgpu_driver_capabilities(int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested vGPU driver capability.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_449get_vgpu_driver_capabilities, 0, __pyx_mstate_global->__pyx_n_u_get_vgpu_driver_capabilities, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[670])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23507, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_get_vgpu_driver_capabilities, __pyx_t_5) < (0)) __PYX_ERR(0, 23507, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23525
 * 
 * 
 * cpdef unsigned int device_get_vgpu_capabilities(intptr_t device, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested vGPU capability for GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_451device_get_vgpu_capabilities, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_capabilities, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[671])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23525, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_capabilities, __pyx_t_5) < (0)) __PYX_ERR(0, 23525, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23544
 * 
 * 
 * cpdef str vgpu_type_get_class(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). See ``nvmlConstants.NVML_DEVICE_NAME_BUFFER_SIZE``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_453vgpu_type_get_class, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_class, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[672])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23544, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_class, __pyx_t_5) < (0)) __PYX_ERR(0, 23544, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23566
 * 
 * 
 * cpdef str vgpu_type_get_name(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU type name.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_455vgpu_type_get_name, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_name, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[673])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_name, __pyx_t_5) < (0)) __PYX_ERR(0, 23566, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23582
 * 
 * 
 * cpdef unsigned int vgpu_type_get_gpu_instance_profile_id(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPU Instance Profile ID for the given vGPU type ID. The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is returned.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_457vgpu_type_get_gpu_instance_profile_id, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_gpu_instance_profi, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[674])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23582, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_gpu_instance_profi, __pyx_t_5) < (0)) __PYX_ERR(0, 23582, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23600
 * 
 * 
 * cpdef tuple vgpu_type_get_device_id(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the device ID of a vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_459vgpu_type_get_device_id, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_device_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[675])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23600, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_device_id, __pyx_t_5) < (0)) __PYX_ERR(0, 23600, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23622
 * 
 * 
 * cpdef unsigned long long vgpu_type_get_framebuffer_size(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU framebuffer size in bytes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_461vgpu_type_get_framebuffer_size, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_framebuffer_size, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[676])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23622, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_framebuffer_size, __pyx_t_5) < (0)) __PYX_ERR(0, 23622, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23640
 * 
 * 
 * cpdef unsigned int vgpu_type_get_num_display_heads(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve count of vGPU's supported display heads.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_463vgpu_type_get_num_display_heads, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_num_display_heads, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[677])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23640, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_num_display_heads, __pyx_t_5) < (0)) __PYX_ERR(0, 23640, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23658
 * 
 * 
 * cpdef tuple vgpu_type_get_resolution(unsigned int vgpu_type_id, unsigned int display_ind_ex):             # <<<<<<<<<<<<<<
 *     """Retrieve vGPU display head's maximum supported resolution.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_465vgpu_type_get_resolution, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_resolution, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[678])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_resolution, __pyx_t_5) < (0)) __PYX_ERR(0, 23658, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23681
 * 
 * 
 * cpdef vgpu_type_get_license(unsigned int vgpu_type_id, intptr_t vgpu_type_license_string, unsigned int size):             # <<<<<<<<<<<<<<
 *     """Retrieve license requirements for a vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_467vgpu_type_get_license, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_license, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[679])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23681, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_license, __pyx_t_5) < (0)) __PYX_ERR(0, 23681, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23696
 * 
 * 
 * cpdef unsigned int vgpu_type_get_frame_rate_limit(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the static frame rate limit value of the vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_469vgpu_type_get_frame_rate_limit, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_frame_rate_limit, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[680])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23696, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_frame_rate_limit, __pyx_t_5) < (0)) __PYX_ERR(0, 23696, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23714
 * 
 * 
 * cpdef unsigned int vgpu_type_get_max_instances(intptr_t device, unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_471vgpu_type_get_max_instances, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_max_instances, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[681])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23714, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_max_instances, __pyx_t_5) < (0)) __PYX_ERR(0, 23714, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23733
 * 
 * 
 * cpdef unsigned int vgpu_type_get_max_instances_per_vm(unsigned int vgpu_type_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances supported per VM for given vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_473vgpu_type_get_max_instances_per_vm, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_max_instances_per, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[682])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_max_instances_per, __pyx_t_5) < (0)) __PYX_ERR(0, 23733, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23751
 * 
 * 
 * cpdef object vgpu_type_get_bar1_info(unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Retrieve the BAR1 info for given vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_475vgpu_type_get_bar1_info, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_bar1_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[683])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23751, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_bar1_info, __pyx_t_5) < (0)) __PYX_ERR(0, 23751, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23771
 * 
 * 
 * cpdef str vgpu_instance_get_uuid(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the UUID of a vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_477vgpu_instance_get_uuid, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_uuid, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[684])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_uuid, __pyx_t_5) < (0)) __PYX_ERR(0, 23771, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23787
 * 
 * 
 * cpdef str vgpu_instance_get_vm_driver_version(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the NVIDIA driver version installed in the VM associated with a vGPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_479vgpu_instance_get_vm_driver_version, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_vm_driver_vers, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[685])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23787, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_vm_driver_vers, __pyx_t_5) < (0)) __PYX_ERR(0, 23787, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23803
 * 
 * 
 * cpdef unsigned long long vgpu_instance_get_fb_usage(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the framebuffer usage in bytes.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_481vgpu_instance_get_fb_usage, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_fb_usage, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[686])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_fb_usage, __pyx_t_5) < (0)) __PYX_ERR(0, 23803, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23821
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_license_status(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """[Deprecated].
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_483vgpu_instance_get_license_status, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_license_status, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[687])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23821, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_license_status, __pyx_t_5) < (0)) __PYX_ERR(0, 23821, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23839
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_type(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the vGPU type of a vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_485vgpu_instance_get_type, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_type, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[688])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23839, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_type, __pyx_t_5) < (0)) __PYX_ERR(0, 23839, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23857
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_frame_rate_limit(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the frame rate limit set for the vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_487vgpu_instance_get_frame_rate_limit, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_frame_rate_lim, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[689])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23857, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_frame_rate_lim, __pyx_t_5) < (0)) __PYX_ERR(0, 23857, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23875
 * 
 * 
 * cpdef int vgpu_instance_get_ecc_mode(unsigned int vgpu_instance) except? -1:             # <<<<<<<<<<<<<<
 *     """Retrieve the current ECC mode of vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_489vgpu_instance_get_ecc_mode, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_ecc_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[690])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23875, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_ecc_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 23875, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23893
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_encoder_capacity(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_491vgpu_instance_get_encoder_capacity, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_encoder_capaci, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[691])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23893, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_encoder_capaci, __pyx_t_5) < (0)) __PYX_ERR(0, 23893, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23911
 * 
 * 
 * cpdef vgpu_instance_set_encoder_capacity(unsigned int vgpu_instance, unsigned int encoder_capacity):             # <<<<<<<<<<<<<<
 *     """Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_493vgpu_instance_set_encoder_capacity, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_set_encoder_capaci, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[692])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23911, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_set_encoder_capaci, __pyx_t_5) < (0)) __PYX_ERR(0, 23911, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23925
 * 
 * 
 * cpdef tuple vgpu_instance_get_encoder_stats(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the current encoder statistics of a vGPU Instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_495vgpu_instance_get_encoder_stats, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_encoder_stats, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[693])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23925, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_encoder_stats, __pyx_t_5) < (0)) __PYX_ERR(0, 23925, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23949
 * 
 * 
 * cpdef object vgpu_instance_get_encoder_sessions(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves information about all active encoder sessions on a vGPU Instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_497vgpu_instance_get_encoder_sessions, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_encoder_sessio, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[694])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23949, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_encoder_sessio, __pyx_t_5) < (0)) __PYX_ERR(0, 23949, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23971
 * 
 * 
 * cpdef object vgpu_instance_get_fbc_stats(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the active frame buffer capture sessions statistics of a vGPU Instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_499vgpu_instance_get_fbc_stats, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_fbc_stats, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[695])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23971, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_fbc_stats, __pyx_t_5) < (0)) __PYX_ERR(0, 23971, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":23990
 * 
 * 
 * cpdef object vgpu_instance_get_fbc_sessions(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves information about active frame buffer capture sessions on a vGPU Instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_501vgpu_instance_get_fbc_sessions, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_fbc_sessions, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[696])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23990, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_fbc_sessions, __pyx_t_5) < (0)) __PYX_ERR(0, 23990, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24012
 * 
 * 
 * cpdef unsigned int vgpu_instance_get_gpu_instance_id(unsigned int vgpu_instance) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the GPU Instance ID for the given vGPU Instance. The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_503vgpu_instance_get_gpu_instance_id, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_gpu_instance_i, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[697])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24012, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_gpu_instance_i, __pyx_t_5) < (0)) __PYX_ERR(0, 24012, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24030
 * 
 * 
 * cpdef str vgpu_instance_get_gpu_pci_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieves the PCI Id of the given vGPU Instance i.e. the PCI Id of the GPU as seen inside the VM.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_505vgpu_instance_get_gpu_pci_id, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_gpu_pci_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[698])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_gpu_pci_id, __pyx_t_5) < (0)) __PYX_ERR(0, 24030, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24052
 * 
 * 
 * cpdef unsigned int vgpu_type_get_capabilities(unsigned int vgpu_type_id, int capability) except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieve the requested capability for a given vGPU type. Refer to the ``nvmlVgpuCapability_t`` structure for the specific capabilities that can be queried. The return value in ``capResult`` should be treated as a boolean, with a non-zero value indicating that the capability is supported.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_507vgpu_type_get_capabilities, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_capabilities, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[699])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24052, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_capabilities, __pyx_t_5) < (0)) __PYX_ERR(0, 24052, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24071
 * 
 * 
 * cpdef str vgpu_instance_get_mdev_uuid(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the MDEV UUID of a vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_509vgpu_instance_get_mdev_uuid, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_mdev_uuid, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[700])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24071, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_mdev_uuid, __pyx_t_5) < (0)) __PYX_ERR(0, 24071, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24087
 * 
 * 
 * cpdef object vgpu_type_get_max_instances_per_gpu_instance():             # <<<<<<<<<<<<<<
 *     """Retrieve the maximum number of vGPU instances per GPU instance for given vGPU type.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_511vgpu_type_get_max_instances_per_gpu_instance, 0, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_max_instances_per_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[701])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24087, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_type_get_max_instances_per_2, __pyx_t_5) < (0)) __PYX_ERR(0, 24087, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24104
 * 
 * 
 * cpdef gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_scheduler):             # <<<<<<<<<<<<<<
 *     """Set vGPU scheduler state for the given GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_513gpu_instance_set_vgpu_scheduler_state, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_set_vgpu_scheduler, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[702])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24104, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_set_vgpu_scheduler, __pyx_t_5) < (0)) __PYX_ERR(0, 24104, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24118
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_scheduler_state(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler state for the given GPU instance. The information returned in ``nvmlVgpuSchedulerStateInfo_t`` is not relevant if the BEST EFFORT policy is set.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_515gpu_instance_get_vgpu_scheduler_state, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_scheduler, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[703])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24118, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_scheduler, __pyx_t_5) < (0)) __PYX_ERR(0, 24118, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24138
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_scheduler_log(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler logs for the given GPU instance. ``pSchedulerLogInfo`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_517gpu_instance_get_vgpu_scheduler_log, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_scheduler_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[704])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24138, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_scheduler_2, __pyx_t_5) < (0)) __PYX_ERR(0, 24138, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24158
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Get the vGPU heterogeneous mode for the GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_519gpu_instance_get_vgpu_heterogeneous_mode, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_heterogene, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[705])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_heterogene, __pyx_t_5) < (0)) __PYX_ERR(0, 24158, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24178
 * 
 * 
 * cpdef gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, intptr_t p_heterogeneous_mode):             # <<<<<<<<<<<<<<
 *     """Enable or disable vGPU heterogeneous mode for the GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_521gpu_instance_set_vgpu_heterogeneous_mode, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_set_vgpu_heterogene, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[706])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_set_vgpu_heterogene, __pyx_t_5) < (0)) __PYX_ERR(0, 24178, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24192
 * 
 * 
 * cpdef str device_get_pgpu_metadata_string(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_523device_get_pgpu_metadata_string, 0, __pyx_mstate_global->__pyx_n_u_device_get_pgpu_metadata_string, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[707])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pgpu_metadata_string, __pyx_t_5) < (0)) __PYX_ERR(0, 24192, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24214
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_log(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU Software scheduler logs. ``pSchedulerLog`` points to a caller-allocated structure to contain the logs. The number of elements returned will never exceed ``NVML_SCHEDULER_SW_MAX_LOG_ENTRIES``.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_525device_get_vgpu_scheduler_log, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_scheduler_log, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[708])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_scheduler_log, __pyx_t_5) < (0)) __PYX_ERR(0, 24214, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24233
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_state(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler state. The information returned in ``nvmlVgpuSchedulerGetState_t`` is not relevant if the BEST EFFORT policy is set.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_527device_get_vgpu_scheduler_state, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_scheduler_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[709])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24233, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_scheduler_state, __pyx_t_5) < (0)) __PYX_ERR(0, 24233, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24252
 * 
 * 
 * cpdef object device_get_vgpu_scheduler_capabilities(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns the vGPU scheduler capabilities. The list of supported vGPU schedulers returned in ``nvmlVgpuSchedulerCapabilities_t`` is from the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies if the engine is Graphics type. The other values in ``nvmlVgpuSchedulerCapabilities_t`` are also applicable if the engine is Graphics type. For other engine types, it is BEST EFFORT policy. If ARR is supported and enabled, scheduling frequency and averaging factor are applicable else timeSlice is applicable.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_529device_get_vgpu_scheduler_capabilities, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_scheduler_capabi, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[710])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24252, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_scheduler_capabi, __pyx_t_5) < (0)) __PYX_ERR(0, 24252, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24271
 * 
 * 
 * cpdef device_set_vgpu_scheduler_state(intptr_t device, intptr_t p_scheduler_state):             # <<<<<<<<<<<<<<
 *     """Sets the vGPU scheduler state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_531device_set_vgpu_scheduler_state, 0, __pyx_mstate_global->__pyx_n_u_device_set_vgpu_scheduler_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[711])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24271, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_vgpu_scheduler_state, __pyx_t_5) < (0)) __PYX_ERR(0, 24271, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24285
 * 
 * 
 * cpdef set_vgpu_version(intptr_t vgpu_version):             # <<<<<<<<<<<<<<
 *     """Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_533set_vgpu_version, 0, __pyx_mstate_global->__pyx_n_u_set_vgpu_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[712])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24285, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_set_vgpu_version, __pyx_t_5) < (0)) __PYX_ERR(0, 24285, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24298
 * 
 * 
 * cpdef tuple device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves current utilization for vGPUs on a physical GPU (device).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_535device_get_vgpu_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[713])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24298, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 24298, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24323
 * 
 * 
 * cpdef tuple device_get_vgpu_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves current utilization for processes running on vGPUs on a physical GPU (device).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_537device_get_vgpu_process_utilization, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_process_utilizat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[714])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24323, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_process_utilizat, __pyx_t_5) < (0)) __PYX_ERR(0, 24323, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24346
 * 
 * 
 * cpdef int vgpu_instance_get_accounting_mode(unsigned int vgpu_instance) except? -1:             # <<<<<<<<<<<<<<
 *     """Queries the state of per process accounting mode on vGPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_539vgpu_instance_get_accounting_mode, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_accounting_mod, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[715])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24346, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_accounting_mod, __pyx_t_5) < (0)) __PYX_ERR(0, 24346, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24364
 * 
 * 
 * cpdef object vgpu_instance_get_accounting_pids(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes returned can be in running or terminated state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_541vgpu_instance_get_accounting_pids, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_accounting_pid, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[716])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24364, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_accounting_pid, __pyx_t_5) < (0)) __PYX_ERR(0, 24364, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24386
 * 
 * 
 * cpdef object vgpu_instance_get_accounting_stats(unsigned int vgpu_instance, unsigned int pid):             # <<<<<<<<<<<<<<
 *     """Queries process's accounting stats.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_543vgpu_instance_get_accounting_stats, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_accounting_sta, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[717])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24386, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_accounting_sta, __pyx_t_5) < (0)) __PYX_ERR(0, 24386, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24406
 * 
 * 
 * cpdef vgpu_instance_clear_accounting_pids(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Clears accounting information of the vGPU instance that have already terminated.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_545vgpu_instance_clear_accounting_pids, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_clear_accounting_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[718])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_clear_accounting_p, __pyx_t_5) < (0)) __PYX_ERR(0, 24406, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24419
 * 
 * 
 * cpdef object vgpu_instance_get_license_info_v2(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the license information of the vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_547vgpu_instance_get_license_info_v2, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_license_info_v, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[719])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24419, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_license_info_v, __pyx_t_5) < (0)) __PYX_ERR(0, 24419, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24438
 * 
 * 
 * cpdef unsigned int get_excluded_device_count() except? 0:             # <<<<<<<<<<<<<<
 *     """Retrieves the number of excluded GPU devices in the system.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_549get_excluded_device_count, 0, __pyx_mstate_global->__pyx_n_u_get_excluded_device_count, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[720])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24438, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_get_excluded_device_count, __pyx_t_5) < (0)) __PYX_ERR(0, 24438, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24453
 * 
 * 
 * cpdef object get_excluded_device_info_by_index(unsigned int ind_ex):             # <<<<<<<<<<<<<<
 *     """Acquire the device information for an excluded GPU device, based on its ind_ex.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_551get_excluded_device_info_by_index, 0, __pyx_mstate_global->__pyx_n_u_get_excluded_device_info_by_inde, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[721])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_get_excluded_device_info_by_inde, __pyx_t_5) < (0)) __PYX_ERR(0, 24453, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24472
 * 
 * 
 * cpdef int device_set_mig_mode(intptr_t device, unsigned int mode) except? -1:             # <<<<<<<<<<<<<<
 *     """Set MIG mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_553device_set_mig_mode, 0, __pyx_mstate_global->__pyx_n_u_device_set_mig_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[722])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24472, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_mig_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 24472, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24491
 * 
 * 
 * cpdef tuple device_get_mig_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get MIG mode for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_555device_get_mig_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_mig_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[723])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_mig_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 24491, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24513
 * 
 * 
 * cpdef object device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around ``nvmlDeviceGetGpuInstanceProfileInfo`` that accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_557device_get_gpu_instance_profile_info_v, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_profile, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[724])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_profile, __pyx_t_5) < (0)) __PYX_ERR(0, 24513, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24534
 * 
 * 
 * cpdef object device_get_gpu_instance_possible_placements_v2(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get GPU instance placements.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_559device_get_gpu_instance_possible_placements_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_possible, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[725])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24534, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_possible, __pyx_t_5) < (0)) __PYX_ERR(0, 24534, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24557
 * 
 * 
 * cpdef unsigned int device_get_gpu_instance_remaining_capacity(intptr_t device, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instance profile capacity.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_561device_get_gpu_instance_remaining_capacity, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_remainin, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[726])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24557, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_remainin, __pyx_t_5) < (0)) __PYX_ERR(0, 24557, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24576
 * 
 * 
 * cpdef intptr_t device_create_gpu_instance(intptr_t device, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Create GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_563device_create_gpu_instance, 0, __pyx_mstate_global->__pyx_n_u_device_create_gpu_instance, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[727])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24576, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_create_gpu_instance, __pyx_t_5) < (0)) __PYX_ERR(0, 24576, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24595
 * 
 * 
 * cpdef intptr_t device_create_gpu_instance_with_placement(intptr_t device, unsigned int profile_id, intptr_t placement) except? 0:             # <<<<<<<<<<<<<<
 *     """Create GPU instance with the specified placement.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_565device_create_gpu_instance_with_placement, 0, __pyx_mstate_global->__pyx_n_u_device_create_gpu_instance_with, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[728])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24595, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_create_gpu_instance_with, __pyx_t_5) < (0)) __PYX_ERR(0, 24595, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24615
 * 
 * 
 * cpdef gpu_instance_destroy(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Destroy GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_567gpu_instance_destroy, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_destroy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[729])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24615, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_destroy, __pyx_t_5) < (0)) __PYX_ERR(0, 24615, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24628
 * 
 * 
 * cpdef intptr_t device_get_gpu_instance_by_id(intptr_t device, unsigned int id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instances for given instance ID.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_569device_get_gpu_instance_by_id, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_by_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[730])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24628, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_by_id, __pyx_t_5) < (0)) __PYX_ERR(0, 24628, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24647
 * 
 * 
 * cpdef object gpu_instance_get_info(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Get GPU instance information.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_571gpu_instance_get_info, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[731])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24647, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_info, __pyx_t_5) < (0)) __PYX_ERR(0, 24647, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24666
 * 
 * 
 * cpdef object gpu_instance_get_compute_instance_profile_info_v(intptr_t gpu_instance, unsigned int profile, unsigned int eng_profile):             # <<<<<<<<<<<<<<
 *     """Versioned wrapper around ``nvmlGpuInstanceGetComputeInstanceProfileInfo`` that accepts a versioned ``nvmlComputeInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_573gpu_instance_get_compute_instance_profile_info_v, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[732])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24666, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc, __pyx_t_5) < (0)) __PYX_ERR(0, 24666, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24688
 * 
 * 
 * cpdef unsigned int gpu_instance_get_compute_instance_remaining_capacity(intptr_t gpu_instance, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance profile capacity.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_575gpu_instance_get_compute_instance_remaining_capacity, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[733])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_2, __pyx_t_5) < (0)) __PYX_ERR(0, 24688, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24707
 * 
 * 
 * cpdef object gpu_instance_get_compute_instance_possible_placements(intptr_t gpu_instance, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get compute instance placements.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_577gpu_instance_get_compute_instance_possible_placements, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_3, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[734])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24707, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_3, __pyx_t_5) < (0)) __PYX_ERR(0, 24707, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24730
 * 
 * 
 * cpdef intptr_t gpu_instance_create_compute_instance(intptr_t gpu_instance, unsigned int profile_id) except? 0:             # <<<<<<<<<<<<<<
 *     """Create compute instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_579gpu_instance_create_compute_instance, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_create_compute_inst, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[735])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24730, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_create_compute_inst, __pyx_t_5) < (0)) __PYX_ERR(0, 24730, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24749
 * 
 * 
 * cpdef intptr_t gpu_instance_create_compute_instance_with_placement(intptr_t gpu_instance, unsigned int profile_id, intptr_t placement) except? 0:             # <<<<<<<<<<<<<<
 *     """Create compute instance with the specified placement.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_581gpu_instance_create_compute_instance_with_placement, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_create_compute_inst_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[736])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24749, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_create_compute_inst_2, __pyx_t_5) < (0)) __PYX_ERR(0, 24749, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24769
 * 
 * 
 * cpdef compute_instance_destroy(intptr_t compute_instance):             # <<<<<<<<<<<<<<
 *     """Destroy compute instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_583compute_instance_destroy, 0, __pyx_mstate_global->__pyx_n_u_compute_instance_destroy, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[737])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24769, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_compute_instance_destroy, __pyx_t_5) < (0)) __PYX_ERR(0, 24769, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24782
 * 
 * 
 * cpdef intptr_t gpu_instance_get_compute_instance_by_id(intptr_t gpu_instance, unsigned int id) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance for given instance ID.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_585gpu_instance_get_compute_instance_by_id, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_4, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[738])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24782, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_4, __pyx_t_5) < (0)) __PYX_ERR(0, 24782, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24801
 * 
 * 
 * cpdef object compute_instance_get_info_v2(intptr_t compute_instance):             # <<<<<<<<<<<<<<
 *     """Get compute instance information.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_587compute_instance_get_info_v2, 0, __pyx_mstate_global->__pyx_n_u_compute_instance_get_info_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[739])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24801, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_compute_instance_get_info_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 24801, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24820
 * 
 * 
 * cpdef unsigned int device_is_mig_device_handle(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Test if the given handle refers to a MIG device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_589device_is_mig_device_handle, 0, __pyx_mstate_global->__pyx_n_u_device_is_mig_device_handle, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[740])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24820, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_is_mig_device_handle, __pyx_t_5) < (0)) __PYX_ERR(0, 24820, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24838
 * 
 * 
 * cpdef unsigned int device_get_gpu_instance_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPU instance ID for the given MIG device handle.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_591device_get_gpu_instance_id, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[741])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24838, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_id, __pyx_t_5) < (0)) __PYX_ERR(0, 24838, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24856
 * 
 * 
 * cpdef unsigned int device_get_compute_instance_id(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get compute instance ID for the given MIG device handle.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_593device_get_compute_instance_id, 0, __pyx_mstate_global->__pyx_n_u_device_get_compute_instance_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[742])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24856, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_compute_instance_id, __pyx_t_5) < (0)) __PYX_ERR(0, 24856, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24874
 * 
 * 
 * cpdef unsigned int device_get_max_mig_device_count(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get the maximum number of MIG devices that can exist under a given parent NVML device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_595device_get_max_mig_device_count, 0, __pyx_mstate_global->__pyx_n_u_device_get_max_mig_device_count, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[743])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24874, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_max_mig_device_count, __pyx_t_5) < (0)) __PYX_ERR(0, 24874, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24892
 * 
 * 
 * cpdef intptr_t device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0:             # <<<<<<<<<<<<<<
 *     """Get MIG device handle for the given ind_ex under its parent NVML device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_597device_get_mig_device_handle_by_index, 0, __pyx_mstate_global->__pyx_n_u_device_get_mig_device_handle_by, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[744])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24892, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_mig_device_handle_by, __pyx_t_5) < (0)) __PYX_ERR(0, 24892, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24911
 * 
 * 
 * cpdef intptr_t device_get_device_handle_from_mig_device_handle(intptr_t mig_device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get parent device handle from a MIG device handle.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_599device_get_device_handle_from_mig_device_handle, 0, __pyx_mstate_global->__pyx_n_u_device_get_device_handle_from_mi, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[745])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24911, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_device_handle_from_mi, __pyx_t_5) < (0)) __PYX_ERR(0, 24911, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24929
 * 
 * 
 * cpdef gpm_sample_get(intptr_t device, intptr_t gpm_sample):             # <<<<<<<<<<<<<<
 *     """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer. After two samples are gathered, you can call nvmlGpmMetricGet on those samples to retrieve metrics.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_601gpm_sample_get, 0, __pyx_mstate_global->__pyx_n_u_gpm_sample_get, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[746])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24929, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpm_sample_get, __pyx_t_5) < (0)) __PYX_ERR(0, 24929, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24943
 * 
 * 
 * cpdef gpm_mig_sample_get(intptr_t device, unsigned int gpu_instance_id, intptr_t gpm_sample):             # <<<<<<<<<<<<<<
 *     """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer for a MIG GPU Instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_603gpm_mig_sample_get, 0, __pyx_mstate_global->__pyx_n_u_gpm_mig_sample_get, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[747])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24943, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpm_mig_sample_get, __pyx_t_5) < (0)) __PYX_ERR(0, 24943, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24958
 * 
 * 
 * cpdef object gpm_query_device_support(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Indicate whether the supplied device supports GPM.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_605gpm_query_device_support, 0, __pyx_mstate_global->__pyx_n_u_gpm_query_device_support, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[748])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpm_query_device_support, __pyx_t_5) < (0)) __PYX_ERR(0, 24958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24978
 * 
 * 
 * cpdef unsigned int gpm_query_if_streaming_enabled(intptr_t device) except? 0:             # <<<<<<<<<<<<<<
 *     """Get GPM stream state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_607gpm_query_if_streaming_enabled, 0, __pyx_mstate_global->__pyx_n_u_gpm_query_if_streaming_enabled, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[749])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24978, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpm_query_if_streaming_enabled, __pyx_t_5) < (0)) __PYX_ERR(0, 24978, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":24996
 * 
 * 
 * cpdef gpm_set_streaming_enabled(intptr_t device, unsigned int state):             # <<<<<<<<<<<<<<
 *     """Set GPM stream state.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_609gpm_set_streaming_enabled, 0, __pyx_mstate_global->__pyx_n_u_gpm_set_streaming_enabled, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[750])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24996, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpm_set_streaming_enabled, __pyx_t_5) < (0)) __PYX_ERR(0, 24996, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25010
 * 
 * 
 * cpdef object device_get_capabilities(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get device capabilities.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_611device_get_capabilities, 0, __pyx_mstate_global->__pyx_n_u_device_get_capabilities, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[751])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25010, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_capabilities, __pyx_t_5) < (0)) __PYX_ERR(0, 25010, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25030
 * 
 * 
 * cpdef device_workload_power_profile_clear_requested_profiles(intptr_t device, intptr_t requested_profiles):             # <<<<<<<<<<<<<<
 *     """Clear Requested Performance Profiles.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_613device_workload_power_profile_clear_requested_profiles, 0, __pyx_mstate_global->__pyx_n_u_device_workload_power_profile_cl, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[752])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25030, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_workload_power_profile_cl, __pyx_t_5) < (0)) __PYX_ERR(0, 25030, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25044
 * 
 * 
 * cpdef device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile):             # <<<<<<<<<<<<<<
 *     """Activate a specific preset profile for datacenter power smoothing. The API only sets the active preset profile based on the input profileId, and ignores the other parameters of the structure. Requires root/admin permissions.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_615device_power_smoothing_activate_preset_profile, 0, __pyx_mstate_global->__pyx_n_u_device_power_smoothing_activate, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[753])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25044, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_power_smoothing_activate, __pyx_t_5) < (0)) __PYX_ERR(0, 25044, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25058
 * 
 * 
 * cpdef device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile):             # <<<<<<<<<<<<<<
 *     """Update the value of a specific profile parameter contained within ``nvmlPowerSmoothingProfile_v1_t``. Requires root/admin permissions.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_617device_power_smoothing_update_preset_profile_param, 0, __pyx_mstate_global->__pyx_n_u_device_power_smoothing_update_pr, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[754])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25058, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_power_smoothing_update_pr, __pyx_t_5) < (0)) __PYX_ERR(0, 25058, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25072
 * 
 * 
 * cpdef device_power_smoothing_set_state(intptr_t device, intptr_t state):             # <<<<<<<<<<<<<<
 *     """Enable or disable the Power Smoothing Feature. Requires root/admin permissions.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_619device_power_smoothing_set_state, 0, __pyx_mstate_global->__pyx_n_u_device_power_smoothing_set_state, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[755])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25072, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_power_smoothing_set_state, __pyx_t_5) < (0)) __PYX_ERR(0, 25072, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25086
 * 
 * 
 * cpdef object device_get_addressing_mode(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the addressing mode for a given GPU. Addressing modes can be one of:.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_621device_get_addressing_mode, 0, __pyx_mstate_global->__pyx_n_u_device_get_addressing_mode, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[756])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25086, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_addressing_mode, __pyx_t_5) < (0)) __PYX_ERR(0, 25086, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25106
 * 
 * 
 * cpdef object device_get_repair_status(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the repair status for TPC/Channel repair.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_623device_get_repair_status, 0, __pyx_mstate_global->__pyx_n_u_device_get_repair_status, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[757])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25106, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_repair_status, __pyx_t_5) < (0)) __PYX_ERR(0, 25106, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25126
 * 
 * 
 * cpdef object device_get_power_mizer_mode_v1(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves current power mizer mode on this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_625device_get_power_mizer_mode_v1, 0, __pyx_mstate_global->__pyx_n_u_device_get_power_mizer_mode_v1, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[758])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25126, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_power_mizer_mode_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 25126, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25145
 * 
 * 
 * cpdef device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode):             # <<<<<<<<<<<<<<
 *     """Sets the new power mizer mode.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_627device_set_power_mizer_mode_v1, 0, __pyx_mstate_global->__pyx_n_u_device_set_power_mizer_mode_v1, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[759])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25145, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_power_mizer_mode_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 25145, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25159
 * 
 * 
 * cpdef object device_get_pdi(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the Per Device Identifier (PDI) associated with this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_629device_get_pdi, 0, __pyx_mstate_global->__pyx_n_u_device_get_pdi, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[760])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_pdi, __pyx_t_5) < (0)) __PYX_ERR(0, 25159, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25179
 * 
 * 
 * cpdef object device_get_nvlink_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Query NVLINK information associated with this device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_631device_get_nvlink_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_info, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[761])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_nvlink_info, __pyx_t_5) < (0)) __PYX_ERR(0, 25179, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25199
 * 
 * 
 * cpdef device_read_write_prm_v1(intptr_t device, intptr_t buffer):             # <<<<<<<<<<<<<<
 *     """Read or write a GPU PRM register. The input is assumed to be in TLV format in network byte order.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_633device_read_write_prm_v1, 0, __pyx_mstate_global->__pyx_n_u_device_read_write_prm_v1, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[762])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_read_write_prm_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 25199, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25213
 * 
 * 
 * cpdef object device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """GPU instance profile query function that accepts profile ID, instead of profile name. It accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_635device_get_gpu_instance_profile_info_by_id_v, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_profile_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[763])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25213, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instance_profile_2, __pyx_t_5) < (0)) __PYX_ERR(0, 25213, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25234
 * 
 * 
 * cpdef object system_get_topology_gpu_set(unsigned int cpuNumber):             # <<<<<<<<<<<<<<
 *     """Retrieve the set of GPUs that have a CPU affinity with the given CPU number
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_637system_get_topology_gpu_set, 0, __pyx_mstate_global->__pyx_n_u_system_get_topology_gpu_set, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[764])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_topology_gpu_set, __pyx_t_5) < (0)) __PYX_ERR(0, 25234, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25256
 * 
 * 
 * cpdef str system_get_driver_branch():             # <<<<<<<<<<<<<<
 *     """Retrieves the driver branch of the NVIDIA driver installed on the system.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_639system_get_driver_branch, 0, __pyx_mstate_global->__pyx_n_u_system_get_driver_branch, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[765])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25256, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_system_get_driver_branch, __pyx_t_5) < (0)) __PYX_ERR(0, 25256, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25274
 * 
 * 
 * cpdef object unit_get_devices(intptr_t unit):             # <<<<<<<<<<<<<<
 *     """Retrieves the set of GPU devices that are attached to the specified unit.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_641unit_get_devices, 0, __pyx_mstate_global->__pyx_n_u_unit_get_devices, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[766])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25274, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_unit_get_devices, __pyx_t_5) < (0)) __PYX_ERR(0, 25274, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25296
 * 
 * 
 * cpdef object device_get_topology_nearest_gpus(intptr_t device, unsigned int level):             # <<<<<<<<<<<<<<
 *     """Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_643device_get_topology_nearest_gpus, 0, __pyx_mstate_global->__pyx_n_u_device_get_topology_nearest_gpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[767])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25296, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_topology_nearest_gpus, __pyx_t_5) < (0)) __PYX_ERR(0, 25296, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25329
 * 
 * 
 * cpdef object device_get_temperature_v(intptr_t device, nvmlTemperatureSensors_t sensorType):             # <<<<<<<<<<<<<<
 *     """Retrieves the current temperature readings (in degrees C) for the given device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_645device_get_temperature_v, 0, __pyx_mstate_global->__pyx_n_u_device_get_temperature_v, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[768])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25329, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_temperature_v, __pyx_t_5) < (0)) __PYX_ERR(0, 25329, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25350
 * 
 * 
 * cpdef object device_get_supported_performance_states(intptr_t device, unsigned int size):             # <<<<<<<<<<<<<<
 *     """Get all supported Performance States (P-States) for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_647device_get_supported_performance_states, 0, __pyx_mstate_global->__pyx_n_u_device_get_supported_performance, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[769])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25350, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_supported_performance, __pyx_t_5) < (0)) __PYX_ERR(0, 25350, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25373
 * 
 * 
 * cpdef object device_get_running_process_detail_list(intptr_t device, unsigned int mode):             # <<<<<<<<<<<<<<
 *     """Get information about running processes on a device for input context
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_649device_get_running_process_detail_list, 0, __pyx_mstate_global->__pyx_n_u_device_get_running_process_detai, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[770])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25373, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_running_process_detai, __pyx_t_5) < (0)) __PYX_ERR(0, 25373, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25404
 * 
 * 
 * cpdef object device_get_samples(intptr_t device, int type, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Gets recent samples for the GPU.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_651device_get_samples, 0, __pyx_mstate_global->__pyx_n_u_device_get_samples, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[771])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25404, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_samples, __pyx_t_5) < (0)) __PYX_ERR(0, 25404, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25429
 * 
 * 
 * cpdef object device_get_retired_pages_v2(intptr_t device, int cause):             # <<<<<<<<<<<<<<
 *     """Returns the list of retired pages by source, including pages that are pending retirement
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_653device_get_retired_pages_v2, 0, __pyx_mstate_global->__pyx_n_u_device_get_retired_pages_v2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[772])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25429, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_retired_pages_v2, __pyx_t_5) < (0)) __PYX_ERR(0, 25429, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25456
 * 
 * 
 * cpdef object device_get_processes_utilization_info(intptr_t device, unsigned long long last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """Retrieves the recent utilization and process ID for all running processes
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_655device_get_processes_utilization_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_processes_utilization, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[773])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_processes_utilization, __pyx_t_5) < (0)) __PYX_ERR(0, 25456, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25497
 * 
 * 
 * cpdef device_set_hostname_v1(intptr_t device, str hostname):             # <<<<<<<<<<<<<<
 *     """Set the hostname for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_657device_set_hostname_v1, 0, __pyx_mstate_global->__pyx_n_u_device_set_hostname_v1, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[774])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25497, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_set_hostname_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 25497, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25516
 * 
 * 
 * cpdef str device_get_hostname_v1(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Get the hostname for the device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_659device_get_hostname_v1, 0, __pyx_mstate_global->__pyx_n_u_device_get_hostname_v1, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[775])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_hostname_v1, __pyx_t_5) < (0)) __PYX_ERR(0, 25516, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25547
 * 
 * 
 * cpdef object device_get_field_values(intptr_t device, values):             # <<<<<<<<<<<<<<
 *     """Request values for a list of fields for a device. This API allows multiple fields to be queried at once. If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs will be populated from a single call rather than making a driver call for each fieldId.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_661device_get_field_values, 0, __pyx_mstate_global->__pyx_n_u_device_get_field_values, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[776])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25547, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_field_values, __pyx_t_5) < (0)) __PYX_ERR(0, 25547, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25566
 * 
 * 
 * cpdef object device_clear_field_values(intptr_t device, values):             # <<<<<<<<<<<<<<
 *     """Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_663device_clear_field_values, 0, __pyx_mstate_global->__pyx_n_u_device_clear_field_values, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[777])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_clear_field_values, __pyx_t_5) < (0)) __PYX_ERR(0, 25566, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25583
 * 
 * 
 * cpdef object device_get_supported_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the supported vGPU types on a physical GPU (device).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_665device_get_supported_vgpus, 0, __pyx_mstate_global->__pyx_n_u_device_get_supported_vgpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[778])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_supported_vgpus, __pyx_t_5) < (0)) __PYX_ERR(0, 25583, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25605
 * 
 * 
 * cpdef object device_get_creatable_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the currently creatable vGPU types on a physical GPU (device).
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_667device_get_creatable_vgpus, 0, __pyx_mstate_global->__pyx_n_u_device_get_creatable_vgpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[779])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25605, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_creatable_vgpus, __pyx_t_5) < (0)) __PYX_ERR(0, 25605, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25627
 * 
 * 
 * cpdef object device_get_active_vgpus(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieve the active vGPU instances on a device.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_669device_get_active_vgpus, 0, __pyx_mstate_global->__pyx_n_u_device_get_active_vgpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[780])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25627, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_active_vgpus, __pyx_t_5) < (0)) __PYX_ERR(0, 25627, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25649
 * 
 * 
 * cpdef str vgpu_instance_get_vm_id(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the VM ID associated with a vGPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_671vgpu_instance_get_vm_id, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_vm_id, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[781])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25649, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_vm_id, __pyx_t_5) < (0)) __PYX_ERR(0, 25649, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25667
 * 
 * 
 * cpdef object gpu_instance_get_creatable_vgpus(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Query the currently creatable vGPU types on a specific GPU Instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_673gpu_instance_get_creatable_vgpus, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_creatable_vgpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[782])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25667, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_creatable_vgpus, __pyx_t_5) < (0)) __PYX_ERR(0, 25667, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25701
 * 
 * 
 * cpdef object gpu_instance_get_active_vgpus(intptr_t gpu_instance):             # <<<<<<<<<<<<<<
 *     """Retrieve the active vGPU instances within a GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_675gpu_instance_get_active_vgpus, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_active_vgpus, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[783])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25701, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_active_vgpus, __pyx_t_5) < (0)) __PYX_ERR(0, 25701, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25733
 * 
 * 
 * cpdef object gpu_instance_get_vgpu_type_creatable_placements(intptr_t gpu_instance, unsigned int vgpu_type_id):             # <<<<<<<<<<<<<<
 *     """Query the creatable vGPU placement ID of the vGPU type within a GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_677gpu_instance_get_vgpu_type_creatable_placements, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_type_creat, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[784])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25733, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_vgpu_type_creat, __pyx_t_5) < (0)) __PYX_ERR(0, 25733, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25768
 * 
 * 
 * cpdef object device_get_vgpu_type_creatable_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode):             # <<<<<<<<<<<<<<
 *     """Query the creatable vGPU placement ID of the vGPU type within a GPU instance.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_679device_get_vgpu_type_creatable_placements, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_type_creatable_p, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[785])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25768, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_type_creatable_p, __pyx_t_5) < (0)) __PYX_ERR(0, 25768, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25805
 * 
 * 
 * cpdef object vgpu_instance_get_metadata(unsigned int vgpu_instance):             # <<<<<<<<<<<<<<
 *     """Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its
 *     associated VM such as the currently installed NVIDIA guest driver version, together with host driver version and
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_681vgpu_instance_get_metadata, 0, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_metadata, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[786])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25805, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_vgpu_instance_get_metadata, __pyx_t_5) < (0)) __PYX_ERR(0, 25805, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25827
 * 
 * 
 * cpdef object device_get_vgpu_metadata(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Returns a vGPU metadata structure for the physical GPU indicated by device. The structure contains
 *     information about the GPU and the currently installed NVIDIA host driver version that's controlling it,
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_683device_get_vgpu_metadata, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_metadata, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[787])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25827, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_metadata, __pyx_t_5) < (0)) __PYX_ERR(0, 25827, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25849
 * 
 * 
 * cpdef object get_vgpu_compatibility(VgpuMetadata vgpu_metadata, VgpuPgpuMetadata pgpu_metadata):             # <<<<<<<<<<<<<<
 *     """Takes a vGPU instance metadata structure read from vgpu_instance_get_metadata() and a vGPU metadata structure
 *     for a physical GPU read from device_get_vgpu_metadata, and returns compatibility information of the vGPU instance
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_685get_vgpu_compatibility, 0, __pyx_mstate_global->__pyx_n_u_get_vgpu_compatibility, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[788])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25849, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_get_vgpu_compatibility, __pyx_t_5) < (0)) __PYX_ERR(0, 25849, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25873
 * 
 * 
 * cpdef tuple get_vgpu_version():             # <<<<<<<<<<<<<<
 *     """Query the ranges of supported vGPU versions.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_687get_vgpu_version, 0, __pyx_mstate_global->__pyx_n_u_get_vgpu_version, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[789])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25873, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_get_vgpu_version, __pyx_t_5) < (0)) __PYX_ERR(0, 25873, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25891
 * 
 * 
 * cpdef object device_get_vgpu_instances_utilization_info(intptr_t device):             # <<<<<<<<<<<<<<
 *     """
 *     Retrieves recent utilization for vGPU instances running on a physical GPU (device).
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_689device_get_vgpu_instances_utilization_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_instances_utiliz, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[790])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25891, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_instances_utiliz, __pyx_t_5) < (0)) __PYX_ERR(0, 25891, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25924
 * 
 * 
 * cpdef object device_get_vgpu_processes_utilization_info(intptr_t device, unsigned int last_seen_time_stamp):             # <<<<<<<<<<<<<<
 *     """
 *     Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device).
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_691device_get_vgpu_processes_utilization_info, 0, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_processes_utiliz, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[791])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25924, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_vgpu_processes_utiliz, __pyx_t_5) < (0)) __PYX_ERR(0, 25924, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25958
 * 
 * 
 * cpdef object device_get_gpu_instances(intptr_t device, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get GPU instances for given profile ID.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_693device_get_gpu_instances, 0, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instances, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[792])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25958, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_gpu_instances, __pyx_t_5) < (0)) __PYX_ERR(0, 25958, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":25984
 * 
 * 
 * cpdef object gpu_instance_get_compute_instances(intptr_t gpu_instance, unsigned int profile_id):             # <<<<<<<<<<<<<<
 *     """Get Compute instances for given profile ID.
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_695gpu_instance_get_compute_instances, 0, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_5, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[793])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25984, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_gpu_instance_get_compute_instanc_5, __pyx_t_5) < (0)) __PYX_ERR(0, 25984, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":26010
 * 
 * 
 * cpdef object device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t device):             # <<<<<<<<<<<<<<
 *     """Retrieves the counts of SRAM unique uncorrected ECC errors
 * 
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_697device_get_sram_unique_uncorrected_ecc_error_counts, 0, __pyx_mstate_global->__pyx_n_u_device_get_sram_unique_uncorrect, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[794])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26010, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_device_get_sram_unique_uncorrect, __pyx_t_5) < (0)) __PYX_ERR(0, 26010, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_699__pyx_unpickle_ProcessInfo, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessInfo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[795])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessInfo, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_701__pyx_unpickle_ProcessDetail_v1, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessDetail_v1, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[796])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessDetail_v1, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_BridgeChipInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_703__pyx_unpickle_BridgeChipInfo, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_BridgeChipInfo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[797])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_BridgeChipInfo, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_705__pyx_unpickle_ClkMonFaultInfo, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ClkMonFaultInfo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[798])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ClkMonFaultInfo, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_ProcessUtilizationSample(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_707__pyx_unpickle_ProcessUtilizationSample, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[799])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_709__pyx_unpickle_ProcessUtilizationInfo_v1, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio_2, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[800])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ProcessUtilizatio_2, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuProcessUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_711__pyx_unpickle_VgpuProcessUtilizationInfo_v1, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuProcessUtiliz, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[801])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuProcessUtiliz, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_713__pyx_unpickle_VgpuSchedulerLogEntry, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuSchedulerLogE, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[802])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuSchedulerLogE, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_HwbcEntry(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_715__pyx_unpickle_HwbcEntry, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_HwbcEntry, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[803])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_HwbcEntry, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_717__pyx_unpickle_UnitFanInfo, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_UnitFanInfo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[804])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_UnitFanInfo, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_EncoderSessionInfo(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_719__pyx_unpickle_EncoderSessionInfo, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EncoderSessionInf, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[805])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EncoderSessionInf, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_721__pyx_unpickle_FBCSessionInfo, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FBCSessionInfo, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[806])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FBCSessionInfo, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_GpuInstancePlacement(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_723__pyx_unpickle_GpuInstancePlacement, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GpuInstancePlacem, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[807])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GpuInstancePlacem, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_725__pyx_unpickle_ComputeInstancePlacement, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ComputeInstancePl, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[808])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_ComputeInstancePl, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_727__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EccSramUniqueUnco, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[809])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_EccSramUniqueUnco, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_729__pyx_unpickle_Sample, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Sample, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[810])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_Sample, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_VgpuInstanceUtilizationInfo_v1(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_731__pyx_unpickle_VgpuInstanceUtilizationInfo_v1, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuInstanceUtili, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[811])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_VgpuInstanceUtili, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":1
 * cdef extern from *:             # <<<<<<<<<<<<<<
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_733__pyx_unpickle_FieldValue, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FieldValue, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[812])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_FieldValue, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "(tree fragment)":4
 *     int __Pyx_CheckUnpickleChecksum(long, long, long, long, const char*) except -1
 *     int __Pyx_UpdateUnpickledDict(object, object, Py_ssize_t) except -1
 * def __pyx_unpickle_GridLicensableFeature(__pyx_type, long __pyx_checksum, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_result
 *     __Pyx_CheckUnpickleChecksum(__pyx_checksum, 0xa75e18a, 0xc05ad22, 0x7125153, b'_data')
*/
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_4cuda_8bindings_5_nvml_735__pyx_unpickle_GridLicensableFeature, 0, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GridLicensableFea, NULL, __pyx_mstate_global->__pyx_n_u_cuda_bindings__nvml, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[813])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  PyUnstable_Object_EnableDeferredRefcount(__pyx_t_5);
  #endif
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_unpickle_GridLicensableFea, __pyx_t_5) < (0)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /* "cuda/bindings/_nvml.pyx":1
 * # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.             # <<<<<<<<<<<<<<
 * #
 * # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE
*/
  __pyx_t_5 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_test, __pyx_t_5) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;

  /*--- Wrapped vars code ---*/

  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  if (__pyx_m) {
    if (__pyx_mstate->__pyx_d && stringtab_initialized) {
      __Pyx_AddTraceback("init cuda.bindings._nvml", __pyx_clineno, __pyx_lineno, __pyx_filename);
    }
    #if !CYTHON_USE_MODULE_STATE
    Py_CLEAR(__pyx_m);
    #else
    Py_DECREF(__pyx_m);
    if (pystate_addmodule_run) {
      PyObject *tp, *value, *tb;
      PyErr_Fetch(&tp, &value, &tb);
      PyState_RemoveModule(&__pyx_moduledef);
      PyErr_Restore(tp, value, tb);
    }
    #endif
  } else if (!PyErr_Occurred()) {
    PyErr_SetString(PyExc_ImportError, "init cuda.bindings._nvml");
  }
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  return (__pyx_m != NULL) ? 0 : -1;
  #else
  return __pyx_m;
  #endif
}
/* #### Code section: pystring_table ### */
/* #### Code section: cached_builtins ### */

/* Resolve and cache Python builtins used by the generated module code
 * (staticmethod, super, id, enumerate, __import__, Ellipsis), and set up
 * cached "unbound method" descriptors for dict.items/pop/values so later
 * calls can bypass attribute lookup on PyDict_Type.
 *
 * Returns 0 on success, -1 on failure with a Python exception set.
 *
 * NOTE(review): __Pyx_GetBuiltinName and __PYX_ERR are Cython-generated
 * utilities defined earlier in this file; __PYX_ERR records the error
 * location and jumps to __pyx_L1_error, so the cleanup label below is
 * reached on any lookup failure. The (file, line) pairs in __PYX_ERR refer
 * to positions in the original .pyx / tree-fragment sources, not this file. */
static int __Pyx_InitCachedBuiltins(__pyx_mstatetype *__pyx_mstate) {
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __pyx_builtin_staticmethod = __Pyx_GetBuiltinName(__pyx_mstate->__pyx_n_u_staticmethod); if (!__pyx_builtin_staticmethod) __PYX_ERR(0, 1517, __pyx_L1_error)
  __pyx_builtin_super = __Pyx_GetBuiltinName(__pyx_mstate->__pyx_n_u_super); if (!__pyx_builtin_super) __PYX_ERR(0, 1146, __pyx_L1_error)
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_mstate->__pyx_n_u_id); if (!__pyx_builtin_id) __PYX_ERR(0, 1382, __pyx_L1_error)
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_mstate->__pyx_n_u_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(0, 25538, __pyx_L1_error)
  __pyx_builtin___import__ = __Pyx_GetBuiltinName(__pyx_mstate->__pyx_n_u_import); if (!__pyx_builtin___import__) __PYX_ERR(1, 101, __pyx_L1_error)
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_mstate->__pyx_n_u_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 409, __pyx_L1_error)

  /* Cached unbound methods */
  /* Each entry pairs the owning type with an interned method-name string;
   * the actual bound lookup is deferred until first use. */
  __pyx_mstate->__pyx_umethod_PyDict_Type_items.type = (PyObject*)&PyDict_Type;
  __pyx_mstate->__pyx_umethod_PyDict_Type_items.method_name = &__pyx_mstate->__pyx_n_u_items;
  __pyx_mstate->__pyx_umethod_PyDict_Type_pop.type = (PyObject*)&PyDict_Type;
  __pyx_mstate->__pyx_umethod_PyDict_Type_pop.method_name = &__pyx_mstate->__pyx_n_u_pop;
  __pyx_mstate->__pyx_umethod_PyDict_Type_values.type = (PyObject*)&PyDict_Type;
  __pyx_mstate->__pyx_umethod_PyDict_Type_values.method_name = &__pyx_mstate->__pyx_n_u_values;
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* #### Code section: cached_constants ### */

/* Build the module's cached constant objects: small tuples used as pickle
 * state / view.array shape arguments / default-argument packs, and two
 * slice objects. Each constant is stored in the module-state tables
 * (__pyx_tuple[0..12], __pyx_slice[0..1]) so generated code can reuse a
 * single shared object instead of rebuilding it at every call site.
 *
 * Returns 0 on success, -1 on failure with a Python exception set
 * (__PYX_ERR jumps to __pyx_L1_error). The interleaved block comments are
 * emitted by Cython and show the .pyx / tree-fragment source line each
 * constant originates from. */
static int __Pyx_InitCachedConstants(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self.name is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
 */
  __pyx_mstate_global->__pyx_tuple[0] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_kp_u_self_name_is_not_None); if (unlikely(!__pyx_mstate_global->__pyx_tuple[0])) __PYX_ERR(1, 11, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[0]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[0]);

  /* "View.MemoryView":583
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:
 *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
 * 
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
  __pyx_mstate_global->__pyx_tuple[1] = PyTuple_New(1); if (unlikely(!__pyx_mstate_global->__pyx_tuple[1])) __PYX_ERR(1, 583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[1]);
  __Pyx_INCREF(__pyx_mstate_global->__pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_int_neg_1);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_mstate_global->__pyx_tuple[1], 0, __pyx_mstate_global->__pyx_int_neg_1) != (0)) __PYX_ERR(1, 583, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[1]);

  /* "View.MemoryView":680
 *     tup = <tuple>index if isinstance(index, tuple) else (index,)
 * 
 *     result = [slice(None)] * ndim             # <<<<<<<<<<<<<<
 *     have_slices = False
 *     seen_ellipsis = False
 */
  /* slice[0] is the full slice `slice(None, None, None)` (i.e. `[:]`). */
  __pyx_mstate_global->__pyx_slice[0] = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_mstate_global->__pyx_slice[0])) __PYX_ERR(1, 680, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_slice[0]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_slice[0]);

  /* "(tree fragment)":11
 *         use_setstate = True
 *     else:
 *         use_setstate = ('self._data is not None',)             # <<<<<<<<<<<<<<
 *     if use_setstate:
 *         return __pyx_unpickle_ProcessInfo, (type(self), 0xa75e18a, None), state
 */
  __pyx_mstate_global->__pyx_tuple[2] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_kp_u_self__data_is_not_None); if (unlikely(!__pyx_mstate_global->__pyx_tuple[2])) __PYX_ERR(1, 11, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[2]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[2]);

  /* tuple[3]..tuple[10] below are 1-element shape tuples passed as the
   * `shape=` argument to cython.view.array in the .pyx accessors shown in
   * each fragment comment. */

  /* "cuda/bindings/_nvml.pyx":5487
 *     def ib_guid(self):
 *         """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)"""
 *         cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].ibGuid))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[3] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_16); if (unlikely(!__pyx_mstate_global->__pyx_tuple[3])) __PYX_ERR(0, 5487, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[3]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[3]);

  /* "cuda/bindings/_nvml.pyx":7478
 *     def supported_schedulers(self):
 *         """~_numpy.uint32: (array of length 3)."""
 *         cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].supportedSchedulers))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[4] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_3); if (unlikely(!__pyx_mstate_global->__pyx_tuple[4])) __PYX_ERR(0, 7478, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[4]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[4]);

  /* "cuda/bindings/_nvml.pyx":10977
 *     def cert_chain(self):
 *         """~_numpy.uint8: (array of length 4096)."""
 *         cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].certChain))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[5] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_4096); if (unlikely(!__pyx_mstate_global->__pyx_tuple[5])) __PYX_ERR(0, 10977, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[5]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[5]);

  /* "cuda/bindings/_nvml.pyx":10992
 *     def attestation_cert_chain(self):
 *         """~_numpy.uint8: (array of length 5120)."""
 *         cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].attestationCertChain))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[6] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_5120); if (unlikely(!__pyx_mstate_global->__pyx_tuple[6])) __PYX_ERR(0, 10992, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[6]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[6]);

  /* "cuda/bindings/_nvml.pyx":11154
 *     def nonce(self):
 *         """~_numpy.uint8: (array of length 32)."""
 *         cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].nonce))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[7] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_32); if (unlikely(!__pyx_mstate_global->__pyx_tuple[7])) __PYX_ERR(0, 11154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[7]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[7]);

  /* "cuda/bindings/_nvml.pyx":11169
 *     def attestation_report(self):
 *         """~_numpy.uint8: (array of length 8192)."""
 *         cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].attestationReport))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[8] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_8192); if (unlikely(!__pyx_mstate_global->__pyx_tuple[8])) __PYX_ERR(0, 11169, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[8]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[8]);

  /* "cuda/bindings/_nvml.pyx":11453
 *     def bw_modes(self):
 *         """~_numpy.uint8: (array of length 23)."""
 *         cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         arr.data = <char *>(&(self._ptr[0].bwModes))
 *         return _numpy.asarray(arr)
 */
  __pyx_mstate_global->__pyx_tuple[9] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_23); if (unlikely(!__pyx_mstate_global->__pyx_tuple[9])) __PYX_ERR(0, 11453, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[9]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[9]);

  /* "cuda/bindings/_nvml.pyx":20065
 *     """
 *     if node_set_size == 0:
 *         return view.array(shape=(1,), itemsize=sizeof(unsigned long), format="L", mode="c")[:0]             # <<<<<<<<<<<<<<
 *     cdef view.array node_set = view.array(shape=(node_set_size,), itemsize=sizeof(unsigned long), format="L", mode="c")
 *     cdef unsigned long *node_set_ptr = <unsigned long *>(node_set.data)
 */
  __pyx_mstate_global->__pyx_tuple[10] = PyTuple_Pack(1, __pyx_mstate_global->__pyx_int_1); if (unlikely(!__pyx_mstate_global->__pyx_tuple[10])) __PYX_ERR(0, 20065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[10]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[10]);
  /* slice[1] is `slice(None, 0, None)` (i.e. `[:0]`), used above to return
   * an empty view. */
  __pyx_mstate_global->__pyx_slice[1] = PySlice_New(Py_None, __pyx_mstate_global->__pyx_int_0, Py_None); if (unlikely(!__pyx_mstate_global->__pyx_slice[1])) __PYX_ERR(0, 20065, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_slice[1]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_slice[1]);

  /* tuple[11] / tuple[12]: cached default-argument packs for the
   * `from_ptr` static methods shown in the fragments below. */

  /* "cuda/bindings/_nvml.pyx":1526
 *         return __from_data(data, "pci_info_ext_v1_dtype", pci_info_ext_v1_dtype, PciInfoExt_v1)
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, bint readonly=False, object owner=None):
 *         """Create an PciInfoExt_v1 instance wrapping the given pointer.
 */
  __pyx_mstate_global->__pyx_tuple[11] = PyTuple_Pack(2, Py_False, Py_None); if (unlikely(!__pyx_mstate_global->__pyx_tuple[11])) __PYX_ERR(0, 1526, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[11]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[11]);

  /* "cuda/bindings/_nvml.pyx":2479
 *         return obj
 * 
 *     @staticmethod             # <<<<<<<<<<<<<<
 *     def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False):
 *         """Create an ProcessInfo instance wrapping the given pointer.
 */
  __pyx_mstate_global->__pyx_tuple[12] = PyTuple_Pack(2, __pyx_mstate_global->__pyx_int_1, Py_False); if (unlikely(!__pyx_mstate_global->__pyx_tuple[12])) __PYX_ERR(0, 2479, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_mstate_global->__pyx_tuple[12]);
  __Pyx_GIVEREF(__pyx_mstate_global->__pyx_tuple[12]);
  /* On CPython builds that support immortal objects (PEP 683), mark every
   * cached constant immortal so refcount traffic on these shared objects
   * is eliminated. Loop bounds (13 tuples, 2 slices) must match the table
   * sizes filled in above. */
  #if CYTHON_IMMORTAL_CONSTANTS
  {
    PyObject **table = __pyx_mstate->__pyx_tuple;
    for (Py_ssize_t i=0; i<13; ++i) {
      #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
      Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL);
      #else
      Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT);
      #endif
    }
  }
  #endif
  #if CYTHON_IMMORTAL_CONSTANTS
  {
    PyObject **table = __pyx_mstate->__pyx_slice;
    for (Py_ssize_t i=0; i<2; ++i) {
      #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
      Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL);
      #else
      Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT);
      #endif
    }
  }
  #endif
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}
/* #### Code section: init_constants ### */

static int __Pyx_InitConstants(__pyx_mstatetype *__pyx_mstate) {
  CYTHON_UNUSED_VAR(__pyx_mstate);
  {
    const struct { const unsigned int length: 15; } index[] = {{2},{27},{37},{68},{22},{31},{22},{26},{35},{26},{54},{37},{60},{24},{52},{23},{27},{24},{26},{31},{32},{36},{41},{41},{53},{43},{37},{34},{33},{34},{25},{35},{28},{33},{38},{30},{36},{26},{33},{50},{45},{49},{34},{26},{30},{32},{42},{27},{36},{31},{29},{31},{46},{46},{36},{58},{48},{42},{39},{38},{39},{30},{40},{33},{38},{43},{35},{41},{38},{55},{26},{35},{25},{32},{27},{38},{33},{32},{42},{42},{35},{39},{34},{25},{37},{23},{26},{30},{35},{38},{35},{35},{42},{24},{24},{30},{23},{32},{37},{44},{32},{43},{45},{30},{25},{28},{22},{46},{41},{48},{34},{32},{29},{38},{33},{35},{37},{48},{36},{42},{38},{33},{40},{36},{39},{42},{38},{36},{34},{39},{28},{30},{30},{30},{30},{30},{30},{21},{30},{52},{52},{47},{39},{47},{58},{40},{45},{22},{26},{20},{27},{18},{22},{22},{33},{28},{27},{28},{32},{37},{37},{30},{29},{33},{34},{29},{17},{21},{29},{33},{45},{22},{20},{32},{15},{18},{21},{4},{179},{25},{30},{33},{30},{30},{37},{37},{19},{25},{19},{18},{27},{32},{24},{28},{19},{23},{33},{37},{32},{36},{39},{27},{38},{14},{18},{22},{27},{20},{25},{22},{24},{26},{25},{27},{37},{36},{33},{24},{27},{24},{24},{27},{21},{24},{29},{28},{25},{29},{35},{34},{35},{25},{26},{32},{21},{28},{27},{29},{31},{43},{40},{26},{32},{26},{28},{27},{29},{20},{26},{19},{25},{31},{34},{30},{26},{21},{22},{27},{33},{31},{43},{32},{25},{30},{50},{57},{50},{60},{64},{63},{46},{48},{54},{50},{50},{49},{48},{40},{42},{52},{37},{46},{41},{39},{41},{46},{56},{56},{68},{58},{52},{49},{48},{49},{40},{50},{43},{48},{53},{45},{51},{48},{65},{36},{45},{35},{42},{37},{48},{43},{42},{52},{52},{45},{49},{44},{35},{47},{33},{36},{40},{45},{48},{45},{45},{52},{34},{40},{34},{33},{42},{47},{54},{42},{53},{55},{40},{35},{38},{32},{56},{51},{58},{44},{42},{39},{48},{43},{45},{47},{58},{46},{52},{48},{50},{43},{46},{49},{52},{48},{46},{44},{49},{38},{40},{40},{40},{40},{40},{40},{32},{19},{23},{25},{20},{23},{17},{41},{36},{38},{42},{43},{29},{27},{24},{33},{28},{30
},{32},{37},{41},{43},{31},{37},{33},{29},{33},{35},{28},{31},{34},{37},{33},{31},{29},{34},{23},{1},{1},{1},{1},{1},{0},{1},{8},{5},{6},{15},{23},{25},{23},{37},{21},{28},{50},{52},{60},{76},{54},{50},{45},{56},{57},{44},{46},{51},{60},{61},{40},{47},{66},{58},{67},{32},{7},{6},{2},{6},{35},{35},{22},{102},{9},{30},{9},{24},{50},{8},{11},{31},{24},{25},{25},{25},{25},{25},{25},{22},{61},{21},{27},{20},{32},{22},{14},{30},{37},{25},{7},{7},{7},{8},{13},{3},{5},{17},{16},{5},{15},{33},{35},{25},{24},{25},{43},{45},{35},{34},{13},{23},{28},{1},{10},{28},{30},{20},{19},{6},{5},{11},{13},{17},{10},{12},{25},{16},{18},{16},{20},{16},{16},{9},{12},{16},{11},{11},{15},{13},{16},{15},{9},{19},{37},{39},{29},{28},{14},{32},{34},{24},{23},{14},{14},{32},{34},{24},{23},{3},{11},{14},{9},{8},{11},{4},{7},{17},{19},{29},{28},{22},{9},{5},{4},{7},{18},{15},{33},{35},{25},{24},{12},{30},{32},{22},{21},{7},{12},{14},{32},{34},{24},{23},{9},{19},{37},{39},{29},{28},{24},{42},{44},{34},{33},{29},{47},{49},{39},{38},{29},{47},{49},{39},{38},{11},{41},{51},{50},{59},{61},{31},{41},{40},{49},{51},{25},{43},{45},{35},{34},{22},{40},{42},{32},{31},{21},{39},{41},{31},{30},{22},{40},{42},{32},{31},{13},{13},{31},{33},{23},{22},{12},{21},{23},{15},{17},{26},{26},{27},{13},{40},{21},{32},{37},{48},{45},{38},{41},{39},{32},{37},{27},{38},{18},{23},{25},{29},{23},{23},{24},{47},{41},{36},{41},{34},{26},{15},{19},{19},{18},{18},{19},{19},{21},{19},{19},{18},{18},{19},{19},{21},{15},{19},{18},{18},{19},{19},{21},{19},{18},{18},{19},{19},{21},{10},{27},{45},{30},{15},{26},{26},{27},{27},{26},{26},{26},{26},{26},{26},{26},{26},{29},{26},{26},{27},{27},{26},{26},{26},{26},{26},{26},{26},{26},{29},{38},{30},{33},{30},{30},{31},{31},{31},{31},{31},{31},{30},{30},{30},{30},{30},{30},{30},{30},{37},{44},{48},{44},{40},{24},{30},{30},{26},{27},{35},{28},{34},{27},{30},{29},{27},{30},{29},{34},{34},{34},{35},{35},{34},{34},{34},{34},{34},{37},{34},{34},{34},{34},{34},{35},{35},{34},{34},{34},{34},{34},{3
7},{34},{34},{34},{34},{35},{35},{37},{34},{34},{34},{34},{34},{34},{34},{34},{23},{28},{26},{26},{30},{34},{34},{40},{36},{20},{20},{22},{21},{34},{34},{34},{35},{35},{34},{34},{34},{34},{34},{37},{34},{34},{27},{32},{32},{33},{33},{32},{32},{32},{32},{32},{32},{32},{32},{35},{28},{24},{24},{25},{25},{24},{24},{24},{24},{24},{24},{24},{24},{29},{29},{28},{28},{33},{23},{22},{33},{26},{25},{25},{28},{24},{30},{29},{23},{23},{30},{29},{31},{30},{23},{32},{27},{31},{21},{27},{26},{23},{32},{33},{17},{23},{23},{17},{19},{19},{25},{27},{29},{16},{20},{20},{16},{16},{15},{19},{23},{23},{15},{30},{30},{31},{31},{28},{6},{20},{4},{11},{11},{10},{15},{14},{24},{23},{41},{43},{33},{32},{16},{34},{36},{26},{25},{21},{39},{41},{31},{30},{26},{44},{46},{36},{35},{23},{18},{36},{38},{28},{27},{24},{42},{44},{34},{33},{20},{11},{20},{17},{18},{18},{21},{23},{25},{31},{23},{16},{23},{24},{24},{17},{19},{24},{28},{23},{22},{19},{12},{15},{23},{29},{12},{15},{15},{19},{13},{19},{22},{20},{30},{13},{19},{13},{28},{14},{21},{39},{41},{31},{30},{38},{48},{47},{56},{58},{37},{47},{46},{55},{57},{8},{11},{18},{36},{38},{28},{27},{11},{9},{27},{29},{19},{18},{18},{36},{38},{28},{27},{10},{10},{14},{32},{34},{24},{23},{14},{8},{26},{28},{18},{17},{16},{15},{2},{15},{33},{35},{25},{24},{8},{10},{28},{30},{20},{19},{21},{21},{4},{6},{10},{11},{10},{26},{30},{30},{36},{36},{37},{37},{37},{37},{38},{38},{37},{37},{38},{38},{37},{37},{38},{38},{37},{37},{38},{38},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{36},{36},{37},{37},{31},{31},{27},{23},{25},{26},{20},{20},{20},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{31},{34},{35},{35},{29},{24},{27},{28},{29},{27},{23},{14},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{23},{
23},{23},{23},{23},{31},{31},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{32},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{31},{34},{34},{23},{23},{26},{26},{28},{29},{23},{18},{3},{12},{35},{29},{29},{31},{24},{26},{25},{26},{26},{23},{5},{11},{10},{28},{30},{20},{19},{21},{39},{41},{31},{30},{16},{34},{36},{26},{25},{15},{33},{35},{25},{24},{20},{38},{40},{30},{29},{25},{43},{45},{35},{34},{25},{43},{45},{35},{34},{14},{16},{16},{15},{12},{18},{36},{38},{28},{27},{16},{22},{21},{21},{39},{41},{31},{30},{22},{40},{42},{32},{31},{17},{35},{37},{27},{26},{22},{9},{9},{9},{5},{12},{9},{27},{29},{19},{18},{1},{13},{11},{11},{11},{13},{10},{13},{22},{26},{21},{7},{8},{19},{20},{17},{13},{23},{1},{8},{8},{4},{4},{4},{4},{8},{8},{26},{28},{18},{17},{25},{20},{3},{7},{7},{8},{6},{18},{26},{20},{30},{20},{38},{40},{30},{29},{6},{11},{15},{14},{24},{26},{16},{15},{9},{27},{29},{19},{18},{4},{4},{9},{10},{16},{22},{24},{21},{24},{25},{16},{28},{30},{28},{31},{30},{29},{33},{35},{30},{25},{25},{26},{27},{28},{22},{25},{25},{26},{18},{24},{24},{24},{24},{22},{16},{15},{13},{11},{17},{13},{13},{17},{16},{18},{13},{31},{33},{23},{22},{30},{27},{18},{36},{38},{28},{27},{21},{39},{41},{31},{30},{18},{36},{38},{28},{27},{18},{36},{38},{28},{27},{25},{43},{45},{35},{34},{13},{9},{18},{20},{23},{2},{20},{22},{21},{18},{19},{19},{22},{20},{31},{32},{29},{28},{37},{24},{13},{18},{11},{14},{19},{15},{18},{18},{23},{17},{27},{17},{23},{22},{19},{28},{29},{22},{21},{20},{24},{25},{17},{27},{26},{17},{19},{19},{26},{17},{21},{17},{27},{12},{21},{8},{8},{9},{9},{9},{9},{9},{9},{8},{8},{8},{8},{8},{8},{8},{8},{14},{7},{25},{27},{17},{16},{35},{46},{41},{43},{47},{30},{31},{21},{53},{35},{37},{43},{43},{22},{39},{36},{40},{34},{19},{7},{13},{31},{33},{23},{22},{25},{27},{17},{16},{13},{15},{6},{24},{26},{16},{15},{14},{15},{33},{35},{25},{24},{16},{20},{38},{40},{30},{29},{16},{34},{36},{26},{25},{11},{29},{31},{21},{20},{
25},{43},{45},{35},{34},{24},{42},{44},{34},{33},{27},{45},{47},{37},{36},{7},{20},{1},{11},{13},{15},{33},{35},{25},{24},{18},{26},{13},{6},{26},{44},{46},{36},{35},{18},{9},{22},{23},{10},{16},{5},{6},{4},{5},{7},{6},{24},{26},{16},{15},{12},{8},{28},{46},{48},{38},{37},{3},{17},{15},{35},{34},{34},{27},{30},{29},{29},{30},{30},{14},{11},{27},{26},{28},{30},{11},{19},{14},{12},{20},{19},{17},{17},{13},{15},{15},{5},{19},{18},{21},{17},{13},{12},{11},{7},{9},{12},{13},{18},{14},{8},{18},{11},{29},{31},{21},{20},{13},{31},{33},{23},{22},{8},{26},{28},{18},{17},{12},{11},{29},{31},{21},{20},{8},{9},{9},{10},{11},{11},{11},{11},{11},{11},{11},{15},{4},{14},{23},{23},{18},{29},{19},{28},{37},{36},{29},{30},{21},{40},{27},{42},{44},{20},{15},{3},{12},{12},{5},{9},{23},{25},{15},{14},{14},{29},{47},{49},{39},{38},{20},{24},{18},{24},{42},{44},{34},{33},{30},{48},{50},{40},{39},{31},{41},{40},{49},{51},{17},{35},{37},{27},{26},{15},{33},{35},{25},{24},{12},{30},{32},{22},{21},{21},{30},{39},{41},{31},{30},{16},{34},{36},{26},{25},{18},{36},{38},{28},{27},{20},{38},{40},{30},{29},{29},{47},{49},{39},{38},{31},{41},{40},{49},{51},{19},{37},{39},{29},{28},{25},{43},{45},{35},{34},{21},{39},{41},{31},{30},{16},{21},{39},{41},{31},{30},{23},{41},{43},{33},{32},{34},{36},{26},{25},{19},{37},{39},{29},{28},{22},{40},{42},{32},{31},{25},{43},{45},{35},{34},{21},{39},{41},{31},{30},{19},{37},{39},{29},{28},{17},{35},{37},{27},{26},{22},{40},{42},{32},{31},{11},{29},{31},{21},{20},{19},{12},{15},{3},{22},{34},{7},{13},{23},{24},{26},{25},{23},{20},{21},{3},{15},{8},{8},{7},{18},{18},{22},{27},{18},{23},{11},{15},{10},{24},{15},{9},{12},{10},{20},{20},{17},{9},{8},{8},{9},{4},{10},{27},{16},{22},{12},{3},{6},{3},{6},{13},{7},{8},{1},{22},{8},{12},{10},{5},{10},{22},{27},{10},{15},{21},{12},{17},{9},{17},{18},{9},{14},{21},{24},{12},{17},{20},{8},{17},{21},{10},{12},{10},{5},{24},{16},{24},{28},{19},{27},{32},{38},{38},{28},{53},{41},{34},{32},{30},{31},{10},{20},{17},{5},{7},{12},{9
},{8},{12},{6},{19},{26},{7},{12},{13},{12},{4},{5},{4},{5},{3},{7},{13},{8},{13},{16},{16},{14},{7},{7},{7},{11},{6},{31},{23},{28},{28},{25},{29},{25},{26},{41},{35},{20},{33},{26},{26},{27},{23},{37},{26},{26},{23},{24},{38},{27},{19},{28},{16},{27},{19},{26},{23},{25},{16},{21},{24},{30},{23},{39},{46},{39},{37},{46},{22},{19},{23},{36},{26},{34},{36},{31},{30},{39},{30},{27},{47},{25},{23},{31},{26},{31},{19},{27},{27},{24},{30},{31},{32},{20},{24},{23},{23},{20},{23},{36},{28},{28},{29},{26},{46},{38},{44},{42},{24},{39},{29},{38},{28},{31},{29},{34},{27},{25},{26},{25},{22},{16},{41},{32},{26},{18},{26},{30},{29},{25},{35},{31},{35},{30},{36},{28},{26},{27},{31},{25},{37},{19},{35},{28},{23},{20},{43},{26},{15},{19},{24},{23},{25},{28},{31},{22},{36},{36},{23},{36},{25},{26},{21},{23},{22},{30},{30},{21},{26},{14},{28},{28},{27},{31},{24},{41},{33},{45},{30},{23},{22},{22},{30},{37},{24},{24},{24},{39},{27},{33},{38},{18},{17},{32},{51},{41},{32},{36},{34},{39},{26},{27},{32},{24},{27},{35},{32},{27},{35},{28},{15},{24},{28},{34},{42},{24},{35},{42},{38},{29},{31},{41},{41},{27},{30},{27},{25},{20},{26},{33},{46},{32},{50},{24},{24},{22},{20},{30},{33},{34},{26},{26},{38},{24},{23},{44},{23},{46},{31},{31},{23},{19},{29},{23},{28},{29},{22},{31},{19},{25},{44},{27},{33},{36},{30},{32},{28},{34},{31},{30},{23},{54},{8},{5},{14},{15},{7},{6},{15},{12},{5},{15},{3},{30},{48},{49},{5},{15},{7},{7},{8},{6},{16},{13},{18},{26},{15},{11},{9},{7},{13},{11},{4},{9},{11},{5},{12},{10},{10},{16},{16},{14},{17},{10},{11},{26},{11},{3},{23},{4},{22},{15},{12},{15},{13},{8},{17},{13},{16},{4},{5},{6},{7},{6},{7},{7},{4},{9},{9},{8},{8},{10},{25},{33},{22},{28},{16},{12},{18},{24},{30},{10},{14},{25},{17},{3},{30},{24},{12},{36},{51},{20},{29},{48},{52},{53},{39},{34},{32},{21},{40},{37},{35},{47},{15},{23},{28},{34},{34},{40},{37},{24},{9},{26},{15},{9},{29},{24},{30},{25},{20},{16},{18},{16},{12},{11},{14},{4},{19},{7},{25},{8},{4},{16},{7},{1},{7},{2},{10},{13},{6},{5},{
4},{8},{7},{15},{14},{5},{5},{4},{4},{17},{21},{14},{33},{13},{25},{11},{15},{16},{9},{13},{10},{19},{5},{8},{10},{8},{20},{12},{15},{5},{25},{14},{12},{5},{4},{10},{8},{13},{11},{3},{8},{5},{18},{27},{4},{22},{21},{21},{18},{19},{18},{16},{13},{11},{7},{8},{6},{17},{12},{14},{18},{15},{7},{13},{10},{4},{22},{21},{21},{18},{18},{13},{11},{5},{4},{10},{9},{5},{15},{14},{20},{4},{8},{5},{7},{4},{7},{9},{13},{5},{4},{22},{17},{6},{5},{21},{14},{26},{29},{27},{27},{34},{11},{3},{6},{9},{8},{7},{11},{16},{5},{10},{20},{23},{11},{17},{4},{7},{10},{13},{8},{14},{21},{17},{12},{9},{10},{13},{24},{3},{9},{12},{13},{14},{22},{6},{3},{5},{16},{11},{11},{10},{15},{11},{28},{23},{18},{12},{21},{33},{32},{35},{12},{7},{10},{22},{6},{14},{3},{13},{31},{33},{19},{23},{22},{13},{31},{33},{19},{23},{22},{13},{31},{33},{19},{23},{22},{13},{31},{33},{19},{23},{22},{13},{31},{33},{19},{23},{22},{13},{31},{33},{19},{23},{22},{12},{14},{12},{11},{10},{29},{30},{39},{52},{33},{19},{29},{25},{35},{36},{24},{31},{26},{39},{40},{21},{26},{45},{44},{36},{14},{12},{8},{8},{10},{17},{13},{8},{22},{7},{18},{8},{6},{8},{35},{1},{12},{15},{12},{16},{16},{5},{8},{3},{4},{6},{10},{13},{6},{13},{10},{12},{14},{3},{11},{12},{16},{10},{12},{19},{5},{24},{20},{20},{17},{16},{8},{5},{6},{11},{4},{5},{9},{11},{6},{7},{11},{6},{7},{5},{5},{10},{5},{12},{6},{4},{4},{3},{6},{9},{9},{11},{5},{27},{20},{13},{37},{23},{21},{21},{36},{40},{51},{32},{29},{30},{33},{24},{25},{22},{25},{23},{23},{27},{22},{40},{51},{25},{6},{17},{4},{8},{14},{4},{9},{8},{14},{10},{9},{9},{9},{5},{14},{11},{4},{10},{5},{6},{6},{6},{6},{5},{5},{6},{6},{7},{4},{19},{21},{14},{16},{23},{24},{18},{17},{20},{18},{15},{18},{6},{24},{6},{5},{6},{12},{4},{28},{15},{11},{17},{4},{16},{12},{5},{11},{10},{6},{7},{12},{10},{38},{32},{13},{35},{19},{33},{33},{34},{26},{34},{34},{31},{26},{30},{27},{34},{33},{28},{33},{32},{27},{26},{30},{36},{22},{22},{35},{23},{34},{39},{14},{40},{25},{23},{13},{19},{29},{24},{26},{28},{20},{18},{38},{40},{27},{
15},{24},{33},{30},{24},{30},{32},{27},{31},{34},{29},{27},{23},{26},{19},{23},{28},{30},{30},{37},{27},{21},{27},{34},{44},{18},{31},{24},{12},{26},{13},{24},{31},{15},{12},{18},{24},{21},{4},{12},{4},{12},{19},{20},{7},{9},{1},{4},{12192},{24819},{936},{38},{45},{40},{76},{61},{38},{183},{152},{163},{41},{43},{11},{42},{70},{30},{58},{93},{28},{40},{59},{24},{192},{11},{53},{26},{121},{38},{34},{40},{27},{40},{32},{72},{39},{27},{31},{126},{35},{42},{78},{55},{59},{79},{136},{11},{26},{45},{34},{32},{32},{38},{42},{128},{34},{11},{26},{32},{42},{59},{87},{54},{32},{32},{32},{32},{28},{48},{43},{62},{71},{66},{67},{161},{68},{34},{164},{162},{40},{32},{57},{44},{57},{157},{167},{11},{26},{38},{28},{43},{67},{56},{57},{55},{28},{30},{130},{32},{43},{168},{163},{11},{34},{34},{78},{68},{47},{31},{67},{42},{27},{56},{49},{48},{44},{56},{68},{159},{149},{45},{34},{32},{171},{32},{63},{11},{11},{11},{54},{99},{99},{102},{40},{103},{100},{99},{99},{22},{20},{22},{21},{21},{21},{21},{21},{21},{21},{22},{22},{22},{22},{22},{22},{22},{22},{22},{22},{22},{22},{19},{22},{22},{22},{30},{22},{22},{30},{38},{100},{103},{15},{151},{158},{151},{114},{148},{149},{148},{111},{111},{115},{152},{159},{149},{112},{149},{148},{148},{149},{148},{149},{112},{116},{149},{149},{112},{149},{112},{149},{156},{148},{112},{148},{151},{158},{111},{111},{148},{111},{154},{149},{146},{148},{146},{112},{159},{148},{111},{52},{72},{56},{100},{100},{100},{33},{54},{34},{40},{67},{44},{45},{35},{73},{43},{32},{99},{100},{99},{41},{73},{38},{134},{32},{169},{70},{11},{40},{38},{122},{115},{59},{38},{38},{38},{40},{28},{38},{38},{32},{55},{74},{74},{34},{80},{57},{38},{54},{38},{41},{86},{61},{70},{58},{59},{156},{41},{145},{38},{43},{56},{38},{38},{42},{42},{40},{42},{38},{38},{48},{41},{120},{38},{43},{58},{61},{69},{42},{39},{45},{72},{76},{44},{39},{48},{9},{66},{48},{61},{72},{51},{91},{28},{28},{68},{38},{152},{152},{11},{26},{40},{39},{42},{30},{30},{33},{30},{33},{122},{26},{41},{39},{81},{39},{
80},{122},{47},{39},{80},{81},{91},{91},{92},{91},{92},{92},{94},{91},{96},{94},{91},{94},{94},{91},{91},{42},{42},{39},{39},{41},{40},{43},{85},{54},{42},{86},{42},{42},{42},{47},{42},{42},{41},{56},{41},{39},{42},{42},{63},{21},{21},{30},{11},{46},{47},{67},{46},{49},{70},{71},{70},{56},{40},{40},{48},{47},{24},{284},{24},{64},{122},{111},{178},{124},{11},{26},{153},{32},{32},{57},{50},{24},{47},{45},{156},{46},{56},{49},{46},{57},{43},{11},{27},{32},{40},{53},{59},{57},{59},{55},{57},{59},{58},{59},{59},{55},{59},{58},{55},{55},{58},{38},{45},{26},{28},{116},{45},{48},{27},{72},{70},{51},{29},{32},{36},{43},{41},{68},{11},{41},{45},{45},{54},{45},{156},{41},{65},{65},{49},{117},{30},{36},{67},{1}};
    #if (CYTHON_COMPRESS_STRINGS) == 2 /* compression: bz2 (25735 bytes) */
const char* const cstring = "BZh91AY&SY4\321\r\357\000&M\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\300@@@@@@@@@@@@\000@\000`\207\034\373\276\207>\300+-}\356\355\307\217\2735la\276\264G\275\317a\233\346\353\017\276\316\356{%\253\r\233}\360W\270\362\372\357\276\353\267\260\000\003\267.\327\273}\014\033\333\340\274\302\3568\354wRk:*t[\236\230\366\211\3458\017\276>z_t\372\3573\264\371\275\364\276\335\364\356L\2059L\355[:y\332{{G\273\357w\317r\372\n\334\335\336\367\274x\016k\274\313\260z6\365\316\372\367\201\250_}\226\234\275\275\276\332\353\336wO\237pl\303\337g\336\316\357\270k\347\314\032\005\037u\343G\3334\355\302sw\337o=\235\273\340\001d\313Og\323\275\265\237K\313\317>\367\213\237Y\034\340\325\247;k\340\232{\274\272\272\326goZ\362\300\007\207\313\305\256\272\314\336\366\351\243=\215SP\202\271\233q\240\251\243\327T\273\335\312V\215h\333A\334rUu\275\275WE\036\266gW\337;\337m\214\240i\236}>\037F}_U%.\335\341\357`\303\326\235\357<\365+\023M\026\373:\333\270:{\357}\357\252\333\315\325N>\372\367G\220\320\206\323X\305\255U\333\331\2475\253Z^\252\205\231\310\017\241\035\354\370\037}\306\367<\366\t\"@\215\000\000F\206\200\000\000\004\311\201\023L\t\242i\221\252y<SaL\231\246\232\236\231LO\025<\324\304\004\246\200\223HL\220\322\004\320i\240\230\3121\017H\304\324\321\264\312b\000\3204\006\200\000hh\000\000\000\323i! 
\202\002\232mOP\003 \003#@\0320\323L\2404h\323&@\006\020\000\3654\000\006\200I\352\244\204\232h\005OMM=5OMM4\364\233)\243\312z\2324z\232\017P\006\200\000\000\000\320\003@\000\320\001\240\211\020\204\020$)\352\237\246\224\375\023\321=\023h\323\322\247\351\246#OSE?\024\364\233H\232y\031\031'\224\r14cI\352=#M13M\023A\022D\004\320M\t\246\200\230CL\215Ld&\214MJ~\324O\322)\357$\365S\365\031M=Oj\215=@<\2200F \030@~N\266E\266YH\027\351f\324\333S\216k\026.\\\027)q\021\024D\22076\347\nV\003\335z:\373\035\270+\336\271\206-\241\334oV\253\0079*\233\311\235Y\334N\320z\324\240\262\250\202\336(\221Br\020\340\200I\212\023&.\274\371\202K\304\266\363I!#|\302*\234\"\234\323\203rE\243""\341\210\216\021\364\216d\207\022\273,R\326\264\241\004\332K\024\216\211?3\352\335\023\365\036\340yQ\000p\322J\322\332*\236\225}z1\0279\206\021D[\216\230\261\357\343\270m\336\236!\226\237QW-3*\022\034\332{S\t\351\341L\217\346\346\325Iz\2302\356;\215\377\300a\t\204\302e\211)D\314\311\251b\220\315\365\004\334\270\231!\223F\242\231\253\3647K\233\252M\375\177\317\364\355_\302\375_\243\271\350\352\360\327\244\333e\231|$\326Tx\017\221\373/-\\\230\375[\375<\350%s\203\311\376_\363\3573\367\354\312\\\363}T<\376\267\031\027\373\357\023\3327\314\272\347\264\344\333v\311\353\315\025\336N\353\nR\357\344\374\364\233\347\2275\337\177\350\255\353\327\251\346^\233\365\343\230~\376nxU\216\342n\00726\241B\355\250n\023H\270[\343T\350S\344[\312\007\225\025tZ/9\365\363\277\023\316\205>\251'\212[\362\237+\177g\267\2047T\222d\206\203\236/\237\034\177r\324\371\243\267\257^H\226\362\212S\255\\*\366<m>m\372*\311\206\264\2175T\323^\217\227p\352+S\226o\230\036o\326\357^[\366\325z\373zqU\346\273\023\332\021]\321\314\370\211\337zVW\216\361}\246\355\027\267/\254k\256o\333\253\213\242\010\203\312^7\3452\030\235T\016G/\343\317\371\260W]~\363\304\247N\215\347\013\037;Z\210i\266\357\213\216rI\265\241\335\263\250\366\277TEd\277\023\336?\254\345]T:\346\034\\r\347\032\367yd\002d2B@\311\254\233cj,mDl\021Rl\006\330\253\006\306\306\r\032\355\353\
371\371yx\355\327\237\303\317\236\273\212=\"\265\"%\310\2177\322\355C\262\037 \354s\346\367\2076F\226\352\270\265\231\243\242\371\353\255\335\240\240Z\234\233D\367k\203\212\333e\301\351\343,\342\022y\262@\345\242\234\364C\351\372\234!\355\337kjH\267g\243\256\267\3132\247\374\237\007\331!\362\377\037\266R\251\221\377?\320\373\212_W\335%)~\237\351,\363^\202\374\177\026\377-\370\260\230d\304'\347\225'\347\224\242\tF[D\300\t\376\002\037\342\006\303\255\265E\r7\226\202\2535\350z>\226\355\337\267}Um\352^7\207\027\302f\020#\023Zus\220\232\325[\260\204\304\201\214\200\005\025\r\030[$e9\027w\225v@\332\265j\003\"\002\341\304%\ns<]\016\207\313>\205\364\257\340\243\372\203\351""\372\346\372\277\214\222\375\251\264\nE\016-\373\377\362?\335\302\277\267\364\374w\250c\\\3323\177L\363;2\256\032\374W\246\252\325\343\302\377\323\"7\300\177\326\3740\\\227^\264U\002\222s $\306\261\252h\262\030\351\030\361\253X\312\246^3\036 \277\203\036ffm\353\365->i\232#H\020jK\037\376\000\177\352\006\244\325Qj\231\213\025\025\215\261\255\0326*\330\332\321mZ6\212\253F\265\026+h\326\306\254\333*\243b\323,\010H\002H\t\010\310\001\226^\257A`\0244\025\322\337\017\300zk\373k\261\306\312\360\233\240n\233\256\376\327\356\242&\264h2\343\220\007\263mm{r\215\223%\215\010iT\253\033`\000\000\0005kY\266k!\267\357\\\027gP5@\325Cdw&\241\254\214\204\222\n\315\222\246h\223Q\001\032\212\246a\220\030@$\204\031\t\rAS]s\3176D\314\374\266L\320A\352\233\273\033W\342P\023\000\344\345\204U7E 
\"\252#Z\232\244TmSK\327U\352\336\255\325o\322\253\343\275\225\375\rZ\267\267\335\265^\360\000\000\000\t\000\000$\220[Z\257t\010p\342\252pT\335\020\221z8S\270\317\013a\321\313'\226\177\324\360U9\253\250k\241\236fa\375|ErQr\212:\200\021X\000\r&fW\025\3115UM\350\006f\356\002\260\320P\023-h\303\251\326!\277\246\204\n\225!\t$\003\226\372\317:L\021pQ;\025\356@\335\003\201x\035\220\333F\234\315VD\025\255\255\251\246\214H\340\263$yT\224\315ee<0\310\02291\252I&F\256\263\233\306tJ\230\305\350\000\273\325\252\233#\252@4\000c\036*\216\345N\215\204\r\352\300T\337\256{X\274\020\310B\225\320\266\272D:\322\034\010\350\323zW\232\275y\034\214\233\002\212ACRh\341]\264=\036\256\204E9\250\274\t\366\320\0167Ng\007\034\006\302\257\n\363\330\330\322Q\256J!\262\006\312\344\032nrt\005\325WP*\206\265\323M\356X<k\"\331l\213\230c\261\241\300\243G\016<\262\177\254\353\323>Zr8%\371W\036+\303\217\365\357w\357\276\233\316\215\306\207\023~\231\350X\315\341\300\341\240\364vn\036z\032\232p8]\233\215r\272\274\241\304`\240T\234\240*\025\004\276\361e\361x\316\303\336\024\010\274\275~\312RxY\3156G3)\027\007\332W\026\325\261\253\324\325qh\325v\363\345\\H\226@!\021s\020*5\005\n\200H\241Q\026\210\331\020\221H\021\215J\376\230\202o?\306\rD""\007o\354\2578IO!`\314\215\253\020\336\035\r)\213\233\333\001\260\000m\266\000\000\3478\000s\2349s\207*\347=\274\340\000\001\337\375\275\356\335\200\346\373\275t\000u\362\353\257~\255\316\000\037\354\333\305u\321\343\234\347\227]\000\000\000\000\000\000\001\362\355\266\327]\004\222I\315E\347\007\270\242\002\247e\363\324$\373\327\317\366*\205;9\210\017\227\025\250+\303\276\261\254`\256U\332\247-\256cp\220\347\323\037g\031.\357\030\345\342s=\315\257\327\337\362s%K\311\344S[\366\276ZT\230\260~\357\345\232\372\275\033\236c\241\016\232\226\372\371\355\2026r:\361\016\274\311\177w\312\212\376\207\260N\267\343\3127\272\267\221\320\336\223\021Qk\036\215k\036\213\351 
\342\204\326\224M\313\\\360\3521\033Mv\331i\277}k\227\207\376\373\025\357\222$\204\200\220\210\261\004\225F\"\246\320\203lm\2436\276:\337\017_\273\263_~\201S6\005\243\341I\340\217\010\373{q<G\235\265\337/;\005KF6\357\374\353\347\330e9j\272\014071F?\255\365\353\365\314qV\362:c\304\304MM\247\310\272Zu7\306\231\305\010uH\270J\246B\347\207Q\211\261]zI,z+\337\244\033L\215Tp\276/oY\317{\335\216\275\33180\357~\177\004\317\272\270\217MJ\t\375\310\006f56\347\020\375\376uz),\253\210\366\351\257\233NZ\330\300\326\262\354G\034<\027\255\020\020\304\247Myq\277\t\350S\327\371\3770z\375p\320\020\032\332\374[{\\\033\303\002\010\204\2252\201D\223\210\r\361\n\202\002\210.F\\i\212\n\314|\021\203\210D\325QH\246\246\336\234\227\206fL\315js\034\222\034\0353\334\313\306 \002\200\014ff$\310*\357[L\036\261\3257\350/a\320:\235i\376=\207a\331\357\374>\033\037\311\310\247\243\300g\235\377A\245'\023\231\321\251\242|a\322\233\237\373\365\216\2745uN\243\206x\355\016;\n\232\n\340\005D\202P\247(\314\225\265X\264\025@\252FS\032\251\217\217W\035dW\014\005t\005q\\\022\260\312\223\273%\373\377W\357\"\204j\373\026\255\005\2527\332;k\347\371\377\023\362?\013\346\326\377w6\304Q\2033$RZ\224\220\021\221\220$\3525t\352yGi\340\234\377T\304\307\300\362S\251\007\250\003\250\r\266s\033\205\336\225\370\016\236r4\311ERH\222$y\034\267\352\271\325\261\272\267\333s""J\352\345Qh\251#\024D[_\265q\026\256s\233IY\233`\221$Y\021\220\220\t\002\313?\303\364wC\344\213\241\347W\223\307\260K\205\353\2262$\013cAZ\014\"I\013\251\204\220T@\324\320\220S\na\277\276x\312\357\030\0347\234!\262\273\003\344m\2700\203\246\244\277\207w\200\350\r\n\246*\244\305&E\201_\375\202c 
Cj6q\273\231\033\033X\244\2113\030\330\001\333V\222\270\257\271\337\316\373\037y\351\235}\377-\346\25265\002\030\212\214P\227\233\222\250+\032\250\264U\311nL\230\3321\262I\026\211\n\024fP\2518\334I\021\204\201!\024\t\004\220\323\300\253\327\324\257Q\323\340\003\256/\177\340\377'\223\306_\307\346\304N\023\223\240\0302W\246\2621\200\014\221\223|I#\005\2326h\260\221;*\277\354\357\215U%\316\231L5.LZ\n\265g\343i2 \360N\300\021\344\274\031\243\344\204\033\032(\335\356s\223~c\3606\273\000\273'9\377\333[~\217Z\\j! \310'\276n\341\261\360T\265V_g\333{ta\215\\\250\266\364\302\322\361yf\246p\366F\214-u\356\204\324hxk!#\305\332Y\275\302\214M\227\363\\\310\230\327\347>o\361{2\264tu\016\375u\251\3515q#\246h\352\210\211\251\264\371\027KC\343\321\235%\234D!\325\"\335\342\025L\265\251\342\024bl\276\2672&5\325\363}z\376\r/\341xb6v\362\216z\220\352\035\373\365\3531\336\255\344t\315\032DD\324\332|\213\245\241\361\350\316%\234D!\325\"\335\342\025L\205\317\016\243\023e\367\271\2211\256\357\233\357\337+GGP\357\327[\\\343\214\226\207U9\032\222\361yf\246p\366F\214-q\272\033q\241\322-\336!T\310\\\360\35216)\276\262K\032\347\254k\236\275\374\276^\334\375\026\367\320\205\231\306\332A\275\234og\006u\0039\263\303Q\032y\310\025Z\325\200*\353\032\317\373\225b\256\367\2049\253\364\362y\035\317\001\352?<\017\027\212N\261\257\350\222|\037\323V\222I$\222\3461\266\025\023\371\323\374\027\223\367\203\225U\252K\023\000\310\233|\275\303\247\274\013\327\007\273\207x\357Zf\001\332\347\234\377\327\374*&\370\200H\270\300\022\240\010U\242\2559\375/\205\357Y\361\270\233\2139q;uut\234\271q9%\3277\376W\346}\337\321\352z\376\254\247Z\371\3177\342:%\t&\270\247\364\301C\206iS\366\316\377\316\205\027\260\224\257\307f\223\365o""\335u\361\314\230\241dN\273\246\274Rz\000\326*\256Iz\r'X\212t\n\240\257m'+a\003\327\301\017~\000{\360T\365\300\250\"OE\004\0009\200#\000\317Ie\346\363\350h\020\320\376u\022Z\340\252\210\252*\242\001r%\336\367\253\226\232\332\226\330\330\330\031r'\024\035BB\302\357\236N\204\257\264\234\2045\323\3
14AN\365\221\026X\000\225\234\223\213C\243=\031\230<p{bH\262\014\203 \316\352\177\203\301\207\367\374j\001\227\210p\360\320\371\t\033i\304/\254\027n3\023-\016\213\372L_\322IBt\223\363M\306\t\232\014\233~q\036@\2139\200%i=\240wl\234\354\226H16fk]\303*\340\230\316h8\221\350\214\214\214\214\214\243\331\335\370\203\377\214{\335\207w\252\206\327u\246\210\031\315\032\345g\234\231\363\244\234J\222V\0368dk\321=\020\003\314\356\200\333\322\322?\201\r+\255)t\244kl\357V\025\305\203\360\215|\326\313\227\034QM\200C\264q\272k\350\276E+\216\362FJ8\351TwcT\022F\271|\021b\341\377\n\346\250k\030\310_*%\267\273\200\260Y\260\342\270\020\203\247\223\260\032\001\025\324]$%q\336\"\210\300[\004m\227\022W\020\255[\034\3144\261\221\037}\305\177LZ\t,\002\213\000\033\231\005\276\235<\265\213\213[Y\034K$\310ae\241\331e\243,\247\033\216\355\031\033\345\000\026\222Z\371yfU\306\005\237\223\225\224}\277\\\"\314\331\373\363R\315Y\205d%\216\250C\031\214\371\027\2439d\031\014,\201d4\032V1#\021\034\216;\310b1\010+d\225\262P\0317\364\026\036Xd\027\313\301\243\202s\000\264/\355\340\321\272]aaI\330X\0100\320K@\300\030\000\014\374\001\201\013\325w=\234\276\31375\001\201\013\000\0300`\"F\000\275|/\\.0\257-\335\2140Q\2711\005\204\225\206\215,\010A\202\362\017%\354\2553\t\0142?\253\016\033\246#\n\303\2077$\335\272\213I\005\252\242\222\246W\252\274fU\263\356\373~\351;\3128K\274\266\355\225\263\355\001h\265`\221\221%=C\026N\332b*\265VJ\002\220\212Z\3315\352\206M\203\004\355e/\007G\345t7k\025(\335\014\037\005\250~\t+\367\342\220$\306\370\3136\313t\213\177\326\320\253$\005\322\272U\347\253_\310\365K\306\023\ru^\271\211\254\253%\353\373\210\367\030.&h\\\026-\333\251a\261y\355\227M\023V\004\212;\227.[\205\260\266X\003\"\305""\203P,\362Y\264\027\372\317d\272\261\33643#\274\362\227K\267m\245\t\313aN\255\362$U\301\002\227j\324\251R\245:!\367\375\340\316\337\354~\356\277\334\351\241\326\334\\M\345\002\"P\211u\262H\372|\216\271v\034 
~'gw\232%\351)K=gB\361\377\005\027\215\206\006f\264L\023$D\241\353\017`z\366T\217V\022\016\355\250-H\245 \007@\013\331\\\235WU-Md\204<\276\2428W\231\003\014\347\244\307aV`\365\317\326\016\300\311\343\0142\320\014\301\017}\334\247e\243\206F\177\3448va\323\265jo5\351L\367\275\274\310*\342q\255\036Sm\202\366\000\352\216\251\216\037\343/\235\t\315.I\324\202\345N\275\\\270K\352\211\302\205d\36316\021\204\000\240`\010A\021\026\303\003\014\307W/\273H\367f\217\362\037\001\3603\302l\271\211\247\216p\351I\375\216\270klp\375\371W\223-T2\022\350`\036\337\327\374__\332'\332\226\025\017\265D\204F\201u\241t\243\203\227\200\000\334^\005\331\332rB5\332\221]*h\246b\370\262s\363\254\2655\023MLE\034(\230\305\022\257\241U]\025\334\nJ]\341\311\255\363\271\214`\306\340\005c\031\312\270\313\262\251\205]\024\212D\222x\307i\231\227-u\311\312\233\377\371\010\003\006]\214q\031\274\021\311\235\031\216\032\202\0276\371\367\357\326\371o\325\256\327`\001\000\017\037\350\273\353\317\366\234\362\273W\337\0034\261o\177\334\212q\221\300\334\274\021!B\244#\367\241?4)\017\312\347<Q\316\365'\236/\311\n\t\203\225\202\373\241\300\227}\004\016\007\276]\370\223\017O\t\221\225\225H\312\243\034\223KFRZ1A\344\227\227\360\241\024\377\200@A\312\"\231M\206dj\362\013\217S\277\373x\275\330\374-\320\373\273\274\236)\315\323x\302\204\221k\006D\361\0316\343\301\020\031\272N\222\r=l\376\354\330\310\266\365\231\346\202\247\217\323\371\376\253vo\223|\34536\352I\3210C\021C`A\330\365\010<\005\024k/Q\036\3161\2111\266\236R\010z\2506\217d\345\313\262\344\364\323\230oc\352\316\315\315\361\200\242\rj\202\242\"\034\335 
\337L+\356\301{\373W\306#\260h\r\327\210\005\221\t\260\t\343\351\342\361k!\213\302d\302\016\334\220,\336\344\222H\027C\001]O\034\000\000\000?.[u]r[r\266\260\000\211uV7\200\001\021\205\026\303\263\277\377I\374""\036/i\337\360\313@*D<x\222\341UEJ=b\037\214\017-\233\007\206\204\367\270\004\304\032\375\276\277w\320\372\177Bc^q\371$\277*\370\371\347\200j\221\377{\257\350\314-t\372\335\016\231\\\033\320O\314\216\310)tt\235\247\315\230\362K\373\274\246fl\316\005\317o>\306\262\262\362\313\3551E\027?\005\031\225\212\377j\313\272\327\032a\371\006ECG\325\227ffQ{\276<\367\273\322\314\311\267\255K\336Qjn\330{\353E\352\263\022\354\300\243^z5\252\322\275\331wW\256\027\212\364\005BR\257}vn\253\037\317]\005\260O\236p\327\226\225R@s!\262\032( E\210O/\217\325=\324\367\375\343\276\366\367\217|\343\366=\213\334z\242\2208\203\207#G\201\301$\343\000mK\277\273\266F\301\006\"o,k\200\014\247\224(\266\354\314\363\003\203\004A\253I\343\010\242\342\201\006G\035^\037\306\370~\222O\337J\374\365\372S\253\354vZ\225\307\237\267\355\366\373\276\227wwv\365U\025UUU]\267\323\277^>\001o\347\376?\354\177_\372\277\273\373\276\\l[\336\367\275\357{\366{\213\236\213\247N\223\322fc\247N\2356\307;\346\030\205\3315U\035:t\353\322\371\353\327s\263{\336\367{\336\3677\275\357z\337\032\315\376/3\307\227\253\324\376\024\307\267\317\236O\036<y\370\276\376~}\267B\336\367\234k\216y\353\276E<\363\317<\363\317X*Z+\332U\342r\007\331\361A\230\212\303\333\205\251Kp\027\232\204\261^\275,!\034\203U\361\201\356H\313\207\216\301@FE\272xn\017\223^\217\035\203=\352\264\233\326[\345T\225Y\302bff\026\365\216\354O\217!\265LG<\002\364\242\261(\217\223\220\216\217\007\026u\323X\254\231\243\263&Iz\206\362@\304F\030C\026\224HB\315Kv\303w\203\325_n\037\037\235'o\272}O\320?\000\301\016~\177\337\343\326}\373\253W\226^\252\305z\262\356\254?\027\342v\215kF\265V\014\314\303\2557C\201\210\376\002k\336\366o\365(\252\242\271\254`c\365;m\242\22001\323\271y\223\370\227R\214hi\375@y\220u\313\323\2750s\211.\177I_\332Iq\007\271\346 
\271\270N#\213\243\236\207\210\360g\031:`\267\006\324h\0000\361\202\017\251@\272\321^\240\024\0010\374\311\364'\315\366\376\345\352\033\313\307\026o\017[^@\274=\000\365/|j\303\264\262\236EW\\\335_2\021\352\217\030\323\231\237""\031\205o\3307\263\215\272z\335\317\201\336rMBw\001\326\216\347\367\366\003\026\261\244\345\206?<K!v\220(\014\202\337G\307\362\375\272j\033s\036\t\253\351\334\245\330:\236~F\333\231\277Ia\022\025\316\257\200\236\223\315m\230#\216M\3363\257\020\311\023m\335\233\221#\203\317\273.\201\201]jj^\214L\243\206a\340\003\267\\\201\200;\240 1u\271\346\223V~\223\245\367\273a\2714\247\220m\243\204\007\270 \004%}t\273\212\201\nY\374\205\n\035J`\260\006\241G -D(\020z\002\224\262udU\336\332\n\352\352\022\240\266\330,\025\013T\222\213\224\306\267\203\020w\032px\234\262N\336\023\032\316\323\232\"\236pd\263z/E\020c\030\3166\017\0008tr\323\226 :D\000\017i\232\\l\273\256X\003\241\351G\016\033D\235\215\221];\373Xi\235\312\207\355\271\232',\331\335\254\265\\F\235\245\207\274\204\261\225\345\347\000M\016j\010I!x\242W\005E\371OY\335\334h%x\302\214\215\022\202\025D)\366\203\203#n\033A\264\373\373\031\227\315\315\370\344i(\311,\261T\365T\362\275\366W\363i\206\371\234K\243g\336\303\216\223\032,:8\375\230w\337\353\211$\222I$\225\001\005\364\351\333\346\023\374\257\371\335\337^Qo#\241\273\314EF,z5\254z/\357K8\210MiD\334\265\317\016\243\021\227\332 LM\270\362Q\326\256\\\375\274\265\030-\021 \266\300\2545ku\236&(GB\305xA\251QrwA\022\034U\253\365\372q\350(\351\276\247g\352$\t\215\301\3237\351\311\315U\351\300\234\002Q\035\341\314\222\357\357V\375H\3419\374Iy\360\344\243\316R\346\363\245As+\252\336\203\234\253\231a\271\021\207j(a(6@dG\225!4Y2\221r\320\251\306D\001\302\353f\271\3106\353\303h)\343B\201/G\207\003\200\204\037fz\237A\003\306'\207\231\3400\217e\207\205\334\252\005\330\255J\205nmM`\"*_\004Ri\311\336\020\232J\033 
|\303\001Ck\276A\022F\"\213\311\235\260\222\356\331;\215\245\3536\273K\373\033\232\307\3477\024\213\336<O!.\356\357z\322@\256iet\326\325\2210\3141\267\031\262\r\261\036\273\300\023\022G4/\215+c\334\243\323\022\252\375\317\037\270\n\303\375\035\324\326z}\214\343\030\3061\214c\023333333333333E;\237\230\201\214\031\201\010\231\374\232\355\310\351\206r\243$\0008\322\333>\020\263\303_""\327\227\035\r\316B&<\253;\224\364\224C\245v\002\300c!\022\004\0008A\224QR\216K\257\311\276\336\373uf\245W\336I'\345-\311W\033\201O\214\203h\243\"Z\002T\005\250,\212\222\025\021\357\241\026\240Z\rF\344VG\033\032\334\017o\313~'\262\377f:\t\374\360c\232\343\364\3708\236\274r\243><2skZfdN\270\321\251\314\274\262\352b\212\013\237\325\243'1\357v\\\336\260\332f\007\204\311\t\302\021\367\2210~\257\314\331\317\340\344\346c\216S\036\023o\231rx~\016\036#\207\342\n\234\343\210\316p\311\302YW1\310\001\304\222\342\016%\350\337\342\367\377'#X#\334\337)o\365\023.\245st\333BbPP\367\375>\203\001\013\206\n\237\021PZ\220>/\217\341\264\014A0x\"D\004)U\ri\326H\233\037NP\004N\2030D\204&\314\034h`\232\221\233<P\223\241f\346\2434\027@\006i\335\253\314b4\274\005N\307}\037T\253D\nZ\032\ntt\032(nc\217\2278\031\007'\222\010l\211\0270\230a\007rr/\"\031\320\024\021\\\240\243\010 \037\021\372a\345\266\272\320\020L\277\224G\026\260\253\332\322Z\217B,\333\326&\361\277\247\326`\274\"\004@>\325\200\374\226C\026\207\010\034\315C\210\2768x\265\344\351\254\221\362\331\330d\321\313\313M\341\265T\211\246%\336z\034o'\034 
(Z\025\000\300\322IU04\204\204X\004b\033t\232X\334\025\273\242\351\237\261\273\270m\3070\ntyFD\3479\272{C\267\035.\314\304\305\263E\243Cg\207\000cIp;p\244\n\3652\001m5\254\275\256\007\332\375\017\237\031<\236\255}Z\373\037\004\260|-\217I\202\320\236\355\300\374?k5\342\241\246\231\377s\350?\315\322\272\034\267/\276\302\373\317\367\364\332G\275\343!#t\345TMW0\357\016BR\345\334MU\273\220\224\271w\023Un\344%.]\304\325[\271\tK\227q5V\207D&\224]\324\0161U\005LQ\273{o\002\324Ij!\310Jv.\323\023\256:\323\017\206\277e\365=*l\352\365\010\376\003\301\235\024b8\255Qj\213Awt\340\255Qh.\256\234V\250\264\027WN+TZ\013\253\247\025\252-\005\325\323\212\334wUh\272\272$\244\025UR4w\335\251\\x\210\004\002\001\000\200\240\000V\371\232m6\233M\030.\376\247\247\337\270\274\376>\021\213h\266 \000H\264\000\001!H\000&@\0011\254\023\360s\222\306\266\345C\260\351\021\310f4\363\024-\320\214\226h\177.M\245\273\311\362|""\274\227[\267\204[\341\275\276\313|\013\031\350\262\335}=>\277\017=n\332\205\221\372%\360\367\235v\357\230:\272\256\206\370\tp\232'B<\0351\240\367\275\214\227\002\242`\366P\370\271@1\020\022@\022D\0220\001\3123\262\0230\232\010\3232\304>X\273t\302\310{\226\361\007\267\372qb\221\024OVVE\0148\020\203I\204\027\030K\302\300 \311\277\026\000\211F\203\"4\202\350\3001\037\256{\360f\362\302jA\363\341_^\254\007\343mu\350gY\3055\001\373`c5\"\00166/\260l\201\200X\272\352\033\250zU\267\270\325L\253YJ\2116\324k0Y\024>\375V#\255\003\230k\300\225\2217}\365\227\253\267\233\221\240\206kJ\216\344\226\276\251f\270\352m\276\216E\220\211R\264\3213\313\370\2719\207A\006\227v\2018>\334pLtD\220\"\240\002\004\207v\003\313\346\345\372\n>\237\331\357\362?\216\376\334\277PjX\372\3045\237\\\222I$\222I\010\205\027\036F\212\372\304\376\024p\301$\010\304\207\027\020\210d6f\224\022\357\223Eq\354-\355\020\365\362{\242\360p\345\310kx\033/1jcG\021\224\302 
\016j\036\235\334\355\230\326\006B\3235\230\322\364\221\tO<q\325Q\323d\314\245i\026\264L\3205\250O\207\242\322\202\350\212JJ[j\024\352\020r\304\036\202$\365\376\r\246\225\321FZ\204f\373q\241\013\003d\200\033)\217\275\303\0048\340\221\023s\333\\\3553\342\261\t\016\035Oc\301\356\260\3031\220s\333f\"\221i\0254u\266:k\0331P\227\330f\323@\341\252\306\304C\255\230\304Q\231\247i\032vf\236gZ|\217\223\267f\251\346\007'?w\246_\021\370\272\035'\213\362\3450\307\344?*\373r\316\2171y\232f>\357\354\372\227\354S\256f?\243\351\025\323.I.j\021\005MQ\2201%\273W\205\225\032\300\233W\271g+ \200@ \020\010\020\206\234\252\314\363\033\200\000\000\000\000\330\234\341\305RO\217\200\274\005\337{\256L\t-\316\030\271\341\273\244\031&i\317\244\313f\r\255\352\325rf\031\206\007\035\303tLg\001\227{\214\\%cB0\212\237R\370\031\264\322\312\207)\255\315sV[\216\353\245\tl\332\313nK\325)f\005\200A\025e\353?;\316\3347\201\"\007\336\002\032v\236\245\347\200\201\310uip\220\234\260\355\206\214P\267\006\345\205S\"\243rh\305M\002Sb\274v\355\271\266\373\212\265\372\377\275\362|\332Q^\317oG\022""\374\014\262\316\032\200\006\034\r,\217L\3079U:\242\251\"\251\361\340pxg\343\322\3701\017\027L\336\010\340\252\220.P\331v!\310\021\243B\200\317\253G*\366\326\322\326\326\2265\347mM\206\20553\021\312\357\266:&q\305_s\356v:\316\257\031\337\272\206'_\313\321\251\305^TT\361\303\337{.n\336\356\311\"\222\214\260\226YIb\224\221\242\232\276\307ZK\244\353\237T`\346\213_\240\350\331\331\227W\2344b\205w3\340 \324\214\353\006\326@\315.\310\323\000\240\306\254\311>\204rM\254%\246\036\264\014\242\360\305\220OZ_p\347\014\350_\220\273!\271\337\016\014\217\004\036\310+\266$\002E\230\256\342\003T\330a:\216\302\033k\232\360y\010\233\316\337\232k+\336\3504&\221\355\344\234\000@\301\326\n\016Dh'\n\306\357\n*Hd 
\344[|\020\331^FZ\246\360\342\360ACh\363\rd\335\022\3109\360\"q\0216\331\211#\014\311AT\200\324\351\230\226\230\013\231:\343\322\345\007H\331\263\260\226\236\202\362\234\271\301\265\305\245\333\344\3224\205\022\322:\361\336\254\263.:sf\327.<x\363's\221\224\224)\034n\034\223\320\321G\305\350\305_W\nL}\031\333\010\007\201\266\247-\266\370\r\311\210\271\230\304\362\372\262V\303\206\325&\371\351\235R\363t\306\"\215\327\253\217\372P\342\006\002pP\330N\341\311\253\321\277\326\210\246\334\260@\213\200\264\r\266\320:\246_V_\317\215\373v\325/\000\335\244\340\275\300\013\322|*\230Y\372=U\327\020xn\314\016\222k\033\235\315w\243\246-8\242\212\036N.\255\214~\016\233'DY\367\2712\256$\340\270\254\006NV\t@\240\351\250O@D\027\037\031\352\360>\367\240\232\345<T_\014+$\350A\212i\343\213\267\270\034\306\222\021\"7\344Q\202\020\271\003\027^\227\373\217\233\243\327$\211$\222IG\351\374\276_\005\373\347\277:\333\337\307e\317\316\351\203-q\370\307y\024CL\216\257{\331\271\343\313\365[l\021\306\317.\256ga\201\323~0\210?\224p\014\364\301HD\"\231Kg\273\217_\023\215\362\307\334#FzE\244(\343{I$\272\374?\256\361'w\343\257\253\251\326r\252\212\346ay\27131\255\367R\372T\222\305\212\224\247\342\n\251\212\326\235\326\325.\025\244\264\251irq\033\031\201\335\243\261\027x\210\251\004\236\031\342\376\274\277\365\371]},}""m\361\375\017\360\331\006d\247\311\336\334\334\374f2\315\362/\222.\303\261\264\304R\317\246\204\016\004\004:Ki\334/\344\267\237k\271\317\311\3548\217'\261\234|&PO\272\212}\2416\0007WJ\356&\353\260e\313\217\374\374\237\371\350\307\241\372-}5M\3111\320r\035\242\ty\231\230vv\205\\\311e]\310\365W8\306\265x1\225uu\214\346\325\274]\252\305`\004\304z\230\247{\271\221\343\036Q5\215\375\2454\250\321{os\334\374\014uf\0230\036\213\276\177\n\354\203\344%\324\352$\203\331\230\222\022\003\366\273\244\230^of\010\354~\330\321\032\027\263\256\316\222\004,=\375>\374\324\037.(\374\227\221\317\212N\303!\264\007\236\315\002xA\tt\034\0209\213\036\366'5\014\240ua4DS]C\220s?&\372\241\266\234I 
\223\366Shn7\332\316\273Z\r\263\033\rc\230*\366%k\333\323\337\036\347\000\000BP\234\351J\371J\304x0W4\263!\225\261Fg8\204\202\021\301j\347\307\235\035\255s\223R-T\226\231\021RH\205\n\2515%Z\311_\013C$8\310d32\202\322=\267\037\233\274c\3026\261\022~\321\035\304!\341\342\327\355\360=\334v\357\332$8>\275W\365Se\223\233\346\226\340*\325E\317\330\007<S\261L\247\025*\030&\2445\224\032\006\336\276\275H\227.\271\3459\274\274\362\000\021\233Ax\226\330\213\354%\217Kg;\332\233B-\217A.:\0146\241\250:b\014;\017\253\002\020=$u\001\201\260\007\177\271v4\037\233\316\0227\311\330\236\353\240\010\346\370w\031\234\010\344\314,\267\234\202i-\334\333\307\002\344N\362Wq(BCn\376%\315\334\265\343\023\035\341\211\307=lAfrCK~\333^oj\232\216\265Z\352\343\034\200!c\263\034\205\365\037Z\362h\024\000L]\260:\024\010\257\215\002\375\031\356w!.\254\302\260\374\200\n\200D(\201\235\32307\031\222\200\331*\313 ;r&\327q\002cyN\031\017\240\367'\2756\222z\177\017\226\333\346S\037U8\036\237\025\315\317\244Q1\365\237\026\250/\006\332q\324\220\000\005\017W}\367u/d\274eL\242\271\334\n\016\332\273\036\263+U\020\210Lb\377\335y\300\224\235\220\354?cj\022\253\317\317\244\233\244\225\256\353\215\315\253M>e\357\227\007\220\000\000\000\017,y\225x\300\372\277\343\333\266wx\026,Lb\341%\304q\026\222I$\201\333\237\037>\316s\234\000\023\016;\367\353\331""\343\252\270u\000\026'\355\273\027\305\303\262\264\3065\230\361\323\264\202\300\212H \314\203\006\032\263\344\025b\222KL\201:\016\361\035\351\013\254\342:\270\273\266\374~'\342\334{\256z\340\n.>%\235\264K\266\001\010\200x\375F\302\275\337\211\215E\003\"/\217\350]\233\026m\003\020!\320\351\232\217\252\237}\233I\220!\223\306}u\025Z\304\365\014s\2373\204^~\205;\221\226\0333\200\246:\265\274\365\205\302%\201\002a~\210p\233z\014]\200q;g\025G\256\206;@\207C<\036\307n\036<9\336\215\224\"\274``#\240l\227f`\016\t\226\375L6'DZ\030<M&\306\024-\275\360\235\234\212\036O:\276\027\263~&a\220\r\354\002\240\001\304\343\211\213\252\356\331\021L\343\224\013/ 
]\221\204\302w\311\0055\364\004=\215\032\354\225\220\344L\000\224\010\362'\263\022\207p4l\331!\320\223\241E\230\244i\003\304\025p\301\200\000\355\216ay\2472\212j\200\216\210\231\203x\246\222\320\240jd\252\251#\220\035~\237v\245\203\345\2767\243\355\246\257P\014/k\374\310eoO\320\242\226~\231\3001>\336\372\206\341\0363V\207\261\364\275[3\310\345h\205\304/9\237\226\276\214l\345\335\032\332d\261\336y\212\223\227}\276p\372\345\257\277\312\314\337'\310|\021\006W\274i\033\207G\006\276\317\344\240\013\245f\016w\323\327N4QF%\244\243PQ\"5\240\202IcAz\271\254\206\361NG\241\251~:iA%B6\331\230\2124\t\005CZ\236\016`\027F}=\3244\221\307~\007.r\225\010\311\372\207\010T6\000k\215\213\303\305\345\376\232\317\325}+\027\341\026\n0/\347P\372\204>\257\340\314\374\036\354\307\312\377$\0255\321\203\264f\260\311\314\030\301\367\366:\365\237\311\366\255\277\202yQ\256\221\367}\277\017?}\343\356W\213\307\226\327\243F\215\0266(\217z\207bz\326\r\217\022@l\371:E\267\2006\372\007\022\302\267\232\256\224\207?\2236\376\252`\265>\363\331\306@\240\366\275/\231H`0\007\031Mm\344>\t\354\303\007\006\220c\"%;\243\355\231\353R\rU>\233\357/0\266\005\361`\r\033\320\264\002\242\010\276\271F\264^\302\324\2658z\357\360\252\025a\022\021\332\010\tQP\221N\346\230=\235\237\327\344\363\337\340\230M\244\236\214*\370\274,\366\303\316\301\272>?\251\325\276\254yg'\326\257\261\321\246""\004$\222I...AW\037\226\022\302\351\034\245\013\313\317\341\365\036\251\3713\215\020\357\000\202u\255\032\236,\006L\n\356\313\324\3050\306\226\246\2532\245W\026T\272\237d\0237\256#*\305\027e\204^\360\311\3434\243*\305\025L\365\273.n\336\376\205\2277\273\325\227S\032\265\223U\231Soue\313\343\327\252J\235i\357\233.n\364FM\227\034\301\037jb\364\243\231.t[\3376\\\332\252\242\246\257`\2435\206N[\335\331sv\367v\\\336\030\300Ff\0319l3\335\331sx\017\177j\313\212I\2234\354]\246\020f\242Y\t\030q\225\234G\2305\326\271\256\217eorc\337\320\323\373\366Q\252\271\022\213\277R\272^\373K\271\211RM\260\2166\020\212\202\025q\320\215\256\311\025A\n\355\005\304\
017c\245\000\347T\205\252\253[\230\036\006\017\250\364[\310y&nm>\244\227\036njY\250\315\315K\217\371\361\336\2356\204P\0106&\316\013\376\010f\262J\001\224\037C\tp&\261'\305\263\3437P\004\216O\212!+X\222\233%\330\000\000\000\000\000\000\000\000\000\000\000\001\316\231\230b \017?\220\002\351v\352WD+\240\027F\025\223F\265\205wJ\342\210\034\263\251\355v\235\026R\010\343@\232\347S\275\347\324\217L\203\t\243\301 \031\021\245\250U[\312\355\364\326\271\266\275^w7\243\256\274\372\350\000\000\002\005\367K\247Z\016%\214$\243He{\330\252\372\3544h\236\037\010w\203\312\276\213\036\305V*\337C\300\277\345\207\205\220\243\332\202APH=\234\354\273\004\003\"`\211*y\r\223p\2552T7]\024\025!%v\351\234g\013R,\013\006u\372\177:\256\303\323V\325\335\t~\013\206\215\004+(\021\371\353a\301\000\303zT\270+\"\2658MS\321\251\231\260Z\363LUH\260\002\327\3110\337\343\277\241\375\350,\355&X/`(\230\232\370\324\022Ea#\374\030\303\r\317]|\231\035\333_\351\204fV\274\225s\364)E8-\3208\322\013X;;\275\275]\035\243v\3475\032,\314\314\311$\226\275Xfff\360\303.\356\212*\252\252\253\360\376\021\26333+\360\263\\\221\307\010B\020\207bN\364\022\364\362\250\205p\014\200&\2252\031\013\034\255L\237BC\230\236L\023r\304Q\232\234\270\003\306Z\300\345\2706\030\242h\016\344%\230\241Yo\010TJ\304V]\207*LJu\265X6}\231JV\343,\332\325\216\0031J\250Z\350\020\252\010K%\205\316\257\006\243\267-""\374W\330u\313D\220\n$\tS\267\271\207x\352;4\316\245-\372\2741'\303\307F;j\275\\\365\r\372u~\372\323\266\024\3437\023\223\253\303Q\242\315\344$\000\320+E$`\020\205%\024\030\"10\337\304\365z\276\207\322\372_\237\374\234\314\314\314\314\314\314\314\314\314\314\314\314\314\314\314\317{\242\365\370\367\000X\014,\0030\203\024\200\224mT\253V\253\273\325\253V\257^\254\363\316O<\356\357<\363\317<\3671R\022\271m\255\337\310y/3M%\371\202\204/\333\222\200\030\223\264&F\010bj\016\007\211\317\253\276\016;tM8\361F\205\324\326\221\243\210\034\262\232a\237D\303\205p3}}\373j\333\317\035\331zLbc\023\030\230\304\306&11\321*Q*Q*Q*Q*Qe'<\266M\372s\223\222xc\01
0\035\234q\322\360f\025`\035\275\202\356\225am\367`\320m\273\334v\251]/\\\301\345\322\276\034\250\357Wy\330\0066\270\276\016\243\035P\t\007\227VX\001L%\021\005\317\226\303\rN\347\314\326Q\331\232zl\346\243\301\3348\346.h\345\333k\354_\231\373O\307\317\371\375\230\342{*\252b\244\231\226&Fn\376\304\256\355\213\262\355+\273b\354\231\226&I\231bd\231\226&I\231bd\231\226&I\231bd\231\226&N\204\010]\221\364|\240\356\307\200\031\245d\273\034\365}3\262:\2145-03\234\371\301\343\326e\n\370\341x\342\274\272\3640\273\3228\337\031S|\333\n\305\021I\273Y\326a6\323nFm\007\035\264\317\210\3562\275:\302N \362\270i\365\276t\233\276m\274X7`\020\017\335\230\031\270\200B\270@R\203\n\367x\243\320Myn\341\273NAy\317y\266F4!\274\341\240\024\3348\330\276\230\201\345\336\005\301\007d+w\334\201\246\334\253O\001\007n\\\262\253\224\233a\237]^M\342\330!l9\213\333\t\t\t\017\345\226\351=\334\263\216\272\017W9/\262^%iW\211~\232\342v\017_0\r\257'V\010\327\251\213\236\226*\226\031w\206\017X\034\221\235*8zC\261\372\334\361\360\366\360\347\263\317\335\324\000\000\000/\246\372~\317\331\373?\215\366~\317\217\036<x\361\343\354\350Q\255\210+\005\366\270\316\370/c\005\000@\26014CI\000\275\013\214d6\"W\314\0308=\315K\r\322\354\234\367\207m\201\\\331=Lp\264\027\241\307\2620\243\037\261\364Q\314|\340][\246xJO\200\353\325\014\222B8\005\346a1\224U\034\254\314\247\320\221\331\261b""\215\025\002\264DX\261;\276,X\261b\305\223rry\347w\347\247\247\247\247\247\247\247\247\247\327\372>\217\217\313\250\000\000\000]\357\207\303\341Z\265j\322I$\222I%j\324p\0056.`wt5\267\205\374\017%\243$\303)r\255\233\024cV\355\307nKs[\226Ye\204!\te\226Ye\2479\226\027\306\303j\375\245s$\304\302\271t\222\3739\276\274\226\232#\331^\036\366\034LQ\312\323p\033A\t\001\220CcW\277X\267\034<\377\310\010\262\250S\323\000B\343_\215\313\314(\037\367m\351\307/_\343\345i\245\362Q\363m\277\273#O)\356;\224\342r\230*^\001U\375r\272\022'\374\302\230\246C:\2651\211\353\346\261UT]\355\335qT}l\033Z\250\367\375\333\362\003\341@r\222I\"~\343Z$t\227\375\305\342I\24
1B\217\356\300{f\231\366\227t\017sd\262\254\003\313\227\010G\n\241\301\337G\230\312C\336\341\207;\027!ga\330\024\307\316{h\321\310\201#\"\035\335v:N\262\2535.\356I\036\233\234S\246\333\\\335Mm\316\200EnK\311\231{\352\273\2743\020\252S\243\302R;\244\343e\016\372\237\016\360\334P\333\277\003\020\222\210p\215A\243_\266}B)\235\t\031K\002\023y)\nP\314HCc\200s\214\340\323\222\220\233\262Q\216\223$CyLr\356\n\\\231\213\307\273\220\220\275\312\010\004FGb\200\005\354\233 \215\340\326\355/y\222\016\302\002\003`\271\244\220H\205\354P\336gb\310\272\360I\355a\"\004\343\021MA\350\004H\2520SiXq\206g\331\301\277\277\277\277b9\314.\320\r\373ms\241\214\227\025\202\221\232-2J\303\2528\261\314\013;l\2610l\215\031Z\356\256\206\243C\303\031\222\017\027if\367\016i\326\263\322\315\334\211\236\234g|\355;\234\317\001\200\032n\204\216\324wj\343\224\023$\310\243y\235\355\343N\226\310\366\244\323HB\3417 \343Aa\313\354\023;-\264\330\336\354\342\275\350\014\206\207\026\201\320\332\016\353o\355d\360\231q\324\313\224\232h&\216\031\\\355\260\316!\200\334D\322\025\025w\220 
@\200\275H\253\224\252\342\334I\253rklE\\\226\327%j\270*\222\266\223r[^\265n2\005\305\242\010\231\333?\027!y~!\341O>\234\215\214q@\024\312\203x\343u\242\220(\241\034\241\271\241\322\022`Ag\221\346\300\244Zg\r\201\\`\350\332\244lH\230q\324vV\245\003\273\327\207\265\251s\253\272\\<\303`\347\224(\351""\321\313@\223%\025<tmgF\353\354\343\200T\343\277S\204Jw:\276MU3\354\216\3107\323\031\321h=U3\333\211n;UU$m\335\321\326VG=`\343\243V\325\355\340e\313u\216\336M\005\0106\r\265%\254I\213\343\313\221\032B$\220dj\250\204\222\2225*\037EtxL\265\342\343\005wd\270\226SP0\204\233\024\027}\327\3268;\350.6\007\335\203\334~\206*\270\304\222K\000\263xz\231#\271C\020u^\236\034\350!\0348\034\244\232\272\354\213\013vl\217)d\322\010G\232\022\007)q\321\r*O_\211\035\352\002\016\0346\t\274\374\231\275\016\276\230\275N\361\326\276\234u\271\213\212U(\210T*m4AP\317\270\223/\215:\216+Q\2250\366\242\246\247\207\023\031R\030\365\362T\n\326\335\\.n)\362\244\2044\336\366409\005B\250a,|Q \247E\335\241(\340\227\274\306<\n*E\010\312\341\000``\250X\017C\275\360\010\343G\014h\353K\305\2524l\263\004f\304\207\035\322v\r\020K\202\200\304\026#\314\017t\240\211N\360T\002D\236\301\221\314\346\022\341\201\200\265L\032\016\262\025\327[\254=\227\334m\213L\204\224\025\301\240i\276\033;C\005\017\213\200\005\364k\321\347\23777\316\265\347cZ\250\267\277\2417u\026\367\364f\356\242\336\376)\273\250\267\265\361M\333\332\006\030\367\272\224\"k\316C\320Fz\303\3008\300\315\032I\334 
\3667\276\020{}\224A@\nF\"\002\213\006\006\312\206\000Uq\204q\"\226H\234\220(U\203\204\314\345\213\\C\303\325!\2268\210\371\rr:L6\231Ut\336I$\222M\207\247\252\203bW\212U)\244\257\250\350^J\275\257u\332\345n\353\3048\0033\327\207I(\317,\230\353\r\010\353\333bE\313\372+\235z\230f!\246@\302e\306gw$\356\223q$\370|/O]\334\2543\216_\213\266O\306\246N)<#\313\342\277\\\327D\266y\317d\234I\335\335\365\276\222V\247ry\276Ne\231\364;%\036\005>\236\343\371\364l=i\232D#\224z.:\262\004\010,\211N\036:\223+q\347\343\301\236+\270\337\256I\235\2539\201\013\004\235V\216\317+\357\323J3\212\231\026\262|\325\0271r\221\321A\017\354N\210\023\243s\034\306J\2156\364\030\264\205#\020rF)\242\364\2412\013\210'\323Yx\215\216\264aE}\373+-$l\016\271\332\336\322\201[\236\005$H\242z\260\325Q\250\247\304\200Q\001\220F\023h$\210H\263z\332\\(\211P\233U\357\214=6\320\231\211p""\322\010\006\225j\023u\363\330\320P\214\3077\271\206)^\346e\261mkQ\204I\010\216f \233\202\304$\355\305\216\302H_\002\370\024Q\236\310b\375\372\244\350\272\367\222\341\343\341@?\013\246M\3620\307\247\275\317Mk\264q\267\177\n\023\370\262'\341\206\033\340\t;\003\020{\246\367\335\335:\367\273\036\021E\373\335\312G\010HnDYw\005\234OJY>\247\324:!\375\357\t\271\304}\304\320R\037\242\240\242u\035\304\355\351\2156\235\210\272@\246\037N\326\036\032\313C\022\020\210\332\034\264L\r1\r\023\213(\221R1\016\204\204\212C\310\331\020\3218\262\211\025#\023\246HH\244<\215\221\r\023\213(\221R1\016\204\204\212C\310\331\020\3218\262\211\025#\020\350HH\244<\215\220J&Fwg\214S\002\225i\320\220\222h\232YD\212\221ht$,\250gwb\321\020\3556\022\210\206z\305tH\212X\207BB\204\377\003\233D{+6\372(\356\346\333(\203\005\343Z\316^:mD\302\255\250\322`\233CCCCCC\0353\241\323\241\320\350t\3749\251\312\311\307\2625\0067\025BB\006d$\334V\334\351\317\225\025NU\027\301\246\2533\322\371\235\236\245\270qJN9\231\221\363\352\327n\321\006\366\361\\$\222]Y\233\256\335\233\252\001\234tY\000\225\335<\256\022\224\230k0%\n\305\247(\227|\347\210^dnxr\257\257\363\267\270\375\304\365\217
p\330\323\360\354]\344\372\207\326\330\3017_,E\357\021\365\004X\237/^\037/o\213\314\321s\2574\325\316\257WgR\261\333\350\2227u0\213$\341\357\005\244A$0\311\221|\261dBD\010E_\222 \375QV\310QT\021\261\007\350\200]\237g\267\353\025m\363>\261\nR\205\214\240)(\022\247\253\371D\315\361\241\313\204\020\371\341\354\371\300\023s\260\007\032\242_\007ty?<\261T\365\311\010`\320\001\366\272c\016sap\n\241'p\372C\253q\013\335\346\255o\304\204\205\355\306\262\373?=n\327\346\310z\203AK\230\220\372\312\010P\024\356W\335\356\311!|\237\320\264f@y!I\276\367{>\254q\366[\022%C lx{\016P\025/\177\004\313):M\013)ta\2644\001\361\326\333\323\354f\277\037\253\225\370w+\232\2141\0047\035\364!\240 \306B4\351C\313\353\372\267\324\004\361\000\375i\324&j\"H\200\020\200#\010\200F\000\247\200\211\212f\204\r\017\210H!\235\212\327\311\037\373t\024q\3329|\275>\036io=\212\213KH""\326i\246\257]v\361\272\321+x\006*^@\354{|\016\212\376(w\320\255\204%\375\311\000#\261\260\236@|B\027\202b\231\037#\310\204;\004\350\rN\303F\262h\000\263\354C\005\230\232\010+LN\315Zy\353\216\234Yd\214*u\234\324e\233j3j\335\332\213\227\273V\374\014b\"5Q&\312\222p!\020\341\024\344\365\360\315\003Qj\022\014\201(\246FUTdddh\252dJ\"\031\236\231<\353\305\216h>U\200-\306\302`\357\rQ,s\0054\261B\210&uC]\24557\212\0251\023y\224x\344!v\356\n\233\037\376$\221\211+\322m\306\266\3359\001\013\275\246\322\263.\\\326\000\334\251\272\271c\256\240\256\201\333\277}!\224{\236]\335\306\362\242j\024\004B,\000\220\200\022\302/\210<`Ev\361\204\310z\363\326\307sGd{\352\220D\3763\2738*\\;\202\002@\221\221\207\225\007\352y>\353\373\207\336F\301\017\334\027\356\202$\021\330'\211\230\210Z:\276\257\017\362c\204\343;H\3115\353\001U$\312\203'\245\314_3\345\346\303\211\310XFJ\216\336\253Z\ni\350\246\232\325a\323\203\031\232{\303\254IQ\227\020\245L\3033S\rE\353DhP\335\276\363}-\366\265\301\251\026^(\263\307v\301\002Lz\331\240\361\273\254Nw\356\002\231!\350\233\242\316\022|\301\306&e\361E/ 
\310\306$BF\036\317\307\373\313\364\376c/t\007\230\"\3064\033\365\t\001u\222(\306H#[]\330\003\253\253\266\264\217\227y\336x\\\332\207\313\301\213d\223\276\370\272<\2528\211\346F\204\351j\262&`\337\030H\232\236l0@\366\361u\352j\357\373\372\317\331\257\344\350\244--#K\236\301\300\3279\010!\3200`\004\301\322!\234C\010X9!\232\354A:0\034\2740\005\214\341\014\202\305\rpP\322\t\213\205\202{'\317\307\010}\230\207\306\212\027\243xH2\025)@u\350k\366@M\000e\023\231\324\t\021=\3257\371c \252lR\304J \305\223i(\003T)\264[\000`\240\321\261\264\226M\213\032\250\325\026\240\242\300d\331F\010\210 \242\244\315\245\264\254\266\246J\242\250\020\246\246\223\3622\345 \332\032k\225[r\271\270\334\000\005jI\263Z6\260\000\005d\000MA\254\232\322\225\224\254\225\212\325&\333%\266L\032\n\200\0132\246(a)e\n\233+un[\224\252\222\333\t\023L\2279\304\211\031\tE\243k\0324m\264\002\004JQ\242\n\010i\252B@\212B\0040\317\336\323\243\034reU\377\013\0273""\000HT\024\014\353|M\243A\234\320&{\221F\234B\0208U7kd\363{\327\332\201\316\276\355\337\207]\232=\001D\210\274\355p\034\240\235\347\326\005\356\240Eb\207\027\034x\200r\000\r\211PY\322\230\242U\220\013\205bN\212\356\221\033\316\357\301\255\202\334\316X\336A$G\030}\260:U\277\016\253\237\037r\367\371x\364+^\025\005`\362\347[\344\255{\272}\035}g\253\277\273\255\311\210\266\336\347\021h\251+\252 v\025Aj+I\334\354$\206\026O\227\200\245!\244\003*\245\357 \221\023gc\276X\032\305/\"\t\272\236\016\220|\"\"\310\205\025EA EXw 
w\360\224^\256\312\212PTf\0215\370\265\374?\231w\017\317\020\345\233F\306\350o!#*\252\034\013\200\n\374\3070\203o\257_O_GU\3176`\336YWT\332\"\255e\232\303\273\014\331#8\327\035\232U8\370J\010\3477WM\222\223+\323\324\3338\210\272Kb\302\204\020\377\254\002\340a\273\304\220\270V\n/\004|\321T\307\314\024\246\224j\226\344\322\252ZI\252\023\337\262\032p\000\000\000\002\252\212\251!\220X8\216\304F\341\254\311!\351\354\2465\252U\221/IA\261mI\212(\245\"\036\224\034\020tD\035\3041\306\200\226\221\250\324,\225E\317(\205y\254hC\323\351\361\342}\361\013\275\366\021Wn[nKZ+\021\265z96XH\306s\355\204oE\245f\357\224\250\311\331)\305]\226d\350\014{\221P\332\365\312\26764x\251\224K\351\200\215C\177\222\300N\027V\363;\322\243\010\240h$\t\357\353\364\2131\376\277\233\273\346\311\362\247}h,\305\365\347GN\016@\343\346q\344\371=n>g\035;\2775\352_q\337\256\036\036\037\312z:xq\305\355\364\355\327\251\331\354\177\001\330t\356\000\360\362B\305i+ZF\327~\035+\022T\222\352\255$\352\223\351\355ip\276\302\343\247\220\372\2577\237\216|\237Y\355y\2168\356=\317\213\336\356\370:\037\007O\223\336\364z\307\271\335\307\311\307g\310t\351\346\360\366\273<\334y=\203\213\242\332\332\341:\225\350\255i%\372\013\224X\251xV\222\\\275\252Be\323\306\322\337H\033m\267M\276)\364\234\345\006\336\366<8\347\016\023\273\301\036\037.H\333\307UU\026 of6\333|\214|\317O\231\215\266\355\354\364\345JK\315J\265+\311:\245*\345\364\241BK\021\336\036\325\252V\251-\301U7gw~\312\236\2602\346\213\010\367}\242L\213W\1776""\331\331:\271\351p\013\204e/-\024\035\241\331}\356B\272\326\031\307\256\275\2328b0\320\033x\2208\265\270\016G\210K\301\"t\214p\027/\262\214\3059\202ce\007h\252\024\010\272\304J\004\\\232\212U\010\001\"}\275\203\264\3565\355\365Hs\357\372\347<\333\272\223\036\035\252 
r\340\301\211\262\241\237\034Pm\006\2555\327{=\262{\027!\221\033\265&\342\212n\223\307[\005\253\301\273k\215\255\247V\266\212\275\304\"\305N\342%pq-I\215\"\267\306T\320\235R!$-.8\372h\335\230\207V\223R\254\307\275\376lO\327\351\304\376r\3500\314\341\321v}$\320\230\304\017\r\362.\236@\314\305\3567\226gW\310w\3737iR\344\341K;\337\035\253v)^\271\3658\202\317\023e\np\212\024\341\024\031ue\327P.\003\304\022!\264\242\326\261hN\340JLs\334\316\247(i\262+U*\2508\234\272\325\343\000\023\204\242#x\247\306\333\272\244\266\306\3322lZ6\335jqc\024!\005\t\000\032\024\332\216,\343\006\002GUU\244M\321\265\215\304r\n5!\032\220\215HF\244\304V\333Wb-f\357\rj\n\342\006\300\000\000\033\023\230T\222\211R\255f\223\364P\253B\304\223{\251\002\027\362j\365\265\026\211h@ f\205\262\256\305\013\235H\233\337~p\366\006\013\344o\221\014Eb\233oW\253\203\"\300+\211\254\241Q\r\207k\3477\354\250]]\254\031\226\324\301\212kr\301!}\2308w\340Z\246\330\334j\306bD\263\262\342z\031\225\304E\025kP\256\014\200\221\231\3357,\333\206\t98\361\256\273^\036\345\027\234\265\334\000\000\001\010\000\000\004\200\000\000\000\000\000\000\000\000\000\000\000\000\022\000\001 \000\000\001\000\013\323s\\!\n\333b\252\354\272\273\307\r\001\274\013\005\330\212\311\030\202\245\265%a\226\311\265&\266\360\270\242\313ABH\305\020\204\020K,N\010\026%\363\252\231\342\230\235\304%C\216\361\322\tgG<\345\022\374L\211\020\213\271\246\031\254\325\006^wa\324\034\227\234\006\330\251*\252.\345\0244\261N\2264\014 \350a\324 
\233\220k\001)A\347\005\347\200\345,\306SD(\336\346h\331m%\323\3448D,\254\217V6\206\244x\273\212+C\305\310\333p)\301Eq]\333\222\\P\2615Ivkd\266\305n+q\252\275n(\244\333\325-\210+n\256\211\tAF.\222]l\034\215\030\343C\304\224\024aZ\336\235i\325\253\022\021#u\024#""\250\241\035E\010\352(+\215\227v\331uq\262\360\\\231\326\t\256\221$\350\\\230\035\371dU?T*~\034\313\332\0207\330\241\275\031\247\305\001\357\363\323\216f\244A8s\375b}\303\t\006\022HW,\372\355\327c\255\334b\334\246\372\236%\274\003\014\3753\257\023]gH\351\255\243\342\330+t\n\213\310\230\377\370\002\006\244!\010\220,\205\010n E\352\253\226\rs\255\2617\204d\010\222\335(8?\025\010!` L7\320\276\000\201\370\271)Z\301*\t\322\\v\022%\260\332\027\327e,7\010\004\rw\223\204\301Q;Hn\335!\006<\020\003\203\005uJB\215\260\2416\306)4\336qO\007\001m\r\272\262k\254*\347\321\250f#q\264\036\306\021E\234E\251n\256\247:\242r\346\246\374\301\027\242\202\355\305\207\270\"\303 \367A^qKf\265\312Y\316\345f\025\312\217\024\327\224\021J\025=}\221\2074h\005\246v\0366f\240\023\272L.\376#\255\353\306\233\306\022\211\t\037\n!\014\271\005_u\347\010'\314@B)\020\332K-i\261\357\275\224KJ\361\356\367r[uN\225\2455\222\322\245[\312\274M\266V\222\225\013b`\022\031'\272H\244\010\004N\202\006\330\365_\305\337\002\242r\254z\267>4\n\320f\033Ly\370\261$M\030'\313\360\017\004_\270\307A\264&\250\231\367z\225\003\220\361{\304\240N\225'b\021@\356\323\"2RZQ@\342!v\001U\002Pk\027\200\346\\\361C\224\033(I&P\354:\027\273\021\327\276&(a\244\353t\275\243\034\215\201\337X\301\211H@\221\327\235\276Y\036\372\340\355\306\302\367\307\201\201=\023D\"{\220\206|\215dB;4\177&-!\334\303a,&\020\026\000v\230X\204\326\000\230\r\201\2648,\n\024\2624b\343\022t*\r|; \306V\314`F\332D\316\332\020\t\200\330K\t\240\330\244\306g\200\200\035&\022b\003\001\340\177S\222\020\370\016d;\371\366\t\273\206=c\024\214\0000\226\333Z$\205\270\251#\023\027\260\351 
\347\271\003\034\227\224\021\313^\026\354W\333\327\335\275\310\034\303\340\014\331Jm\231CZ\364\276\025\361\364\347o\253G\355\365\017\225A\360\014o\350\236\320\362\317p\335\327u\323\207t\372\313\275k%V\250\204\232sQ\367&\356\370\365Yx\337\274\232\304\222I$\222]\253\273\335-R\317\013Y\330i\261!!!!!!!,\276\\\323\303\334\303,0 \216,^:\362\357y\274\375H\357\345\214c\030\304Q]\2573\265\270l=\231\233""\221B\342\350\326\363f\233\210\302#\340\206\227g\020\206\036x\210B\007\350W\234\346Lj N6\270*\023T+v\257\234)\330\204\010\270\353\276{\016\260\250<\313D\362\213N\300ww\276\241\273\010Y\363\2049T\266\273o\340Ft\177\035\201#\341B\356\360\360\311\003$\002H\000\006\306\305\223G\033\214\312\203DQ\032\305\306\342\314\255\3278[\026\215F\264\347.-\2165\301J\226\215\001nLmq\251*7Kt\267H5%\2237[\244U\006\202\212\301l\032\301\250\002\244\330\304\245RIh\010\260b*\210\3305\2456\322\2306\261\252\372\353\210\263-\\[r\224\026\306\250\002I*\270\270\255\306\334\032\211\000\332SZ\\\346\341\226\235W-\301\266\232\230(\003h \260\006W\273y~6\324\245+\222h@#t\333\320\001\334\0233\"\3749\274\322\006\033\215\020\250 \262lH\004\202F\340\n\341\335\3109Z\251\233\303\246\2178v{:\374a\356J\225*\025U\363\302\240s\316jtx.\003\016\214:\256\342v\002\022 
\010E\001X\000$BN\211\336\003\276Me\201\335\021\221E\307a\231\263\2313\032\214=G\212\307x,\221^<\253\316p\256\025\302\254\370SNR\201\344O\253\307\030\264\301.\212\352\360$\027\320J\024\034\265$i%\rP&;L\2516\000\306#'d\321\030BB\005v\320\332\036j\242p\245\365\005\302<\243\350\207\221`\363\016\201\030\204XG\316\035#\263\321\023\234\347\314\352\356\250\032@\275GD1\271\253\353\376z5>\2344i\025W\0006\024\372H\370\001\344\253\000\3709P\356\354\207\222\\\036\272\375\352\243\363\334\244\021\352\203\336\346\000k\202\302u5\247\367<\310\247\342\006r\316Y\310\316\363\346\371|g\217\330\254s|\016tM\330x\254\240\3103\306a\270{]$\205\002\013\002\007hB\204!DD\220\023\333j\003\022\350Z\304\257\312\316]\n\r\262j\342\357\235\355#\257\305\267\332\254D\200E\264\000\260\217\001\253@(\362\372\032A6\200+3=+q\026D\024=\217\227\351~\327\330\373\272\020]\210\204\202\236\367/\231h?\014C\016/(\007W\312\372@\307\021y\207X\026\270b\200\244:\215\177\023\265{O\250<\276~u\317\313\265\3666\232\265\276\206\325\300\000\210V\326\336Z\352\226\3729\311^\376y\272\324\246\363\272\234X\370\257\305*T\276w@\227T\255x\246\322z\270xu6\276\335aa\0336\271\n-\252*\022\021gFH\322\300\022s\305\242\222""\312\033\302\326\336\234\351.\310toF\306\3607o\222\306\204\315\352\222\035P\004\200t\303!@\316\3431\306\\*\260\365\215;\204\002\021\206\250\200\325Z\246\252\325\r\035=\201\230\273\333\254\275\034D\214Ha\027\201\222C$\220\320!\335\211\323\002J,\025\326\tV\010\201\022X\354\211s\017\323F_~*.OPX\326\004\363\341\3503\343\344]\2014\273\002\246\r\007h8l EAv\376\035\376\\\313\303\306fpa\236\350\305`0$\233\254n\373\246\236=1\023P5\371\376q\003\334Oq\007\334\021\367\025e\210f\\\ta\305\344vjfxv\036\314\\oY,\004\267\331!]\325\313\314\211\017\032\245(&5\020\307\036\320\364\304\311\233\375?\242\342\031\016\354&(?p\237}4\371\317}\035\300\210\232\302\021EP\220\222f!\000\222\"\304h\205\236\010\233\261\177_\023Pr\007\334^D$\220\365\332\327WW\350\343 \263S-\241\022\230\346$\204\266\241\212\240X\334\036R\037< 
S\010}\207\351\261\313d\222\007/0@\355D\373.\232\346\371\252\370>5^\325\220\252\022\371_xc\327|;#\037\343^\342\272wP\024\301\331\307\324\217]\337\030\221\200\303\005R@\010B\020~P\004\240\020:r\330\360\374X\nw\360KYu\017\000\302$\t\000\313\300\337c\2409Q\270I\320H\r\212\032\260M\247<\\%E\357\342\202O^w!a\255\336\235\r m\021\037j\n=\300\270*n\241|</\361\234N6b\220,\335\240r\233\233h\300h5l\222\ti\202\223E\025F\346,\013\204\204X\216W\251\274{\333\217\232O\253N\352\006\201\326\214\000\214B\013\020\201\004\211\010D\007\257\210f\365\002\244\036QT\014\365N\301\177m\310F\257\331-TQ\264[\033xm\232\203 \013\336\237w;Z\334m\366KyM\226~[\203\3769\270\326\022D\201\025F\276\246\373\037?\273^\366H\321\004I\211\244\261\243&\210\210T&U)M\253%RmR%\206\315jf*\031fF4\211\003X\242\314\223\353\357\242\261\350E\"\337\263'\254B\240h\257|N\234A\367D;\342y\272\311\347\030>5\r\217 u#\304\025 \200\363\303\243N\004\372*\211!?Ec\347\257z\036~\036c\315tS\006\001^~\326\t \021\320t\003\227\276$1\003\266\022'y\006\020J\211LU\225%\025T\357\260@\270t\366\363\350\341\267\240\370\030\262\033\216\"&\324o\224KO\021\031D\245\270&\327%\032\313\335\316N\256q\316:\\\347\263Vm\203\266\312\200j\375)\026)\020(\203\326\246\213A""\201\376\363\364\376\177\234\\bL\270Vo\010u\356\341\354\276<zq\025\363x$\360\014\231\265\345@ \313\264\223\344\302e\2065\303v\352\271\313F\211%\247\345\344\201\022E,\260\253\230\200\034C\002d35'\342\326\271[\226\263M\271#X\2673f\"4\271[\226\271Rb\345+\353\364^\026\257\026\350\304S\311dL(:\026\255LD;\345T\016\177X\001\317\353\036\233\266Z\021\227\347\251\224\327V\341\231\213\333\350=\"\231\262L\261\214&\027f4i\262\0048\307\265>+\001\004v\201\210\241\"\212TP\261\"\241\200\210\200\030\022\002\302\"\352?F\250Am\014\261!\014\206\261aIp\243\002S\235M\037@\363\305H\014\010\220$\211\321\360D\303\370<Ho-\215d%;\335h]\373\256;\"@b\222A\272\tA\324\242\355'\225\210I\345\355!\030\004\245\272\020\2537\302\241\204n\245\013D(\365\301+\026VE\313a\353\340\014\026\031\314 
\270Yc\010Q\332=\252v\330\235\245\302\203sjYemg\252\2118\023\251\020\202E\000\207\024\024\305J!fd\222g\256\321>@\360\357\027\345\307\325\203\203\230=\340\177.\025\270q\336\363\346\210\027\030I\010\265\022\203]\330\333\336\211\310@M\303\021\204F\004V\036!L\210x|=\265&u\330\035\376\356\025S\007\240k\220v\277G%T\345\321\257s\221\320\210@^`A\245 \006\373=\230\322rR\310\0209\2268\362\016\234\361\203q\205\336\220[\334ph\235\332\210\003\202\026\033\004$JZ\312\233!$Y\323\317\31738k\336\314\031k\362c8n\037\017I\322t\024Q\372|W\324\033\021\n\302\255\200\336\324\204\033\301e\302\377\020\226\310\264\215Q\201\030\310\t\020\304B\273i\014?,p\355\242\227Y\200\202\330\261\210\027\033\020F\035\000U\3605a\262\334\001\231\302\204\310d \026\003\356\313\233\035\277\213\262\344\332m\rf\201\004\200P\005I^\35586<(\331*\007\345R\227\035\356\321\262\006y!A\322\216\2078\357\020\212A \020\010\303\275\212>\\\312\017v\037\305\265\t\264|\331\347c7\rF\006\304\000\221\000\221WQZ\263\233\321\273n\023d<\313\261l\003\267\244\240+:\252\270\330\322k\216B\266^\220E\000\327s\343\033\252\345\225(\207K\000\367\377Bu\217\004\024\320B(\034\004M\371\024l^W\245z\005\007\006z\246\2042\023\322\316\\v\n\366\327h.\3764\371\357\310\364k\336\275\303m\203G\313\022-\t#""\231\220[.\2732\031\201\353\272\320\212(\t\014M\256\352\272\225\323\021\021\021\026\266\345*\016\177\266\224\265\304a\nR\204\202\371`\200\265\031\006X)*B5\t\025\241 \002@\241\010\024\344\024Y# JJ\035\345y\262\206Q\367> 
\271\030\221M\225\331u\267>\363\373|\036\tnR\222H\236ds\335\300J\255\224D\204\004\t\000F\004\024\036wH?:!\253\317b$\024\355\020\334-,j&\224\240\216S\247\241|\2040\035\341$\201\000\345\272R\026\201\200\254\3036Pm\311TMm\344\303E\003huas1b\210\362\211jsP\211\004d\004Y\006\000\020PR\252\246\324e\025R\223MYkyyU`Ds\003!\005R#\020\007\021[\227mTD)\022\252\0047`A\354\342\010t\010\010o\226\325,\013\260\222\213\244T*\264\210;\322\020y\370\272\034\357\002\215\033\245\215\304$$!\207\243\334\035\0217Gs\254\t\000\221\324F5)D\222\272\341TK\006\360\0077\253\001R\"!\007m\232\332J\226\233e\242\346\346Z\365]\374Hl\240\201A\000\006\001\001\2029 \201\033(\022\020U\216\250\240\222E0@\010\265MJ\232\010\n\335\325\"\224\266\217`\005$B8\305\025%R\320\221\225)\220\203 o\333\006\r\372)\n'Frj\020\013\335d\0220LW\225\276\320\244\247\333\271\267\356\245\3279\233\234\022\272\262\002\251Q\242 \220\252\240-$Pa\021R\r\261lg\\n\270\233\246\216\265\305r3l\244\335U7@\311\001\235\\\340\240\301&(\335S\2006\214\352\327\361Q\266\243V\257\3715\375\365\266\327\357\233x\362\362\346uu\000\266+\235Cu\316\252\034\331\272\034\345\312\353V]mH\351U\325Su\214\014\311\232\016\315B\202\260\203O\263H4\226\346\216|\341{(\327\334\350\251\007r\230Pj\t\003\237\227\316\236o\303\001;\350\336\200#\020\016\020\000\246\017\367`,\21040G(\002\201\357\207\337\261\366\004\033\007x\021x\375|3\374\332\206J\251\253\002\221F(\005\004A>qG\230\217\274\253\006?Nw@j\316Dd\261\013\223\314\262\304\010\005\020\026\021\241\222-\0000\240X\310\2070eC\325\347v\033q\005\342s\214\200\0342T\r(\342\243\006K\010\313`\215)\363\274\234\004\247a\003\255\002\377bI\003>96S\223ls\364\256\224\025\370\016\346\272\273\356@a\010@\020  y\252\217|\016\021\201\023\252\r\020z`=@D\025\202@\013*\243%\224\204 
\204`8\024`\236Q\013`\245\305\016\272\206""\200\241\242\255\352\212\352@\200k\372\205S\205\216\0209\022MDyj\365#\233zsu\2453\254\232\367\353uf\315v\366\221\203\212\235\315\2776\266\314z\002w\327P\356\313\227=\325\261\247\020T\335R\211\305\250\236\021|\250\tp0\020j\016\212\301iB\325[K\361U\335~o\345\021\246\030\370Q\264\001@5\341\310\225v\006C\023\036\205P\352nxH\036\037\027\322\024x\006!)\216\207\231\r\312\3143\006H\213w\213\200s\202\260)\t+\n\010.\323H\276m\342#]!\342y=\207k I\026D\214\212A\221A\212A{\177LD\314[\337[\002\026X\323\215R\272\240\211\360[\021yL\262\004\006B\020Y\275J\2142\233A\237w\254\337\333\300\326Ne\232\r~\003\333\277l^\267\310!\356\303\256\"\226?b\276\324\035\365\277\241G\006\232\025\014\365\302R\271i*'\265\347sE6\020\376#\335\017>\031\036\016\2327M\321-T\241\247\351\361\264[\t\347\312\022\007(\244\211 TU\031\024$@z\220\3614\003h<\223\310\001\000\214\002\001$\247h\007\240\210\032\004\020\310Jc\017\250\006\341\320\000\333Y\023\255.bt\200'\337\344\3506\312\235\300N{\360\003>*\0356\002tS\204*\001\302k/P!\007\205\320N\357pC\270\rd\250\355\0239\320\246\373t\311$\222I$\220\206\225P$b)\273\206@\232A\0140\344\001\275\005\327\310\353\277\263\312\267\260t\353\207\002\343\340\347\\\215\276\216r\215\261\276G8S\271ND<P\312{\3309\357\252\227\267\014\314!\274IyX5\212\362\212#\250\235\210\247\223'\244\036b\236b\020H\014\210\304$\264\2651\231\262\321R\222\320\251U\023R2\211JSSf\321\252e\254\362\301iE\204UH\201\024XD`D\240\"\022 # 
\203QXB\020\037+\200(=n\202\215B\003\343d\201\024c+\342\2631]\362\312\003=\335Y\345D$\315\267m\2250\035\n\372\010nbX4X\246\007\321\374?O\362z%\217\327\372k\324\000\345\215^\003p\361\200\023\000J\001\"\024\2414\244\256\222\226QJ\224\262\314--OgYd\247\353s\303\275\346\340\270\331\311\214\362\026\320(\001X\225\032p\247\221DbD\350\276\332m\024N\030\032\324\315\273\325R\321\017\373\354\315\307p\324\343I\306>\2368\227\213\212OW\024\256*!\031wL\365)\350\013\3233i\323\003\325\314\034\314\346Z\236t\351\016\375J\031\330\2154\n\226\261H5,\316.\252\276u\320\227S\266f\251Y.j\272\\\262\250(""\013\nK\226Z\024H\224J\214\022A(\n\224\213\"J)\026QP%\r\022\200\235\262U,\tr^]m\255\365z\200\000\223\256\266\3414\256nnr\222\025\004j\030\n\260\210\021HPQATR\022>TQ\324\010@,\205\006\212M\014\321\001\013\311\010\340dQ\270H%P\310\251X\242\347\264y\341\227\313\324\236nR(\331\017\204\305\"Q\207\260\364\204\205(g\371\3179\002W)\357\346oS\014\002\210E\365E\204T:\005L\240\034\255p\neI\257\360\221L[@\304\243\002\332\021\010\204\221 A\010?8U3}\310\032\014\017\205\037\204a\030\220O\213\267I\335C\200\244M\311\022\346`\243\357\301\235]d\221\316t\201\202q\210e\230P\352\t\0312\202g\234\022%\034::\035\001\357F\2631;=\245\r\203\014D\013\010v\240t\0245\006\372\352\t\202\217p\246\360K\253\330\214\007\272\013\322\205'h%\300\2450\202\037\237\351\216\257\204Cx&\034\3049\031\240R\235aU\334\005\342\000\255\226O\217\020\352\007\025\263ZV\226M+i\252(:\265\275H\344w#\331\006\200\026\020Ac\246\240\252u\216zE\3568\227\371#\250\356(\225\367\032\234\016\206\004A\356\250A4\024\260&V\036\350\240\224\t&.T\204d\200\211\275W\316\002\206\240\270\224\020\242$\215DR\251\032+tr\362\007\267A5\264\323m\267\224P\000\000\000\000\000\030Q&\234m\252rD\226\235\330\232N\251\332@\243\000Q:\244\002B\244\325+\211\362\222\031\022\037\376\321\320\241\240Ld^\014mW\246\212S\030Z\315x<\006\201\025\307f\"\351Aa\253\353\313O\276\303k\330Be\350 
\315\330\266\255\217Y\314\302\313\230\314h5\270\266\236\260\2026\220\201\347u\233\232\302\213\003\r\223Z\276\035Q+\342\210\262`\247J\324_c\271\020\354\303\rB)\322\3708\216\034\305\334\306}\336\2651\2474\264p\361\323{@\314\304\352\337H\244\312w\271\212s\205\262n\346-\313VFVe\3059\213\t\273\230\267-X\242o\333\234\014\027\276\255\231u\032skb\302\253%c;p\222I$\204\013\337;Uuu\341\272\236\337_S\275\333Q\004\020A\206HI\014\321\356G$l\010\244\227@\225\324\350<\267kc\246]F\234\345l\3541?W\216v\006BJ\032\030\224\304\263\2152\010e \211N\243p\347\232s\221\331\323>\216\\\207\024\250^S'\014H\220 X\224\310h\267-\212\335\265,,\266\245\205\226\324\260\262\332\273\035\226\324\2625 X\005\265,,Q:mR""\234\311\256UJ\270Q\355bi\326\373\322\240fG\301\\q\302X\230+V\245j{\3728\337\216\372\316AF>nE\033\2645nz\242&\345\316\222w\320}\031Y\254(\nd\303\231:zn\013i\007\235-@\215t\024p\215\345\272t\337\327\316\374p\362\307\"b\237^\303\314\000;f\003\254\014@\330\r\255m|\"!RA\273U \220\210@\203P\210A;b\361\260\230K\000. 
\030\314\354\325\316\332\225\346]\274;^B\010\033\326\375:,\245Q(\003]\270*]\202\027D\201\010\020`H\257\032\002\314n\021z\325\017\233\321'Y\n\003\256h\370 \3468\217\211\212\247\237\266\013\235\275\245r\254u`\341\r\307]N\2203\221\355\203S\345\266;\242\227\024\323\217+\251\236\013\236\342\022 \260\211 \204\343\n\025\323\260m`\014\240\211X\325t\357\360^\374\253NZl\006\202\265\226\343&\004\242\212\222Iuup\260\202\024W2hA\027n\270\000\357\262\240\002\301\036>\352\221\201YD\275\"R\001\214\243CF\2101_\010\200\330yE\337:1ZF\316\354\235\235\355j\020m\007\365\332,*X\336a\337\350\201\335\207\273\263}\215\257\223\301\361CV\334\243QZ\355\306\257\006\261%\326KTn:y\361^\356$\222\341b\205\341B9\237\323\217\253/$\274\252\322\331\314\245\346xn\366\217\306\000\237/\260\260\177F\377\017\341\016?T\277q\016n\263\266\337\263qwN\326-6\341\023X\305\256\225\230\032)\272\350\n\2358\306\214\326\255\220\327\352u\273\267U\322)l\372\274\333\213&\320n\315\306\264m&\343\215\026J\010\215q\\%\2631\244\202\247j+\256\235\0274\256s\232H\321#3)\273Z\3374\001\014D)d\220\216r\212u\305\200.F\244\200\222\306D\010J\027&,%8\266\326P\254\330\253I&\023\t%\223F&Tz\225\302\244L$\304\264\245\221\263&k4\252M\261\261 SJ**M\243TlmcTm%\033AE\020j+e\336z\244\222IW\222\216\327kyj\274\333\311kw\245\307-4\344QQ\r\340\227\024-\010\022\003\230\r\0303\\\260\006\200\203\252B\236A\006\200\206\3205\001\332\013X\222\001E\032\222I$\222`\224b\240\030\r\340\206-\035\255@\332\026\301\002\020\220\002\350\347f\327D\200A\250\005+\200\212\025\337)\242\022\t\240AL7\r\315lM+[\223,\201{\320-\t\002\310)\240\211Yij\215Z\t\010\000Q\000\223j\352\234\354\316]\330\326\335\345\\\033\215\264\3336\0030G%BJ.`\254c\010\\*\000b\226B\222\240\004""\202\262\025\004\332+\243\225\"%E0\021\003hP\030 
\222d\2042@\316\205\007/\010*\202\014P\314\23545K.\222\022j\226r\000\000\000\000\201\2603m7B\025M\324,D\031\001\243Q\027x\202\225\021\373\242]\355\200\005\006F\326\024\330M>h\010\213\211\251\2367G\000\010\313\026\251\253l\265\245cQ\255^+-6nY\225IK{\026\346\306\244\326\225\274\263\314\305\2007a\250c\235\227\016\235\33053\331\207uC\224\022A\n\032\343nkd\226\232\326eW~\272\335\032\305h0\"\"\321\002\226\004\010\305X\021h\240\262\320\242\004\020!\004+\030B\202\355\016 A6e\tp\256[\336\340\273JY\271JResl\335\2555\320\231\300\243 )\313\250\266\026<\262\316s\245\342X\033\333K\002$\212f#T\332\027v<\220iI\002F0\326p\004\202\230g\264\243\302>P8D`Z=\020'\215\301\217=\316\r)\236\365An!\220`k\271B\204JGW\320\360d\301\252\002! \250D\t\024\221\t8\302\236.\016-\246\341\337\232\231\321\270*l\t\"&\335\3025n w\346\232\211P*I\323[\213t\311\252\304mqmqU\021\024\200\025\026\241*\200\250UQd.\024I\255\221\300\346)\014\320h\351\342\337\236 \367!hk\030\000\302=\022\2343\235\207\2459\314\200\032\003\006\n\241jx\231\327Q\023>(\361,\201\242\303*\300\343\353\341\033R \031\356i\250J\250@\252\242\020\214\201\326\003\335\013\201T\215b\024\224J\200T\351J\013\2349\272\2361(J\021\240*I2\2165\327l\310\243\"\004\212\031 +\004_\"`c \216\350\202H\034\\\373\200\224\255\022\311\033&\321\266\222\300Z\212\302\006wz\014\0165\226t33\010M\030L\251\342\016#\214\3332|\273\030\254\3427\356^^\375\372D\006\360D\000\214\004V\375\344~;\246\004\351\004_\314\232\366\226\231\237K\021\361(^!\224M\004 F2\020$\017\266\305\245|gsa \331\315\366\210\206\317W\267\263^\212\034D\312\337\277\363~\355\277v\036\336\200\025\363X\363\243\312\013\3731\013\030O\274*J\210T\006\031\231;U\006\034\030\271{\336\356\305\211\013\242\372N\032yX2\331\336\233H\332m\034\271[\\\273\362\272\376\243\006\362\211\362&\223\230$\373S\362\377\r~\374\376\r?\261\021\003V-&\014\230\212\202\330\242\215h\360\020w\211\274)\004;\256\307\233\237L\366! 
\204\201\0252\305\310C\000@\221\001_I\336\010\244C\337\262\310\311$""\010]\210z(\002\305\035\311W\334\006\002X,6 \207e\033\326\325.B@\207\037!\337\034\2502\362\374\365\266f.\236QZ\343\304\276\007\017\302\025\364\260\024\036H\356\007\227\232\220xG\376\025\231\233\326|\3571\270y\364\240\260\313}-\364\217\007\007\007\177\t\336Z\253\255\222X\212\3722j\244\222\000\330\000+\336\211\251\271A\000\242\004\252\013}/\225\260\332X\354\206\370\214\"a\222}\3421R\021\260\336+\010\201\201\212!\347\206\206\005\001v\024\263\300T4\273\303E\270\345\203\217\256\343n1\267\207\305\267\360\035\307\341:\264\223\361\304G\277\275V\006\235\320\230\227\255\274\241R~\311\320\251\034\222\222\213\317\217c\314z\336\217[\303\263\213\361\336:\340s\267^/\364\2701\003\020\353y\264\355\261DW\241\227\224\333\337\330u~\002\014\020,\220\004V\230\202\010V\336\2347\362\322\261\361\2360\017\270W\3342\303,Ya\227\344-\360\362\013+z\322\363ZK`\000\312\004\346\240\201\321\212\031\355p<@\277\212\037\013\222\240\\|\204B\2225\014l\316\360=\343\274\253\274\276\027\202\367&as\261\363\032\014\241I\006\212\023\014\014\311\0067w\031\007a(`\260\200AR\022\320e\024\211M%W\300\244O\277t\270\010`\006\014\221\010\275\347\245\336\036\211\336\003\350\200\370\t`Q\202J\010\201\221rd\214\374\213\361i\213q\234`\035\334\014kZ\316\014\246Q\311#1\371T\322jB\247\325:T\202\220\350\035\016\201\320\350\0354\"\021\366H|_\025\275\246tuA\213\027\323d\201\023!D\rQW\270\016\274\320l\255\216j)\314\262\266\311$\220\234(PF\036\360\243\023P\226#\261\r\240\34310\243C+\036\021\271E\001A\250\026b`\3500\307^\241\\\247\034UP\224\0254\020J \222\316tW\033\024\005\276\252*^a0\243\371D\321\353\277\003\336\310\374\230)\004B\0101V\n\014\220\201\"\211\313'\nx_\260(\350\np\374\375\001\200\200\271\032\032\024\321}\205\023\334_\200\374\307\206\036\341\362t\376\303S^\303\344T\003\266\"\007d\n]\347\277\301<\352\240|\\ym\355\312-|t\2378)*E 
\002p\3245\026\240T\t\t\002\255\345\277\263\220{\2454L\001AI\355\346\373\332\237\3665\267\264Q(\212\315\232U\225?\375\312`\366\350\321Tm\023\227''Y\332\355\377\206\3119\202\211\376T;\375\013\361e""\347`\203\020\030\023\261\257\335\373$\374\261\351\313\037t\236\336\377#m:\215W\010d\032\330\2544\322n\\CN\246\320\335\214\0237\213\247\217\025\316\232\342\341\030\327\021\311;\367\347M&LL\250\263\212p\247\251v\326\023Hq\221\342\026\365\014\025Ct\334\305\320\302YeTd\220*\350\316\031\n\250\\\266\304I\005\237G\364G$rB\005>N\335\tV\227\037\006\316\347\007.\346\326\247\006\rJv\232\323&a\230\274\337\334B\375\354\022D\006/\365Fz\337\360\207\347\204% \257X\010\037\366U\"\000\374h\022\0006\300W\351)\323('\301\376\037$O\362V\325\277\203\321\300I_\374Q\177\306/\275\021'\337U(\000\212\305b\260\000\"\260\000\013\n\300\222\177uH(\t\367\024\207a\373\004\271\332\n\374I\004Q\364\253\001\017\331\001\360\327\366\237\324\000`\375w3\366}\343\362\210\036\204V*\237\220\322P\001`\251\352\311\370\275\370\257\342\377`\022\031\371'\377Eq\245l@\261\277\254\037\207\362\037\321\023*\007\3700\311\362\260+\356+J\344\003\377\t\375_\327C\375\202\204[\013=\372\364\325\371\241\025\231\323\375\037\315\375~\242\017\343\305#\371\177\252x\325\263@\004\376P\374\177\201}\340\374\022_\271\375$Qv\007\374\0366\340\374\022L\270~\321\000\3113$\240\301\367\200\037\300+o\344\3768\317\244\257\260-\224\000{\036\323@\001\025\200\005{*\376o\337\374\333\377\323\351\212\375?\245\375t\255\232\335\262\275\265\177\246\210\320\264\371\025\237\332\257\363\347\310\022\000\023\351\200\036\330\255\030?\277\373\221B{*\376\345\005~onY\377\261_\304\257\262+\374\036EiT\327\250\022\263\265\375,\264\037\345\375\277\213\007\021\302\204\216_\007(\000\337\336C\361N\222\347nV?\210\206G3\306< 
\306\356(o\177g\\\014\315\001\003E\351\341\362\010\334\000>oR\340\225\231\304\270\344\\Q\274g\017\365\361\356\356\310~\2562!\010\373\310\227'\023<\311\216T\227\r8\227\332\303\037\340s\360\177\323\353\204\017k\214I8\231%\014\340\335l\263\322:\337\213\324\343C\215\2447\033m\207\314C\354\372\022X\036\330|\337\222\263\025\261\230\001\203FB\314\327\366\305<_,\224\247\312\244\375x\205\037\335\233>\021>\030H\036\377\357?\203I\334\300\372$\321\017\335)\251$""a\204\257\311\2478I\363\375\341o\226\t\354\271\002\247\266TRG\361\307C\332S\362\3760(aL\002\377\365~\037\035\177D\223\376b\235\242\334\217\365+\n$Y\347\346\327\330\200\3112K\320\275[\316A&\335d\377\033\245\233\347\335\364>\374C&\020\276>\002\217\361\222\000\025PC\343\377[\354~j\237L\224\003P\366\237\224\350\375\365r\0234\031&\321BC\365\277?\356\372\367?x\302\265\377?\342\240\260\223'\343\226\233AL3\330\376\351\353QO\263_p\200\265\027\037FxK\031\354+\017\367\336\346\\\001\213\254\000~\327\346\372G\310\000\366S\355\375\312\013I!=\272RAdk^\317\022\345\207\254d#\322\374\304\034\000c\346\216X\345%L\234I\227\264K\360\370?7O\302B\036\021\201x:[\341\031z\332!J\357,_4E\006\031\220\232e$\016\237\r\306~\243a\010\212\014|\275O\027P;\376^\310\024\"N\223\221\250t\3527P\342\366\306\027A\020\375W\177r\260\277\\\204\211G\272\261C7\363=\277\253\017\253b\327\222X\005Y\376\330?\332X\300m*1\"\027$\261d\373>\251\350\351 \207\331\177\335\375\264\340L\334J\231\367 \\B\014C\353\230>\347\266\276\347\312F\276\245]QR\274\277\346\262\003F2`p\0326\370O\t\034#_=\256\177\001\362\303\332_\021\000#\371\3326o\335\027\372\340\304,>\374\037\345\344\rL\t\021\311{\377\027\273\365\222{\377\314\177\022bI\3571w\311\010 \211\274J\t\306 
\354\235\014!\307_\336\013\366\027\316\321\322s\302u\373\364{\322\023*{\324\350\010\221'\343\330\212\321\237'\325\217\273\374\237\204\201x\241\216\346\034\020\320;\340\016\240\214/\332\242a\217\304\207\325\003\304Nx{\273\023\341\361\037y%~+M\342D?\031\312o\347\213\355e*\027\373*\273YM\253^7\276k\307a?\017\3410\267\361s\366$\271\207E\306\354pI\017\357\275C\030e,\270\177Ut\256\315\372\317\320\375\355\374.\277?+\251\376\037\316\352H\210]S\277\347w\234\265XfAD\232\247E\246\212\202\360\327a\244\375O\261\350\335x\313\267\207H\022M\306\256T\216\204\232YU*5=Z\236o\204\227\204z\312R\273\036$\273.\020\227Fe\313P+\337=\375a\230\022V\263\211e\022@\217\326\360W\205\341\257aQ\324\360\322<@\264\322\363f\256\342Ct\373\212}\010\314)\210\235/\225\330\372\036'a\372\212\357\316W\207]\213s\004V""\323\322\210I\323\265q-Q\340\234\355\326\300\343.\305\334\307\307\024!\001!\224C\246\007\356\204\222|\360\017I\314\272]8\336\007YC2q\224M\3201\321\"\2212L|\375O\360FFw&D]<\375]]v\331\362\277\007\327\nd\034h\371\033\341\301\034\264\223j?\001L\312\263!\214\272\351\363\364\2630\307g\204\353`n\307t\304k\336\226Q4\261\003\307\363\256\256\237\221\215\025\332\036S\371\177\212b\274_,XenXr\264\225\346%\233g\213\224Zk\275\360W\342\360\273wgy\334\332\007\211w\310\244>rG\241\024\006\203u\365\036#\341\315\372{\221\000\357dBW\225G\362\231\006[iL\035\237\257\034^\313\261\331\374}\217\036\327f\031\215,\246\273\213\3165\205\036]\275395\230\375\230O\275\372'\340\217\351\375\363\341\266\031\237\253\224\370W\032\2722]\217\301\366}\177e\316\327\217\343\366tv\220\352\331UZY}\251wR\266\026\354G\334]\265\342x\315/m\334\036A\337\3107\217\350\210\366X%_A,\260\"\314<~\317\350\361\374\337+y|\2621\010\345\010(\0379y\246\377=De\373?\212\232ml\327\311\205\016H\274\316\336<\261\027\360\004\263\324)0\253t\332m\270\021\326\315RD\020\3632\342lt\262\347\017\215\372\303|\027\272\256\t/\260\366>D#\302}K\231\224\303\014\314\217\3653\000H\325d\221\031\024_\357\211\336T>\270\201 
\177\311\367\304\375\321\017\346\336\037Q\251\240\277y<\377f$\223{\364\2759l\314\311\204/\332\024\376\265mj~\201\221[\334\374\3030\315Z\277(\377Q\321\374\327C\373h\364B\210\351\352%\222}\0220}E{*\200*?\010\312\027\216\3026L:e\263\357@\034\372\215\316\024\316\342#\006c\364\226\005\025\026:\271\257\366\\\231+\nNe\036\217\361Q<\207\350\222PW\247\"\021\215\023\244`+\346\261\357\2705\221{\216X\023\336vq\253|\026\024\017\330\315\226\313\244{\014\037\021\013\014\354\337\350\300\337\255\233u\024\\o\326U\246+\177\024n\332\006\021\217\021i\037?\314\277?\240]?\017\227\343\313\264uc\371(\333\275o6\335\330\035,y\357^V<\037\351\361p(u\350\236\377\273\243\3532\023T\356\340^\263\267\217\277\223\027LC\342G\321\376\003\030\304\301A\316\247\3750%m\305$\364I\350><\315,\354\354\374\373\237Dm\212\263\033\037\336?U\037\022\342\323\014\214\263\357\372""\t\375\205\020\3753\325\017\373\006zqaJ+\266\243\236\267\367\371k\317\367j\335\203\256\371\374?\2326\261\020A;\277\276\274\257\000\375?\241\325z\036\200\247\346O\322ZE\252\330\267>\037\221\335v\201z.\204\320/H\370\237\177\3349\203\250\353\360\016\020\031\330\010\330\t$\201\017\244i-\037\013$t\0143\t\306\363\370]4\016\177\032\345nE\314\350N\311?D+\355\356\033\260\203\010<\347\330\267\231\333v5\024\361\317\344\203n9\375\217V\211\021\222F\021\376\177NzDI\024>TJ#\310\007\306)h\207\364*\376\312\007\345@\374\242\037\221\003\362\000n\240?\200\2038A\234$}b0\244\255\211r\202\262\200\372A}\300\036a\035\351\037\264G\330\0072_\304\372\201{\307\245/\260\317Y\250\3148\327\245%\372\262\252J\342G?\310\341\3002\222\317\237\343h\241\017\217\342\212\224\260\225\344l\233\215R\267\243\277K?$\277N\244\221\341\246\"\307\362\336\224\313\326\322\265o\272\357\233V\232I#i\201 
\212w\000\327\252\342\002\275\247H\331\330\215\366[`\240D\322}o\273_w/*\266m\016\250\250B2\346U\376\033\231SQ\251\317`\241I\242\321\340\217J\223\307\211\345k\273\215\273nX\367\262)\350\352P\275\231OF\355;\266Z\\\332\233\364\253\327\321\275O\177\2023b\255*\032\226\032\366\236V\254\264l\350\276\364\360\265b\345\331aZk7\365\344\324\2475\027\212\026\264a\257J\265z/FL\271\342\320\347\334\372\370|}+~\224W\305\342\333\215\277\313\327\254_\305\324\256\263\323\026t\262+\255\302\3519\022\244&\204mOJ\355\274\211\347\226x\254<\233\026j\322\217^\205f'\301\036\316\236\235\314\024)\313c6G.IbL\212\227\243\2222\274\220\257\033\224\257\310\322\311=G\235\353B\225H\356E58\265h]\226\267\363\332\277C/\r\"\345\212Z\232:\273\322\322\271\202\03375\252\276\204\230 \355\255e\312\317\255\237b;W\347\311\275\206\356\333U\020J\031\314\221\013y7\262\361\325\255_;\035;\224\253\354C\016\306\032\332\232\373\0313mb\261z;FD5mi\345NJ\312Y\236\013\341\311+\307\265\223\347\231\366g\353kW\363\337\201\365\366\211\370\005q\300\352\305\365}\267\300{\373\020\275\244\342>\036\334\315\327c\271\326\255\235g9N\346]\031uI\345\031]*d\250\323\256\365<{2\t\333(\327\267""\304\221zw\214\177\237\366f\344\354\243\247\322\250\372O\361\235\346\371\325\345\327[\246sUVRK\332\365\027Nt:\361Ik\215\242\\\222I$\222+Z\326\267\213\237\253#\254\360\373\375\232\3158\246\342\302\304f\337\265\3774#I\221\024}\353\31714\312\342.\357\213w\307\266Ha$\231&\004\251\352I\266\020\207y\023\254\253\234\214\234\314\367\024\314\334\2634K3\\\246\224 
\242\035\321\333\216\217w\313\360\275\273\332\301\230\220\226R,Q\226\001\0323!LYh\222\233\"k(\222\232cL\223$4\315\236\267,\314\3134K$K\333\315\311\242\231\204d\244\201H\302\236\234\271\333\225\313(^\216.\335\272\272\222H\354\336\336\253\223\306\333\322\254XD\t\002I\031\030\311$\216\272o\361\363\325\316N\275\3773\256&tu\271\354s\210\244\315%m(\243\247&\323\305\024V\350\277\026D8\345\216\253o\331\257J\206\334\322\267\035m(o\334\241\rk/B\036\024\304\266?\273\362\363R\270~\027\331\362\250\251\365L}\276\217z\236\245\265\303\265\315\307\275\345\021\007\354+\342x\336z\3633330\314\314\314\314\302\314\370\344\000\300\001Mj\365\253\307\272\212s\036\342\244\301\351\357&$\000\220\003\366\365d\200\033\253\245t\255\252\237\006\2007,\347n~o\000\006\370U'\001\224\\I\004\303qy\235b\256wv\222\031\315\005u\307\\\355\2169z=\356\377\253\343\271\2572K\203B\375\350\225A\250\266\335m\230Y\030\205\211ai\241u\323A\231\355\200\r\003\214\244\034l\200&d\266\233T`8um\032\334\021\356\\4\364H\264\026\350l\234{\005\315\374_G\277\313\352};o\231W\326\326\323[y\334`(\010\301\203I\0276\253\217\245t\372Q\255\341\267\232\252\353\351\352\001\200\222\\\253\274Y\204\023\017\203\374X\253MK\336\216/\337\250\343\355\374\367#\352P\235\251q\351Uw\241\243oJ:QF5Jd\261\307b\215*\024!G_j\365RjqBi,npS\312\213Z\021\356F\255!\037\374.\344\212p\241 i\242\033\336";
    PyObject *data = __Pyx_DecompressString(cstring, 25735, 2);
    if (unlikely(!data)) __PYX_ERR(0, 1, __pyx_L1_error)
    const char* const bytes = __Pyx_PyBytes_AsString(data);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (likely(bytes)); else { Py_DECREF(data); __PYX_ERR(0, 1, __pyx_L1_error) }
    #endif
    #elif (CYTHON_COMPRESS_STRINGS) != 0 /* compression: zlib (29617 bytes) */
const char* const cstring = "x\332\314\275\311\177\033\307\2618>\322\223\023'q\336\213\2638N\2348P\022\333\222\027Y\340&\311\361\006\202 \t\013\033\001\220\222\354\357{\223!0$aa\023\006\240Dgy9\352\250#\217<\362\310#\2178\3628G\035\375'\344O\370Uu\367\314\364:3\200\354\357\367\347\217E\314TUwWo\325\325\325\3255\037en\344Z\255\301\244?\356\364\367\033cg\354e\006\273_\273\255q\306\031#j\3349tw\366\207\223b\337\033;\375\226[\354\357\r\354\303,G\224\353v3\355N\317\355{\235A\337\313\014Gn\313mCf\0210\363V;\323\233x\343\314\256\233\351\364\333\356\023\267\235q\372\355L\1770\316x\335\016\220\337X\315\325\263e\2677\030\035\361\305\257\216:\355}7\177\320\031nv\334\2213j\035\030\320\204\253\334h\344\034\331\022\224\243_\235\354\355\271\243\314a\307}\234i\017\\\217\224\357>\031\016<7\343\215!\225\353\335\310/\264\312\203\266\246\226y\247\237\031\364\273G\231\326\310u\306n\306\311\354\322\354\306\007\200\355x\231\326\000\232p\1772\230xP\307L\217\324\345\006\244\302B\034\317\353\354\3673\343A\006\022\267? 
\371P\nd\206\021\261\214\037\217:cg\267\3532\002\312\356\336h\320\213KKZ5\363\2703>\310\214\217\206n\346\035\006\037\217\234\276G*\030%\241d\220\2423\302\272E]w#\337}X\036\364\327\235Iw\3147\250\004\346{\200\242p\330L<\021>h=\254\356\355y\356Xl\306\033\371Ao8\031\273\374p\212A\327\272N\313\005\376\306!/\006|\\\036\243\301^\247\313\272t!-\341\242H\330\337c\304\033\356\370\256{T\037@\225\241\315\232\007#\327;\030t\333\352\200\021\022\r'\271\361\330\365h\242\272;\034\214\306f\322\274;\032w\366:-\034\rz\"\230)\215\3167\232\306\013)\032G\336\330\355\345\235\241\027K\200='\0252\350\272#Mm\326\334C\230\250\271v\033*\354\301\354\306Y\242'\031\303T\332\205\022<\025\007\3748\273\235ng\334q=m\342\374d4\202\336$\303g}\344>\322S\325\334\321\036\226o\300\016\036\273\24324\317HC\263\306\313\244\016\025\001t\"\334(\264Z\215\221\323+\214F\203\021\035\321R\366\214`\273\337y4q\267\373\255\301\010\323\271m\222\"\217\"t\246\024\205\376xt\004\t\202\241\235\212\230\313\275\320\033\216\2172\336\201\003\363}<\031\202\304\330\033""\2142\255\243\361\301\240\177\303\301<o\024 \227\266;j`\227\r\372\374\234V1|\316Xb\306\351B/8\270,d\244\025B\2037,\023\ne$\347U\224*\354\025\032Q<\253hN\034i\220\274LR\321qRc&\352\305$j$\323\320\244\2241\261)5\202&\201\236\2236q\224\234\310\211#\213\344N2\025\021>\0322N\002)X\203\0302\321\205\262\310@ \t$\023\225F*\031Hy\321d\"Q\344\223B\250\025E&\252\004y\244&;\204\272\2549cG\305<iu'm\267M\371\324v\365\372j^/\002\326\235~c\350\272\306A\2721\3545&C\355x\204A\270v\324wz\235V\r\007\256\353iK\006\252ugw\324i\231&\331\206(}\342\360\t\223\333D\251-\263y\340\216zN\267\341\216\021\2426\314\006\210\265\0224g\337C\255n\035t\274\311\310\215#s\013O\206\035\215|,\271m\375\214);\243\375N\277\351\366\206 61w]\373\033\204.\005\353\032\241rX\352\364\037\232\232\250r\330\005\354zg\324{\354\214\364\355-\222\354\270#\\i\014T \361V\037\233\3462%i\244 
\241\003\314mSB\355\350\2575\266\265\334\326Z\235X\370\023\355\242Qkw\264\340\2563\206\305\270gj=\030U-\220_k\356\330\351tK\035O\2377%r\275\3551H\250o\210D7\315.\220\364N'FT\324\007\217\353n\317\031\302\010\331\204\342\006\373 7v\234\356D3\020\231\004\347\2045\033\331\272|A\366\214\203\231\357i\261\332F\345j\244\340\010[*\024t\213<n\220p\022\205Z\277\2519\220z\323\035\273\243\301\276\333waKf\0327\274\312\222\246\235\221>~\226r\024\332\252#\276\014\375\336\326\t`D\326\260\246\320\362\000!+\323\221\221*>\233\260\215\332\246\212\204$t\004.\350\211f\030\205H_G=\261\347\022Ie\242i\264\016\334\366\004Vy~\371\215\247\004\001\241\027~\002Ui\260\237H\020\307|HWs`\202$\260\004\363\"\025\031\262\235\252\320\3306k\302\266~\325\031e\343rB\232b;\211\242\354<\t\206\274\211\314$\254\355\341\221\355\364\007}{8h\337\214\305fc\261\013\261\330\305X\354R,v\371F\250\336\010\0331E\263\341\267;O\206Do\312t\335\376\376\370 \223\275y\223l\245\366:n\267\r\177\351\002f\037\322Fy?\263?\320\244Y\270\315\245\331%[\031\273\005{\031\273\003\205\351\323,\010\345t\007\3736p>\202\251` 
_\342\271r\372\006\262\305\005\216\254\325}h\367\240e\2720\303\r\344\034\365>\260mwC]\305""\336c\312JrJ\017\222\014Fz:\276a&\221\020\241\3047P\241T\267\247\"\224\357\311@\001\025`\242\356)\240\260X\262\244\204Y\207\020\236.ROE\250F3\225\010\004\245T\302\231\254\\\274\202)[\270t8SZ\243e\313\250\301JD\222\362*`uzk\310\244\016\231\230\332\220?[Jy\344\346\343\335\0265y\260\002C\000GU$F\317\301\004\232g/\263\013\373\235\266\227\271\346<\351x\231\267\332\327\213\252}\223\332\233\003\035\261\330?t\272\235v\246\007\252\301\373h\006\246\203\366\235\326;\031\030\256\357\300\230E\343\351;t\230\006\304\324\332\322\351gH17\002\225\234g]\247\215s\370\217\251\306\275\203\206X`\373\206j\364\016Ur\016X\031\364\335\312\000\n\"\326\346<\261\361\240\371\252\355v;\273X\216\333=\"6l\250\0031I\3673\265B\355\203\245\333K\304\320>r1#\017*\277\333\352:\270\232\223&\233t\272\320\355\304b\354\335\310\024\3672G\203I\246\017\023\t-\325C\240\343\023\214\017\334>\314\3631>d\336!\346e2\217mH\016c\347\035fF\353\034\272\230z\335\351z\356\215\033\302&\202\257\244\272\1770c\331j\244\022\360\273\006\025\333\210\307\252{\005\216\250*\014*(\233\031\373\235\026*Cl\224\335`{\t>sa\273\240A\010 \262y\020 \342\276A@i\266\014F<gU\224\341\2324\274\340\345@\032JU\007\224\322\251\004\361\2714\234\336\260\353\2323\241xM\036nBQ\322\206H@\031\367B<\225\310\231\302G\303u3\177\355\037\366\272\253 $\332\250V\331\343\277\336\340\240\201-SA\021+\022(\345\n\254\324\351u\306:\204\232\0053\311\341\330\026\341h:\203\255\333x4\350j0M\020M\356X@\340\270h\035\300J@\225O\021\245\232\332\024V(\021\244\257\273\255\001hHGh\nF\241\240\320\220\035\\\240\357\037\211\004#\220\032\304\032&r]h\265\210\031\0138\227\313-\364q]Q\231fvm\205>R(T\224\323W\363\001}\000\266W\270\260\267%\370\244J\004;\344\244\264? 
k\01354~\222\245I\203c\003RB4\007\303\001\250\177G%\367Pj\001@r\343|m\320s:}\225\243\235\316h<q\002*\225\255h\245e\013q^&\331\034xc\2623\225\0218\265F\203^\225\014|\t3\246\322\235v\257\322\254\2606\346\241V#\001\310Y\241\224\004\024W\"Z\2754\202hA\206\321C\221\221\321\323\035i\320\\+\022\242\332\30312\340\245 E+\206L\207+""\010[\226\004L\315\331w\353\356\030VBT\333\362\316\304\023\353\010\253\200\213e\250\003\0161X\262\256\nhT\256\r@1?R\032\215\030\223\231\212\247\"\251\322*\300\352.S\023\332\271ZQ\302\300\320\020\253C\304\036L}%gN\275i\020\325\3373\241\303\323\013\211\202*\236LVu\245\n3\254F^mo\027\327\024n\210\364V\241f\201\203(*tb\0106`E \026&\265\263\264v\032\"\277\225\231E\266\323=\201P\203/FK\310\330\035fz\316\021QVw\335\3147\356h\020i\265\r\350;\330\361\216\007\003\330/\302\003\267\345\004\261\322i\277\017I\237\004\033/H\262\230MJaw\335}\247u$'\314.\233\023\266pX\313\t\026\226cR\250{h1\361\235\230\264\373\330\rv\233\364\226)\375\255;\346\364\007\203\027J\256\266i\034\263\260Wtg\241\037\014\235G\2605E+\236\322u\346T\236;\3528\335Y\312!\202`\246.\003)\241\320\337\\\272eN0\231\250Mu\353\316\2158s2\247O5\017\200\\vD\352\260-,f\025:\2770J\223_\2229\r\347b\024C\244\36182SK\336B1\204\274\237L\034\231\3406\023C\250\361\242IM-\231\r\346K\267\030\233.\255\273L\272<t\3363iS\362\3164\251\322\360\2765\251\022p\25663\320S\273A\\\002\336\021\307Lg\362\313IL\021\271\351$\221\312^;\211\364:'\236\244D\202OO\"\261\352\342cN\242\367\353I\244Or\363\211\311 \264@\307\320\250\006i3qh\363\214!\221L\240fJ\316\322\031G\2443|\306\322\213v\320X\322\224b\313l\351\234=M\002G\262\0354\206Xo\330L\223 
\260s\232iC{\242\231Dk^\214!OZ\360\"k\243\231F\264\342\305\321)F\275\264\304\201\215/\211^0\371%\0217f\"\326\030\004\315i\002\323_\014\205`\tL\244\213\243\240v\302\030\002\311l\030C\251\263\"&\222k\215m\346T\262\355-\206\322l\2123'\212\325(\315\311\004W\205\004\272\204\016\341Z#\206\212\2361\305\340\343\\\032\022\322\351\234\033\022\222\304\2709$\244L+\275$\327\207\004\312\300{!\201L\331f\247\240O\233\265\340\"\221\226\2309K$\221\3175s4\016\024\t\324ZW\212\264i\002\247\212\264\364\221\367\304\014)\322\222R_\212\264\324\241\363E\352\004\234\033\306Li\222\351%\327\214\024\324\305vzZ\321]#!A\3622*\270p\244\244\313\246\244[HI\267\230\222n)%\335\262\236n\233\234\021\340idk\320?tQ\341\205\205\003\337\251\335\341\006[""\022\370\363/\016\304\237\006\211\213\207\204P\210\271\345\201\003+>\007\361\013\200D\250\225\370\022M0N\314\307t\361T\246\374\274dR\343I\276\274(H\250PTKpU\354k\010\214\211E\301n\302\006\222\\\306'\235u\306\022\031rK\323\206\262\344\227\320zQo\"\ne\273\211\000}\345x\017\013-2.\265\241\026\202\3547\341\230\3546\241#\351n\244\340\305y,\221\206@\026\330\032t\261\035\203\224D\262D\241qW\370\364\235\353\037\177\346\264\333v\037}8\320#\003\3017\237\264\360\350\203\034\230z7\234\335\326\307\334\035B$\242\036\025\237\312\340\340\352\336\247\255I\333\371p\027_A\373\375\320\306\343\204\033\303\243'dJ\3006qB\\\207\202\213\227N\2462\351\325\2162\3756\271\033\304\210\340)\244\310\256i\200\203\275L\233\\)\214\303\311\376n6\001\307\245\010\034\323\366\360Va\3324T\335\267\003ao\017C\337\251\304\264n\253e{0\242\354\t1\353\300Oh\327\261]4\354\020\337;r\035+9/z\306l{\364@9%\367{\273\255YS\240]\335>$\316k\211\3040\360\346j\030\203\317_r\302\203\307P!\332h\211\264C*\004S\326;\240n\207\276,\251\223p\376\205\266G=8\346J\333a\022?1q\332B`\330\215a\260\247\355\372C\354\316\027b\214\344\340\005\222\320\016\274K\217R\246\014\207R\352\302\017\234C\0242^\347\033\222W\266\335!\003\312%J\330~+s\r\375\350\366\311uKt\245\302S$\367\311\030x\"\367\245\243\273\342x^\207\307h4\327\225\245L\353
\000\226\002\364k\363\3201\257K\314\334\350\370\007\252\236\340\373\327\351\217\257]\327\311\275\335\243\261\353}\320\355<\014\027CHC\370\314\336\3104\007\231}\346\3246\034t\360\350=\343Pc:R9}Z\305\3673\023\317\315\334\030\216G\035\217\326\250\215\272$\311\344\343O27\225\253\227!6\323\353x=g\014\035\341\341\231\332\004J'\360\376 \323v\211\364\313\330\366\010z\t\332\332\316\264'D_\005\275\366\003(\352\260\343t\001\333\352\340\340\261\031\363\237r\013\313\260\325\261\3511.W\335\032\365\016\204\302\200]\ngg\311\375I\267\233\271v\363\372\rq\023\300-T\242\326o@,\230\020\213&\304\222\t\261\314!<\267\273w\303\246\373v\352\253\211^\217\024\2125i\321+\355\273n\240\316S?\305\260\302,'\354\211a\247\365\020\375\026Hb2\230""\204\034\361\364T\333`\037\3230\000ma\365Ua8\016\303EX\300\213P``0\031\265\334O'\341N\204\271\255\273l\346`moh\260\324\317\024s\014\002\023\340\361\252\226\347\334Z9{sq!\267\326\274\265\264\222%?\267\026\331O#\267\261Q/l\344\232\005\273\220\317\347J\245\\y\265P\317\325jv\276T\315\337\265\327\n\353\271\355R3\0024s\365\215B3\327\310\027\213\322a\254\364z#\034\264t\324\333\266J\340\271crx`$\301\250\006\244\303\265\010\350s\3431\257\021\241\343\313L\252\341\320D\314\361\032K\202\\\357\355\341\214=j\264\006C7\327\305\375\350Q\021\000\035t\321b\347H9&\246\230\262XfB\202\340V\243\223\352\350I\255\230\200Sj\302aC\326e\030\360\272Z\254\344\352\017V\253\271\372\332j=WY\263\363\325\355J\223>n\024\326\253\365|Ax\261\353\315\373\014P/\262$\225\235\342Z1\307?\343h\332\006\222\\\271X\331\020\020ar\366\276\003c\257!B\362\322\273&\227\235Z^|\277\027\246a\017[\333\271\265z\225\177\216\212n\026\032%\306n\263\330\314U\270\307\210h\273r\267R\275\007\270\342\332F\301\316o\026k\366j\275\272\304\277\327J\367C\357O\215\337\200\006\244\351D-\221\332\233\032\262\250[MH\354_!\030\212\370\026\307\r\303\3070B(4<Dp\241xl$\321]B|S\231Q\360\n3\022E\310\214\016\016\314\344W\267\251\224#c\234>n\2004\334,\346\033\364\255\\(\323\207\006\373\205\341U\250\346\253\245\265|\265\\\333n\026\330O\271\2
72V`\271p\000&MyP\341~\276\264\335(\356\024\354Z\275\232/4\032zds\263^\310\255\3618 \337,\256\026\233\005\200\326\353\205<y\300\362\266\327r\371m\200\340c\243Y-\027\352\366j\265\332h\332\345\334})P\213\364\252ia\205@mb\211$jc\035\002\033\231\363t\341\237M\245\207XC\321\014/\225\313CI\241\304\247\232w\243\026]i\3047\035/\022^\303\215@\301\361\243\302\003\216\310\230W=u4 \rGZ\"\225-\rY\304\233\t\211\014\032\002\351\230\340\211\034\362\224IlF\264&^%\n\r\303\202G@,2\231u\231<\221\1771\201\261\022\032\262\204\232,\306\"g\253\311\342\2545YLW\223E\271&(lS\373|\245&\344\231\231)\ta,m\nM\213\246O\252i\335X\257\265\004\264\276\306FB\251\236:\272\330\332\351\023$\324\211\363\2473\"\222\212\025I\323\027hj\037\205Dl\031\316""\243O\017\215\345W\244\213c\226\247\324q\252\340E6#?B-0\226I\201,\216G\216P\307\242\214\326qH\254\352zh\n\036\003\272d&)\245\231K\016O\330\344\356>\t\316\223\302\213\216C\021\255a\214'\340\370Q\300!\033\364\342B~0\032M\206c\352\224\007x\262\271[+\344\355\355f\261T\3742\327,V+v#W\256\225\n\2155\330u\3477\355\215\332\266}\267P\250q\257uP\010w\nk\205\235\"\354\276rkku\320\"a?d\02351\327l\0300\233\345\262\001S\251V\202\354@\337\255\326\037\260\227\035,-\237\253\331L\017\005,\356\255\n\225\215b\245\200\254\250t\240\220\312@\366\336h\202F\213\3736\031\277^\317\345\261\342\271\222]\006e\271H02\321f\241Y\250W7\n\225Bu\273\001;\2632\354\330\220\000\264\342\365\"i\255t\364\215\342\227\032\342j9$\255\225r\371B\031\224i\205\252\\\334\0103\302\246kl\327j\325:\250\341I\204\205Jn\265\244#\253\024q'\nY\324u\215\211\033\200\240\361V\267\327\327Q\257\277'\023\335\313\325\313\366vm-\327,(\250:l\026t\031\300.)o\227\212\225\273a\177E\220B\275^\255\333\305J\263\256\001\327\013\320:\017\214\010{uaU@n\024\310&D.\025\301\215f\256\271\335\020\300\265\352=\340\020\021\005\002\307\355V\303.\354@_`[4`jl\336cT\260#\277\013\235Y\252\336[\203-y\014ys\263\000\r\224D\330\010\362\205v\213\241I\231\331\203J\236\356\300\220b\255\236+Vl\264\037\300\224\023*^\310\347m\266q\013^\327Varnl\330\260/\
225A\360.\203JY\005\262 C\352\205\r\031\324,\334W@\325f\256\304\003w\252%\231\t\004IL Hd\202@\026d\210\304\004\202$&\010\210g\242V\250\254QYA^\033j+4\224Vh(\255\320P[\241\241\266BC\327\n\r\265\312\r\245\312\r\245\312\r\265\312\r\265\312\r\245\312\225B}\203\314,\234\034T\310\347A\310\327\037\330T8\"\252\330 \242\205\210\354\"@\260\215p\334!pk\033ia\370\022QC\211\311\200\003\331H\223\361\342\212\211y`\251L\306{e\207\314\300U\030\246\367\212k\315M;\177\323.\3354b\262fLL\"s\252\005#f\321\210Y2b\226\215\230\025#\346\226\021s\333\210\271c\302\204\275\252\"\263\306f\315\032\2335kn\326\254\271Y\263\306f\315\032\2335kl\326\254\261Y\263\306f\315\032\2335kl\326\254\261Y\263J\263\222\305+X\330p\256\324\267+t=j(T\005 \202y""\264\003\013a\241\036\2035$\007\264\275Yl4q\316\334\214\305f\343\261\t\211\023R/\304\243\027\343\321K\361\350\345Xt|\321\361%\307\027\034_\356J,\366V,\366v,\366\216\202%\217\241\334%kz#\201h=\007\312\347Z:\332\306v\036\355\275\353\333%#}5\017J0\001\200\006V\330\000\005\356\201iH\226s\245\365j\035\024r\273\226\313\337\205E\303@W\317\335\323\216y\006\267K\271J\341f,V\035\224\365\374\216\275\372\240Yhh1&F\362\270\270\301:\007U\214!\241\265\321\343p\343\3234N\322\306\203\362*\254\251\272\3122\224!\341N)\273\014zZ\265V\243\213\243\200\274_.6\r\225%\250\265b#\237\253\257\031\260\232\332\324A\343\3115sLsf\035\1773\221\342v\"\305\235D\212lr1\242\010\322\223$R,$R,&R,%R,'Q(\013\2066\233\225D\212[\022\305z\251\330\214\357>\225\342v\"\305\235D\212lr1J\367iH\022)\026\022)\026\023)\226\022)\226\223(t\335\247f\263\222H\301w\037\331o\304\316>=\305\235D\212lr&B\347hI\344*\033\362I\244XH\244XL\244XJ\244XN\244XI\244H\356\034~\346P\304Z\t{Z\007\016\326Z=.0^0\014\356\264$\233\203\202\301c\330\306f\265\264\026\217E+G\022E\261\222@!l\320b\350\266+EaE!\346\224ZAI\247\253\0254N\203\355&\031T\264\0031`\244\004\231\246\211\236\342v\"\305\235D\212lr1\302D2\220$R,$R,&R,%R,'Q\310S^\237\315J\"\305-\201\202\350I\354\255\270&\240\210\245.\246_U|6\t\237\230AR\016\013\t\370\305\004\374R\002~9\
001\277\222\200\277\225\200\277\235\200\277\023\217\227\307\000\231\315vy\265\326@\313{Y\230\257\034N\3508\036\2365\301\215\tL)\026\014\360E\003|\311\000_6\300W\014\360[\006\370m\003\234o`\220\224\325\355\215\315\332v\223\256&\365\3731\310\246\001\211\033\240z\014.H\327\270Wl\3467\241\233*\025\342\222#\311\323Z\276X\010\014\025h\315/\225j\032pS\2012\027\0374\346q\273\026\216`\035\230g\033\032\t\203[6=\002\325#\035\242\222\273\213\306\302|\241\270C\027\021\031\327((\265\251T+1<\260\314\352z\344\375h?\305\201\233z\360v%\\\027!\333\255\020[\254\254\002~\315\3165\253\345b\276\001+p\343n\210,\241\021.\022\224$\247B\304Gu\273\031\223\230MP9\021\003\327\253\245\022\346*\340\013\365uX\246K\305\374\003\2338N\302(\200""\335\237\214*U\357\361\007}2\232\254\3632\260^(\025s\253\220\246\371@F\211\347\014<\206\234SP\211\"\200Q\314\330\241koC\217_\3155\n<\001\321>rP\343\334F!\002\260\203\013\256\242\004\314\034\336dp\261\002\312H\205\003\340\251\220D\003\n\222\014\202\376\336.4\350\254\022\020\264\3529\030\352\344\200-\267nD\255\2631\003Kb\016\367\3678\263\204w\264\340l\327\013\002\214;|\010a\333\225<}\027\017q\352\205f\021R\343!\006\377*\344 
\200\014\224x$\300\203\331+\032\350\241\345\233\300!9\025\300vk\206\215\301c\313\205r\014\266\261\271\335\3043+\023\232\035i\361h2\030\350\271\004\212\267\306v\271FFmu\033O\030\310_{\025\367Z\371@\252\324s\345\265:\231\366\345\374Z\360xo\215{,\257\271\3701@'\210\235#\306\0325\205\0275\00442\200\325\363|3\241r\262o\"\r\317\370c\t\206\343\221\034JI~7r\307S\230\330\212hd~$L\310\210\024\250I\0134\261\244\222\031\370\222\t%\346\264\350\210CMh(3\306\310\253\236\326\304\260\216Z\346\332H\023\262\256\304\270U\343V\251\020S\r$\032\003\347\002\225\304\261\212\0139U\243c\231\340F\366t\224&&UZ\231U\003E\310\260\030\251\223\213\006L\037+\203qi\340\264\203/\341U\362\3255\220/\364\0144\267\223\025\001\233\013+K\022\244\260\223\027!\314\267\037\200:W\034\252\253\347J\350\221\361\000\026\263b\263\230\003\032\220\320\024Q\337\330F\217\221`w\rKY\243\234\003\2650\320\361\353\365\355\032\256b\305\312z\265\216\016\332\304$Q\250\201\202B\266\350\364\235\n\311J\025\317\001rk\001\030\0270\002\014\325 \006\207\005\211\360\210\270u\324f(\034W\211\"h\304\240\026D\000\211\006\226\343\355\365\365\"h5\025f^\320\300a\245\253n\327\363A\355\005\034z\321\004\340\035h\212\265\260\rD(1A\004 
{\273\021<\327\267\200\307\3066{-\025W\3539\350\004\211K\000\333\365\262\241Q\351\0016}\226\322\341+\351\251\350Uj\273J\225(\377\341\013\254\203\220;\226BAU\2620\222U\371\0014c9pr\301\245\037u\022\\\241yX\363A\255\240+\007\235\200@\327\244/\333\025e\340\004\243\216\274\020\277\035\\L\305\214\204\200\327\372\257\300\351\200\352<6\221)\223XO\030\316\340\0304L\337\224_\240KE\245\224\231\212>5\023\306&""JNgj\263\370/u\246\"JYi\221<\251\316!\365lU\346\222\2515\356v;C\257\343qa\327\325/\210\252\020\r\007:\032\2658\225*j(\003\016[%\n\376\036}\3440x\320\360\302\241T\026BdT\262\010\302\002\325\017&*\020M\271:\032\225\001\225*\342\304\200\003\226\326s\025vN\215O\225*n\r\305\257\351\210o*{\n^aM\242\010\331\322\301\221%!\366~\020\3142\370\3253`\272B\032\342\204B\205K\243\353\005\272\315Y+6\210{T\360\316\0343\327\213R\254L\351U\303\217B\240\262%\221D\334\351\020\310$\373\350@\3645\242\350I\303\001\217S\013\217\260Q\271\022\014\213\004\275\026T\2510\316!\231\364\353\223>Qf\001\261\216\267\350\tp\343\326\355,\275u\271Q-\203&T\262\253\025|b\216\301\370\210\326\215\265\332F\r\266\241\205f\275\230\267s\025t0\2534p\241\003\235\212\303\240\313'3\315\341\332k7\ny\035\266i\302\242\215\355fB\016\034MB>t\247[OC\024\233S6\rK\3314<eS1\225M\307U6\rW\3314\\eSq\225M\305\325B\032\256\026\322p\265\220\212\253\205T\\-\246\341j1\rW\213\251\270ZL\305U\032\246\322\360\224\212\2454\034\245\351\2744}\227\252\353R\365\\\232\216K\323o\251\272-U\257-\245\340h)\005GKi8ZJ\303\321r\n\216\226Sp\264\234\206\243\3454\034\255\244\340h%\005G+i8ZI\303\321\255\024\034\335J\301\321\2554\034\335J\303\321\355\024\034\335N\301\321\3554\034\335N\303\321\235\024\034\335I\301\321\2354\034\335I\342(!\217\230\324k\353\345\234AAAS\266\275zO\013\316\347\362\233\005{\263\330\324#\320\220\300a\326k\331\0259\233\365\332\342\202\n[Y\222a\033\365\233v\276y\277q\217\334\001\300\210\001\371\246\026\235\177\220/\025\032\214*\216\242P\312\325\032\205\2658\222\032=w\321\222\260\363\230\206\200\314\306\363\230M\3441\233\314c6\231\307l\0
34\217\013\361<.$\362\270\220\314\343B2\217\013q<.\306\363\270\230\310\343b2\217\213\311<.\306\361\270\024\317\343R\"\217K\311<.%\363\270\024\307\343r<\217\313\211<.'\363\270\234\314\343r\034\217+\361<\256$\362\270\222\314\343J2\217+q<\336\212\347\361V\"\217\267\222y\274\225\314\343\2558\036i\244\026Ynn\226\215\202}\263\332h\342I\251N\210\2138I\216\027\315y\022W\375\202\002.\347\356so\225\035\274""\346{S&\242\340\254\036\274\240\007/\352\301Kz\360\262\036\274\242\007\337R\301x&rS\017\316\352\301\013z\260\206\357/j\033\272\274\021\234\325\203\027\364`C\336Kz\360\262\036\274\242\007k\332\204zN\336\324\253\035\021\272\031\207\316&$\317&\245\317&\244\317&\244_HH\277\220\220~1!\375bB\372\245\204\364K\t\351\227\023\322/'\244_IH\277\222\220\376VB\372[\t\351\023\222\307\247N\350\274\204\276K\350\272\204\236K\350\270\204~K\350\266\204^K\350\264\204>K\350\262\204\036\273\035\237\372v|\352;\361\251\357\304\246\216\331\345\010\024\206<\252\3539\235\234E\260\"g\251\263\233.\037\202\321\226P+\024\352\246eU\304I\313j\243lW\363\371\355Z\256\222\177 
\202)W\333\3448\033\217\225+\271\222\346Z\264|\275\336LR[\250\351\220\024fHY\301\233\326\365\302j\265j@W\n\010\347=\006\326\252e,mu\273a\300\254\257\032\020\254\262\032\314NqM\3060\307\204\215z\241P\341>L\034}\311+zR\317\016\004\234rv\300a\303\263\003\0316\034\217\264\337\003\323\002u\014\350\3114\274\350\0109\266\214h\312\241\360\0052\371]\313\227L\241cI\244\341\271\321`(#|\3005\351U\313FR\2207\231\204gB\033\334\215\377\nZ\020HM\007\213\345&6\240\233\226N\307\227\032\310\315\370Y7#\"\236\315\304\340mfb-\303\372\240m\306\017\313\031\021\351\271^\234\205\353\305d\256\305\301\350\225\006\336\230\236\353\r'\3429\237\3641s\351\373\345\374'\313\325o\344\251\020m\205U\032]Me*\276\212Z\034\255\233\360\341t\375\267\322\265\237G\327~\304O\013\324\324\311@\246VKK\030\325\314\214\306\312i?3\250\207\246\344\321K\315\244\227\300\245\247a\223}\372E\001\3041\027\221\304\360\025\020iX\022P\"7\334\207\3567\213\253\250P4\013\270\345'\216W\364\241\261\221\333\274\007[\325M\230\036\344S4H\373x\267E\374`\302\007\225\177\036\245\360\035!C~%\020\360Yd\256\201\364VA\360\262V\250\004\217\205|>x\254\026\312\301#\361\335+\366\267\241\3368{YT\261*\t{\016r`\262\267\327iu@\322\022\177KF\022A\353.\rF\356)\030\014J\307\200\343B\177\322\263\331/\374\320/?R7\023\362Y\224\376!L\244v\020\275\232\245\"0\352\024D\000\243GE\317\233\320\027\334Skt\231\022ly\210\216\010""\233\027\372[\334)\224\312+K\245\362\355;\245\362\235;%\267\235\207\271=\n>\211\031\374\252\375\301a\224\356\010qao\210\020\350\214Rg\267\336\323\205\341\006\304\310\031\035\t\022\263\234\273\017\377gW\262\267\340gee\351\016\373\251S\017E\026h'_\272\033\324\223A4-P\256\256m\227\n\301=\006\006\303\210h\360`c\2646\331\343\276\241\373\364\247\016\2466\221\201Ji.-]\330tf,4#\215+N\377\322\246\212\036\3117s\310k\t\303\333cC\033b\232\233\343\231\313\261\314\2458\346\341gL\303\007S\346\006%!BJE\360J\000n\021p\037\200\367\241\032\365bu'\212j\036\\J\017\342\342q\357\260\210\006w\217$h\350\370\311\301\033\245\242M\243\214\363\300\0078\222`+\2021E4p%w
\342\221\313G\250\200AV\273\333\\/\226\360)W*\031q\353\245\355\306\246\021[\251\326\314\270:\362aD'`\013\271\265\030d\003\203\336\345b\t\240s\342HH\200<\t\213\227\253\351=4\035B\350G\001A\014\330:\014\213\000\242C\341\276\263\276\023v5\213\324G\234\211aI\322@\213\253\345\212\026Ao!j\020\314\303X\270\013\317W\202\277Pos\215%\3001\244\202\014\017\356\352\233n\340\353o\3373(\275\375U\271\013\255\002\3035\017S'\237\253Ts\321kay9[\031\020gM\224\026\225A\r\024\276\016q\030d\000N\002\303K\035?\250\020\274\210\356lt\275\212\356\022\320\367\310\243\327\035\t\3372\026^T\201!\243\025\241!\022\204\202C\003F\341A\240\234\212J8\252=\034\243x\364\364X\374\274\240\247~VY\205\350\230\327\320hj\240Pq\325\320\343H]4\037o\326\002\223\030\213\310\022x\013\010\r\354\t\350\220C\376s\321*\304\304\233Dc`L\240\222\270Rq!K\r\205\245F\n\226\032\251Xj\304\260\324\210aI\375\364\265\021adPKj\342SC,\263k\"\t\271\016G]\257\313\346>{\200r\351\027\223xH\360\211%\264\202j\264\261j\203\355\204\373\3734\2720I\207k4\254\246\r\022\200\361~\260\314JP*\346$`-_\224!\030rJ\004\341\232'\201\230\010\227\240d\355\"\n\003\271\212J>?\202WQ\242\313#\205\265XtS\300\007\236\321\260\364a\350\312\273\205\007\0342\270A\244KX\254n\332\315j\255Z\252n<0\022\231\340\325\273\334KP\317\\\243\021\334\265'W\271q\301\300\000\310\321\033^f\265\361\272*\001a\327\3215-z\r\256\230G\220\340v\271""\341\2526\017\246y\231\357m+w\266\r\367\265\365w\2655\367\264\343\356h\307\336\317\246;\005\026\002\231]z\206\006\026\240\314C[\004\256\345\261eD\03095\007e\266\241\2037j\271\272\202\250\345E@\251T\306+n\205:l\241\013*\252\211vx\330\305\210\030\320\005\024\200]SA[\022\210\356\242\3105~\tS\224J\250\024\232\367\252\365\273\"\260\276Z\026\001\334\275\361\240\tq\310\226\036\260\317\264\240\342\024m\342j\344f\233}\223\375f\203\337\020\020B\026\202\207\305\340a)xXf\017\001I@\021\020\004\370\025\366{\213\375\336f\277w\330o0s\032\333\270\"\263\037U*G\010E\006\007\250P\342\n\000\220\257\265{\320\034\345j\265\271I\356\3333\227\020z\373\215\265\240D\26
2\206\367\371IT\316\"~\274\246P\317\343\305\301f\271\006\352d\265Z\217\245\256C#\333\3335\334\037\024\222\t\311\305\365\031H7\037\240\255\007\246\235H\016}]\004\001\210\034\346\013\305\030\254\206\177v\221D\004n\336\263\363\305z~\273\330\204\201\0324@\251\270^\300k\201$\260\000\235\016B\242b\271\314\261\252\315\027gCe\273,5\177C%R\032\335\006\372\246R$6SJ\322Z\275\270c\227vJ2\224N\241\204N\016\247^L\247\2514\372\336\022\350\202\221\342\354\273uw\334\031\321\257\3268\023\317\255\265:dJ\320\237\302\023\374 \217\360\242\231%\022Z\235+\002A4cT0\316\033\n5\026\023S\200\2225\237\251\213[\003b\265\302\027\334#\260\315L\255\335\301:\222\277\232R\003\270Z(\305Der\357X\244;\332\253\r\272\235\326\021nMj]g\2747\030\365\330~Fz\325\024\253\020\250\345K$\021#:\004r\204&Mv\306Ax\242\237F]#_\210\245\337\361\316\352`\032\356\364T*\213:\272\210O#\026\231\345\2212c\311L\245a\310\314\214\206\0212)\242Gc\361\306!\312\241\345B\305\241j\372(\272\021a\344EKj\342LC,\363i\"\321r\335 \037\3655\301\323\360\034R\246`\231\321\306p\314SD\014\273\346\206vgijw\266\306v\3234\267\033\333\340\364\370\036r<z\002\377\326:\255\261]q\237\214\353\356\336V=W\331\300\257\207\344\352M\330\231\024\033\304\"\t\262\277\356\016\235Nt\003]zU\353\246\022(\365\221I\302:h\021\300w\335\205<\352\356\243\t,:\324\334D (\217\024C\024`\320I\003^s\265\"\254S\223Q\277>x\\w{\316\020\366""\231\233 
1\006\373#\247Gnhzf\214\246bq\264j\035\315\324Qu\023h\240\346D\017\206\225\230X\031\311\216\251\261\312N>P)!\372R\236\356\247\351^\205@\267\233U\272\013\302@L\024\\\334\250\220\310\030M\366T\252\302\362\216\177\032%\330\3645\252\304z\n\313<1\256\263\020\332t\364\233\346\236y\246\311\363J\232E\344\025\266\373\330}\r\350T\267\337\n>\177\304}i\210\035:\3030\210\303i\330\212\247V\231\215\243\217\252\220H\005\025k\256\325\370hO\244\273\244\310R\374k\024j4\007\244\215f\221~\247$\201\244\214nZ\361\024\271\373z\n\205\241\010\265\2011\020\215\205\263\220Xz$\213\210\245G\006\001\261\014X\026\017\253Y\270\037\004\327\202\335f\360\326\330,\263\235;p^-\341N\021\306n\216Y!\364(<#\322c\232\325\215\215\222\001\267\223\253\027q\007\020`\261\243\242G46`\374\230\000\304\270do|\211\374~6\264\326\340\2317=V\nA\201\247]\010\010\216\0379\023\317Z\364B\317$\243W\022,\245Y\305_b\252\020N2\271S\302\206\333\367\006#\217\203\204_\322\363\230o\007\373l\027~9\213\002\350\007\264\232\235\236;\230\320S\346\355J\370!T\266\003\336\256\020c\016\276Gr%|F\241\"\274\210\020\350\371zs{\273\270\206\323\177\273\337\221>\317\214v\367u\207\254\\\334\243:\315E\2442\253yt8\211\025 \314Y\006#\241\002<\341\305Xf\2106\225\312\010\344ry0+9\250\246\271\216q\025Tk'U\355a\177\360\230\236\344pJ\001\367\250)R@\252\245r\350\250`\031\010e\357\254\026\253\215\302Ns'\277F\355\177\370P\254\300(\306\207\352v\023\237X\304\241\254}3x\\\340\037\027\202\307\305\010\272hg\203\307\245\010\272\034=\262\220H\350l\"~.-|\213>\376\213\341\2254`\\kC0~0\254\210\243?\372\236X\370\361\264(M\210d'o\265\205\032\005U\3135X\235\251\241\222\232@\355\2203\035\016\357\336\260\020UF\"\342D\223@\203R\311\210\254\242\274\"X\026\014Kl$\016(~\336-\252\271L\307}\036m\207\272\022c 
\312|\201U\010\375hh\220*>\252W\002\245\020\310\211\320\356\224\355\342Z\340\255[\344a(Ov\340\377*\010j\264W\024\362\371\0354FbN0*\210RE\376\240\3201\204\3720F\371\220\002|\210\261=\304\010j\344m\344\002\325n7r\300djx,R\303O\002\271\312gl\202\210\377d2V/\032\004N\254]""\241\325RTn\204o\200\326J2!\326\022\204l\272cw4\330w\373\260\222x\354\370\313\004\327\327\336@\251\255\270\226V\250\263\231\202U7\360\350T7P\361X=\363\261\364\332*\304\244\020*\222D'U\307\213\257\217\227\2668o\376\362\342[\310K\337D\202\367\241\002\320\027\223\350\361\250\022\t\365\327{<r\030\344Yz\215\345\304\264\244\313$:.\204\005\236\370N\272c'\240\t\236\365\245sXm\321!^(W\204\262Bk(B`\037\004]\026I\006\005H\276\007\217~\240Z\254\236I\035\231\226[\225P`\333\200\346\370\347\033\256\226\330x\265\024\rX36b\315\330\220\241\370m\263i)B\014\314\3104zvD*\221!\rNf\211\032W\027t\260\004\266\"\252x\306\002:=k\0026`\316dS\214E\032\330\235\305\336\031\237@\254@\242\335\223\243r\343\353\342\246-\315}\201\362b\233g\026\351\\\237\364\307\260q$\032\000\253\207\004\322\027\245\022\245\311^l\007-\222\325\275\321:\200B\273\234*\323q=#B\317\243\211T\313\251\236X\3407\206D\346z\303\035\207J\225\002L\340\226#\213\3474$\324s)\242e\016K\203}\371\235\370\335k\201\t\034sd\361\034\207\204z\216E\264\206cn\266i\300\311\\&L\006\035\251\221S\335\214\345\361\311\334\244a\303\\\276\276\340\2323rz\236\006\224\300NH\024\317\021#\3233\305#e\276\032\356X\303Z\010M\340\216\247\213g0\242\324\363(\341\02561g\335(\343\021I\314\212\244\t\374\362\304\006\226\025\022-\327Z\216Sr\233\232\323\004.e\016q\353\276\352\214\262\\\213J 
=w*\221\2267\231L\340L\213\344\370*\266%\256\212\355D\236\212\355\024\034\025\333F~\212m\0237e\347I\260\267\342X\022\241f\276\024:#s\022\245\302\241\016\317\330d.\244\334\243\236\241\030'e\036-\024-;$\023XO\331\257\354\364\212mb\016\352\270\217o\320\033'\370\350\354\266\234V\013\275<:\375}\033\213\364\354\366\030\350\234\326\270s\010\025\201\244v'\250V\207v\001\243\240\237\253p\366\367G\356>au0\212^&\375\226\275;i=t\307vwA\013\356\265&Z\370`|\340\3523\032\266:\256\026\341\365D\360\320\031A\265E\230\347\266\354\266\333v\272]\370\037\257\007\271\220zo\017\212\032vlR\237\321\310\356\301\376\315\361\340\3119r\274\243~\2533\270\001\265\032L\240u\\\317\031\217\035(nd;\355C\247?v\366]\200""\270\330f\320\376v\313\035\215\355\326\201\323\351\353\241\266\327\371FH0r\321\250\244B(\341!\266\201k\357\r\275\340\261\013,\367[G\316\341\276\275\007\2753\030\355b\306\375\276\333\305\204Ngd\017\335~\033\272q\327\336\357\016v\235.\351\316\211\267kw<{\027\n!\017C\3503\330n\354\342\010c\320qpLb\273OZ\256\013\215\004\260aK\316\325\261G\331\036\275\252D\372\177\027$\204=\361\200\032\036\366F.\001 \357\370;\036\214\235\356\256\343\271\370\317nu\035\317\333\035u\332P\215\326Agh\037t\240N\243\326A\220\025\207\3021&\277\213T8Z\241\353h\357\355B\005'\236\335i\323\277v\027z\274u\264\373\230t%\373\361Z\255\205\026y\022Gp\313\031\332=\307{\330\342\224\322\360\371\250\205\276^\255\226\275G\257x\266`\374\250\235\245\207\222.\214\272^\032\004\360\344y\320\025\236;\352@/\365'\275]\027\332\302m=d=\306?\223\0246mA\333\016\036\366\335qg\354\366\360\025F&\324\312\036\217`#\271\013\343\023 \217&\000j\267\272\017m\034\333mrC\031\337\350\023\214\236IwL+\016\300\336 
\200D-\035\300\273\260)\345\237)\373\014\300\370c\t\006\3001\226\211\277\203\275=\034]=\373\340\033\001\020\266;\001\322\247\2117\206\0315\231@Z\350\237\026\205\342\205\314\026/\301\240\364^\007\332\020I\350\211t(\224\344w\230\344\336x48R\340\320f\254\373\027\024\034\226.\203\242\326\220Q\303`\323n\302S\007\262\240\264TT\213\006*\257\333i\2611\337\032\364\367\354\200\000k\363\320=\262G\0036\362\242Y,\216q!\021\010s\315h\325\023\342\250\355\354u\210\244T)@\022\320\241\311\267\023\207\367\210\013\201\r\323\3113c\351\022\027\240\331\321lk0\200\277r-\206G\266\333\337\307\301\316\332\"\370\203\t\310\017\035:\303I\205\316'\254\001\024\216\2778\366\310\310E\n\2575i;7v;D\250\201V\334?\354u[\223\336\004\304+.x \037\335\336\220\266(\354\260[\364\203=\354\207\310\220\340\2310\037\274@m\206\030\007\275m\037\302\037X\232\311\372\334v\216\200\036M\ndh\007=\204/\023\n\304\357\010\320\032\265\335`^>\241\271\005\3578\271\311\373\241=\206\246\361\010\023mrU\332\246?Y\372\263@\177\36247|dTN\370a)*\004\203V\r\320\341w\236\004pK\374\302\222\210""\353\302ris\232\303\260\323\366\004\034\266\273\263\267\207\007\336G\002\002E\252\213\347(\264\332b\252=\014'\217M8qC\004\036\336\320\021\031L\n3\306~\334\031\037D\3633 d\235D\345\316\036\373\356\222P\241v\307#\337\\\302\334\202\222q\212qU\244\353\r\031Hz\002\256_$\014\327:\022\206h\\\002*T\273\004p\333\031\022\004\023\26487\250\374\025\210\204n\3461\260\014\214\230\247\030\214l\036\003\013\017\254$-\\\341xp4$\016\027x\370d<\260w\007\003\230\275m\312\213\007\323\022\217\326\332\034\025\321\014\230\252\200\254\362\250\2013\002\t\325V@\240\262\215\331R\310\343FN_\240\225\024\003\036\005k\021\327\241\010\222V}\036\303\215m\036,,m\002\002\252*\277\313\034\360k\235\220X]m4X\251\313\002\360h\322\357\223!\024\030(a\251\020\310\022\305{\0345'\343Md\202\2407\021\001wc\362\205\227\240\333'\260/\020\263\014\205\272\000\205i 
\2160\215\334\220\301d\222\243.\005\253\202PFp\314\253L\037\024\371!\257\221\206'P\200\366\217\373\013\033\257\374\331xnJ\343\325\230i\036w\332\343\003\t-I\031\023\026&\r~\343\005:\310\361\006}\236,X\022&\221\031Z\300\3225\001e\2504^\330#l\010\332\320\000d3\330\353\354\213`\236\274\343\201\224<b\362F\203\220\263\0379=\230\351\255\321\021]\033\0254\236\235\023hW\354\3176\r\245e\017\2313\2564\00245q\373lU\204~j\211\275\024\240<\372\231\027O\207\222\004j\000\3277\250\213\261M`A\261\207\350\370O\265L\016\275\347\364m\246\230\000\005^T\020+\207x\017\375\234t0{4\354i\341b\026\273-]u\010X\252\212fqD\360>l\325\210\340\202)\201\312\303\341\036\223A\032\032=n\002\314a\2101UN\nk\353\356\221(\271D\323\200\0213\034@\355pV\206\313\262\264\250\210\324\202V\234\212\212\360e\246\035\271\270\353A\031\252\031Q<\245'\301\2611\023D\002\222\r\202\320V\362@\336\037\341~4\014j\024l$\241\366K<\2257\204\236\245\227\313\225\034x\334!5\364ph6\331\261\376\030GKl\325\010\tu\260\331\356\330@Aw\242Z\024\356\314\214\010\276\315\017\006x\200I\332M\254\004\"@\006\240\332\311A\t\307\302;\2112D\226\225\316\376\204\265(\331\t{\223\236\206\260\323C\243\210\332(\001^\203\031=B\r\203\203|=\3347""\010\006\330e\203J\261\373\304\336\203\035\352\001\331\np\330\036\211PC\324r\346j* \237\350\365\003\202\200\375\356\240\007\242\206(Q\212^\201$\234\330nE\252|\210N\030\221\"\211\274F\341b\236$+\002\032\003\016Ww\315\002\31508\3164\205\"\212S\372\005\035\217S\024\305\361\251\254_\341H\227\210\244\001\027T.\320\307\354!\335\253iHt\002\034p\300\247\242\212B!\223\256$\347z\260\271M\251\250\301\006s\334!\362\202\350\273\034\006\347\006\377:\351![\236\004\"\332\332`\344Jp\307\356\023\005W\310\220\204-\260\231\361KEh\025 
\2063u\022CK#\232AA\302\016\306\301fTV\302E\032\224EjO3\032\271\227\002p\340~\027T\311Si\324\331>\330s\014s{\2700T5\374\2201\367\311X\007\026:3\232`8\210\344\001D\260\240{\243\032\245\266$\301j\223\214\017F\203\311\376\001\214&\036\323\356\360o\356\010\257-\222uMn\t\036'\267\344\020\233\307C\373\261\262\306\014\211\310f\356:6\356\021\373\373<\232\335\224\224\273\236*LP\032Haj\nc\272\251\254C)\204\351\010p!\000f`\345\026t F\214\037\371\rl\031\n\226\306\237S\301r\233\020\250\274Qa\323\3274t\302\311\315\341\345\246\031\321[N\240\000\016\036{\002\234\330\323\225\2417\"\327\213q\013\274\357\032\341\201\031>!\2718\257\200\201\200\233\221}\020\\\272\342\tD\241\005\235H\356v\242\255\227\243\362\310\215&\276LEa\360\310\376 4\355(L\022\374\204|\017\024Oc\202\017\202\232\254A$I8\355\023\366L\021!\245 V>-\036j?<\350\264<\226\243\226\206-G1\024\312D\323\223\311\233\3201\271k\242]r8E\202\267\022j\361\274\3165\246\267X\320\300I\256g\361(\026\262\024\327'4i\020\005w<\030\351H\372\256\203\326!\331\366E\316p\242\036\322\240@\377 %\364AE\033JS\205\237 
#\251\221$\245\362p\2673\3604\"\234h\223\006S\r\301\035\360N\316\262`\023\216.c'\354!/\002ex\274@\340IR\024\342\005g\376\261\325\212\250\240{\314HY\240\0214\016~\316\022\022\355\271\264\204\334\2406\022\032*.\004\273\345\333\276\343\031-\017@\325\331;BcB\247/\260\21767\334\"\360\272\021N3\272\310\311\306Zy\0210\020x\275\301\000\rE\373\324\312\341\020;\025\336\244\r\266\217\006jr\\\3001'\343'\303\266\232\027\0320C\321\nRn\244""\253&tK\333~<\352\220\324\275h\355\032\271\373\035r\016F\004\230\027B{\203Cj\247\013\245:-\023A(\237B\331(`\231\0043\023\350t\275\200\3023\332\264=\243!\331Ke\031\366\014&ROo\004\365d\033\343\244/X\031yC\274\2477\034z\234\311,\035\177\001\265\306X\343\305[\301<\311\n\306\201%\033\227\2475,IHM\341\246N\367\342\354\020\236v\003\2360J<u_\345\231\366\025\034\202A\272\240t\320)#/e\236Y\023\365\2224E3\201\330Jf\005\321KXh\275\230\025\307K\\q\274x\371\354\305\212L\022\330\330\031\273\201\361\202\201\037\017F\017\273\003'\260N\006\222\206\036\225\215\360.9\031\315\014\356\331v\033C\034\320\237\300\216\013\373e\330\322\004o\260\376w\372\260r\333\355A\313f\376\000\322\240\026\0061\221\251d\245\000\251> 
\021\240a4\343\200\246\n\037\247\354\20528\304\352\324=\222\300e_\200OKOU\303(\001\350\032Gt\356\332\201\323\016\233\312P\r<m\205\037\262lQ\343\257lP\016\337\211\344c/Td\223\354E#3w\300\0355\022q\331s\251\342\214m\317\216\246a\253\006U\353\020\017\033\374\t\212\300\352\262G\214g\r\377F$=\214\210A\037G2\251g\320\232\270\003\243\317\244P\242\323\242f\022=1v\310\273\027\234|p\357\350\215\023\275=v\310$\211\224cNMv\331\027\336\203\311\313U\366\311\270\313\242(\203@\212d\222p.\217\206\n\316\202\315%\017\r\330\354\225\332>\211\007G\360\314\372,x%\323\205\332\270;m\316\326\3152\010\314\240X\204l\023\335\353:\373\370\317#\177\354=\2304\343\225%\242\244\217\351_\017~`7\331\307\226\331\033\3210\014G\241\037]\340;g\333{\350*f\357=\016r&\207\004R\033\221\236\324!Bk4\332\250\"\005\226wd\t\241l\232\361\262&\304\261\262mT\264\250\213\004\010xz\236C\267b\210@\010\3233\230\210\241\272\\\004\357\354\341`r\235\036.\344\254\261\021K\363\210\236\202\334\250\336\243K@s\246\035\001\014\022\3765\347:!\201x\236\260\030\301\003u\\0\320\263s|\371\2304\r\215t\336/$a~@\002L>bW\220\t^:\311\364\352iC\2122\324\023\222\344D\344\324#\221L\223\221xJ\252\340\261\242\n\320\260\366\351\351\244\0250\211\010\3668z\022\343VF:xR]T\225\361&\273l\305\0349-$Rh\306s\234\202\240\247\213k$\316\347\213\220\006\004\362v?\342""\203\333\243a\247\022\317'\303\361\023Kd8\2332\236Y\311\251@|\223+\242\014\201\252H 
\323\002\321I`\241\253\n\243\341\005\334\0011\237B\356\203\356dL\336\271g\327\351\216\017\210\227\"{\364&\275\2363::\350\354\037\220\263&\2614\002\352\264\311\217h\203\261A\354\357\273\201\022|0\230\214\016\036\303\342DWe\302=y\357\264;\235]{\177\002\277m\333\356\364\210\304\003\3669\277-\252K\021\351N\252\305\302\377\222\277\207\013\344\207\310#\262\nE\242\213\356\245\306\213@\001\253\022\374\271\r\377\206\240P\301>\313\035\222m\026jz\010`\332LT\003\000\242\027\013\023\306\370\246w8en\275\250\246\205\016\313\360,t\030\237)\203\341#h\357.W\000\261\216s\224\324\227\032\036Fa\250)|\241\206C\364b\r\033\233\256C\350\227\352\321?\337\270_\303\320\240\r\020\034\265\221\2035\017t\037r\246\206#\2437d\236\315\350L\014\272m\233w\013\354\342\027wt\343\221d*\016\305\340\r\373\206\354\014pW\022\035,\004\332L\360K\344\013\010\037\233)k\270o\261\211\203,\374\375\032\265\006\371\220O\205\204\212\0209\270\203\177\221w\266\r\032\007v(\177\022\025\272\305\222\263\247@\r\341)\211\302@\250\003\262H\010\241wZ\207\024DN\350x\"\336\353\2068\020B\333\022)B\016\371\350\014\001\032\324\2151\361\204\376B\022\336\332Is\343<\274\3313\361\376\351\355\006eD\242\246\027|\270\"$?\354\270\330\210\356\330a\016\313\221%\210\034\271\301?M\013\001Tm!tTVZ\010\200R\013a\256bst\372\\\365\341%\250>\036\353\221\311\025\034\345EGz=\330\212\037\300\373h\020\014\006\233;\260\303D\344\215\331\371\202\035\t\331S\333\321_\257\337&7\007\372\355N\017 
\330\022\370\207\014>rJ\027\370\241\366\007\320\237\360\007w\002\304xh\223tA\331\010%;\302\220\031\200\014\217\350\237C;<\210\013\033^\334\230\2637Aa\026\350d\355Y@\022W\272\307\242\237hp\016\027\203R\216\3508\212\036^P\300\310t\260\201\244{H<\233#M\030\034\3221\213\320`\350\340\016\020Ur\356\221\264\331\340q\337\035\241l\242Bx\250Yh\207\212C4\251\3730Zk\207\362\262\013\032\332C\364<\3548\335\310K\002\237\002\215\276\035\034\000\206\007\201\244R\374iaXS\004z\223\335\300\271\031\322\222p""\247\014\3512\017e\230\306\320\371x{D8y#/\222\205\000W\361!d\023\350.\221\022\303\001\341\331\213^\260\251\204#\273p\220P3\323p0$\306\004\331LB\337\351~\013\027\223!\216\017nl\222'\354\251\350\025fB\005F\275\346\350(\252\264\210\223\301\\{2H\237\313\220\235:\321\201\242\261\302\213;Q\035\001\333\336\360x7>\213\366\2445\016\230@\215/T\374\332\252\355\321~\330\331\245\273\237\2417\341\353\002\373\310\341\221\355\300\004\007\315\276}SxQ\357}\311h\345\346\227\230\031\265\306\010i\302m\254\006,1\223\025^\342\231\311&1\223U\231\311\352\231\311j\231Y\020^\342\231YHbfAefA\317\314\202\226\231E\341%\236\231\305$f\026Uf\026\365\314,j\231Y\022^\342\231YJbfIefI\317\314\222\226\231e\341%\236\231\345$f\226Uf\226\365\314,G\314\000\370\t\n\303\216m\263g\346\026F\337@\035D\233=yf\346\022|$\345\220\247I\177\330i=\204i\274J\334\307\363\007\235!\271L*\"\363\335\207\345A\177\035\355\377:,\335X+\237\033\225\310\n\255Vc\344\364\266\211)s;\262d\222\310Q\205\300\356)\245\241\006\307\006\265\241i\312&\037\313\023A\353\253y3\375:\032\317v\230,\347\021\272\357\245\312$\272\0171J4\341\227\006%\270\034 
Z\217\3260l\nE\234H\306\356\004Kt\332\304\\8D\t\023\037eJC\234\226\017m\204\006JsHm\032\360\337#X\360\251\016\211\247\203\260=9\202\021C\365\301\360c2\352\234\013\000\356\023;<=\024\374K\302U\215\235\023\250\307\005\270c\034\035\272m:}F\240\360\340p\322\273\2140\257c\232\243\307\257\253\354\031\360\266\370\n\344\241\256E\316F=\356\235\350\"\304\177\237\374A\035\306myn\027\264@\022\304\223\374%\337\253\"OL\351\243^'\201\255\231\354\262C\303s;x\242fm\352P\315N\016\3341\257\272\322#+\322\342\236dq%\316\216\364\0040\022c\266F\244y\007\016\224r\340\240\337\215r7\215\301\205\333]\014&\034=0X\264Gf\200P;\366\016&\3436(\276^\007&\263\327\301V\365:\373\375\240\251A\007!z\010SF\354]\316n\344u\273\230\246\333%\211\272\203\340n\215G\366\200\036\335\002\022\203>Tl4&\177\350\266\211\230sQ\205n\201jz0h\323\341\344\241\2263\036\300\277\021\374\017Z\022*\272d\217\207\017do\005\017\301\326\032\266\003PT\344[ \237""\327G\250pLx\336c4*\020\365\021:\223\335\036\024\356\014F1\210\331\320\243D\3621\210\002F\233\277\002\304C\021\006Tn\263\360Fy\023\r1\263\341\214=b>@\006\272\230;\232\246$AE\215x\271<\274\335\"Z\303\342\261\260'\340\010\030\016\257Z\265\016T\270\232\343A\247\245\201\212\233P\001\001\033@\225\236W\3719p\350\224\264O/n2\224\344!\021\334\037M\331+\336\354\275\242\036xS\377-\346\305E\215W8\337\320\032d\3338gl.'\034\237H\303\2424\241wT\217\370\010\007\277\324\205*\262\201\215i\014\342\236\027\332.\330l\004\024\241$\036WA\353\216G\341\321.\2264A\251B\312\234\240\250\230\020Q1\351\364\307\331\225\t\261<N\210\351\021\377\336\236\240`\230\020\2710\351\322g\372\002\353#\376#\356\007\321n&\004Qa\301\001\303\013\\\341\033\3354{\341\273xh\030\202%g\356\020\036\232\376BH\260\261\n\001\234\351-\204\221\207\220J\342\334\023\363\305\375\276\316\217\004\367r\324\255g\342a\223x\244E<7\224\374\030\347\000\377Q\363SK\271\360\026\342\330k\244\032\360\333M\312\027^\234\220,\337\207\3743\256\235\334\311'}\014\237<6\225\0169\255\205\2353B_\320'\365\314D\334\351\032N*D|\207\317<\262f\353n\376J$!#\32
2\271\233\340U\024K\240f\252\2737\253R\004\2566\032\214\344\004`\246\010\224\207\030\n}\341{\273\324\000\253\303\264b\262\r\217\3125\250\021\352)\350,@}]T\n\351,LO@\314U\032\034o1\207E\301L@\225\000\025\217\236*\364&\220\212b\206-\025\303\233\256Tl #\251\302\205\223S\245Q\306h\350U\252B\017{\322J\246%\221Sz\211C\306hF\022\375N\023\274P\305T\272\2435\001\021I7\301gUx\341\360C\305\377@F\352\022q\335#\262\027a\250\225o\201\307\211\006B\301u6\022\010\211f\274\004\177Z\221N\034*\002\212\350\225v\3306\321+=\241\003\036c\374q\345|\010\301~\340\025\252\305\3429\222\t\316\2354j\260j\265\344}\223\026E|\335b\320\343\300\235\314\224\267\246\321\210\305\004/\357\253\351\010*\274\333\217x\021\3147\237\204\301M\202\010\n\215\353\"\030\344'\335\223\222\376\226p:A(b\271\370\014\"V\177\224/\025\216W-\017\\g\250I\317&\237\010\344\217\346\274\030\0249\266;\354%Q(\013nH\213\372\261\004\231\364\302k\332\300r[*\236\323$""B8_\333N\3336\364o\207\317*\022\375\350\217\026\301\2053I!\017i\366\363\342V8i:4\034uPxO\024Xx\242\310h\351\312>\200\272\0140XK\227D\034\013\237\243\270_\002\210\205\375\002\030\036\271\020\337k\324\214\236\034\201\032S;\252\222\343\250\314\273\327@3\3160\013\342\303\316\2204\2573n\035\\\267\302\377D\342\3673\306\024\"!\332/\343\250\277\337\377L\274\244c(6u|\026\261I\223\332\304\234x\322G\003\210\333Nb\300\224\303\214\275\241\315&\344\241\366 
\372\340\313\034\255a\316g\256\366I\33343T\360\205\372K\314e&\356\270\374\346\252U\212lfk4cN\251\331k\0358\243L\032y\200\210\030i\024\240g\033\310j\252\024\034\3140k\342\222\244/)uc\222ti\251\203\3743\327\242^L\312\235%\210c<\"I\323:\n\365\014\343:U\362\031\230H[\264V>\245\035|i\022'\r\217\331\362x\221\214\224&\231=\263\364\035\312Sf\342\025\017\2054\r_\363\211\010\315\17719&f\233\224v\216\212\314\266\024\244\316%5'3.E\t\211\323\024\333\035\364\367g\024\305\314\027\325\n\216\216\2544g\034\326\034\207\027V\322\351\205\025{|a\031\017\032,\375I\203e8Q\260f\331YY\374\025\033\313\0249\324\212\013\035j\305E@\264\214!\020-]\234A\313\034-\3202\307\365\263Lq\372\254t\201\372\254\230H}V\\\\>+&\020\237e\016\273g\031\342\356Y\306\300{\226)\250\236\2252@\2365[\204<+e\210<+U\214<k\266 y\226>J\236\225\034\276\316\212\017>g\305\207\225\263\022\242\273Y\346\360nVB|7K\023\340\315\212\t\343f\231\342\270Y\361\201\334,S\3305\313\020`\315\322EX\263\322\304R\263\342\002\246Y\261\021\301\254t!\301\254\270\230`VR\000,\313\034\325\312\212\r\\e\245\211\016e\031\3035Y\232PLV\\\240%k\206\360I\226\024%\3112\0069\262\364\241\212\2544\301\204,s\320 \313\020 \310\322G\010\262\304\270=V\\\340\036+!\004\217e\212\301c\315\024:\307\212\217\235c\231\302\326X\246\2705Vbx\031K\211\023c%\005\212\261\222#\261Xq\241E,)\266\210e\n.b%G\020\261\322\305\354\260b\203vX\tQ;\254\370p\016Vb<\007\313\020\371\3002\334(\267Dw\030\313t{\324\322]\241\264\264\227\004\255\264\267\004\255\0247\000\255TW\t-\331a\3132\370\000YZ' K\353\005d\351]L\2549|G,\203\353\203\245\3728X\212\223\203""\245z4X\261\221\263-s\324C\313\024\037\3122Fb\261\314\001@\254tq>,C@\017K\023\013\303\212\r^a\305\004\214\260\342#*X\252\333\207\225\"6\240\245\213\271b\231#\237XqA#\254\244\210\023\326l\001J,C\214\021k\266p7VR\274\033k\366\2007\226)\262\215e\212\372b%\304S\261\214\021G\254YB\216Xi\342\206X\361\201C\254\344\350\037VB\370\017k\276P\036\226x;\337J{\327\330Ju\331\330\212\217wg\245\010\201e\305D\211\262f\211\340c\351C 
Y1\341\362-C\\pk\266\010\304\326,\301\205\323\021\037Zi\002_Zib\323Y1a\336\254\324\261\274\254\344\270OVB\370\243\344\211d)\241F,c8\tk\256h\010\226\032\376\302J\023\034\3362\207\256\265b\2431Y\206pLV\\<&+!\272\2225k\264\t+1\310\210%]:\267\014\016\266VZ\207Y\313\350\376j\245\360\003\264\022\235\372\254d\277;+\301\361\316\212\367\257\263b\034\354\2548\3678+\321?\316\212q\200\263\342\375\334\2544\216n\226\336\247\315J\341\324f\031\274t,\2153\216e\360\306\261T\307\023K\353\tc%\273fZ\311\356t\226\311m\305J\210\200n\305{\267[\211\316\357\226\371\373-\226\374\361\021\313\360\251\016+\346\263\014\226\361C\013\226\341K\013V\\\254~\313\030e\335\212\013\333m\305\305\006\266b\202\003[\372\320\271V\312\320\264V|\224H\313\024#\3212\177\260\306J\021\330\324\322\205\222\264\264a\237\2554!\254-C|jK\023$\317J\364v\266\314\276\312Vr\320~+&j\277e\214\316o)\273\342\031\277\027b\316:Tt4\337`\262b\277\037a\245\n\360n\245\t\376d\245\014\222d\245\376^\2245G\314)+\366\203\014\226\351\326\203\225\362\3330V\352\210\310VRHd+E\000_+&\376\257\225\020\323\337J\010'o\231]\346\2558OP+\336\331\323\212q\344\2644\037\033\262\2141\335\254\264\227\275,\343\275'K\274\032c\305\305{\263\022\2773f\245\372\004\230e\370\236\227\245\371\356\226\245\371b\226\025\377\255*+\315g\222\254\204\357$Y\t\037\335\321\036\345X\346\217\315X)\277\325b\311\237\034\261R\177s\304R?\035b%|\252\303J\361\255\016+\376c\035V\31475,\303W*,\335g*,\363\307',\315\347&,\323\367&,\303\007'\254\370\357!X\361\037D\260\264_D\260R\177` 
\371\024\3042|\027\300\322\177\002\300\212\211^m\305\307T\264\264\037\206\263b>ie\031?\027c\245""\370^\214e\376p\204\225\024T\336\212\013\257\256T\304J\372\026\226e\370\300\225\225\376cL\226\361c\036VL\360|k\236H\214\226\356\036a\234\003\211\025\367\361\024+\371\222\231\225|\243\313J\274\322e%]\313\262\364\027\244\254\370\033\024V\252K\022V\362\325\006+\341v\202e\276)b\305\177q\2118\315\222\303\314\314\273\327\320\351\247N\302H\331\343\353\026\377\226\271F\010\337\025\235\273\256[:\"\226\333\373\031\304\2551m?\363.\272m\211\324\350a\365\256\222\007\276I\221@Tn\264D\264D\tL\302F\220rt9\364\367\030\371\206;\276\353\036\325\331\231S3\230\313)S7^,5\321t\362\370\031\337\264\304$8\262\211:hs\346(g&x?C{\365z\n\032\261\347\347I\223\202\035\363\210\220h\"\322$Z:(\270p-\311%\340\333j\256\236-S\335 \261\262\204\036\r\031\030M#-y\020%g3\374L{\272\204\023/})\371\205V\031\204e\0209%]\032\022\242\247A\003\234\244L\001\313X\225\206\025L\327\270$\005\255\006\007(\266e\347\321\324\245\363y\325\230\227\224\222\327\013\346-'NQM:c\313$p@\252\362\370\004i\310#\3615\234\344\"\237\273:\rX\232\262L>\223<\367A\362Y\223\303ti\300\206*N\350\311i\321\025/==}\313\205\256\232\351\033\226\245\3446n3\245\213<@gH\225\347o\273\316\222\216Z*\310\330['\337\321\236!q\315\035\355\225Il\310Y\022\241\212^\306S\331r\020V2\335<\246o\030\202)\254\354Q\200+\020\305\235\255S\327\347\316j\326)\313\353\033ii\337\2375s\372\006\323\245\266P\303e\273H,Q\002x\026\001\252d\333d\366\327\022F\346M\231\007\250\236\205\360\224?\230Sq\2709\363d\332]\232\324h\215)\023\177V\216\025\0018c.\262>\221\224\270\320j\345\351^\230\n\361T)0\266\034\t%7K\027\306\306\244\313\323ok\244\313\210\2376t\231I\261\322H\251LY\315\224\315\365\231\013\236\271{\350V\316\264\300&%\307\370|\344\353\027\351\250\235~\003w\265\351\207\377\306\260\307\242\334\215\323\021\263O8\244\314|\262F\335\272\231\2762\013_\223u\342\032\215IvR\247\251\006\356_\301\262i\306\314\223_\2524\222`S5\264YE8\344\271#\234\177\315T\005]\322\024\tu\341\033S\216\302\315\2017\
306E.=\233\233\321\227\245R\316\342\"5\271\322\213\230\346\315XR6e\342\307\336\344-\\i\370\245""\233&\"\367x\255\\\026\305<q)\214\026o\270\261\375\235\226\373\202%\314\322\n\030\276%\025y\345\260\324\351?L/\002(=\321\333\210\261!m*4\257m\270\343\325\307\351\307\037M\324\230+QpBF\223\246\344\262V/7K;\251\367\2535g\337\255\223\243r\264K\347\035\214(\026\267\365\233\265\327_,\377\027)\270\325\301\001Qx\222rEa\364\251\211I\034V67\346Y~k\355N\312\262\330\325\212\031\270\303\201\335\010|ck\314R\232N\372\211iS\331\311\370\204\344\374b\206\004\324\246\224v\236\013\321{K\030\340i\226dj\354^\215U\3030\342R\027\342*1w\323\261\030X]\322uRD=\353\262T'7ef\321\315\353\341GUr\265\242Q\317~\361\\R\3441x\\gwy6\203\253<;4\352o*\036H\257\303\250\216]\312()\3116\260\024\352\365\254h\014%\227\314\251\001\241u\333\233a\347lL?\253\324\231Y\037\341Fs\272\004\250\236m\362^\337\351\227=S\322\324{f>L\366|\363\220\304\316F\0053\270\3526\227\341\223\213\300\375\002|\204q\271g7\205\t\311Ac\231a\242\312\021\301\347H\325\230\271@\234i\324~\0356_\340\274\024\211\371\031s\232\265\323\356\261\373\017d]b\213f=\270\366P\013>`\232\230\225QaI0\007$k@$\326\302<\245\317\221\204\311g\364\314j\340\271\304\334Y\244\020\244\206\340\366i\345\253\241pSIq\312@\312\202\302\023\214\270\377\322\347$Xs\370O&\314\310Qd\326y\321<\370\317 
$O\275\244>\235\217\007~2\317\314\202\261\253\346\317d6\2012[^\363\347}}\226J\321\206\315\205~|)\315\177j\026iMLj\312\231\364F5\371\272\323\317S\007\237\032\365\357\371N\362\2305\207\324\226M]R\315\347M^h\004\304\346+\370\222l\314;%\325\"\350jXd\037\357\232m~\032\230y\261\364\337A\3735\351\025\266F\360\r\2079r)\366\307\324\264D\211\322y\032\250\331\320<\346?\3103\345\030\235\246\270\243\271Myj\346\251-'j\322\220r\246\224\032\351>o\342\027iXY\016\317\221r&\225N\227\224\211tvc\260A\276T3\326N\246H\227\323\027\030\257\353EXN3\\\323n\220t\311\205\331\232H\020\254S\350K\316+\022q\243L\223\301\306\334)\3119z\360\205\202P\236\316\221I\352\215\360\014\351cv\303\206\\\370\355\334\034\225\210\366u\270l\277p\0063&\246:\340\014\245\252R\306\370a\264\224\002\340E\n\020V`\305\345\362\273)\317""\270\200\316\223\321\0137\310w\230\\\337\272\032\345C-#aQ\342\320\201J;vZ\007\300Yx\030\037\330\371q\345\2141Q*9\245Pq\343\323\350R0\037W\301e6VM\241\t\250\373\307*\371nRTb\222\264\246i\003\221O\004a`\t\211/.H\262>rgKp\317\351\214S%\250\263\030\014$aB\212\355\355\342\232<\003\343w.\370\001\276\240\353Kn;?\350\016F\372\376\220(c{\233\247\2555\266\343\206%O\312\276\006\330\240\0375J\231 M\346\263;\r\350R\246\231\350(\277\351\020\234U\231\025\355\007\311\024\263\270Y\247I\251\327\347\322\244\214V\256\235^1\326\245Y\316-\255\323\221.]\2223\217.\r\302\250#F\354\242nJ:\253\211\334\224O\244_\265ge\241N\343\023\304\316>SZ\336\266\224\234j\206\255\2221i\334n%>\321\254\026\3024\271\245\263\023&\3454{\267\277\270%,.\307Y\351g2\211\251\243\336|d\304c\362\302\347\215b2\217\016/L\322l\256\204\311\305\205\233\2369\304\264\232\013BV\235Q6i`i\316j\324C\2239\223\316<e\315\007G\26301\247\211!\310\255\354<I\332\034\220\225\205\305\tIG\302\255H\346t:\213y\034\005\363\256\013?\263\234\220\345\365\330\354\322\r\333Y\354\320\032\3133\273NO\323\230\206\246\232\216\352>I\224\263\351T\370\205 6\227\231\2534\211\247\032\251\271\033\356\230\352M\254\307DTe\247\\\342\021\264 
@l\222\273\300\253G\260\277X\235xEr\315UK\320\240q9\2648T\233\305\362\362\223\266\023\303\216\202\016\212\2256nk,T\247\006\005\271\260\375\234\224u\264\3411]\020\344\0224fM \226\300\037\263\033\211\210\256\3015\\\036\203LE\353G\r\003HI\330\374p\022\030\3508T\035\343\235\301~\263D\342o\345i\370-\t\035\372w\352(\260\266\332\234wX`U\346\310*\366\362\2067\\g\301\212\264C\210_\311\270a\031\342W\361v~\315\031\215+\364^\277\200d%\0261\354\2636\367\n\006l\022 \272\221\030\216\300\020\262\203\021r\2659n\014[\371\356\303\235=z\201N\304A\363\031q8h\345~?\322e]\356\364A*\307\025`\244 \275\037]\235\364\304\304FDtw2\034\256\021\216\004|\222@\301]H\r9\275\356(U=\272\326\270#a\270\313\2132&\274\244(\217A=&H#5\030i*\226\244&\317\245 \215Z\217 \221\036\303\002\020\254b\374\001M\241\334\265@\211u-B\234\357\272\233\201q\304\334\r@#\031w\323O""\246\tn\364\211p\361\316\236\204\343\303\251\210\030\341\313\013bA\262\250\343\346\205t}N\022\017Q\224.\342\223,a\205\233p\030\236Th\357\004\264\240\200*\214\305c\351\003]\302\326A\004\225;\373<\210#\256\242\277\207K\304\230\304|p\353L\004\007w.\362$\226W.\010\345%\226.\334\364R\007Z,>\\\353\313\301\267\013\244\264!N\026-\205V\213\273 %\311U,\222\341u\325\322_\267\212\3110Z\3434\374\323\270'\220\\\203\243\201\026\350i\216\026\245\246\251E\341\336T$\365&o\243c\271W\243a\335t\025\314M\306\203U\032\253\230.\235\005\026\nE\240RYn\304T\265\221*\333\206![s\255\032a\023\246\344\232Z\002\362A\014\017q]a\246 \t\312,\211\365Z\331\022|\0306\330\222\005\200-\214\037\3038b\261\326\305\376Rn\\\311\013&\177\263J\301\t7\236\304\332\307`\271)X\2011\017\263oc(\367\265z\021I\222,qxdN\177%\351pI\322\220\271\333G*\206\3352\022K\326#\"UI\253\323(W\207\024\255#\270\252\303\016\363\345\346\032;\335`*{\306U\250\026\004:\246\371m\223\020C\232\202x\305<R\344\302\0337b}y\367\207\322\340qt\305\206D\366\221\262\300K.\364n\213\234\211\026\023\246\221/\306\010j\263\323\276\2071\276k\365\262\322\346\274\3440c\224\272F7I\264p\374\252\206\014G/\355 
\336\221\210kw$\000w\273\303\222\356\224\207\3270r,Zz\215\004If\007`F\352m\022\013]\240\255\221@\350\246\004\201\277\256f1\247\027:t\313x\030\313\252\024D*\027[\224~,E\271\261!\225\2418cj\361\212\023\265Q)1\326C\205\207\243HN\256\214\215\350\306\204\264\304\324\212u.\002\250\270N\230p\230\243\346\036\203\304\033\021\315\222\246\255\273\004 \315\374Y($\001\315\265p]m\004\2553\202\252\276\305\221\260\\L\376\371*e\260\rU1\351\206\205\321\207>\206L3Nd\017\n\265\326I\251q\013\246\272\223xz\302hX\212\204\001\236\356y\302V\3442\3219\260\023=Q\361b7\253\374\036;X\245\341:\365\363%5!!i\222\270\236\232\005\212\004\366\314sq=\005\232\222\343\215WW\357\257c\244\255f\247\247\354\334\302el;\212\330\317\366U\302r\300\035\027k\032\334(x\364\236\342&\215x\225\304\303\224\312&\3736\032\0203\334\273\352\244\010\331\212\024\333\332=k\350\277\324V\273,\360M\330\210\3426\032i""\356\221\330\214\222\016IC\\\222\321Bd\270^aT\3248\023O\200\202\201\231\304\025\211\365\241U\205\362|\210\313|\020\341R\244\034=\252\3100(3\334\354\021uHA'\261\304\221h\332\211*A\253\023O\207\303\010\227:CX\231\204\270\224[\250\2141.\241\2314\233O\310d\335\221'\022\000\321\242@B\\\312\010\247\202&\234\266\252y`5\240F\232\276Cl\235\204\270\324\252\215\210\326\245\022W{\266O\321\014\030\235Z\240\241\220\365\314\242\247\333\253\007\273\221\342^#\210e\031\267\023j\004\221\365\225\235\220\026\301f\227\250\243\320\357\271\211\035Vk\244%\215\274\025\345\262\002\301\257\301\205\222R\2033\332\224\311~\233\324\332(\231X\253\030\361\234\021X]\244\277\030\356\033\023BW\251\t\250QO//\252{\216Y\271\323\014\230<\367]8}S\321\371\250X\303\271\261\240n~\231\266\325\256\343g\343$\033\273\352\032\355iw\346A\375\024\003\215q\334\343\026\0335\353\270\361+\235<+\255\315O\214\325#\215\360\244\354k\024\017\345\232\203\314\267L\020\360\014\245\212\226\001\235\360\257\261/\240\324\204\317\325p\366)\3424\306%\270\327\031\037\204\304\346l#\337>\250m{'\r\341N\\\261\306\0140{#R\326W\304\233\001\272\016\250\223\340\374\221\333\277\216\306t\316\24
0\216@s\t\341\236S\352L\355\304\324\017Y\261\252\365 \004\255\336\230D\313\326Z)\232$\336\255~\332\207\263u\203}2Hwze\234Q\346S\261F\302\231\330\206\351LLD\334#\037\340!\336\362\272\305^H/\272A\n ts\024\000\367\234h'\314\2651\177\360\311\2015\253\206\210\r\2674\"8iK\241R\353\267e\034]#%\235i;\024C\022\211&\265\274x\"\375\342)\021H\276\301\212X\222\323\320n\224\222\251\302))E2+\241\200\211\245JQ'\375\014\215o\006\275t\\c_\366\226\014\250u\362A\301\r\372=\301\010L\255\301\270\026\313\253K\231|\231QBi\017\313y\211)y6P\247d\313\344ol\351\275\212-\275\357\260e\360\020\366\314.\016\264a\320\273\003\362\010\\xC\330\206\036V\363&\341\244d >0\032\017\017\274qy\030e\304\343A\262\225\227M\037\311k60Z\360\006\024\223\347\001OCe=.\032m\035\256\334v\017\303\303v\tg\202\357\364TW\020\205D\237Ts\256\"Q\360\347&\022J8\326\220p\274\027+\033\303rMy[\222\204\343\034Puh\336""\3114\334\342K4\341\222/3\275\033mw\344\252j\316p\344\324\030\017\035\215q\221N\251v\256\264\035\327\267\014gw\320\263!\354\2234-\037\203N\036\202\212b\316\343\033\251\232B\247'\007\207\302\274w\245l*\313\223\257\211I\300\320\013\205\203\261\206R\322\353f \303\241\237F8\3039\370\372n\235\3732\214\214\214>\r#\014&\206\206\235\331\246\353\014u(\266]^\323\346\250\216\222 CUgn\307\331\023k0\263{J\203Mz\354\254v\223\204\313\227\320\365\350\273h\3619\313\2129S\003xQ\262\241\202\270m;\373<\016#\024]\372\"\343\217n)\3025\017\017\207\2164+\226r\336\304d\263&\307\315N\213g\255\010\204\250<\254\223/\033\232<\321\214e\353\316\272\302\242\230\255Au\231\022v\203\306\346`{\250p\307\310\352\244n%\303\002\203cV\274\004\305\266~X\277\320)\357`2n\017\036\367\321\303\361\232m\017\217\236\330c{\211|\276\356\366n\207\234\303{\366\035\273\203\033\230\276\323\265\227\361{\253\236\335\247\237\215\305o\347\341Q\322\307\350\351\371i&\363\366\373\231\332\021\215\261\210\376\222A \375\027\314\274=\230\200\366\247fO\341\337A\001{\335\2013V\363'\340\357 
{\300/.\330\232\002\030\342\273)be\311P\004\"\276\233\"\264\331c\326\350\372\232y\367\032\217\251\035\331\036~J\n\035\251a\2434\001(-{0\304\257EA\221\311,\320\217E\221\257R\r\007\004\231y7\314\004f+PbN\327-\n\334\3030\204\213\344[ \"\357\366p<\342i\226S\320,\245\240\311\246\240YHAs\323D\2436\200o}>\275\364\357\327\255\037\377\344\371\313\277\362\177u\363\354\322\331\353\347\257O_\235^}~\345g\317.={\365\371\225\237<\315\376\353\322\2773*\311\365\213\354E.${\345i\341\331o\216\337:~\224*\313\345g\227 \323\237[/\375\301\177\023)\000x\325\277\272|v\357\2748\335z~\345\317'\371\223G\247\227\236\377\350\307$\217\017N\267N\017\316\016\316{\027Y\226\307\267\220\307G\317\036\034;\307\337\234\376\352\324\201\274~\226\220\327\267,\257\017O\037\235\275|\376\362\364\247\027[,/H\374K\211[?\267\345o}\345\177\325\361;_GT\257[/\335\364?\314O\201\211\367\374\367\212\027o\372\367\376\352\377\325\371\367\017H\352\237<\375\213\377\332\235\363_\235;\370|\347Y\016~\374""\237\274q\234\305\327\333\317\262$\377O\316\267\316\017\246\007~\005\022\366\375\376\340\371\225_=\333z\346|{\345\207\377\362\236\276\363\354\027\230\352\277\236>\372\366\312;'\207P\351\375\263\306\371\217\247\327\375\273\216\357\034\370\007=\2777\362G\336\363+?\367\177\376\247\223lP%5\323\260\215\262\264\225\377x\362\352\311\325\2405\261\216o\234W/n\371\365\006+\3779\224?y\232{\332x\366\037\310\350/\236\255\034\277z\374\316\311\257N:\376\207\271\351\257/^\272\330\366\353;\376\316}\377\376\203\347W\336>\331;\315\235\336;+\234\277z\376\366\364G~\021Zj\327\337\335\367\367\273~\267\247)\006\220\300z_\032R?\323\360\205#\345\017~\355\236\177\357~\n\326\n\323{\027\005\177\013\030\373\322\377\362\377\370\377\347\277\237_\371\303\361\341\311\326\311\376\351\366\331\325\263\225\363\337\370\253\300\262\355\333m\277M\373\362\217\376\037\027\317\226\316\036\031\013M\032\365\357]l]8\334\250\277\373\014F<\226\375\357\337\312\304\277\023Hq\300n\375\353\322\363+\210~\355\354\221>\373\017.\036\371\265-.\377\3621\016\275\327\310\320\373\354\3
74\321\363+Y?[\274x\303o\356\371{\003\1770|~\345\027\317\376\342\277\371\311\371\366Ti\311W\245\352\374\\.\357\315\213G\321\360\346\312X\364\027\313\027_\371\367\277\366\277\366|o\034\344\273\000\215z\373\374\3664+\345\373\237\326K\277\361_gs\017G\362\325\247\267\236\375\021\307\362\177>\375\372\370\322\361\2530E^\372\t@o?\373\354\344\332\351\207\376\235/.\256^\334\366\033\320s\330\352\337J\363\3349\217\330z\376\341M\314\363\361\323\301\361\026\231\033X\350\307\317\036\035_\222\004\315\365\323WO\257\235];\277z\316\263\367i\320|Y\377&\024\372\374\312\222\277\004C\242\345\267`\254N\374\311aP\267\2253\347\354\311\371\223\351#e\224\262\374\377r\236=\317\361\"\343\245\353\247\227N\201\356\275\323\354i.\310&\340\342\303\213\327\374\252\353\273\243\177[\226wi\3552\374\254]n\340O\343\262\203?\316\345\335\313\341\020\377\355\361\333'\227N~\007\325\207\227\267\374\267B\341\365\303\177\375\375Y\366Y\376\231w|\225V~\276b\302\321T=\316\036\257\235\\\302a\210""\365Y\237^\365\255\377\366\377\373\177\376\375\273\240\232X\233\315\263\342\371\375\351\003\"PpJ\213s\227\312\356\237\361\t\312\347\257F-\003\363\372O'WO\262\334h\364\263\205\251\3557a\226\322Y\030\316\354\303\247[O[\241\330\303\351;9\311\235\354\220\026\275\362{\350q%\017\220\204C\177\030\365\022\210\302wP\276\247\032\rY\337\312\236\275\032\315\203wO\257\322\242x\n!\037 
\207\\\256M\257\n\203^m\002\377\375\217\303\321\341[\320\266Q\026\357\203\260\274\177v\037\204\264\243d\361K\251\034\177\025\247D\324\226o+x\024\301\376\003\230\230\017\241w\273\227\212\330\311\305\313_\340\317\027\227\357\362}]\360_\005i\007\363\231/\207\261\372\205\277\016\023\017\3442]O\242\256\343\311*\241\364\342eG\320\"7\"9E;\375]h\362P\004\005T\037\372[\270F\206]\376\276\377\376\2470\277\377q\221\303\365\367\226\177\253\3417\376\307\377\237\207\376\303\261?\236\340\220\370\307\263\334\263\306\361\177\340\272\311\rx%?a\222\212\325\373\364\274\177\261\3417\270a\313\313\332\250G?\226em\t\205\n\366\035\310\213\227\3775F\201\375\354\340\330A1\326A\211\363\374J\346\344\245\223{\247\033gY\234\263W\277\345\344\355\357\317\277\276\270t\361\252,\356\261a\026\374\354\372\024X^\366\227\233~\023V#X\244\037\373\217\237\004u\273\003\353\250\254o\275F\222\021\211\005\311\250\304\222\222\341\004\336\234n^\344\244\244\277%II\347ARX\232`a\016\222^y\371\351O\374_|v>\231\206\022k\025Z\342\023\277\006\235\000t\264\023\244E\343\245\005h\262\260g\227\316r0\230\357Ow.>\363\277|\344?\032\205}{\323\277\271yq\311/BVP\311\277\202\210\361\377\002\302\005\024\027XD\376\356\377\375\037\320\275\376\017\177w\234;n\234\374\307\211\330\277,W\277\000\343\037\344\031\245\346\371x~e\201,\351\3020\245\013RV\323\313\341R5-J\0136SS\371Q\023\020/\373\233\334\340\342\007=\243\360\363\320\201\333qxq\372\376R\301\203\010\204\205\233kf\337\002\355\351\377D\322b\361,\353/\255O\373P\216\277\275\243\252L\302,#\304\233\027\377\345\337\203L\311j\026\364\305;'\377<\333:\353A\375ax,\370\013_\\\274\347o\203\354D\301a\236e\232\374x\311x\355\224""\2532\320B\237m\311\275\204\340\365\363\253|/!h\223_;\177.b6\3755\350\362\275\260A\000\032)S\313P\215\003Po]a\267\301-\354K\376\"\231] 
K\350\354\002\221\3667\377o\177OR\206^'Ia\300\022\241t\327\277\0133\205\262\001\032\326\306\364\037~\023\027F2\207\252\027\377\2408NQ\361o\344\374\0347\032\002m\373\317\001\3430\242\375\345\342\305\037\210\274\2069\000+\337\241\177Hg!\267\036\177rz\211o\221p\036\320\302\225\201\024\342!\323/5-\032\342A^\264\343\360\320\301\373f\374tK3\247B\334\264#IN\377\225\353\247\277\361\027\252\027\217\364\364=\277\3015U\244\326B\037,\343h\207\356\273\355\337\246\335\007s\343\037\376?\376\251\021\374uUF.\343p\275D:\251\346\327`&\205\202\356\227\317\356\372\177X\235\2765\345\0077\016\266iN\035\010\306l\342wX\334\264\204\034\002a]\367\353\320y!#?\177\366\236\377{\201\221O9\005\227\223\262\220E\031\027E\020\243\260(\262\271JF\034\331Z~z\356N\311^\362\023X\210pk\t\2024\224-\237L\377\363\302\0216\223\357\301\364\276\3046\223\327N\177p\352\234>>k\235\2779=\362k\320\365t'\311\030\364\177\376\347\223\334\267\234\216\317r\023\3065\255c\215t\273\262\"\341>\310\377\265PI\234\267\216(>\000\024\r4\324\267\037\235_R\026rqO\010I\236\277\362KP\017_\001\026\237\277\362S\370\377\333\027\331\262\336=\1774\375\301\264\343W\305\r\265\261\264\337\035\337=\201\006\224y|\313\3773\322^\271\346_\313OW.~\003;i\336\n\260r\346\236s]\365\3643\324\336Y\233\364\246\237\222\221l\350\252\200\321\374\331?Q\001\207}+\250F\240\360Ra\374c\377\307o\034\207\373~>\273\250\257h;\207\353\t*\307/\237\275|~\351\374U\215\375E\024\262\376:h\204\255h\346\377Y\026\302]\277\016\355\tD84\375\207(f\371\205\032\344\000\014J:\232\177\366\364o\376o\377r\276<\r\25587`\014>9{\342\177\306m\264is\376\010\225\022J\013;\200G|\202Gg?\340\206\021S\366\257\234\346h\302O\374\217q\365|~e\305_\001\321\001\373\341'\376\223\243h\316\254M\013\240\222\341d\301l\3617sr)\372es\361\311\364\t\356\203\243\036\361\177\374\273\343<& 
]\002Z\014U\272\251\205\341\227\376/A\243\377\226\233\313,\275<]\204\201\014\303\335\277u""\027U&X\007\350\006\346\377\206yCS\254\270\\\3434<<\337\342\227k\272\023~\244Y|\330\036\331\337\240m\315)\010\374`\222w\322\374\222\020\342.^\221\024\034\242}S\241~\313_\271\213\226\276\217\374\217\240\325\241nO`ssti\025\3678\253\227\363\227eMtK]\027\376|\362\031h\254W\336\361\337\371|\372\0234A\221a\302\322\321\035\353\233\312V\3775\354\263\327qK\017[\360[g\177\233\276\177\321\301\241\252\354A\267gK\210\273\314\333\234\236\252\230cn\201$|y\372\362\305%Mk\002\316\277]\272\250\222an\332mED01\217\374\243o\302\321\365\236\377\036.^=\252=\3014\241\253\023\214\006\272\3633i\202\232\034cy\3677\211\355O\355\361\260n\027\277\206\265\203_\304_y\272\361L'\3631\305O\247\377\3437`\224Q\205,\234\231\321Ly\345\347\317\256>\273\005\212\374\221\177\355\243\363w\246\277\002\005q\351b\004\371\177{\345\275\323\025P\032\336AS\354\371!3cP\343$\331\256h\212\241\003\214\355\245k\370S\273\274u9aJ\0075\003m\031w<\363Mh\305\226\n\334\300\372L\205>\014\320\353\376\365\217\316\337\233n\031\013\025\2473\242\177p\356\370\026<D}D\005\351%q5\026(\320*N\254w2\005\267\023\336;\316q;\3417\217\335\223\345\323\037\236\002\217\257\037\207\326u,\351\037\250c\t\223\222\356\204!\253\333\376-\252A\200L\005\366a\024~\003m\375\267\300pU\270\234\264\267\r\207\325G\347Wa\303v\033\206u\211\310\346/\205\035\336\357\216\267O\256\362\2434 \377T\320\232_CK\325\rl\230?\371\177\272}\376\223\351\255\213\353\240\251\262]\362\315\263\227\316\266\244\305K\324\333\230\3417\\ko\237_\362\357\334%\205\370_~e2$\022\2522\177\374\000|\274\343\277}\033\265\241w\375w\327\247\017\375-\220\353\240\242\354\362\332\324\301\271l\366\370#\227%\354p^'\243\007\006;.\312\250Bs\353\3625\264\220\221\235\346o\305D\277\271\270F\245#\031\216\274\202 
\233\265Y!\027\257\211\312\177\240L\200\032\375\006\014t\200\274\351\277\371\341\351\337p8\303\342\313*\300%V\205\347\035\3776\035\026\037\373\037\323a\361w\030\017\377\270\264\216\303b\375\362\306\345\024*<\344\321 }\367\241\377!l$`\325@\373\017\257\225}>}\tF%.\376\177\204I\314+\003\201&\r\243\344""\371+\277=\276vr\365[A\033\370\303\261wr\225i\003\213\376b\351\"w\321 \374\202.\000\372H0\370\371\344\202\344\370%a\217\252= \202\241YA\367DY\244\323Td\246\014\032\312\357\217\033'\201\322\010{'jIg\303\316\377\031w:d\322P2\252\251\364\2160o\265\246\022\030G\247\257\237\275\216\026\374\367\242\345\233\237jh\315\2726\275\346\177!\233RD\351\031\222\301j\004\222\231\0343\374_\221\240\332\202\365k\033\222~0\375\247\277\315iNt\304\276\353_\277\203\n\365\333'\316\311#\242g\344\246\277\273x\000\235\360\255$0\320\322\026t\034\023\322h\020\3369#\217\177\246\326n|\374\370<w\2763e3\"f&\205\346d\201\353\334\277\377\323\372Q\346\370o\320\223W\317\310\336c\351\3517\320no\237\374\340\304\301\325\362\372q\026\007\024\302\017\311~\003`\357\036\347\360P\005a\217\237\265\216_\025`w\236\255\035\277t\274u\274\007{C\034f\264\010T\254\250U\375{+\202\030/\017\350\311\320\367Q\212o\345\246\257FB;\262\024\346$\231F\270Y\364\027@6\335g\213\314\367Vi\330Q.\241\221\354|\353\373+\003\327:\272\021\374\336\212x\003r|\231\034\213}oE\300\306\362\370\352\361\212\177\365s\377s4\222I\320\354\331\233\3472\345g\376g\270\302IP\220\344\371iV\002.\371K\205\351\226\004\\\366\2277\246r\246 
\310\213\027\257J\300[\270\333\222\363\004\361\014\313\275\004\274\343\337As\231\010\314\3719\\\301%\350\252\277\212\272\200\004\315\373y\264\361I\3205jK\225\240\005\277\000\n\360\201\004]\367\327q\327(A7\374\r\220\235]\t\272\351o\342z*A\277\360\277\300eL\202\336\365\357\242T\225\240e\277\214\266\"\tZ\361+\270\273\020\240'\1778\373\351\271\334^\260\031\304\335\203\004\005=\022W}\t\nj\004\356-%\350\016(\023\367.\205'e\022\366\023\377\023\0248\022\364S\377S\234\367\022\364K\310\342\253Ku\314\251~\271qY\022(\341v\357<\247\212h\242\347\275<\2754\375\036g\310G\376\035\320\242\320\300\013}\376\375\024\363\374\345\237=\273\002\033\307\373\307\315\023\230\003\317?\300\026\370\231\365\243W \275\367\354*\227\372\246\177\263t\261\3116\373\220\327\n9\355}\345\277\236>\361\177\375\t.xS\362\346={\233\354A\177\366\332\263\207DM \033\274""\333\307\213\307\007\324\334\201D\337 _\344\347\333 \203-\021\036\032\335(\227f\266\262\322\"\303\263\365\351\371\036Q\204\346f\013T.\003[\3141\350\377\247\334Q\266\336\003M$\r[\277|V9\371\320\317\342N\346\345\367\375\367\327\260#\177\361l\331\177}m\272:\335\273 
\325<\204\021\202\343f\343t\231\030\362\364\203\311\334\020\177\342\325\r\276\025nbvg\217\346o\205\233\260\355\233o\350\200\262\367\021\363\010\220\230Z\302\223/\340un\246\226\316rs2\365\007^\365\343yz\377t\235\200\347\346\t\364\3159\207\013\307\223q\254,\234\375\021\033-\375X1\027\307\215\025cq+g\273h\037\371.\212[\020\025Cc\221\205i}zx\261\365]\314\006\320X\352~\0357yh\r\224:\372\363\351\017\246\316\364\005f\304\347\323yg\304\377+\276RJ\323?\013\2725\317[\026\035\361\316/\315\317[\366\354\3259'\007\317\224q\354\334:\033\235\277v\376\350\273\030;\357\372\357~N\314\260\257\312\215p\013\017\000\350\2320_#\334\242rx\216\201\363[~\273\300\363D\335__\200\247wN\346\345\351\217\374.\211\347\351\303\323Cr\24697O\037\236\316\313\323[\376[+\314/Rbj\341\354m\302\353\334L-\310\360\324L\275q|\353\344\032:\320\311<\321\343\346\027\230\361\327N\347\225Do\013\3525\317\324\"\372\016\235_\235\237\251E\272\210\3161\325y\246\214S\375\316\371\317\317W\220\342\305W\246;\242\376o,\262t\261z\261\207\247Y\337\201xy\317\177ouz\355\342\352\2052\032n\343\202{\376\002\243\341\366\371\274\243\201\324v\223\270\032JL\335A\261:}\001\301\177\347|^\301\3173e\354\232\317\316G\323\327\246\337\211\340\377\300\377`}zp\341\\(\262\343\243\363\267\311\240\234\273\021>:\277:g#\360L\031\033\341\363\351\345\351\333\027\227\276\213F\270\346_\373\230\035KI\215\260|\266Cv\352s7\002q\032\231kx\376?a*\245\316\2249y\345t\223\370\367I\254}\200\246\353\263\027`\355\203\323\2559\007\r\337^\306A\363\227\363\005\264*|\027\203\346w\307\233'\233\247\271S\245\r\256\237\276F\316S\347n\203\353\247\257\3169fn\37076/^f'\351\022[\177\301\005d\372\002K\334_\316\263s\262\365\241\377\341\027x\220H\216\340$\266>\306k\006\323\027\330\201~|\236K3\230\315\003\207S\351\214\343f""\371\254\216z\360w\261\376rZ\255\261\270\017N\353\250N~\027\303\364\367\307\017N\016\210\302%5\374\273\247o\223C\375\271\033\376\335\323y\345;\307\223\261\tn\236]F\345\365\273h\202O\374O@\335A\317L\274\021%5\303\027\027o\263\3712wC\240k\347|\023\343\272\177\375
\323\363\003\262\001\226\330Z9\333#\273\277\271\231Z9s\346d\352\352\311\357O\017\210\371L\340\351\331?\331\315\2339Y\202\364[\337-G\376\257o\234\356\221Uh\356V\272q:o+\375\346\370W\2203\236\357\212\255\364\277\344\300yn\226 \375\274\366W~0\031\247\325\307\347\253h!\376.\246\325G\376G\177\365\377J}\327\277\226{f\003\255Z\027/\260\035\336\230>z1\035\345\315\343\257O_&\022Nb\215:\200\275\300B\370\336\351\274{=\216'c\017e\317~\216\233\321\224=\364o\313\272\224\303\243\246\334\345\325\313\2217\016\264\336\305O\375\355\226\337B\231\247xv\376\373\025\364\267x\003\357\266A\331\271\351o\230g\023\332d\257\0227\243\317\316\037\243\037\177\340\257\200~\265\377u\026\371:\223\233\255\324\031\341\332\311?@\341\273B\266x\357]\374\267\377%\271[\300\271#\354\220\352(g]\037\010\333\257\357\347\254\353\206\260\273\371~\312\270.\350\351\337\307a\332+O\277x\346\340O\to\313>\177\371\247O\267\236>\004\361\323\301\233\215\324\365\360}\377\275\034\336a\001M\253z\3617\377\313]\177\027\217i\371\253\271\322MG\350\231\310\225\345\255\223-\352\252\302S\340(\016\217-\325\313\267\241\037\330\377\302\350\373\3742\014\302\360\226(\273\020q\321\365\277\374\007 
\377i\276\207\030x\377e\270\353\220\376\007\237\235\377\023\204\307C\311Q.\030s!-\324xz\215\351\225\341\2552\211\370\227\n1\310,\361\342>\214\341\334%\377sz7k\315_k\373\355\001\3609\274TFv\313\227w\360g\347\362\275x\367C\331\333\217\226\367>;\016\223\353\033\266<\372\365\374\367\305\r\377>\347*O\307\325\233\274\342\366\375\014\335w\005\365\343\373)\343\017\274\030\376\236\235u><}\344\337\304+\027\257^\\\3259\230~\346\177J/N\255\372\2530=\016\241_\037\007c3<\257\017z9\032]b7\257N\257F%\322\373\320\324P\257\\\331\021<\332\360\"\345m\274\325N\375\226\351\035\277\310\r\373\263\363G\350\200""\210w\235V\374\025z\327\211s\303\346<\33687l>O\220\363\377\364\377\371\277&g\265\220\224\336\311\344\306\376\352%?G\235\205\227\374\245\257\374\257\330\0059\3367\222^\335\221\343V\010\267C\3559cU\274\341\277\301\335\227\020\362\023\374\021_#2n\025\303\204\300\212E\334\013\303\033y\304\037\227\306\326\210\277\325\367\374J~\232\235\256_\\\365-x\210z1\272\212\346\310\302M\240\323\337\325f>\234/\377\353\037\317\362\317\036I.\211\353\323}\274j\004B\000\232\226\016\276?\236\274\r\333\277_\3737?G\177D\321\373\374\n\261\006\377\360\374\221\354\031\373\246\177\017\344\222\326\325\376{*9\220\370}\305\315\032\372\342C\377\306\032\273;J\257\0140\227U\231m\311g\034\322$\265\272Lehs\211J\277<\311T\241\233\366\364\222rW4t\300.L\267\374u\356\362\212o\001D.\t\367\"\202\200\321R\035h\257\362E\345\300\022y\340\357p\327}i\343\376\376x\035\344\306\225\214\237\271y\366\223\363;xG9^(Q\037g\242\3548\344\202*\225\037a\014\220\227\243\033T\242\233+S\373^$'n\255\377Jlz\310\"\252-\211\217!\nIJ\301]!\350\242\033ux\205\340\317'\205\3237\3166\360^\314\233\307\216z\253Ml\203P\306\277\177\372\3519U\2046\246\177#\301exE\310\340\250\017\352\262\260\212\220\030@\227\224K\301\244o6Or\244o\026\316\336;\277\307\367\r:?\277F\3146B\336\233\027\311!\224$*P\327\374\305/4\312C\026\325\375M\\\2516/\303\272\365\246ro\027\350A\375\036\003\301\344R\036\351\362\227\233\370\323\274\334\302\237\326\3456\376\264/\273\227\325\252]C\317
\225\253d5\332\274\370\211\337\370RT\345\tS\240\036qw\330\237\013\302\031O\342Q8\223\304\241p\376\361\323\367p\270q\3029\346b\326{\376\273\204\005b\307\204\\\002]M\212\316tIS\376\351g\230\020\272\235$\224\272=\272\350\366U\3022\r\375p\361{\177\007d-\354\232\250\202\020\3353\010\326k2\270\036]\274Dc\007\300>\230\206Q\000\001\370\215\377\315\337\314\3535\2279d\371(\330\270\311W\212\024QQ\004\205\006\346<u,\014ui\341\302\327;\376\333A\347\025/~E.\230i#O\221PS\341%\007\272k\345/\314<7^m\370\231\377\263\314\311\245\223_\340\360\377\321\3237\237\361\027\033\202\264L`~qqU\366\240\026\335\035""\211\300\024\250\210\352\2666\255JW\301\203\353#\320\303o\235|\201\361o\210\231\377nx\001?n\301\367\255\273\027\331$Wn\231*\274D2\335R\251~+\337\212x[\222\022Dg\020(\251\277\347\035\310\261(\005|\202<K\027\334\352\024n+.\266\344\034\221\356\267\262\316Y\272\370\001\331\331\232s4\326E\2402\256\303\345\213-%lU\020\373L\275\020\244\2774\362\251\240\213\277\362\264rL\203?\274t\025\306\322\253\\\300\"\377\335\217\317+h\213\245\333\270\360\256\307\370i\366i\236\230Qh\014\240?\235,\203\234\375%\r\206tU\315\302\377\342+r+\361\241&\006P\345\302\211\252\035]6\225\256\276\tTd?\232\303\000`[b\210\201O\203\020\005o\235Tqo\r\34327}\003}Y\371q\031\304'xK\271N\232\2242\272b\251\211h\003\323\341\203\323*.\215\037\370\037\344\247\177\361+4@\313\301\363+\257>{\303\377\335G\347o\235?JX\363\252\027\217\242\336\r\364\225=i\254\220\321\207\267\257\242\026\211\256lJ\223\215\220\375V\326\265\376~\261\242,\326\254\n\037\373\177\241\027\273>\363?\303\213] 
\327\276\016V\255\022\376\224.\227\343\257\363\342r\030J\317\310\376\000z\005\n\010X\004\372\200\033\\b;\3700\344\025\374\354\302\202(\013$\354aa|\004kFt\2736l\023\2216Tw/\256\252\301\037\336<\336\372V\271$,\030+\202\021\212;\024X/\003U\346\352\311OO\267X\270\302\25700\001N\366\037\376\353o\376\317o\234\272g\331\370+?r\321\321@\026\026\360\250h\\\n\201\236\255\032r\na\276\277\362t\303\377\005\252;\277\246\007g\341\342\215\036\327\270\004\025\246_\371\265@\177\270\362\377\265v-]Q\035[\270\033Q\300\007\017%\212$f\211H#\202zI\"\010\rH\033\001E\202v.\002Jr\263z]\360\021\3262\351\025\270\250\334\225d\310\260\207=\354a\017\317\360\014\317\360\014k\350\360\374\204\374\204\273\037\347QU\247\316is\333\021\213\352z\235z\355\275\277\332\373+\222@w\354\343vQ2\0170\310\361'\365\020\211w\032\343\022G\362\366\234x\350\263\244\230\372\rE\212Y\361\204m\243\"Z\361A0\270\017\340\274\304?/[~\305?\277\266\224a\336?\257\346E\016\226\234\254\337,\350\007\240l\024\020\245\213t\002\304~\223V8\217\306H\375\014j\202#bd\326\336r;\305:\317x\027\022\0370\003D\332\364""\211\014\014e#\232\006_\177\311!x\247\013\302\317\335\202t\210\276\203\"\347\0146bJ\300\\\310\205\001j\034\333\210\310O\023kF\254\300\361D\252Vz\277\202\003f\027O\215\377\247_\001K\311.\350\3169\267C\024\231\267\3527\361\033\222:\304\232\241@?)\350<D\0000\307\013\253\240M \232t\241\372\002\"\273\221%\007\247\277\016\370\005\206P)\026\303\332\233V\227\277\220\333\255\254)\347\2163n\010\242D\270\366\025\374\276\353\364\030x\030\">\324is\014f\334\2627\025\177\343\256F\352\262\201U 
\310\367\213\277\215\233\242\226\322+3\357B\374\356\237\303Hu\221\301M\247\352E\213V\316w(S6\023e\354\323bP\311\035\315$\224x/\317\210\031X|\3106\246E\033\267\366UO\211A\324\262K1\266\2005\265]tx\210\332\rxRJ&n\211\274\230\346\010\345\274\310\203\300\001\213[\017~\207\021\336\022\227\037\240\331\221\n\263\004p\324\357L\034\323\010\024\032\254M\300\264\016\213\361{\010\317\201\314\004\213\2137\326\036\205\256N\221Z\035'D\205\343S\217\216\025\031T\336\r:\343?\311 \224H\253\344\354}\361\250}\003\3424+f\3302\230\025\263ln\375\016\003\363Gd\r\373\363\262\354\016\271e\235\017!\373\211\352\013\321\372gZ\205\355\231\343\235Gk!oEe\261:V\363),7*\013\325^\204\257\317\242\016{\001\354\246\366>\3047\332\376D\007>\321\365\017\253\025T\233\r\260i\016\334\222\247\246\224\233\253z\334\352\205%\367\022\344O\233[\366\324\224lsU\023\217\214\335f\037\272\303\310\252\241&\r4W\367h\375\0330\364\333\254Cg\330\355\361\324\224&\253\276m\255\301\346\037\006\361\270\004\233\315S\223\n\315\325=am\303\002\236tF\335\rXX\236\232Tl\256\356)\273\027\304\313KT\253\210\022WKk\262\366\261\372}\330\251\027\210\342b\300SS\232\\\335\213\3166\234\263\223\204\021o\213\355\035OO\374Ws\365O\333W\341\270?\360I1\236yZ\332\367\315\325~\275~\325W:.\270YOM\351i\256\3529{\333\031\200\361\346\213\214\037=-m\275\271\332\357\332{ \201\227\334\274X\3071\366\264\264\215\346j\277U\337\263\306\255%;\017\322\244\350\251)\245\346\252\036\256\355\201\030_\262\362\260\260\271\352(\245$2\030'\252\341\244\013$w5\332%\221A+\347cx3\365\234\t\246\234\236-A""\333\363\263\365\033\3648\035uR\353\013y8T\362\024\310\207\373=\2520\000\024\256jZD\210\177\317q\244\362\207\32691\007*\360\317 
\315v\003\240Q\272\177\277P9\024W\036\271\267\261\271\006\364)\347\321$\333\254\261%\3725\314\303+\262D\323Qo\334\214\212\346#&\226\335[F\352\033\250\177>+\3562\025\337\274\230G\203\202\330\213\027\260\273\013-\253\370g\265\345q\013\262\352\216\212!E\363\nI8\326\323\332't\361\036\221\022kD\221\306\234&\034\022rb(\237\302\375,\276\272\357\254\372n\255\361\234\375\t\364\256\214\323$WL\203\365\000MF\035\3757\3464\016\253\336\205\0009?aP>\373p~\237\341\006\205\371\375\006\014\277\003}~\373\255\333\024\027\2514\200M\306Y\355?\323\033\36036@9c\331\022\321\227XN\211\002-n:\234G3\364y\275l4\304\253\003\251\020\031\024?\311\215T\266j\022\371Z\220\370C\255\307`\\\005\320\347M\211\257\rZ\207\324\234\225\025\031z\022\242\337\300\226\215\3021\242\271\365\277T\315\035\256#\246\327\304\022&\252\271K\210\214^\021W&Hlu\320\202\247{\273[\365}\253\320\000vP[\014\364\347\323b\r\177@\312\251\004\216)\030Ad\326\374R<f\222)}\235\237>Z\254\014\204\3548\030d\036#\263\342NNZ\373v\241\321\274\360\205\023\3252&\306\250\226\360\302\211\310F\225\013\247\310\247\310\000\243\022\3438\341\250TM\210\243v\036\355\212\213\n\216\232\364\276\001\361\204\215\341\240\347D\016\357&\336\211'\024\365\233\216\277B\263\215n\246\tiT\362\231\357&)\037\216\241\262\037\305\370\267xQ\212(\250\357\372\254\264\257\346\327\310\033\r.\203\241\025\236\223\\\305\330\326\034\246\024\025\362\016\310'\241m6dC/E\t\377y_\351\251\014\262\231\332\361\341tw\345D\245T9\250\356\324\246\360\026\261\315\335\027k0\220\240\300\320\226\355>\237\362\253\327z\265\266X\037\250OZ\327\211)iJ\254\274\022\257\340K\366\304\036#\203C\344\276\274dM\203\344~\341<\024\3371\347\345\276\330\247K\273\006\275/b\357\213-\317\361\317\363\000I>\300?\007-\205c\370e\307\356\035\213y\237u+\034{\n>\230\253\375`E\024Q\203bp\312\376\302y\346\226\342P1\363\327\311$Qt\341'\2179\332\245\233\316&\202'\214\222\372#|\251\272E\3077\206\344""\340=\003\236\200\307\211\210J\033}\t\212\211\325\245\235\210j\313x\214\374d\257\340a\347\267y\2456\002\343\274\314.\022\260\255
\220bs\206\337\302InT\252F\207\021\225\346B\226\330\247\2446\357\212]f\0306\255(\005\370x\204\346\006\310S(\004=a\"\333F+\006:\177_\276\340Oj\\\007\260\225\036\243cW;\306\344\300jb\234\264\211\271\211W\246\202\233\330CP\305\364\225\367\326R\367\253r\343\255^\324\374m:\315\300\275\353)\030\272\023\316E\261\014#\003\302\211}\274\336\304\032\021\217\370\325\020\t:\362\364\353\356p\234U\300=\376\273\244x\301\357\327\342\207\351)w\334]&=J\227\227\270C\337\343\233*2\221qT\260\013\025q\264\234d\026M\231\220\375x\367\321\037\325R\365]=\213\203\177\007Fg@d\260\0213\372\205\207mD\236\354\237\300j\366\300\360\350K\360m\351%\377E\376\210Y1\013\243\253\021\337Ft\201\276^\337N85\245\241\377\260\354s\264\021\3163\207\003\303z\342\376\221Z\304N|c\344~I\227\377\322\025IP\\\246\224F\211\031}\010\302x\263\250h=\217)o\241\2252\\\333\305\323\217\034\315\017\021P\223\351,\215\202Vd\020\315\217\356\370\202L\023\342!3&JN\275r\201\376\030]_|.\350\366\017E^T\275<y08\360#zW\372\236\316j3T\260_7\344\376k\322G\361\032\003\315%E6\220\311QpL\312$\3754\357\234\224!G\221\201S\340\243_P\273\\-{*vK7\370 S\2241\210\256\323<\344\330f\317\263<!(\037Z\317!\333\251\177\371\204O\016M\"a9t\202L5&\274\247\2573\025\245'\235\220\005 
*)\031N)\005\305\271q\350-=}\022\226\234w\262\242\000\375f\355$\271\315\211j\037\235pQI\034\366\213\326M'\233Vl\016Y\232\353r1\022\302\367\374\307lR\272z\315wq\216J\222U\362$peM):X\273\303\217\014\205E\t\325`\254\"\265\344h\375\016\013\217\260$\354<\221_'\007\243\235\324\242\001\341@Tt\311)\213\007\376*O\033\243)\364\177\253\025\245\222x\355\262bw\342\335iJ\213c~\274pT\016\014A1\003Z\026?5\221Rt\250\266\311\336\036aQz<c\331\337\024\311}\235F\324\242&\027D5\343G6\315\222\213\345\375\270\256\250\030\2428o\354UD\204R\3729\\{\313<\312a92bW\3357t\016\320Y\241l]\237-\325\360\276O\267\262q\003+\361\206[6\337\177+\306A\326|\216(\216s\334\3045\371\025E\345\341\236-""I\243:\004\331\377u(\n\320\347\3463\030\236C\216(\221\370\370\203\n@c\225^\027\321o\352\245\317\n\256\303^'}\226\2349\262\362\0251n\032\006H@\037\261\202\242n\337D\013\260L\236\205\344\027\032F\201t\305^\"\210\236\271\331\214[z\177\243\236\004\273\217h\226\277\320o\366O;+\3441a\200\001\344\211\014\025\300\242a\226\303\037%Q\310\267\331g\345\346\350Y\272\370;T\322`\343z\337Q\242\266\244q>\031\313\372\3322N 9AtY\214\275\240\230\037\013\260\2273G[H\266%9A\2047\343\272\274\304\312N\324K\332\316!\264\341\256\375^Ci\203\207\327\344\016\242\201\365\026\346\341?\022\322\221\270\312\202){'\031\"\352\027\245P\357rW_X\205\324\315\034lz\331WQu\302Q\266%\246wZ/\354\307\356,\235v\246m\371I^7Q\032\342G\271\366\342\266\237\364!x\246t\230\227H\037\252\307\345J\013\276m\303\020\nl\017\374\377\246\270\0142\312K\331 \240\251\362@\367W\317\303\331\322\332_\275\001gM\316\356`iu\210\215\354G\017*\006\236\020\0326\211\037\026\355\261\360h#\343\0037\207\354\036\030>R\331\016\026\317\267G\037\341\212\342\207Q\300\256\355\020\205\r\242\332g\213\347\365_\355R\\\227\377X\314\245\352\006>\027\323S9[\031\367\022\266u\000\242oI'\364\207\353\243\374\264\345\277\241\300\231N0\302\304\331!\226g\247\216&\3018\364U\351\313l\204\014\212A\004\374\202HNr\273\276A!\254i\316\323\217\377\007l\332\334\207";
    PyObject *data = __Pyx_DecompressString(cstring, 29617, 1);
    if (unlikely(!data)) __PYX_ERR(0, 1, __pyx_L1_error)
    const char* const bytes = __Pyx_PyBytes_AsString(data);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (likely(bytes)); else { Py_DECREF(data); __PYX_ERR(0, 1, __pyx_L1_error) }
    #endif
    #else /* compression: none (142514 bytes) */
const char* const bytes = ": .AccountingStats object at .ActiveVgpuInstanceInfo_v1 object at All dimensions preceding dimension %d must be indexed and not sliced.BAR1Memory object at .BridgeChipHierarchy object at .BridgeChipInfo_Array_.BridgeChipInfo object at Buffer view does not expose strides.C2cModeInfo_v1 object at Can only create a buffer that is contiguous in memory.Cannot assign to read-only memoryviewCannot create writable memory view from read-only memoryviewCannot index with type 'Cannot transpose memoryview with indirect dimensions.ClkMonFaultInfo_Array_.ClkMonFaultInfo object at .ClkMonStatus object at .ClockOffset_v1 object at .ComputeInstanceInfo object at .ComputeInstancePlacement_Array_.ComputeInstancePlacement object at .ComputeInstanceProfileInfo_v2 object at .ComputeInstanceProfileInfo_v3 object at .ConfComputeGetKeyRotationThresholdInfo_v1 object at .ConfComputeGpuAttestationReport object at .ConfComputeGpuCertificate object at .ConfComputeMemSizeInfo object at .ConfComputeSystemCaps object at .ConfComputeSystemState object at .CoolerInfo_v1 object at .DeviceAddressingMode_v1 object at .DeviceAttributes object at .DeviceCapabilities_v1 object at .DeviceCurrentClockFreqs_v1 object at .DevicePerfModes_v1 object at .DevicePowerMizerModes_v1 object at Dimension %d is not direct.EccSramErrorStatus_v1 object at .EccSramUniqueUncorrectedErrorCounts_v1 object at .EccSramUniqueUncorrectedErrorEntry_v1_Array_.EccSramUniqueUncorrectedErrorEntry_v1 object at Empty shape tuple for cython.array.EncoderSessionInfo_Array_.EncoderSessionInfo object at Error allocating AccountingStatsError allocating ActiveVgpuInstanceInfo_v1Error allocating BAR1MemoryError allocating BridgeChipHierarchyError allocating C2cModeInfo_v1Error allocating ClkMonStatusError allocating ClockOffset_v1Error allocating ComputeInstanceProfileInfo_v2Error allocating ComputeInstanceProfileInfo_v3Error allocating ComputeInstanceInfoError allocating 
ConfComputeGetKeyRotationThresholdInfo_v1Error allocating ConfComputeGp""uAttestationReportError allocating ConfComputeGpuCertificateError allocating ConfComputeMemSizeInfoError allocating ConfComputeSystemCapsError allocating ConfComputeSystemStateError allocating CoolerInfo_v1Error allocating DeviceAddressingMode_v1Error allocating DeviceAttributesError allocating DeviceCapabilities_v1Error allocating DeviceCurrentClockFreqs_v1Error allocating DevicePerfModes_v1Error allocating DevicePowerMizerModes_v1Error allocating EccSramErrorStatus_v1Error allocating EccSramUniqueUncorrectedErrorCounts_v1Error allocating EventDataError allocating ExcludedDeviceInfoError allocating FBCStatsError allocating FanSpeedInfo_v1Error allocating GpmSupportError allocating GpuDynamicPstatesInfoError allocating GpuFabricInfo_v3Error allocating GpuInstanceInfoError allocating GpuInstanceProfileInfo_v2Error allocating GpuInstanceProfileInfo_v3Error allocating GpuThermalSettingsError allocating GridLicensableFeaturesError allocating GridLicenseExpiryError allocating LedStateError allocating MarginTemperature_v1Error allocating MemoryError allocating Memory_v2Error allocating NvLinkInfo_v2Error allocating NvlinkFirmwareInfoError allocating NvlinkFirmwareVersionError allocating NvlinkGetBwMode_v1Error allocating NvlinkSetBwMode_v1Error allocating NvlinkSupportedBwModes_v1Error allocating PSUInfoError allocating PciInfoError allocating PciInfoExt_v1Error allocating Pdi_v1Error allocating PlatformInfo_v2Error allocating ProcessDetailList_v1Error allocating ProcessesUtilizationInfo_v1Error allocating RepairStatus_v1Error allocating RowRemapperHistogramValuesError allocating SystemConfComputeSettings_v1Error allocating UnitFanSpeedsError allocating UnitInfoError allocating UtilizationError allocating ValueError allocating VgpuCreatablePlacementInfo_v1Error allocating VgpuHeterogeneousMode_v1Error allocating VgpuInstancesUtilizationInfo_v1Error allocating VgpuLicenseExpiryError allocating 
VgpuLicenseInfoError allocating VgpuMetadataError allocating VgpuPgpuCompatibili""tyError allocating VgpuPgpuMetadataError allocating VgpuPlacementId_v1Error allocating VgpuPlacementList_v2Error allocating VgpuProcessesUtilizationInfo_v1Error allocating VgpuRuntimeState_v1Error allocating VgpuSchedulerCapabilitiesError allocating VgpuSchedulerGetStateError allocating VgpuSchedulerLogError allocating VgpuSchedulerLogInfo_v1Error allocating VgpuSchedulerParamsError allocating VgpuSchedulerSetParamsError allocating VgpuSchedulerStateInfo_v1Error allocating VgpuSchedulerState_v1Error allocating VgpuTypeBar1Info_v1Error allocating VgpuTypeIdInfo_v1Error allocating VgpuTypeMaxInstance_v1Error allocating VgpuVersionError allocating _py_anon_pod0Error allocating _py_anon_pod1Error allocating _py_anon_pod2Error allocating _py_anon_pod3Error allocating _py_anon_pod4Error allocating _py_anon_pod5.EventData object at .ExcludedDeviceInfo object at Expected length 100 for field firmware_version, got Expected length 128 for field bridge_chip_info, got Expected length 200 for field log_entries, got Expected length 24 for field fans, got Expected length 32 for field clk_mon_list, got Expected length 3 for field grid_licensable_features, got Expected length 3 for field sensor, got Expected length 8 for field utilization, got .FBCSessionInfo_Array_.FBCSessionInfo object at .FBCStats object at .FanSpeedInfo_v1 object at .FieldValue_Array_.FieldValue object at .GpmSupport object at .GpuDynamicPstatesInfo object at .GpuFabricInfo_v3 object at .GpuInstanceInfo object at .GpuInstancePlacement_Array_.GpuInstancePlacement object at .GpuInstanceProfileInfo_v2 object at .GpuInstanceProfileInfo_v3 object at .GpuThermalSettings object at .GridLicensableFeature_Array_.GridLicensableFeature object at .GridLicensableFeatures object at .GridLicenseExpiry object at .HwbcEntry_Array_.HwbcEntry object at Index out of bounds (axis %d)Indirect dimensions not supportedInvalid mode, expected 'c' or 
'fortran', got Invalid shape in axis .LedState object at .MarginTemperature_v1 object at ""<MemoryView of .Memory object at .Memory_v2 object at NoneNote that Cython is deliberately stricter than PEP-484 and rejects subclasses of builtin types. If you need to pass subclasses then set the 'annotation_typing' directive to False..NvLinkInfo_v2 object at .NvlinkFirmwareInfo object at .NvlinkFirmwareVersion object at .NvlinkGetBwMode_v1 object at .NvlinkSetBwMode_v1 object at .NvlinkSupportedBwModes_v1 object at Out of bounds on buffer access (axis .PSUInfo object at .PciInfoExt_v1 object at .PciInfo object at .Pdi_v1 object at .PlatformInfo_v2 object at .ProcessDetailList_v1 object at .ProcessDetail_v1_Array_.ProcessDetail_v1 object at .ProcessInfo_Array_.ProcessInfo object at .ProcessUtilizationInfo_v1_Array_.ProcessUtilizationInfo_v1 object at .ProcessUtilizationSample_Array_.ProcessUtilizationSample object at .ProcessesUtilizationInfo_v1 object at .RepairStatus_v1 object at .RowRemapperHistogramValues object at .Sample_Array_.Sample object at See `nvmlBrandType_t`.See `nvmlBridgeChipType_t`.See `nvmlClockId_t`.See `nvmlClockLimitId_t`.See `nvmlClockType_t`.See `nvmlComputeMode_t`.See `nvmlCoolerControl_t`.See `nvmlCoolerTarget_t`.See `nvmlDetachGpuState_t`.See `nvmlDeviceAddressingModeType_t`.See `nvmlDeviceGpuRecoveryAction_t`.See `nvmlDeviceVgpuCapability_t`.See `nvmlDriverModel_t`.See `nvmlEccCounterType_t`.See `nvmlEnableState_t`.See `nvmlEncoderType_t`.See `nvmlFBCSessionType_t`.See `nvmlFanState_t`.See `nvmlGpmMetricId_t`.See `nvmlGpuOperationMode_t`.See `nvmlGpuP2PCapsIndex_t`.See `nvmlGpuP2PStatus_t`.See `nvmlGpuTopologyLevel_t`.See `nvmlGpuUtilizationDomainId_t`.See `nvmlGpuVirtualizationMode_t`.See `nvmlGridLicenseFeatureCode_t`.See `nvmlHostVgpuMode_t`.See `nvmlInforomObject_t`.See `nvmlIntNvLinkDeviceType_t`.See `nvmlLedColor_t`.See `nvmlMemoryErrorType_t`.See `nvmlMemoryLocation_t`.See `nvmlNvLinkCapability_t`.See `nvmlNvLinkErrorCounter_t`.See 
`nvmlNvLinkUtilizationCountPktTypes_t`.See `nvmlNvLinkUtilizationCountUnits_t`.See `nvmlNvlinkVersio""n_t`.See `nvmlPageRetirementCause_t`.See `nvmlPcieLinkState_t`.See `nvmlPcieUtilCounter_t`.See `nvmlPerfPolicyType_t`.See `nvmlPowerProfileType_t`.See `nvmlPstates_t`.See `nvmlRestrictedAPI_t`.See `nvmlReturn_t`.See `nvmlSamplingType_t`.See `nvmlTemperatureSensors_t`.See `nvmlTemperatureThresholds_t`.See `nvmlThermalController_t`.See `nvmlThermalTarget_t`.See `nvmlUUIDType_t`.See `nvmlValueType_t`.See `nvmlVgpuCapability_t`.See `nvmlVgpuDriverCapability_t`.See `nvmlVgpuGuestInfoState_t`.See `nvmlVgpuPgpuCompatibilityLimitCode_t`.See `nvmlVgpuVmCompatibility_t`.See `nvmlVgpuVmIdType_t`.Step may not be zero (axis %d)String too long for field bus_id, max length is 31String too long for field bus_id_legacy, max length is 15String too long for field cause, max length is 255String too long for field firmware_version, max length is 95String too long for field guest_driver_version, max length is 79String too long for field host_driver_version, max length is 79String too long for field id, max length is 95String too long for field name, max length is 95String too long for field opaque_data, max length is 3String too long for field serial, max length is 95String too long for field state, max length is 255String too long for field str, max length is 2047String too long for field uuid, max length is 79.SystemConfComputeSettings_v1 object at This AccountingStats instance is read-onlyThis ActiveVgpuInstanceInfo_v1 instance is read-onlyThis BAR1Memory instance is read-onlyThis BridgeChipHierarchy instance is read-onlyThis C2cModeInfo_v1 instance is read-onlyThis ClkMonStatus instance is read-onlyThis ClockOffset_v1 instance is read-onlyThis ComputeInstanceInfo instance is read-onlyThis ComputeInstanceProfileInfo_v2 instance is read-onlyThis ComputeInstanceProfileInfo_v3 instance is read-onlyThis ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-onlyThis 
ConfComputeGpuAttestationReport instance is read-onlyThis ConfComputeGpuCertificate instance is read-onlyThis ConfComput""eMemSizeInfo instance is read-onlyThis ConfComputeSystemCaps instance is read-onlyThis ConfComputeSystemState instance is read-onlyThis CoolerInfo_v1 instance is read-onlyThis DeviceAddressingMode_v1 instance is read-onlyThis DeviceAttributes instance is read-onlyThis DeviceCapabilities_v1 instance is read-onlyThis DeviceCurrentClockFreqs_v1 instance is read-onlyThis DevicePerfModes_v1 instance is read-onlyThis DevicePowerMizerModes_v1 instance is read-onlyThis EccSramErrorStatus_v1 instance is read-onlyThis EccSramUniqueUncorrectedErrorCounts_v1 instance is read-onlyThis EventData instance is read-onlyThis ExcludedDeviceInfo instance is read-onlyThis FBCStats instance is read-onlyThis FanSpeedInfo_v1 instance is read-onlyThis GpmSupport instance is read-onlyThis GpuDynamicPstatesInfo instance is read-onlyThis GpuFabricInfo_v3 instance is read-onlyThis GpuInstanceInfo instance is read-onlyThis GpuInstanceProfileInfo_v2 instance is read-onlyThis GpuInstanceProfileInfo_v3 instance is read-onlyThis GpuThermalSettings instance is read-onlyThis GridLicensableFeatures instance is read-onlyThis GridLicenseExpiry instance is read-onlyThis LedState instance is read-onlyThis MarginTemperature_v1 instance is read-onlyThis Memory instance is read-onlyThis Memory_v2 instance is read-onlyThis NvLinkInfo_v2 instance is read-onlyThis NvlinkFirmwareInfo instance is read-onlyThis NvlinkFirmwareVersion instance is read-onlyThis NvlinkGetBwMode_v1 instance is read-onlyThis NvlinkSetBwMode_v1 instance is read-onlyThis NvlinkSupportedBwModes_v1 instance is read-onlyThis PSUInfo instance is read-onlyThis PciInfoExt_v1 instance is read-onlyThis PciInfo instance is read-onlyThis Pdi_v1 instance is read-onlyThis PlatformInfo_v2 instance is read-onlyThis ProcessDetailList_v1 instance is read-onlyThis ProcessesUtilizationInfo_v1 instance is read-onlyThis RepairStatus_v1 
instance is read-onlyThis RowRemapperHistogramValues instance is read-onlyThis SystemConfComputeSettings_v1 instance is read-""onlyThis UnitFanSpeeds instance is read-onlyThis UnitInfo instance is read-onlyThis Utilization instance is read-onlyThis Value instance is read-onlyThis VgpuCreatablePlacementInfo_v1 instance is read-onlyThis VgpuHeterogeneousMode_v1 instance is read-onlyThis VgpuInstancesUtilizationInfo_v1 instance is read-onlyThis VgpuLicenseExpiry instance is read-onlyThis VgpuLicenseInfo instance is read-onlyThis VgpuMetadata instance is read-onlyThis VgpuPgpuCompatibility instance is read-onlyThis VgpuPgpuMetadata instance is read-onlyThis VgpuPlacementId_v1 instance is read-onlyThis VgpuPlacementList_v2 instance is read-onlyThis VgpuProcessesUtilizationInfo_v1 instance is read-onlyThis VgpuRuntimeState_v1 instance is read-onlyThis VgpuSchedulerCapabilities instance is read-onlyThis VgpuSchedulerGetState instance is read-onlyThis VgpuSchedulerLogInfo_v1 instance is read-onlyThis VgpuSchedulerLog instance is read-onlyThis VgpuSchedulerParams instance is read-onlyThis VgpuSchedulerSetParams instance is read-onlyThis VgpuSchedulerStateInfo_v1 instance is read-onlyThis VgpuSchedulerState_v1 instance is read-onlyThis VgpuTypeBar1Info_v1 instance is read-onlyThis VgpuTypeIdInfo_v1 instance is read-onlyThis VgpuTypeMaxInstance_v1 instance is read-onlyThis VgpuVersion instance is read-onlyThis _py_anon_pod0 instance is read-onlyThis _py_anon_pod1 instance is read-onlyThis _py_anon_pod2 instance is read-onlyThis _py_anon_pod3 instance is read-onlyThis _py_anon_pod4 instance is read-onlyThis _py_anon_pod5 instance is read-onlyUnable to convert item to object.UnitFanInfo_Array_.UnitFanInfo object at .UnitFanSpeeds object at .UnitInfo object at .Utilization object at .Value object at .VgpuCreatablePlacementInfo_v1 object at .VgpuHeterogeneousMode_v1 object at .VgpuInstanceUtilizationInfo_v1_Array_.VgpuInstanceUtilizationInfo_v1 object at 
.VgpuInstancesUtilizationInfo_v1 object at .VgpuLicenseExpiry object at .VgpuLicenseInfo object at .VgpuMetadata object at .VgpuPgpuCompatibility object ""at .VgpuPgpuMetadata object at .VgpuPlacementId_v1 object at .VgpuPlacementList_v2 object at .VgpuProcessUtilizationInfo_v1_Array_.VgpuProcessUtilizationInfo_v1 object at .VgpuProcessesUtilizationInfo_v1 object at .VgpuRuntimeState_v1 object at .VgpuSchedulerCapabilities object at .VgpuSchedulerGetState object at .VgpuSchedulerLogEntry_Array_.VgpuSchedulerLogEntry object at .VgpuSchedulerLogInfo_v1 object at .VgpuSchedulerLog object at .VgpuSchedulerParams object at .VgpuSchedulerSetParams object at .VgpuSchedulerStateInfo_v1 object at .VgpuSchedulerState_v1 object at .VgpuTypeBar1Info_v1 object at .VgpuTypeIdInfo_v1 object at .VgpuTypeMaxInstance_v1 object at .VgpuVersion object at .>')<?add_note and  at 0xcollections.abc<contiguous and direct><contiguous and indirect>cuda/bindings/_nvml.pyxdata argument must be a NumPy ndarraydata array must be 1Ddata array must be of dtype data array must be of dtype bridge_chip_info_dtypedata array must be of dtype clk_mon_fault_info_dtypedata array must be of dtype compute_instance_placement_dtypedata array must be of dtype ecc_sram_unique_uncorrected_error_entry_v1_dtypedata array must be of dtype encoder_session_info_dtypedata array must be of dtype fbc_session_info_dtypedata array must be of dtype field_value_dtypedata array must be of dtype gpu_instance_placement_dtypedata array must be of dtype grid_licensable_feature_dtypedata array must be of dtype hwbc_entry_dtypedata array must be of dtype process_info_dtypedata array must be of dtype process_detail_v1_dtypedata array must be of dtype process_utilization_sample_dtypedata array must be of dtype process_utilization_info_v1_dtypedata array must be of dtype sample_dtypedata array must be of dtype unit_fan_info_dtypedata array must be of dtype vgpu_process_utilization_info_v1_dtypedata array must be of dtype 
vgpu_scheduler_log_entry_dtypedata array must be of dtype vgpu_instance_utilization_info_v1_dtypedata array must have a size of 1disableenablegc (got got differing exte""nts in dimension hostname must 64 characters or lessindex is out of boundsint() argument must be a bytes-like object of size 1. To get the pointer address of an array, use .ptrisenableditemsize <= 0 for cython.arrayitemsize  mismatches struct size no default __reduce__ due to non-trivial __cinit__ object> object at pci_bus_id must be a Python strptr must not be null (0)._py_anon_pod0 object at ._py_anon_pod1 object at ._py_anon_pod2 object at ._py_anon_pod3 object at ._py_anon_pod4 object at ._py_anon_pod5 object at self._data is not Noneself._ptr cannot be converted to a Python object for picklingself.name is not Noneserial must be a Python str<strided and direct><strided and direct or indirect><strided and indirect><stringsource>unable to allocate array data.unable to allocate shape and strides.uuid must be a Python strADM1032ADT7461ADT7473ADT7473SAGGREGATE_ECCALLAMBERAPP_CLOCK_DEFAULTAPP_CLOCK_TARGETASCIIAccountingStatsAccountingStats.__reduce_cython__AccountingStats.__setstate_cython__AccountingStats.from_dataAccountingStats.from_ptrActiveVgpuInstanceInfo_v1ActiveVgpuInstanceInfo_v1.__reduce_cython__ActiveVgpuInstanceInfo_v1.__setstate_cython__ActiveVgpuInstanceInfo_v1.from_dataActiveVgpuInstanceInfo_v1.from_ptrAffinityScopeAlreadyInitializedErrorArgumentVersionMismatchErrorBBAR1MemoryBAR1Memory.__reduce_cython__BAR1Memory.__setstate_cython__BAR1Memory.from_dataBAR1Memory.from_ptrBINARYBOARDBRAND_COUNTBRAND_GEFORCEBRAND_GEFORCE_RTXBRAND_GRIDBRAND_NVIDIABRAND_NVIDIA_CLOUD_GAMINGBRAND_NVIDIA_RTXBRAND_NVIDIA_VAPPSBRAND_NVIDIA_VCSBRAND_NVIDIA_VGAMINGBRAND_NVIDIA_VPCBRAND_NVIDIA_VWSBRAND_NVSBRAND_QUADROBRAND_QUADRO_RTXBRAND_TESLABRAND_TITANBRAND_TITAN_RTXBRAND_UNKNOWNBRIDGE_CHIP_BRO4BRIDGE_CHIP_PLXBrandTypeBridgeChipHierarchyBridgeChipHierarchy.__reduce_cython__BridgeChipHierarchy.__setstate_cython__
BridgeChipHierarchy.from_dataBridgeChipHierarchy.from_ptrBridgeChipInfoBridgeChipInfo.__reduce_cython__BridgeChipInfo.__setstate_cython__BridgeChipInfo.from_dataBridgeChi""pInfo.from_ptrBridgeChipTypeC2cModeInfo_v1C2cModeInfo_v1.__reduce_cython__C2cModeInfo_v1.__setstate_cython__C2cModeInfo_v1.from_dataC2cModeInfo_v1.from_ptrCBUCLOCK_COUNTCLOCK_GRAPHICSCLOCK_MEMCLOCK_SMCLOCK_VIDEOCOLDCOMPUTECOMPUTEMODE_COUNTCOMPUTEMODE_DEFAULTCOMPUTEMODE_EXCLUSIVE_PROCESSCOMPUTEMODE_EXCLUSIVE_THREADCOMPUTEMODE_PROHIBITEDCORRECTEDCOUNTCUDACURRENTCUSTOMER_BOOST_MAXClkMonFaultInfoClkMonFaultInfo.__reduce_cython__ClkMonFaultInfo.__setstate_cython__ClkMonFaultInfo.from_dataClkMonFaultInfo.from_ptrClkMonStatusClkMonStatus.__reduce_cython__ClkMonStatus.__setstate_cython__ClkMonStatus.from_dataClkMonStatus.from_ptrClockIdClockLimitIdClockOffset_v1ClockOffset_v1.__reduce_cython__ClockOffset_v1.__setstate_cython__ClockOffset_v1.from_dataClockOffset_v1.from_ptrClockTypeComputeInstanceInfoComputeInstanceInfo.__reduce_cython__ComputeInstanceInfo.__setstate_cython__ComputeInstanceInfo.from_dataComputeInstanceInfo.from_ptrComputeInstancePlacementComputeInstancePlacement.__reduce_cython__ComputeInstancePlacement.__setstate_cython__ComputeInstancePlacement.from_dataComputeInstancePlacement.from_ptrComputeInstanceProfileInfo_v2ComputeInstanceProfileInfo_v2.__reduce_cython__ComputeInstanceProfileInfo_v2.__setstate_cython__ComputeInstanceProfileInfo_v2.from_dataComputeInstanceProfileInfo_v2.from_ptrComputeInstanceProfileInfo_v3ComputeInstanceProfileInfo_v3.__reduce_cython__ComputeInstanceProfileInfo_v3.__setstate_cython__ComputeInstanceProfileInfo_v3.from_dataComputeInstanceProfileInfo_v3.from_ptrComputeModeConfComputeGetKeyRotationThresholdInfo_v1ConfComputeGetKeyRotationThresholdInfo_v1.from_dataConfComputeGetKeyRotationThresholdInfo_v1.from_ptrConfComputeGetKeyRotationThresholdInfo_v1.__reduce_cython__ConfComputeGetKeyRotationThresholdInfo_v1.__setstate_cython__ConfComputeGpuAttestationReportConfCom
puteGpuAttestationReport.from_dataConfComputeGpuAttestationReport.from_ptrConfComputeGpuAttestationReport.__reduce_cython__ConfComputeGpuAttestationReport.__setstate_cython""__ConfComputeGpuCertificateConfComputeGpuCertificate.__reduce_cython__ConfComputeGpuCertificate.__setstate_cython__ConfComputeGpuCertificate.from_dataConfComputeGpuCertificate.from_ptrConfComputeMemSizeInfoConfComputeMemSizeInfo.__reduce_cython__ConfComputeMemSizeInfo.__setstate_cython__ConfComputeMemSizeInfo.from_dataConfComputeMemSizeInfo.from_ptrConfComputeSystemCapsConfComputeSystemCaps.__reduce_cython__ConfComputeSystemCaps.__setstate_cython__ConfComputeSystemCaps.from_dataConfComputeSystemCaps.from_ptrConfComputeSystemStateConfComputeSystemState.__reduce_cython__ConfComputeSystemState.__setstate_cython__ConfComputeSystemState.from_dataConfComputeSystemState.from_ptrCoolerControlCoolerInfo_v1CoolerInfo_v1.__reduce_cython__CoolerInfo_v1.__setstate_cython__CoolerInfo_v1.from_dataCoolerInfo_v1.from_ptrCoolerTargetCorruptedInforomErrorDEC_UTILIZATION_SAMPLESDETACH_GPU_KEEPDETACH_GPU_REMOVEDEVICE_ADDRESSING_MODE_ATSDEVICE_ADDRESSING_MODE_HMMDEVICE_ADDRESSING_MODE_NONEDEVICE_MEMORYDEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPUDEVICE_VGPU_CAP_COUNTDEVICE_VGPU_CAP_DEVICE_STREAMINGDEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPUDEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILESDEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZESDEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTSDEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTEDDEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLEDDEVICE_VGPU_CAP_MINI_QUARTER_GPUDEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BWDEVICE_VGPU_CAP_WARM_UPDATEDEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BWDEV_C2C_LINK_COUNTDEV_C2C_LINK_ERROR_INTRDEV_C2C_LINK_ERROR_REPLAYDEV_C2C_LINK_ERROR_REPLAY_B2BDEV_C2C_LINK_GET_MAX_BWDEV_C2C_LINK_GET_STATUSDEV_C2C_LINK_POWER_STATEDEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWNDEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWNDEV_CLOCKS_EVENT_REASON_SW_POWER_CAPDEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWNDEV_CLOCKS_EVENT_REASON_SY
NC_BOOSTDEV_DRAIN_AND_RESET_STATUSDEV_ECC_CURRENTDEV_ECC_DBE_AGG_CBUDEV_ECC_DBE_AGG_DEVDEV_ECC_DBE_AGG_L1DEV_ECC_DBE_AGG_L2DEV_ECC_DBE_AGG_REGDEV_ECC_DBE_AGG_TEXDEV_ECC_DBE_A""GG_TOTALDEV_ECC_DBE_VOL_CBUDEV_ECC_DBE_VOL_DEVDEV_ECC_DBE_VOL_L1DEV_ECC_DBE_VOL_L2DEV_ECC_DBE_VOL_REGDEV_ECC_DBE_VOL_TEXDEV_ECC_DBE_VOL_TOTALDEV_ECC_PENDINGDEV_ECC_SBE_AGG_DEVDEV_ECC_SBE_AGG_L1DEV_ECC_SBE_AGG_L2DEV_ECC_SBE_AGG_REGDEV_ECC_SBE_AGG_TEXDEV_ECC_SBE_AGG_TOTALDEV_ECC_SBE_VOL_DEVDEV_ECC_SBE_VOL_L1DEV_ECC_SBE_VOL_L2DEV_ECC_SBE_VOL_REGDEV_ECC_SBE_VOL_TEXDEV_ECC_SBE_VOL_TOTALDEV_ENERGYDEV_GET_GPU_RECOVERY_ACTIONDEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLEDEV_IS_RESETLESS_MIG_SUPPORTEDDEV_MEMORY_TEMPDEV_NVLINK_BANDWIDTH_C0_L0DEV_NVLINK_BANDWIDTH_C0_L1DEV_NVLINK_BANDWIDTH_C0_L10DEV_NVLINK_BANDWIDTH_C0_L11DEV_NVLINK_BANDWIDTH_C0_L2DEV_NVLINK_BANDWIDTH_C0_L3DEV_NVLINK_BANDWIDTH_C0_L4DEV_NVLINK_BANDWIDTH_C0_L5DEV_NVLINK_BANDWIDTH_C0_L6DEV_NVLINK_BANDWIDTH_C0_L7DEV_NVLINK_BANDWIDTH_C0_L8DEV_NVLINK_BANDWIDTH_C0_L9DEV_NVLINK_BANDWIDTH_C0_TOTALDEV_NVLINK_BANDWIDTH_C1_L0DEV_NVLINK_BANDWIDTH_C1_L1DEV_NVLINK_BANDWIDTH_C1_L10DEV_NVLINK_BANDWIDTH_C1_L11DEV_NVLINK_BANDWIDTH_C1_L2DEV_NVLINK_BANDWIDTH_C1_L3DEV_NVLINK_BANDWIDTH_C1_L4DEV_NVLINK_BANDWIDTH_C1_L5DEV_NVLINK_BANDWIDTH_C1_L6DEV_NVLINK_BANDWIDTH_C1_L7DEV_NVLINK_BANDWIDTH_C1_L8DEV_NVLINK_BANDWIDTH_C1_L9DEV_NVLINK_BANDWIDTH_C1_TOTALDEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORSDEV_NVLINK_COUNT_EFFECTIVE_BERDEV_NVLINK_COUNT_EFFECTIVE_ERRORSDEV_NVLINK_COUNT_FEC_HISTORY_0DEV_NVLINK_COUNT_FEC_HISTORY_1DEV_NVLINK_COUNT_FEC_HISTORY_10DEV_NVLINK_COUNT_FEC_HISTORY_11DEV_NVLINK_COUNT_FEC_HISTORY_12DEV_NVLINK_COUNT_FEC_HISTORY_13DEV_NVLINK_COUNT_FEC_HISTORY_14DEV_NVLINK_COUNT_FEC_HISTORY_15DEV_NVLINK_COUNT_FEC_HISTORY_2DEV_NVLINK_COUNT_FEC_HISTORY_3DEV_NVLINK_COUNT_FEC_HISTORY_4DEV_NVLINK_COUNT_FEC_HISTORY_5DEV_NVLINK_COUNT_FEC_HISTORY_6DEV_NVLINK_COUNT_FEC_HISTORY_7DEV_NVLINK_COUNT_FEC_HISTORY_8DEV_NVLINK_COUNT_FEC_HISTORY_9DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTSDEV
_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTSDEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTSDEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORSDEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORSDEV""_NVLINK_COUNT_RAW_BERDEV_NVLINK_COUNT_RAW_BER_LANE0DEV_NVLINK_COUNT_RAW_BER_LANE1DEV_NVLINK_COUNT_RCV_BYTESDEV_NVLINK_COUNT_RCV_ERRORSDEV_NVLINK_COUNT_RCV_GENERAL_ERRORSDEV_NVLINK_COUNT_RCV_PACKETSDEV_NVLINK_COUNT_RCV_REMOTE_ERRORSDEV_NVLINK_COUNT_SYMBOL_BERDEV_NVLINK_COUNT_SYMBOL_ERRORSDEV_NVLINK_COUNT_VL15_DROPPEDDEV_NVLINK_COUNT_XMIT_BYTESDEV_NVLINK_COUNT_XMIT_DISCARDSDEV_NVLINK_COUNT_XMIT_PACKETSDEV_NVLINK_CRC_DATA_ERROR_COUNT_L0DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTALDEV_NVLINK_CRC_DATA_ERROR_COUNT_L6DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTALDEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTALDEV_NVLINK_ECC_DATA_ERROR_COUNT_L1DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8DEV_NVLINK_ERROR_DL_CRCDEV_NVLINK_ERROR_DL_RECOVERYDEV_NVLINK_ERROR_DL_REPLAYD
EV_NVLINK_GET_POWER_STATEDEV_NVLINK_GET_POWER_THRESHOLDDEV_NVLINK_GET_POWER_THRESHOLD_MAXDEV_NVLINK_GET_POWER_THRESHOLD_MINDEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTEDDEV_NVLINK_GET_""POWER_THRESHOLD_UNITSDEV_NVLINK_GET_SPEEDDEV_NVLINK_GET_STATEDEV_NVLINK_GET_VERSIONDEV_NVLINK_LINK_COUNTDEV_NVLINK_RECOVERY_ERROR_COUNT_L0DEV_NVLINK_RECOVERY_ERROR_COUNT_L8DEV_NVLINK_RECOVERY_ERROR_COUNT_L9DEV_NVLINK_RECOVERY_ERROR_COUNT_L10DEV_NVLINK_RECOVERY_ERROR_COUNT_L11DEV_NVLINK_RECOVERY_ERROR_COUNT_L1DEV_NVLINK_RECOVERY_ERROR_COUNT_L2DEV_NVLINK_RECOVERY_ERROR_COUNT_L3DEV_NVLINK_RECOVERY_ERROR_COUNT_L4DEV_NVLINK_RECOVERY_ERROR_COUNT_L5DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTALDEV_NVLINK_RECOVERY_ERROR_COUNT_L6DEV_NVLINK_RECOVERY_ERROR_COUNT_L7DEV_NVLINK_REMOTE_NVLINK_IDDEV_NVLINK_REPLAY_ERROR_COUNT_L0DEV_NVLINK_REPLAY_ERROR_COUNT_L1DEV_NVLINK_REPLAY_ERROR_COUNT_L10DEV_NVLINK_REPLAY_ERROR_COUNT_L11DEV_NVLINK_REPLAY_ERROR_COUNT_L2DEV_NVLINK_REPLAY_ERROR_COUNT_L3DEV_NVLINK_REPLAY_ERROR_COUNT_L4DEV_NVLINK_REPLAY_ERROR_COUNT_L5DEV_NVLINK_REPLAY_ERROR_COUNT_L6DEV_NVLINK_REPLAY_ERROR_COUNT_L7DEV_NVLINK_REPLAY_ERROR_COUNT_L8DEV_NVLINK_REPLAY_ERROR_COUNT_L9DEV_NVLINK_REPLAY_ERROR_COUNT_TOTALDEV_NVLINK_SPEED_MBPS_COMMONDEV_NVLINK_SPEED_MBPS_L0DEV_NVLINK_SPEED_MBPS_L1DEV_NVLINK_SPEED_MBPS_L10DEV_NVLINK_SPEED_MBPS_L11DEV_NVLINK_SPEED_MBPS_L2DEV_NVLINK_SPEED_MBPS_L3DEV_NVLINK_SPEED_MBPS_L4DEV_NVLINK_SPEED_MBPS_L5DEV_NVLINK_SPEED_MBPS_L6DEV_NVLINK_SPEED_MBPS_L7DEV_NVLINK_SPEED_MBPS_L8DEV_NVLINK_SPEED_MBPS_L9DEV_NVLINK_THROUGHPUT_DATA_RXDEV_NVLINK_THROUGHPUT_DATA_TXDEV_NVLINK_THROUGHPUT_RAW_RXDEV_NVLINK_THROUGHPUT_RAW_TXDEV_NVSWITCH_CONNECTED_LINK_COUNTDEV_PCIE_COUNT_BAD_DLLPDEV_PCIE_COUNT_BAD_TLPDEV_PCIE_COUNT_CORRECTABLE_ERRORSDEV_PCIE_COUNT_FATAL_ERRORDEV_PCIE_COUNT_LANE_ERRORDEV_PCIE_COUNT_LCRC_ERRORDEV_PCIE_COUNT_NAKS_RECEIVEDDEV_PCIE_COUNT_NAKS_SENTDEV_PCIE_COUNT_NON_FATAL_ERRORDEV_PCIE_COUNT_RECEIVER_ERRORDEV_PCIE_COUNT_RX_BYTESDEV_PCIE_COUNT_TX_BYTESDEV_PCIE_COUNT_UNSUPPORTED_REQDEV_PCIE_INBOUN
D_ATOMICS_MASKDEV_PCIE_L0_TO_RECOVERY_COUNTERDEV_PCIE_OUTBOUND_ATOMICS_MASKDEV_PCIE_REPLAY_COUNTERDEV_PCIE_REPLAY_ROLLOVER_COUNTERDEV_PERF_POLICY_BOARD_LIMITDEV_PERF_POLICY_LOW_UTI""LIZATIONDEV_PERF_POLICY_POWERDEV_PERF_POLICY_RELIABILITYDEV_PERF_POLICY_SYNC_BOOSTDEV_PERF_POLICY_THERMALDEV_PERF_POLICY_TOTAL_APP_CLOCKSDEV_PERF_POLICY_TOTAL_BASE_CLOCKSDEV_POWER_AVERAGEDEV_POWER_CURRENT_LIMITDEV_POWER_DEFAULT_LIMITDEV_POWER_INSTANTDEV_POWER_MAX_LIMITDEV_POWER_MIN_LIMITDEV_POWER_REQUESTED_LIMITDEV_POWER_SYNC_BALANCING_AFDEV_POWER_SYNC_BALANCING_FREQDEV_REMAPPED_CORDEV_REMAPPED_FAILUREDEV_REMAPPED_PENDINGDEV_REMAPPED_UNCDEV_RESET_STATUSDEV_RETIRED_DBEDEV_RETIRED_PENDINGDEV_RETIRED_PENDING_DBEDEV_RETIRED_PENDING_SBEDEV_RETIRED_SBEDEV_TEMPERATURE_GPU_MAX_TLIMITDEV_TEMPERATURE_MEM_MAX_TLIMITDEV_TEMPERATURE_SHUTDOWN_TLIMITDEV_TEMPERATURE_SLOWDOWN_TLIMITDEV_TOTAL_ENERGY_CONSUMPTIONDOUBLEDOUBLE_BIT_ECC_ERRORDRAMDRIVER_MCDMDRIVER_WDDMDRIVER_WDMDeprecatedErrorDetachGpuStateDeviceAddressingModeTypeDeviceAddressingMode_v1DeviceAddressingMode_v1.__reduce_cython__DeviceAddressingMode_v1.__setstate_cython__DeviceAddressingMode_v1.from_dataDeviceAddressingMode_v1.from_ptrDeviceAttributesDeviceAttributes.__reduce_cython__DeviceAttributes.__setstate_cython__DeviceAttributes.from_dataDeviceAttributes.from_ptrDeviceCapabilities_v1DeviceCapabilities_v1.__reduce_cython__DeviceCapabilities_v1.__setstate_cython__DeviceCapabilities_v1.from_dataDeviceCapabilities_v1.from_ptrDeviceCurrentClockFreqs_v1DeviceCurrentClockFreqs_v1.__reduce_cython__DeviceCurrentClockFreqs_v1.__setstate_cython__DeviceCurrentClockFreqs_v1.from_dataDeviceCurrentClockFreqs_v1.from_ptrDeviceGpuRecoveryActionDevicePerfModes_v1DevicePerfModes_v1.__reduce_cython__DevicePerfModes_v1.__setstate_cython__DevicePerfModes_v1.from_dataDevicePerfModes_v1.from_ptrDevicePowerMizerModes_v1DevicePowerMizerModes_v1.__reduce_cython__DevicePowerMizerModes_v1.__setstate_cython__DevicePowerMizerModes_v1.from_dataDevicePowerMizerModes_v1.from
_ptrDeviceVgpuCapabilityDriverModelDriverNotLoadedErrorENCODER_QUERY_AV1ENCODER_QUERY_H264ENCODER_QUERY_HEVCENCODER_QUERY_UNKNOWNENC_UTILIZATION_SAMPLESERROR_ALREADY_INITIALIZEDERROR""_ARGUMENT_VERSION_MISMATCHERROR_CORRUPTED_INFOROMERROR_DEPRECATEDERROR_DRIVER_NOT_LOADEDERROR_FREQ_NOT_SUPPORTEDERROR_FUNCTION_NOT_FOUNDERROR_GPU_IS_LOSTERROR_GPU_NOT_FOUNDERROR_INSUFFICIENT_POWERERROR_INSUFFICIENT_RESOURCESERROR_INSUFFICIENT_SIZEERROR_INVALID_ARGUMENTERROR_INVALID_STATEERROR_IN_USEERROR_IRQ_ISSUEERROR_LIBRARY_NOT_FOUNDERROR_LIB_RM_VERSION_MISMATCHERROR_MEMORYERROR_NOT_FOUNDERROR_NOT_READYERROR_NOT_SUPPORTEDERROR_NO_DATAERROR_NO_PERMISSIONERROR_OPERATING_SYSTEMERROR_RESET_REQUIREDERROR_RESET_TYPE_NOT_SUPPORTEDERROR_TIMEOUTERROR_UNINITIALIZEDERROR_UNKNOWNERROR_VGPU_ECC_NOT_SUPPORTEDEccCounterTypeEccSramErrorStatus_v1EccSramErrorStatus_v1.__reduce_cython__EccSramErrorStatus_v1.__setstate_cython__EccSramErrorStatus_v1.from_dataEccSramErrorStatus_v1.from_ptrEccSramUniqueUncorrectedErrorCounts_v1EccSramUniqueUncorrectedErrorCounts_v1.from_dataEccSramUniqueUncorrectedErrorCounts_v1.from_ptrEccSramUniqueUncorrectedErrorCounts_v1.__reduce_cython__EccSramUniqueUncorrectedErrorCounts_v1.__setstate_cython__EccSramUniqueUncorrectedErrorEntry_v1EccSramUniqueUncorrectedErrorEntry_v1.from_dataEccSramUniqueUncorrectedErrorEntry_v1.from_ptrEccSramUniqueUncorrectedErrorEntry_v1.__reduce_cython__EccSramUniqueUncorrectedErrorEntry_v1.__setstate_cython__EllipsisEnableStateEncoderSessionInfoEncoderSessionInfo.__reduce_cython__EncoderSessionInfo.__setstate_cython__EncoderSessionInfo.from_dataEncoderSessionInfo.from_ptrEncoderTypeEventDataEventData.__reduce_cython__EventData.__setstate_cython__EventData.from_dataEventData.from_ptrExcludedDeviceInfoExcludedDeviceInfo.__reduce_cython__ExcludedDeviceInfo.__setstate_cython__ExcludedDeviceInfo.from_dataExcludedDeviceInfo.from_ptrFAN_FAILEDFAN_NORMALFBCSessionInfoFBCSessionInfo.__reduce_cython__FBCSessionInfo.__setstate_cython__FBCSessionInfo.from_
dataFBCSessionInfo.from_ptrFBCSessionTypeFBCStatsFBCStats.__reduce_cython__FBCStats.__setstate_cython__FBCStats.from_dataFBCStats.from_ptrFEATURE_DISABLEDFEATURE_ENABLEDFIFanSpeedInfo_""v1FanSpeedInfo_v1.__reduce_cython__FanSpeedInfo_v1.__setstate_cython__FanSpeedInfo_v1.from_dataFanSpeedInfo_v1.from_ptrFanStateFieldValueFieldValue.__reduce_cython__FieldValue.__setstate_cython__FieldValue.from_dataFieldValue.from_ptrFreqNotSupportedErrorFunctionNotFoundErrorG781GAMINGGOM_ALL_ONGOM_COMPUTEGOM_LOW_DPGPM_METRIC_ANY_TENSOR_UTILGPM_METRIC_C2C_DATA_RX_PER_SECGPM_METRIC_C2C_DATA_TX_PER_SECGPM_METRIC_C2C_LINK0_DATA_RX_PER_SECGPM_METRIC_C2C_LINK0_DATA_TX_PER_SECGPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK10_DATA_RX_PER_SECGPM_METRIC_C2C_LINK10_DATA_TX_PER_SECGPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK11_DATA_RX_PER_SECGPM_METRIC_C2C_LINK11_DATA_TX_PER_SECGPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK12_DATA_RX_PER_SECGPM_METRIC_C2C_LINK12_DATA_TX_PER_SECGPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK13_DATA_RX_PER_SECGPM_METRIC_C2C_LINK13_DATA_TX_PER_SECGPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK1_DATA_RX_PER_SECGPM_METRIC_C2C_LINK1_DATA_TX_PER_SECGPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK2_DATA_RX_PER_SECGPM_METRIC_C2C_LINK2_DATA_TX_PER_SECGPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK3_DATA_RX_PER_SECGPM_METRIC_C2C_LINK3_DATA_TX_PER_SECGPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK4_DATA_RX_PER_SECGPM_METRIC_C2C_LINK4_DATA_TX_PER_SECGPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK5_DATA_RX_PER_SECGPM_METRIC_C2C_LINK5_DATA_TX_PER_SECGPM_METRI
C_C2C_LINK5_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK6_DATA_RX_PER_SECGPM_METRIC_C2C_LINK6_DATA_TX_PER_SECGPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SECGPM_METRIC_C""2C_LINK6_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK7_DATA_RX_PER_SECGPM_METRIC_C2C_LINK7_DATA_TX_PER_SECGPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK8_DATA_RX_PER_SECGPM_METRIC_C2C_LINK8_DATA_TX_PER_SECGPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SECGPM_METRIC_C2C_LINK9_DATA_RX_PER_SECGPM_METRIC_C2C_LINK9_DATA_TX_PER_SECGPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SECGPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SECGPM_METRIC_C2C_TOTAL_RX_PER_SECGPM_METRIC_C2C_TOTAL_TX_PER_SECGPM_METRIC_DFMA_TENSOR_UTILGPM_METRIC_DRAM_BW_UTILGPM_METRIC_DRAM_CACHE_HITGPM_METRIC_DRAM_CACHE_MISSGPM_METRIC_FP16_UTILGPM_METRIC_FP32_UTILGPM_METRIC_FP64_UTILGPM_METRIC_GR0_CTXSW_ACTIVE_PCTGPM_METRIC_GR0_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR0_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR0_CTXSW_CYCLES_PER_REQGPM_METRIC_GR0_CTXSW_REQUESTSGPM_METRIC_GR1_CTXSW_ACTIVE_PCTGPM_METRIC_GR1_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR1_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR1_CTXSW_CYCLES_PER_REQGPM_METRIC_GR1_CTXSW_REQUESTSGPM_METRIC_GR2_CTXSW_ACTIVE_PCTGPM_METRIC_GR2_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR2_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR2_CTXSW_CYCLES_PER_REQGPM_METRIC_GR2_CTXSW_REQUESTSGPM_METRIC_GR3_CTXSW_ACTIVE_PCTGPM_METRIC_GR3_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR3_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR3_CTXSW_CYCLES_PER_REQGPM_METRIC_GR3_CTXSW_REQUESTSGPM_METRIC_GR4_CTXSW_ACTIVE_PCTGPM_METRIC_GR4_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR4_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR4_CTXSW_CYCLES_PER_REQGPM_METRIC_GR4_CTXSW_REQUESTSGPM_METRIC_GR5_CTXSW_ACTIVE_PCTGPM_METRIC_GR5_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR5_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR5_CTXSW_CYCLES_PER_REQGPM_METRIC_GR5_CTXSW_REQUESTSGPM_METRIC_GR6_CTXSW_ACTIVE_PCTGPM_METRIC_GR6_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR6_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR6_CTXSW_CYCLES_PER_REQGPM_MET
RIC_GR6_CTXSW_REQUESTSGPM_METRIC_GR7_CTXSW_ACTIVE_PCTGPM_METRIC_GR7_CTXSW_CYCLES_ACTIVEGPM_METRIC_GR7_CTXSW_CYCLES_ELAPSEDGPM_METRIC_GR7_CTXSW_CYCLES_PER_REQGPM_METRIC_GR7_CTXSW_REQUESTSGP""M_METRIC_GRAPHICS_UTILGPM_METRIC_HMMA_TENSOR_UTILGPM_METRIC_HOSTMEM_CACHE_HITGPM_METRIC_HOSTMEM_CACHE_MISSGPM_METRIC_IMMA_TENSOR_UTILGPM_METRIC_INTEGER_UTILGPM_METRIC_MAXGPM_METRIC_NVDEC_0_UTILGPM_METRIC_NVDEC_1_UTILGPM_METRIC_NVDEC_2_UTILGPM_METRIC_NVDEC_3_UTILGPM_METRIC_NVDEC_4_UTILGPM_METRIC_NVDEC_5_UTILGPM_METRIC_NVDEC_6_UTILGPM_METRIC_NVDEC_7_UTILGPM_METRIC_NVENC_0_UTILGPM_METRIC_NVENC_1_UTILGPM_METRIC_NVENC_2_UTILGPM_METRIC_NVENC_3_UTILGPM_METRIC_NVJPG_0_UTILGPM_METRIC_NVJPG_1_UTILGPM_METRIC_NVJPG_2_UTILGPM_METRIC_NVJPG_3_UTILGPM_METRIC_NVJPG_4_UTILGPM_METRIC_NVJPG_5_UTILGPM_METRIC_NVJPG_6_UTILGPM_METRIC_NVJPG_7_UTILGPM_METRIC_NVLINK_L0_RX_PER_SECGPM_METRIC_NVLINK_L0_TX_PER_SECGPM_METRIC_NVLINK_L10_RX_PER_SECGPM_METRIC_NVLINK_L10_TX_PER_SECGPM_METRIC_NVLINK_L11_RX_PER_SECGPM_METRIC_NVLINK_L11_TX_PER_SECGPM_METRIC_NVLINK_L12_RX_PER_SECGPM_METRIC_NVLINK_L12_TX_PER_SECGPM_METRIC_NVLINK_L13_RX_PER_SECGPM_METRIC_NVLINK_L13_TX_PER_SECGPM_METRIC_NVLINK_L14_RX_PER_SECGPM_METRIC_NVLINK_L14_TX_PER_SECGPM_METRIC_NVLINK_L15_RX_PER_SECGPM_METRIC_NVLINK_L15_TX_PER_SECGPM_METRIC_NVLINK_L16_RX_PER_SECGPM_METRIC_NVLINK_L16_TX_PER_SECGPM_METRIC_NVLINK_L17_RX_PER_SECGPM_METRIC_NVLINK_L17_TX_PER_SECGPM_METRIC_NVLINK_L1_RX_PER_SECGPM_METRIC_NVLINK_L1_TX_PER_SECGPM_METRIC_NVLINK_L2_RX_PER_SECGPM_METRIC_NVLINK_L2_TX_PER_SECGPM_METRIC_NVLINK_L3_RX_PER_SECGPM_METRIC_NVLINK_L3_TX_PER_SECGPM_METRIC_NVLINK_L4_RX_PER_SECGPM_METRIC_NVLINK_L4_TX_PER_SECGPM_METRIC_NVLINK_L5_RX_PER_SECGPM_METRIC_NVLINK_L5_TX_PER_SECGPM_METRIC_NVLINK_L6_RX_PER_SECGPM_METRIC_NVLINK_L6_TX_PER_SECGPM_METRIC_NVLINK_L7_RX_PER_SECGPM_METRIC_NVLINK_L7_TX_PER_SECGPM_METRIC_NVLINK_L8_RX_PER_SECGPM_METRIC_NVLINK_L8_TX_PER_SECGPM_METRIC_NVLINK_L9_RX_PER_SECGPM_METRIC_NVLINK_L9_TX_PER_SECGPM_METRIC_NVLINK_TOTAL_RX_PER_SECGPM_METRIC_NVL
INK_TOTAL_TX_PER_SECGPM_METRIC_NVOFA_0_UTILGPM_METRIC_NVOFA_1_UTILGPM_METRIC_PCIE_RX_PER_SECGPM_METRIC_PCIE_TX_PER_SECGPM_METRIC_PEERMEM_CACHE_HITGPM_METRIC_PEERMEM_CACHE_MISSGPM_METRIC_SM_O""CCUPANCYGPM_METRIC_SM_UTILGPUGPU_INTERNALGPU_RECOVERY_ACTION_DRAIN_AND_RESETGPU_RECOVERY_ACTION_DRAIN_P2PGPU_RECOVERY_ACTION_GPU_RESETGPU_RECOVERY_ACTION_NODE_REBOOTGPU_RECOVERY_ACTION_NONEGPU_UTILIZATION_DOMAIN_BUSGPU_UTILIZATION_DOMAIN_FBGPU_UTILIZATION_DOMAIN_GPUGPU_UTILIZATION_DOMAIN_VIDGPU_UTILIZATION_SAMPLESGREENGpmMetricIdGpmSupportGpmSupport.__reduce_cython__GpmSupport.__setstate_cython__GpmSupport.from_dataGpmSupport.from_ptrGpuDynamicPstatesInfoGpuDynamicPstatesInfo.__reduce_cython__GpuDynamicPstatesInfo.__setstate_cython__GpuDynamicPstatesInfo.from_dataGpuDynamicPstatesInfo.from_ptrGpuFabricInfo_v3GpuFabricInfo_v3.__reduce_cython__GpuFabricInfo_v3.__setstate_cython__GpuFabricInfo_v3.from_dataGpuFabricInfo_v3.from_ptrGpuInstanceInfoGpuInstanceInfo.__reduce_cython__GpuInstanceInfo.__setstate_cython__GpuInstanceInfo.from_dataGpuInstanceInfo.from_ptrGpuInstancePlacementGpuInstancePlacement.__reduce_cython__GpuInstancePlacement.__setstate_cython__GpuInstancePlacement.from_dataGpuInstancePlacement.from_ptrGpuInstanceProfileInfo_v2GpuInstanceProfileInfo_v2.__reduce_cython__GpuInstanceProfileInfo_v2.__setstate_cython__GpuInstanceProfileInfo_v2.from_dataGpuInstanceProfileInfo_v2.from_ptrGpuInstanceProfileInfo_v3GpuInstanceProfileInfo_v3.__reduce_cython__GpuInstanceProfileInfo_v3.__setstate_cython__GpuInstanceProfileInfo_v3.from_dataGpuInstanceProfileInfo_v3.from_ptrGpuIsLostErrorGpuNotFoundErrorGpuOperationModeGpuP2PCapsIndexGpuP2PStatusGpuThermalSettingsGpuThermalSettings.__reduce_cython__GpuThermalSettings.__setstate_cython__GpuThermalSettings.from_dataGpuThermalSettings.from_ptrGpuTopologyLevelGpuUtilizationDomainIdGpuVirtualizationModeGridLicensableFeatureGridLicensableFeature.__reduce_cython__GridLicensableFeature.__setstate_cython__GridLicensableFeature.from_dataGridLic
ensableFeature.from_ptrGridLicensableFeaturesGridLicensableFeatures.__reduce_cython__GridLicensableFeatures.__setstate_cython__GridLicensableFeatures.from_dataGridLicensableFeatures.from_ptrGr""idLicenseExpiryGridLicenseExpiry.__reduce_cython__GridLicenseExpiry.__setstate_cython__GridLicenseExpiry.from_dataGridLicenseExpiry.from_ptrGridLicenseFeatureCodeHIBERNATEHOST_VGPUHOST_VSGAHWENCHostVgpuModeHwbcEntryHwbcEntry.__reduce_cython__HwbcEntry.__setstate_cython__HwbcEntry.from_dataHwbcEntry.from_ptrIINFOROM_COUNTINFOROM_DENINFOROM_ECCINFOROM_OEMINFOROM_POWERInUseErrorInforomObjectInsufficientPowerErrorInsufficientResourcesErrorInsufficientSizeErrorIntEnum_IntEnumIntNvLinkDeviceTypeInvalidArgumentErrorInvalidStateErrorIrqIssueErrorJPG_UTILIZATION_SAMPLESLL1_CACHEL2_CACHELIVELM64LM89LM99LedColorLedStateLedState.__reduce_cython__LedState.__setstate_cython__LedState.from_dataLedState.from_ptrLibRmVersionMismatchErrorLibraryNotFoundErrorMAXMAX1617MAX6649MAX6649RMEMORYMEMORY_CLK_SAMPLESMEMORY_UTILIZATION_SAMPLESMODULE_POWER_SAMPLESMULTIPLE_SINGLE_BIT_ECC_ERRORSMarginTemperature_v1MarginTemperature_v1.__reduce_cython__MarginTemperature_v1.__setstate_cython__MarginTemperature_v1.from_dataMarginTemperature_v1.from_ptrMemoryMemoryErrorMemoryErrorTypeMemoryLocationMemory.__reduce_cython__Memory.__setstate_cython__Memory.from_dataMemory.from_ptrMemory_v2Memory_v2.__reduce_cython__Memory_v2.__setstate_cython__Memory_v2.from_dataMemory_v2.from_ptrNODENONENON_SRIOVNVIDIA_RTXNVLINK_CAP_COUNTNVLINK_CAP_P2P_ATOMICSNVLINK_CAP_P2P_SUPPORTEDNVLINK_CAP_SLI_BRIDGENVLINK_CAP_SYSMEM_ACCESSNVLINK_CAP_SYSMEM_ATOMICSNVLINK_CAP_VALIDNVLINK_COUNTER_PKTFILTER_ALLNVLINK_COUNTER_PKTFILTER_FLUSHNVLINK_COUNTER_PKTFILTER_NOPNVLINK_COUNTER_PKTFILTER_NRATOMNVLINK_COUNTER_PKTFILTER_RATOMNVLINK_COUNTER_PKTFILTER_READNVLINK_COUNTER_PKTFILTER_RESPDATANVLINK_COUNTER_PKTFILTER_RESPNODATANVLINK_COUNTER_PKTFILTER_WRITENVLINK_COUNTER_UNIT_BYTESNVLINK_COUNTER_UNIT_COUNTNVLINK_COUNTER_UNIT_CYCLESNVLINK_COUNTER_UNIT_
PACKETSNVLINK_COUNTER_UNIT_RESERVEDNVLINK_DEVICE_TYPE_GPUNVLINK_DEVICE_TYPE_IBMNPUNVLINK_DEVICE_TYPE_SWITCHNVLINK_DEVICE_TYPE_UNKNOWNNVLINK_ERROR_COUNTNVLINK_ERROR_DL_CRC_DATANVLINK_ERROR_DL_CRC""_FLITNVLINK_ERROR_DL_ECC_DATANVLINK_ERROR_DL_RECOVERYNVLINK_ERROR_DL_REPLAYNVLINK_MAX_LINKSNVSYSCON_CANOASNVSYSCON_E551NoDataErrorNoPermissionErrorNotFoundErrorNotReadyErrorNotSupportedErrorNvLinkCapabilityNvLinkErrorCounterNvLinkInfo_v2NvLinkInfo_v2.__reduce_cython__NvLinkInfo_v2.__setstate_cython__NvLinkInfo_v2.from_dataNvLinkInfo_v2.from_ptrNvLinkUtilizationCountPktTypesNvLinkUtilizationCountUnitsNvlinkFirmwareInfoNvlinkFirmwareInfo.__reduce_cython__NvlinkFirmwareInfo.__setstate_cython__NvlinkFirmwareInfo.from_dataNvlinkFirmwareInfo.from_ptrNvlinkFirmwareVersionNvlinkFirmwareVersion.__reduce_cython__NvlinkFirmwareVersion.__setstate_cython__NvlinkFirmwareVersion.from_dataNvlinkFirmwareVersion.from_ptrNvlinkGetBwMode_v1NvlinkGetBwMode_v1.__reduce_cython__NvlinkGetBwMode_v1.__setstate_cython__NvlinkGetBwMode_v1.from_dataNvlinkGetBwMode_v1.from_ptrNvlinkSetBwMode_v1NvlinkSetBwMode_v1.__reduce_cython__NvlinkSetBwMode_v1.__setstate_cython__NvlinkSetBwMode_v1.from_dataNvlinkSetBwMode_v1.from_ptrNvlinkSupportedBwModes_v1NvlinkSupportedBwModes_v1.__reduce_cython__NvlinkSupportedBwModes_v1.__setstate_cython__NvlinkSupportedBwModes_v1.from_dataNvlinkSupportedBwModes_v1.from_ptrNvlinkVersionNvmlErrorNvmlError.__init__NvmlError.__reduce__OFA_UTILIZATION_SAMPLESOSOperatingSystemErrorP2P_CAPS_INDEX_ATOMICSP2P_CAPS_INDEX_NVLINKP2P_CAPS_INDEX_PCIP2P_CAPS_INDEX_PROPP2P_CAPS_INDEX_READP2P_CAPS_INDEX_UNKNOWNP2P_CAPS_INDEX_WRITEP2P_STATUS_CHIPSET_NOT_SUPPOREDP2P_STATUS_CHIPSET_NOT_SUPPORTEDP2P_STATUS_DISABLED_BY_REGKEYP2P_STATUS_GPU_NOT_SUPPORTEDP2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTEDP2P_STATUS_NOT_SUPPORTEDP2P_STATUS_OKP2P_STATUS_UNKNOWNPASSTHROUGHPCIE_LINK_KEEPPCIE_LINK_SHUT_DOWNPCIE_UTIL_COUNTPCIE_UTIL_RX_BYTESPCIE_UTIL_TX_BYTESPERF_POLICY_BOARD_LIMITPERF_POLICY_COUNTPERF_POLICY_LOW_UTILIZA
TIONPERF_POLICY_POWERPERF_POLICY_RELIABILITYPERF_POLICY_SYNC_BOOSTPERF_POLICY_THERMALPERF_POLICY_TOTAL_APP_CLOCKSPERF_POLICY_TOTAL_BASE_CLOCKSPOWER_PROFILE_BALANCEDPOWER_PROFILE_COMPUTEPOWER_PROFI""LE_DCPCIEPOWER_PROFILE_HMMA_DENSEPOWER_PROFILE_HMMA_SPARSEPOWER_PROFILE_HPCPOWER_PROFILE_LLM_INFERENCEPOWER_PROFILE_LLM_TRAININGPOWER_PROFILE_MAXPOWER_PROFILE_MAX_PPOWER_PROFILE_MAX_QPOWER_PROFILE_MEMORY_BOUNDPOWER_PROFILE_MIGPOWER_PROFILE_NETWORKPOWER_PROFILE_RBMPOWER_PROFILE_SYNC_BALANCEDPOWER_SUPPLYPROCESSOR_CLK_SAMPLESPSTATE_0PSTATE_1PSTATE_10PSTATE_11PSTATE_12PSTATE_13PSTATE_14PSTATE_15PSTATE_2PSTATE_3PSTATE_4PSTATE_5PSTATE_6PSTATE_7PSTATE_8PSTATE_9PSTATE_UNKNOWNPSUInfoPSUInfo.__reduce_cython__PSUInfo.__setstate_cython__PSUInfo.from_dataPSUInfo.from_ptrPWR_SMOOTHING_ACTIVE_PRESET_PROFILEPWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOORPWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATEPWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATEPWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VALPWR_SMOOTHING_APPLIED_TMP_CEILPWR_SMOOTHING_APPLIED_TMP_FLOORPWR_SMOOTHING_ENABLEDPWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAININGPWR_SMOOTHING_IMM_RAMP_DOWN_ENABLEDPWR_SMOOTHING_MAX_NUM_PRESET_PROFILESPWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTINGPWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTINGPWR_SMOOTHING_PRIV_LVLPWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOORPWR_SMOOTHING_PROFILE_RAMP_DOWN_RATEPWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VALPWR_SMOOTHING_PROFILE_RAMP_UP_RATEPageRetirementCausePciInfoPciInfoExt_v1PciInfoExt_v1.__reduce_cython__PciInfoExt_v1.__setstate_cython__PciInfoExt_v1.from_dataPciInfoExt_v1.from_ptrPciInfo.__reduce_cython__PciInfo.__setstate_cython__PciInfo.from_dataPciInfo.from_ptrPcieLinkStatePcieUtilCounterPdi_v1Pdi_v1.__reduce_cython__Pdi_v1.__setstate_cython__Pdi_v1.from_dataPdi_v1.from_ptrPerfPolicyTypePlatformInfo_v2PlatformInfo_v2.__reduce_cython__PlatformInfo_v2.__setstate_cython__PlatformInfo_v2.from_dataPlatformInfo_v2.from_ptrPowerProfileTypeProcessDetailList_v1ProcessDetailList_v1.__reduce_cy
thon__ProcessDetailList_v1.__setstate_cython__ProcessDetailList_v1.from_dataProcessDetailList_v1.from_ptrProcessDetail_v1ProcessDetail_v1.__reduce_cython__ProcessDetail_v1.__setstate_cython__Process""Detail_v1.from_dataProcessDetail_v1.from_ptrProcessInfoProcessInfo.__reduce_cython__ProcessInfo.__setstate_cython__ProcessInfo.from_dataProcessInfo.from_ptrProcessUtilizationInfo_v1ProcessUtilizationInfo_v1.__reduce_cython__ProcessUtilizationInfo_v1.__setstate_cython__ProcessUtilizationInfo_v1.from_dataProcessUtilizationInfo_v1.from_ptrProcessUtilizationSampleProcessUtilizationSample.__reduce_cython__ProcessUtilizationSample.__setstate_cython__ProcessUtilizationSample.from_dataProcessUtilizationSample.from_ptrProcessesUtilizationInfo_v1ProcessesUtilizationInfo_v1.__reduce_cython__ProcessesUtilizationInfo_v1.__setstate_cython__ProcessesUtilizationInfo_v1.from_dataProcessesUtilizationInfo_v1.from_ptrPstates__Pyx_PyDict_NextRefQRANGE_STARTREGISTER_FILERepairStatus_v1RepairStatus_v1.__reduce_cython__RepairStatus_v1.__setstate_cython__RepairStatus_v1.from_dataRepairStatus_v1.from_ptrResetRequiredErrorResetTypeNotSupportedErrorRestrictedAPIReturnRowRemapperHistogramValuesRowRemapperHistogramValues.__reduce_cython__RowRemapperHistogramValues.__setstate_cython__RowRemapperHistogramValues.from_dataRowRemapperHistogramValues.from_ptrSAMPLINGTYPE_COUNTSBMAX6649SET_APPLICATION_CLOCKSSET_AUTO_BOOSTED_CLOCKSSIGNED_INTSIGNED_LONG_LONGSLEEPSOCKETSRAMSRIOVSUCCESSSampleSample.__reduce_cython__Sample.__setstate_cython__Sample.from_dataSample.from_ptrSamplingTypeSequenceSystemConfComputeSettings_v1SystemConfComputeSettings_v1.__reduce_cython__SystemConfComputeSettings_v1.__setstate_cython__SystemConfComputeSettings_v1.from_dataSystemConfComputeSettings_v1.from_ptrTDPTEMPERATURE_COUNTTEMPERATURE_GPUTEMPERATURE_THRESHOLD_ACOUSTIC_CURRTEMPERATURE_THRESHOLD_ACOUSTIC_MINTEMPERATURE_THRESHOLD_ACOUSTIC_MAXTEMPERATURE_THRESHOLD_COUNTTEMPERATURE_THRESHOLD_GPS_CURRTEMPERATURE_THRESHOLD_GPU_MAXTEMPER
ATURE_THRESHOLD_MEM_MAXTEMPERATURE_THRESHOLD_SHUTDOWNTEMPERATURE_THRESHOLD_SLOWDOWNTEXTURE_MEMORYTEXTURE_SHMTHERMAL_COOLER_SIGNAL_COUNTTHERMAL_COOLER_SIGNAL_NONETHERMAL_COOLER_SIGNAL_TOGGLETHERMAL_COO""LER_SIGNAL_VARIABLETHERMAL_GPUTHERMAL_GPU_RELATEDTHERMAL_MEMORYTHERMAL_NONETHERMAL_POWER_SUPPLYTOPOLOGY_HOSTBRIDGETOPOLOGY_INTERNALTOPOLOGY_MULTIPLETOPOLOGY_NODETOPOLOGY_SINGLETOPOLOGY_SYSTEMTOSYSTOTAL_POWER_SAMPLESTemperatureSensorsTemperatureThresholdsThermalControllerThermalTargetTimeoutErrorUNCORRECTEDUNKNOWNUNLIMITEDUNSIGNED_INTUNSIGNED_LONGUNSIGNED_LONG_LONGUNSIGNED_SHORTUUIDTypeUninitializedErrorUnitFanInfoUnitFanInfo.__reduce_cython__UnitFanInfo.__setstate_cython__UnitFanInfo.from_dataUnitFanInfo.from_ptrUnitFanSpeedsUnitFanSpeeds.__reduce_cython__UnitFanSpeeds.__setstate_cython__UnitFanSpeeds.from_dataUnitFanSpeeds.from_ptrUnitInfoUnitInfo.__reduce_cython__UnitInfo.__setstate_cython__UnitInfo.from_dataUnitInfo.from_ptrUnknownErrorUtilizationUtilization.__reduce_cython__Utilization.__setstate_cython__Utilization.from_dataUtilization.from_ptrVBIOSEVTVCD_BOARDVCD_INLETVCD_OUTLETVERSION_1_0VERSION_2_0VERSION_2_2VERSION_3_0VERSION_3_1VERSION_4_0VERSION_5_0VERSION_INVALIDVGPUVGPU_CAP_COUNTVGPU_CAP_EXCLUSIVE_SIZEVGPU_CAP_EXCLUSIVE_TYPEVGPU_CAP_GPUDIRECTVGPU_CAP_MULTI_VGPU_EXCLUSIVEVGPU_CAP_NVLINK_P2PVGPU_COMPATIBILITY_LIMIT_GPUVGPU_COMPATIBILITY_LIMIT_GUEST_DRIVERVGPU_COMPATIBILITY_LIMIT_HOST_DRIVERVGPU_COMPATIBILITY_LIMIT_NONEVGPU_COMPATIBILITY_LIMIT_OTHERVGPU_DRIVER_CAP_COUNTVGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPUVGPU_DRIVER_CAP_WARM_UPDATEVGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZEDVGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZEDVGPU_VM_ID_DOMAIN_IDVGPU_VM_ID_UUIDVIDVOLATILE_ECCVWORKSTATIONValueValueTypeValue.__reduce_cython__Value.__setstate_cython__Value.from_dataValue.from_ptrVgpuCapabilityVgpuCreatablePlacementInfo_v1VgpuCreatablePlacementInfo_v1.__reduce_cython__VgpuCreatablePlacementInfo_v1.__setstate_cython__VgpuCreatablePlacementInfo_v1.from_dataVgpuCreatableP
lacementInfo_v1.from_ptrVgpuDriverCapabilityVgpuEccNotSupportedErrorVgpuGuestInfoStateVgpuHeterogeneousMode_v1VgpuHeterogeneousMode_v1.__reduce_cython__VgpuHeterogeneousMode_v1.__setstate_cython__VgpuHe""terogeneousMode_v1.from_dataVgpuHeterogeneousMode_v1.from_ptrVgpuInstanceUtilizationInfo_v1VgpuInstanceUtilizationInfo_v1.__reduce_cython__VgpuInstanceUtilizationInfo_v1.__setstate_cython__VgpuInstanceUtilizationInfo_v1.from_dataVgpuInstanceUtilizationInfo_v1.from_ptrVgpuInstancesUtilizationInfo_v1VgpuInstancesUtilizationInfo_v1.from_dataVgpuInstancesUtilizationInfo_v1.from_ptrVgpuInstancesUtilizationInfo_v1.__reduce_cython__VgpuInstancesUtilizationInfo_v1.__setstate_cython__VgpuLicenseExpiryVgpuLicenseExpiry.__reduce_cython__VgpuLicenseExpiry.__setstate_cython__VgpuLicenseExpiry.from_dataVgpuLicenseExpiry.from_ptrVgpuLicenseInfoVgpuLicenseInfo.__reduce_cython__VgpuLicenseInfo.__setstate_cython__VgpuLicenseInfo.from_dataVgpuLicenseInfo.from_ptrVgpuMetadataVgpuMetadata.__reduce_cython__VgpuMetadata.__setstate_cython__VgpuMetadata.from_dataVgpuMetadata.from_ptrVgpuPgpuCompatibilityVgpuPgpuCompatibilityLimitCodeVgpuPgpuCompatibility.__reduce_cython__VgpuPgpuCompatibility.__setstate_cython__VgpuPgpuCompatibility.from_dataVgpuPgpuCompatibility.from_ptrVgpuPgpuMetadataVgpuPgpuMetadata.__reduce_cython__VgpuPgpuMetadata.__setstate_cython__VgpuPgpuMetadata.from_dataVgpuPgpuMetadata.from_ptrVgpuPlacementId_v1VgpuPlacementId_v1.__reduce_cython__VgpuPlacementId_v1.__setstate_cython__VgpuPlacementId_v1.from_dataVgpuPlacementId_v1.from_ptrVgpuPlacementList_v2VgpuPlacementList_v2.__reduce_cython__VgpuPlacementList_v2.__setstate_cython__VgpuPlacementList_v2.from_dataVgpuPlacementList_v2.from_ptrVgpuProcessUtilizationInfo_v1VgpuProcessUtilizationInfo_v1.__reduce_cython__VgpuProcessUtilizationInfo_v1.__setstate_cython__VgpuProcessUtilizationInfo_v1.from_dataVgpuProcessUtilizationInfo_v1.from_ptrVgpuProcessesUtilizationInfo_v1VgpuProcessesUtilizationInfo_v1.from_dataVgpuProcessesUtili
zationInfo_v1.from_ptrVgpuProcessesUtilizationInfo_v1.__reduce_cython__VgpuProcessesUtilizationInfo_v1.__setstate_cython__VgpuRuntimeState_v1VgpuRuntimeState_v1.__reduce_cython__VgpuRuntimeState_v1.__sets""tate_cython__VgpuRuntimeState_v1.from_dataVgpuRuntimeState_v1.from_ptrVgpuSchedulerCapabilitiesVgpuSchedulerCapabilities.__reduce_cython__VgpuSchedulerCapabilities.__setstate_cython__VgpuSchedulerCapabilities.from_dataVgpuSchedulerCapabilities.from_ptrVgpuSchedulerGetStateVgpuSchedulerGetState.__reduce_cython__VgpuSchedulerGetState.__setstate_cython__VgpuSchedulerGetState.from_dataVgpuSchedulerGetState.from_ptrVgpuSchedulerLogVgpuSchedulerLogEntryVgpuSchedulerLogEntry.__reduce_cython__VgpuSchedulerLogEntry.__setstate_cython__VgpuSchedulerLogEntry.from_dataVgpuSchedulerLogEntry.from_ptrVgpuSchedulerLogInfo_v1VgpuSchedulerLogInfo_v1.__reduce_cython__VgpuSchedulerLogInfo_v1.__setstate_cython__VgpuSchedulerLogInfo_v1.from_dataVgpuSchedulerLogInfo_v1.from_ptrVgpuSchedulerLog.__reduce_cython__VgpuSchedulerLog.__setstate_cython__VgpuSchedulerLog.from_dataVgpuSchedulerLog.from_ptrVgpuSchedulerParamsVgpuSchedulerParams.__reduce_cython__VgpuSchedulerParams.__setstate_cython__VgpuSchedulerParams.from_dataVgpuSchedulerParams.from_ptrVgpuSchedulerSetParamsVgpuSchedulerSetParams.__reduce_cython__VgpuSchedulerSetParams.__setstate_cython__VgpuSchedulerSetParams.from_dataVgpuSchedulerSetParams.from_ptrVgpuSchedulerStateInfo_v1VgpuSchedulerStateInfo_v1.__reduce_cython__VgpuSchedulerStateInfo_v1.__setstate_cython__VgpuSchedulerStateInfo_v1.from_dataVgpuSchedulerStateInfo_v1.from_ptrVgpuSchedulerState_v1VgpuSchedulerState_v1.__reduce_cython__VgpuSchedulerState_v1.__setstate_cython__VgpuSchedulerState_v1.from_dataVgpuSchedulerState_v1.from_ptrVgpuTypeBar1Info_v1VgpuTypeBar1Info_v1.__reduce_cython__VgpuTypeBar1Info_v1.__setstate_cython__VgpuTypeBar1Info_v1.from_dataVgpuTypeBar1Info_v1.from_ptrVgpuTypeIdInfo_v1VgpuTypeIdInfo_v1.__reduce_cython__VgpuTypeIdInfo_v1.__setstate_cython__VgpuT
ypeIdInfo_v1.from_dataVgpuTypeIdInfo_v1.from_ptrVgpuTypeMaxInstance_v1VgpuTypeMaxInstance_v1.__reduce_cython__VgpuTypeMaxInstance_v1.__setstate_cython__VgpuTypeMaxInstance_v1.from_dataVgpuTypeMaxInstance_v1"".from_ptrVgpuVersionVgpuVersion.__reduce_cython__VgpuVersion.__setstate_cython__VgpuVersion.from_dataVgpuVersion.from_ptrVgpuVmCompatibilityVgpuVmIdTypeView.MemoryViewabcaccounting_stats_dtypeactive_vgpu_instance_info_v1_dtypeaddressaggregate_coraggregate_unc_bucket_l2aggregate_unc_bucket_mcuaggregate_unc_bucket_otheraggregate_unc_bucket_pcieaggregate_unc_bucket_smaggregate_unc_parityaggregate_unc_sec_dedallallocate_bufferapi_typearr_modeasarrayasyncio.coroutinesattacker_advantageattestation_cert_chainattestation_cert_chain_sizeattestation_reportattestation_report_sizeaverage_fpsaverage_latencyavg_factorb_channel_repair_pendingb_global_statusb_is_bestb_is_presentb_set_bestb_threshold_exceededb_tpc_repair_pendingba_r1memory_dtypebar1_usedbar1freebar1sizebar1totalbasebase_classbridge_chip_hierarchy_dtypebridge_chip_infobridge_chip_info_dtypebridge_countbufbufferbusbus_idbus_id_legacybw_modebw_modescc2c_mode_info_v1_dtypecap_maskcapabilitiescapabilitycausecc_featurecec_attestation_reportcec_attestation_report_sizecert_chaincert_chain_sizechassis_serial_numbercheck_statuscheck_status_size__class____class_getitem__cline_in_tracebackclique_idclk_api_domainclk_domain_fault_maskclk_mon_fault_info_dtypeclk_mon_listclk_mon_list_sizeclk_mon_status_dtypeclock_idclock_offset_m_hzclock_offset_v1_dtypeclock_typecluster_uuidcodec_typecolorcompatibility_limit_codecompute_instancecompute_instance_destroycompute_instance_get_info_v2compute_instance_idcompute_instance_info_dtypecompute_instance_placement_dtypecompute_instance_profile_info_v2_dtypecompute_instance_profile_info_v3_dtypecompute_instance_slice_countconf_compute_get_key_rotation_threshold_info_v1_dtypeconf_compute_gpu_attestation_report_dtypeconf_compute_gpu_certificate_dtypeconf_compute_mem_size_info_dtypeconf_compute_
system_caps_dtypeconf_compute_system_state_dtypecontrollercooler_info_v1_dtypecopy_engine_countcountcountercounter_typecpuNumbercpu_capscpu_set_sizectypescuda.bindings._nvmlcumulative_preemption_timecurrentcu""rrent_modecurrent_statecurrent_tempdVald_valdata_datadaydecUtildec_thresholddec_utildecoder_countdefault_max_tempdefault_min_tempdev_tools_modedevice_device1device2deviceCountdevicedevice_addressing_mode_v1_dtypedevice_attributes_dtypedevice_capabilities_v1_dtypedevice_clear_accounting_pidsdevice_clear_cpu_affinitydevice_clear_ecc_error_countsdevice_clear_field_valuesdevice_create_gpu_instancedevice_create_gpu_instance_with_placementdevice_current_clock_freqs_v1_dtypedevice_discover_gpusdevice_get_accounting_buffer_sizedevice_get_accounting_modedevice_get_accounting_pidsdevice_get_accounting_statsdevice_get_active_vgpusdevice_get_adaptive_clock_info_statusdevice_get_addressing_modedevice_get_api_restrictiondevice_get_architecturedevice_get_attributes_v2device_get_auto_boosted_clocks_enableddevice_get_bar1_memory_infodevice_get_board_iddevice_get_board_part_numberdevice_get_branddevice_get_bridge_chip_infodevice_get_bus_typedevice_get_c2c_mode_info_vdevice_get_capabilitiesdevice_get_clk_mon_statusdevice_get_clockdevice_get_clock_infodevice_get_clock_offsetsdevice_get_compute_instance_iddevice_get_compute_modedevice_get_compute_running_processes_v3device_get_conf_compute_gpu_attestation_reportdevice_get_conf_compute_gpu_certificatedevice_get_conf_compute_mem_size_infodevice_get_conf_compute_protected_memory_usagedevice_get_cooler_infodevice_get_count_v2device_get_cpu_affinitydevice_get_cpu_affinity_within_scopedevice_get_creatable_vgpusdevice_get_cuda_compute_capabilitydevice_get_curr_pcie_link_generationdevice_get_curr_pcie_link_widthdevice_get_current_clock_freqsdevice_get_current_clocks_event_reasonsdevice_get_decoder_utilizationdevice_get_default_ecc_modedevice_get_device_handle_from_mig_device_handledevice_get_display_activedevice_get_display_modedevice_get
_dram_encryption_modedevice_get_driver_model_v2device_get_dynamic_pstates_infodevice_get_ecc_modedevice_get_encoder_capacitydevice_get_encoder_sessionsdevice_get_encoder_statsdevice_get_encoder_utilizationdevic""e_get_enforced_power_limitdevice_get_fan_control_policy_v2device_get_fan_speeddevice_get_fan_speed_rpmdevice_get_fan_speed_v2device_get_fbc_sessionsdevice_get_fbc_statsdevice_get_field_valuesdevice_get_gpc_clk_min_max_vf_offsetdevice_get_gpc_clk_vf_offsetdevice_get_gpu_fabric_info_vdevice_get_gpu_instance_by_iddevice_get_gpu_instance_iddevice_get_gpu_instance_possible_placements_v2device_get_gpu_instance_profile_info_vdevice_get_gpu_instance_profile_info_by_id_vdevice_get_gpu_instance_remaining_capacitydevice_get_gpu_instancesdevice_get_gpu_max_pcie_link_generationdevice_get_gpu_operation_modedevice_get_grid_licensable_features_v4device_get_gsp_firmware_modedevice_get_gsp_firmware_versiondevice_get_handle_by_index_v2device_get_handle_by_pci_bus_id_v2device_get_handle_by_serialdevice_get_handle_by_uuiddevice_get_handle_by_uuidvdevice_get_host_vgpu_modedevice_get_hostname_v1device_get_indexdevice_get_inforom_configuration_checksumdevice_get_inforom_image_versiondevice_get_inforom_versiondevice_get_irq_numdevice_get_jpg_utilizationdevice_get_last_bbx_flush_timedevice_get_margin_temperaturedevice_get_max_clock_infodevice_get_max_customer_boost_clockdevice_get_max_mig_device_countdevice_get_max_pcie_link_generationdevice_get_max_pcie_link_widthdevice_get_mem_clk_min_max_vf_offsetdevice_get_mem_clk_vf_offsetdevice_get_memory_affinitydevice_get_memory_bus_widthdevice_get_memory_error_counterdevice_get_memory_info_v2device_get_mig_device_handle_by_indexdevice_get_mig_modedevice_get_min_max_clock_of_p_statedevice_get_min_max_fan_speeddevice_get_minor_numberdevice_get_module_iddevice_get_mps_compute_running_processes_v3device_get_multi_gpu_boarddevice_get_namedevice_get_num_fansdevice_get_num_gpu_coresdevice_get_numa_node_iddevice_get_nvlink_bw_modedevice_get_nvlink_c
apabilitydevice_get_nvlink_error_counterdevice_get_nvlink_infodevice_get_nvlink_remote_device_typedevice_get_nvlink_remote_pci_info_v2device_get_nvlink_statedevice_get_nvlink_supported_bw_modesdevice_get_nvlink_v""ersiondevice_get_ofa_utilizationdevice_get_p2p_statusdevice_get_pci_info_extdevice_get_pci_info_v3device_get_pcie_link_max_speeddevice_get_pcie_replay_counterdevice_get_pcie_speeddevice_get_pcie_throughputdevice_get_pdidevice_get_performance_modesdevice_get_performance_statedevice_get_persistence_modedevice_get_pgpu_metadata_stringdevice_get_platform_infodevice_get_power_management_default_limitdevice_get_power_management_limitdevice_get_power_management_limit_constraintsdevice_get_power_mizer_mode_v1device_get_power_sourcedevice_get_power_statedevice_get_power_usagedevice_get_process_utilizationdevice_get_processes_utilization_infodevice_get_remapped_rowsdevice_get_repair_statusdevice_get_retired_pagesdevice_get_retired_pages_pending_statusdevice_get_retired_pages_v2device_get_row_remapper_histogramdevice_get_running_process_detail_listdevice_get_samplesdevice_get_serialdevice_get_sram_ecc_error_statusdevice_get_sram_unique_uncorrected_ecc_error_countsdevice_get_supported_clocks_event_reasonsdevice_get_supported_event_typesdevice_get_supported_graphics_clocksdevice_get_supported_memory_clocksdevice_get_supported_performance_statesdevice_get_supported_vgpusdevice_get_target_fan_speeddevice_get_temperature_thresholddevice_get_temperature_vdevice_get_thermal_settingsdevice_get_topology_common_ancestordevice_get_topology_nearest_gpusdevice_get_total_ecc_errorsdevice_get_total_energy_consumptiondevice_get_utilization_ratesdevice_get_uuiddevice_get_vbios_versiondevice_get_vgpu_capabilitiesdevice_get_vgpu_heterogeneous_modedevice_get_vgpu_instances_utilization_infodevice_get_vgpu_metadatadevice_get_vgpu_process_utilizationdevice_get_vgpu_processes_utilization_infodevice_get_vgpu_scheduler_capabilitiesdevice_get_vgpu_scheduler_logdevice_get_vgpu_scheduler_statede
vice_get_vgpu_type_creatable_placementsdevice_get_vgpu_type_supported_placementsdevice_get_vgpu_utilizationdevice_get_virtualization_modedevice_is_mig_device_handledevice_modify_drain_statedevice_on_same_boarddevic""e_perf_modes_v1_dtypedevice_power_mizer_modes_v1_dtypedevice_power_smoothing_activate_preset_profiledevice_power_smoothing_set_statedevice_power_smoothing_update_preset_profile_paramdevice_query_drain_statedevice_read_write_prm_v1device_register_eventsdevice_remove_gpu_v2device_reset_gpu_locked_clocksdevice_reset_memory_locked_clocksdevice_reset_nvlink_error_countersdevice_set_accounting_modedevice_set_api_restrictiondevice_set_auto_boosted_clocks_enableddevice_set_clock_offsetsdevice_set_compute_modedevice_set_conf_compute_unprotected_mem_sizedevice_set_cpu_affinitydevice_set_default_auto_boosted_clocks_enableddevice_set_default_fan_speed_v2device_set_dram_encryption_modedevice_set_driver_modeldevice_set_ecc_modedevice_set_fan_control_policydevice_set_fan_speed_v2device_set_gpu_locked_clocksdevice_set_gpu_operation_modedevice_set_hostname_v1device_set_memory_locked_clocksdevice_set_mig_modedevice_set_nvlink_bw_modedevice_set_nvlink_device_low_power_thresholddevice_set_persistence_modedevice_set_power_management_limitdevice_set_power_management_limit_v2device_set_power_mizer_mode_v1device_set_temperature_thresholddevice_set_vgpu_capabilitiesdevice_set_vgpu_heterogeneous_modedevice_set_vgpu_scheduler_statedevice_set_virtualization_modedevice_validate_inforomdevice_workload_power_profile_clear_requested_profiles__dict___dictdisplay_ind_exdisplay_ordinal__doc__domaindram_encryptiondriver_modeldtypedtype_is_objecteccecc_sram_error_status_v1_dtypeecc_sram_unique_uncorrected_error_entry_v1_dtypeecc_sram_unique_uncorrected_error_counts_v1_dtypeemptyenable_arr_modeenabledencUtilenc_utilencodeencoder_capacityencoder_countencoder_query_typeencoder_session_info_dtypeencryptionStateeng_profileengine_identriesentries_countentry_countenumenumerateenvironmenterrorerror
_stringerror_typeevent_dataevent_data_dtypeevent_set_createevent_set_freeevent_set_wait_v2event_typeevent_typesexcluded_device_info_dtypeextlocationfanfan_speed_info_v1_dtypefansfbc_session_info_dtypefbc_stats_dtypef""eature_codefeature_enabledfeature_statefield_idfield_value_dtypefirmware_infofirmware_versionflagflagsflags_float64formatformatsfortranfreefrequencyfrom_datafrom_ptr__func__fw_versionget_excluded_device_countget_excluded_device_info_by_indexget_vgpu_compatibilityget_vgpu_driver_capabilitiesget_vgpu_version__getstate__gpm_mig_sample_getgpm_query_device_supportgpm_query_if_streaming_enabledgpm_samplegpm_sample_getgpm_set_streaming_enabledgpm_support_dtypegpugpu_dynamic_pstates_info_dtypegpu_fabric_info_v3_dtypegpu_instancegpu_instance_create_compute_instancegpu_instance_create_compute_instance_with_placementgpu_instance_destroygpu_instance_get_active_vgpusgpu_instance_get_compute_instance_profile_info_vgpu_instance_get_compute_instance_remaining_capacitygpu_instance_get_compute_instance_possible_placementsgpu_instance_get_compute_instance_by_idgpu_instance_get_compute_instancesgpu_instance_get_creatable_vgpusgpu_instance_get_infogpu_instance_get_vgpu_heterogeneous_modegpu_instance_get_vgpu_scheduler_stategpu_instance_get_vgpu_scheduler_loggpu_instance_get_vgpu_type_creatable_placementsgpu_instance_idgpu_instance_info_dtypegpu_instance_placement_dtypegpu_instance_profile_info_v2_dtypegpu_instance_profile_info_v3_dtypegpu_instance_set_vgpu_heterogeneous_modegpu_instance_set_vgpu_scheduler_stategpu_instance_slice_countgpu_stategpu_thermal_settings_dtypegpu_utilizationgpus_capsgrid_licensable_feature_dtypegrid_licensable_featuresgrid_licensable_features_dtypegrid_license_expiry_dtypeguest_driver_versionguest_info_stateguest_vgpu_versionh_max_resolutionh_resolutionhealth_maskhealth_summaryhighhost_driver_versionhost_idhost_supported_vgpu_rangehostnamehourhwbc_entry_dtypehwbc_idiib_guidid__import__inc_thresholdind_exindexinfo__init__init_v2init_with_flagsinsta
nce_countint32int64int8intpis_accepting_workis_arr_mode_supportedis_c2c_enabledis_cec_attestation_report_present_is_coroutineis_grid_license_supportedis_licensedis_nvle_enabledis_p2p_supportedis_parityis_restrictedis_r""unningis_supported_deviceitemsitemsizejpeg_countjpg_utillast_seen_time_stamplatency_usecled_state_dtypelevellicensable_features_countlicense_expirylicense_infolimitlinklink_statelocationlocation_typelog_entrieslow__main__majormargin_temperaturemargin_temperature_v1_dtypemax_max_avg_factor_for_arrmax_clock_offset_m_hzmax_frequency_for_arrmax_gpu_clock_m_hzmax_instance_per_gimax_mem_clock_m_hzmax_memory_usagemax_timeslicemax_versionmemUtilmem_utilmemorymemory_clock_m_hzmemory_dtypememory_size_mbmemory_utilizationmemory_v2_dtypememview__metaclass__mig_devicemin_min_avg_factor_for_arrmin_clock_offset_m_hzmin_frequency_for_arrmin_gpu_clock_m_hzmin_mem_clock_m_hzmin_timeslicemin_versionminormode__module__module_idmonth__mro_entries__multi_gpu_modemultiprocessor_countname__name__namesndarrayndim__new__new_statenode_set_sizenoncenonenum_proc_array_entriesnum_valid_entries_numpynumpynv_link_info_v2_dtypenvlink_bw_modenvlink_firmware_info_dtypenvlink_firmware_version_dtypenvlink_get_bw_mode_v1_dtypenvlink_set_bw_mode_v1_dtypenvlink_supported_bw_modes_v1_dtypenvml_returnobjobjectofa_countofa_utiloffsetsopaque_dataopaque_data_sizeownerp2p_ind_exp_heterogeneous_modep_key_rotation_thr_infop_schedulerp_scheduler_statepackpartialpci_bus_idpci_device_idpci_infopci_info_dtypepci_info_ext_v1_dtypepci_sub_system_idpdi_v1_dtypepeer_typepercentagepgpu_metadatapgpu_virtualization_capspidplacementplacement_idplacement_idsplacement_sizeplatform_info_v2_dtypepolicypoppowerpower_mizer_modepower_value__prepare__proc_arrayproc_util_arrayprocessNameprocess_detail_list_v1_dtypeprocess_detail_v1_dtypeprocess_info_dtypeprocess_nameprocess_samples_countprocess_utilization_info_v1_dtypeprocess_utilization_sample_dtypeprocesses_utilization_info_v1_dtypeproduct_nameprofileprofile_idprot
ected_mem_size_kibpstatepsu_info_dtypeptr_py_anon_pod0_py_anon_pod0.__reduce_cython___py_anon_pod0.__setstate_cython___py_anon_pod0_dtype_py_anon_pod0.from_data_py_anon_pod0.from_ptr_py_anon_pod1_py_anon_pod1.__reduce_cy""thon___py_anon_pod1.__setstate_cython___py_anon_pod1_dtype_py_anon_pod1.from_data_py_anon_pod1.from_ptr_py_anon_pod2_py_anon_pod2.__reduce_cython___py_anon_pod2.__setstate_cython___py_anon_pod2_dtype_py_anon_pod2.from_data_py_anon_pod2.from_ptr_py_anon_pod3_py_anon_pod3.__reduce_cython___py_anon_pod3.__setstate_cython___py_anon_pod3_dtype_py_anon_pod3.from_data_py_anon_pod3.from_ptr_py_anon_pod4_py_anon_pod4.__reduce_cython___py_anon_pod4.__setstate_cython___py_anon_pod4_dtype_py_anon_pod4.from_data_py_anon_pod4.from_ptr_py_anon_pod5_py_anon_pod5.__reduce_cython___py_anon_pod5.__setstate_cython___py_anon_pod5_dtype_py_anon_pod5.from_data_py_anon_pod5.from_ptr__pyx_capi____pyx_checksum__pyx_result__pyx_state__pyx_type__pyx_unpickle_BridgeChipInfo__pyx_unpickle_ClkMonFaultInfo__pyx_unpickle_ComputeInstancePlacement__pyx_unpickle_EccSramUniqueUncorrectedErrorEntry_v1__pyx_unpickle_EncoderSessionInfo__pyx_unpickle_Enum__pyx_unpickle_FBCSessionInfo__pyx_unpickle_FieldValue__pyx_unpickle_GpuInstancePlacement__pyx_unpickle_GridLicensableFeature__pyx_unpickle_HwbcEntry__pyx_unpickle_ProcessDetail_v1__pyx_unpickle_ProcessInfo__pyx_unpickle_ProcessUtilizationSample__pyx_unpickle_ProcessUtilizationInfo_v1__pyx_unpickle_Sample__pyx_unpickle_UnitFanInfo__pyx_unpickle_VgpuInstanceUtilizationInfo_v1__pyx_unpickle_VgpuProcessUtilizationInfo_v1__pyx_unpickle_VgpuSchedulerLogEntry__pyx_vtable____qualname__readonlyrecarray__reduce____reduce_cython____reduce_ex__registerrepair_status_v1_dtyperequestrequested_profilesreservedresultrevisionrow_remapper_histogram_values_dtypessample_dtypesample_val_typesample_valuescheduler_paramsscheduler_policyscopescope_idsecselfsensorsensorTypesensor_ind_exserialsession_flagssession_idsession_typesessions_countsetset_bw_mode__set_nam
e__set_vgpu_versionsetdefault__setstate____setstate_cython__shapeshared_copy_engine_countshared_decoder_countshared_encoder_countshared_jpeg_countshared_ofa_countshutdownsiValsi_valsignal_typesizesize_size_ki_bslice_counts""llValsll_valslot_numbersmUtilsm_utilspeedstartstart_timestatestaticmethodstatusstepstopstrstructsub_classsub_minorsublocationsupersupported_power_mizer_modessupported_schedulerssw_runlist_idsystem_conf_compute_settings_v1_dtypesystem_event_set_createsystem_event_set_freesystem_event_set_waitsystem_get_conf_compute_capabilitiessystem_get_conf_compute_gpus_ready_statesystem_get_conf_compute_key_rotation_threshold_infosystem_get_conf_compute_settingssystem_get_conf_compute_statesystem_get_cuda_driver_versionsystem_get_cuda_driver_version_v2system_get_driver_branchsystem_get_driver_versionsystem_get_hic_versionsystem_get_nvlink_bw_modesystem_get_nvml_versionsystem_get_process_namesystem_get_topology_gpu_setsystem_register_eventssystem_set_conf_compute_gpus_ready_statesystem_set_conf_compute_key_rotation_threshold_infosystem_set_nvlink_bw_modetargettarget_time_slicetemp__test__threshold_typetimetimeStamptime_runtime_run_totaltime_stamptimeoutmstimeslicetimestamptotaltotal_bw_modestray_ind_extypeucode_typeuiValui_valuint16uint32uint64uint8ulValul_valullValull_valunitunit_fan_info_dtypeunit_fan_speeds_dtypeunit_get_countunit_get_devicesunit_get_fan_speed_infounit_get_handle_by_indexunit_get_led_stateunit_get_psu_infounit_get_temperatureunit_get_unit_infounit_info_dtypeunit_set_led_stateunpackunprotected_mem_size_kibupdateusValus_valuse_setstateusedused_gpu_cc_protected_memoryused_gpu_memoryutilizationutilization_dtypeuuidv_max_resolutionv_resolutionvaluevalue_dtypevalue_typevaluesversionvgpuInstancevgpu_countvgpu_creatable_placement_info_v1_dtypevgpu_heterogeneous_mode_v1_dtypevgpu_instancevgpu_instance_clear_accounting_pidsvgpu_instance_countvgpu_instance_get_accounting_modevgpu_instance_get_accounting_pidsvgpu_instance_get_accounting_statsvgpu_instanc
e_get_ecc_modevgpu_instance_get_encoder_capacityvgpu_instance_get_encoder_sessionsvgpu_instance_get_encoder_statsvgpu_instance_get_fb_usagevgpu_instance_get_fbc_sessionsvgpu_instance_get_fbc_statsvgpu_instance_get_frame_rate""_limitvgpu_instance_get_gpu_instance_idvgpu_instance_get_gpu_pci_idvgpu_instance_get_license_info_v2vgpu_instance_get_license_statusvgpu_instance_get_mdev_uuidvgpu_instance_get_metadatavgpu_instance_get_placement_idvgpu_instance_get_runtime_state_sizevgpu_instance_get_typevgpu_instance_get_uuidvgpu_instance_get_vm_driver_versionvgpu_instance_get_vm_idvgpu_instance_set_encoder_capacityvgpu_instance_utilization_info_v1_dtypevgpu_instancesvgpu_instances_utilization_info_v1_dtypevgpu_license_expiry_dtypevgpu_license_info_dtypevgpu_metadatavgpu_metadata_dtypevgpu_pgpu_compatibility_dtypevgpu_pgpu_metadata_dtypevgpu_placement_id_v1_dtypevgpu_placement_list_v2_dtypevgpu_proc_util_arrayvgpu_process_countvgpu_process_utilization_info_v1_dtypevgpu_processes_utilization_info_v1_dtypevgpu_runtime_state_v1_dtypevgpu_sched_datavgpu_sched_data_with_arrvgpu_scheduler_capabilities_dtypevgpu_scheduler_get_state_dtypevgpu_scheduler_log_dtypevgpu_scheduler_log_entry_dtypevgpu_scheduler_log_info_v1_dtypevgpu_scheduler_params_dtypevgpu_scheduler_set_params_dtypevgpu_scheduler_state_info_v1_dtypevgpu_scheduler_state_v1_dtypevgpu_type_bar1info_v1_dtypevgpu_type_get_bar1_infovgpu_type_get_capabilitiesvgpu_type_get_classvgpu_type_get_device_idvgpu_type_get_fb_reservationvgpu_type_get_frame_rate_limitvgpu_type_get_framebuffer_sizevgpu_type_get_gpu_instance_profile_idvgpu_type_get_gsp_heap_sizevgpu_type_get_licensevgpu_type_get_max_instancesvgpu_type_get_max_instances_per_vmvgpu_type_get_max_instances_per_gpu_instancevgpu_type_get_namevgpu_type_get_num_display_headsvgpu_type_get_resolutionvgpu_type_idvgpu_type_id_info_v1_dtypevgpu_type_idsvgpu_type_license_stringvgpu_type_max_instance_v1_dtypevgpu_util_arrayvgpu_versionvgpu_version_dtypevgpu_virtualization_capsvgpu_vm_com
patibilityviewvirtual_modevoidvolatile_corvolatile_unc_parityvolatile_unc_sec_dedvoltagewriteablexyearPyObject *(int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000PyObject *(int, int __pyx_skip_dispatch)\000PyObj""ect *(intptr_t, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000PyObject *(intptr_t, int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000PyObject *(intptr_t, int, int, int __pyx_skip_dispatch)\000\000\000\000PyObject *(intptr_t, int, intptr_t, int __pyx_skip_dispatch)\000PyObject *(intptr_t, int, unsigned int, int __pyx_skip_dispatch)\000\000PyObject *(intptr_t, intptr_t, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000PyObject *(intptr_t, unsigned PY_LONG_LONG, int __pyx_skip_dispatch)\000\000\000\000PyObject *(intptr_t, unsigned PY_LONG_LONG, intptr_t, int __pyx_skip_dispatch)\000PyObject *(intptr_t, unsigned int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000PyObject *(intptr_t, unsigned int, intptr_t, int __pyx_skip_dispatch)\000PyObject *(intptr_t, unsigned int, unsigned int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000PyObject *(unsigned int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000PyObject *(unsigned int, intptr_t, unsigned int, int __pyx_skip_dispatch)\000PyObject *(unsigned int, unsigned int, int __pyx_skip_dispatch)\000\000\000char (intptr_t, int __pyx_skip_dispatch)\000int (int __pyx_skip_dispatch)\000\000int (intptr_t, int 
__pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000int (intptr_t, int, int __pyx_skip_dispatch)\000int (intptr_t, intptr_t, int __pyx_skip_dispatch)\000\000int (intptr_t, intptr_t, int, int __pyx_skip_dispatch)\000int (intptr_t, unsigned int, int __pyx_skip_dispatch)\000\000""\000int (unsigned int, int __pyx_skip_dispatch)\000\000intptr_t (PyObject *, int __pyx_skip_dispatch)\000\000\000intptr_t (int __pyx_skip_dispatch)\000intptr_t (intptr_t, int __pyx_skip_dispatch)\000\000intptr_t (intptr_t, unsigned int, int __pyx_skip_dispatch)\000\000\000\000\000intptr_t (intptr_t, unsigned int, intptr_t, int __pyx_skip_dispatch)\000\000intptr_t (unsigned int, int __pyx_skip_dispatch)\000\000unsigned PY_LONG_LONG (intptr_t, int __pyx_skip_dispatch)\000\000\000\000unsigned PY_LONG_LONG (intptr_t, int, int, int __pyx_skip_dispatch)\000unsigned PY_LONG_LONG (intptr_t, int, int, int, int __pyx_skip_dispatch)\000unsigned PY_LONG_LONG (intptr_t, unsigned int, int, int __pyx_skip_dispatch)\000unsigned PY_LONG_LONG (unsigned int, int __pyx_skip_dispatch)\000\000\000\000unsigned int (int __pyx_skip_dispatch)\000\000\000\000\000unsigned int (int, int __pyx_skip_dispatch)\000unsigned int (intptr_t, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000unsigned int (intptr_t, int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000unsigned int (intptr_t, int, int, int __pyx_skip_dispatch)\000unsigned int (intptr_t, unsigned int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000unsigned int (intptr_t, unsigned int, int, int __pyx_skip_dispatch)\000unsigned int (unsigned int, int __pyx_skip_dispatch)\000\000\000\000\000\000\000\000\000unsigned int (unsigned int, int, int __pyx_skip_dispatch)\000unsigned long (intptr_t, intptr_t, int 
__pyx_skip_dispatch)\000init_v2\000shutdown\000system_get_conf_compute_capabilities\000system_get_conf_compute_key_rotation_threshold_info\000system_get_conf_compute_settings\000system_get_conf_compute_state\000system_get_driver_version\000system_get_hic_version\000system_get_nvml_version\000vgpu_type_get_max_instances_per_gpu_instance\000error_string\000compute_instance_destroy\000compute_instance_get_info_v2\000device_c""lear_accounting_pids\000device_clear_cpu_affinity\000device_discover_gpus\000device_get_accounting_pids\000device_get_addressing_mode\000device_get_attributes_v2\000device_get_auto_boosted_clocks_enabled\000device_get_bar1_memory_info\000device_get_board_part_number\000device_get_bridge_chip_info\000device_get_c2c_mode_info_v\000device_get_capabilities\000device_get_clk_mon_status\000device_get_clock_offsets\000device_get_compute_running_processes_v3\000device_get_conf_compute_gpu_attestation_report\000device_get_conf_compute_gpu_certificate\000device_get_conf_compute_mem_size_info\000device_get_conf_compute_protected_memory_usage\000device_get_cooler_info\000device_get_cuda_compute_capability\000device_get_current_clock_freqs\000device_get_decoder_utilization\000device_get_dram_encryption_mode\000device_get_driver_model_v2\000device_get_dynamic_pstates_info\000device_get_ecc_mode\000device_get_encoder_sessions\000device_get_encoder_stats\000device_get_encoder_utilization\000device_get_fan_speed_rpm\000device_get_fbc_sessions\000device_get_fbc_stats\000device_get_gpc_clk_min_max_vf_offset\000device_get_gpu_fabric_info_v\000device_get_gpu_operation_mode\000device_get_grid_licensable_features_v4\000device_get_gsp_firmware_mode\000device_get_inforom_image_version\000device_get_jpg_utilization\000device_get_margin_temperature\000device_get_mem_clk_min_max_vf_offset\000device_get_memory_info_v2\000device_get_mig_mode\000device_get_min_max_fan_speed\000device_get_mps_compute_running_processes_v3\000device_get_name\000device_get_nvlink_bw_mode\000device_
get_nvlink_info\000device_get_nvlink_supported_bw_modes\000device_get_ofa_utilization\000device_get_pci_info_ext\000device_get_pci_info_v3\000device_get_pdi\000device_get_performance_modes\000device_get_pgpu_metadata_string\000device_get_platform_info\000device_get_power_management_limit_constraints\000device_get_power_mizer_mode_v1\000device_get_remapped_rows\000device_get_repair_status\000device_get_row_remapper_histogram""\000device_get_serial\000device_get_sram_ecc_error_status\000device_get_supported_memory_clocks\000device_get_utilization_rates\000device_get_uuid\000device_get_vbios_version\000device_get_vgpu_heterogeneous_mode\000device_get_vgpu_scheduler_capabilities\000device_get_vgpu_scheduler_log\000device_get_vgpu_scheduler_state\000device_reset_gpu_locked_clocks\000device_reset_memory_locked_clocks\000device_set_cpu_affinity\000device_validate_inforom\000event_set_free\000gpm_query_device_support\000gpu_instance_destroy\000gpu_instance_get_info\000gpu_instance_get_vgpu_heterogeneous_mode\000gpu_instance_get_vgpu_scheduler_log\000gpu_instance_get_vgpu_scheduler_state\000set_vgpu_version\000system_event_set_create\000system_event_set_free\000system_event_set_wait\000system_register_events\000system_set_conf_compute_key_rotation_threshold_info\000unit_get_fan_speed_info\000unit_get_led_state\000unit_get_psu_info\000unit_get_unit_info\000device_clear_ecc_error_counts\000device_get_inforom_version\000device_get_retired_pages\000device_modify_drain_state\000device_set_accounting_mode\000device_set_auto_boosted_clocks_enabled\000device_set_compute_mode\000device_set_ecc_mode\000device_set_gpu_operation_mode\000device_set_persistence_mode\000device_set_virtualization_mode\000unit_set_led_state\000device_get_min_max_clock_of_p_state\000device_remove_gpu_v2\000device_set_api_restriction\000device_set_vgpu_capabilities\000device_set_temperature_threshold\000device_set_default_auto_boosted_clocks_enabled\000device_set_driver_model\000device_power_smoothing_activate
_preset_profile\000device_power_smoothing_set_state\000device_power_smoothing_update_preset_profile_param\000device_read_write_prm_v1\000device_set_clock_offsets\000device_set_dram_encryption_mode\000device_set_nvlink_bw_mode\000device_set_nvlink_device_low_power_threshold\000device_set_power_management_limit_v2\000device_set_power_mizer_mode_v1\000device_set_vgpu_heterogeneous_mode\000device_set_vgpu_scheduler_state\000devic""e_workload_power_profile_clear_requested_profiles\000gpm_sample_get\000gpu_instance_set_vgpu_heterogeneous_mode\000gpu_instance_set_vgpu_scheduler_state\000device_get_process_utilization\000device_get_vgpu_process_utilization\000device_get_vgpu_utilization\000device_set_conf_compute_unprotected_mem_size\000device_register_events\000device_get_accounting_stats\000device_get_cpu_affinity\000device_get_gpu_instance_possible_placements_v2\000device_get_gpu_instance_profile_info_by_id_v\000device_get_gpu_instance_profile_info_v\000device_get_nvlink_remote_pci_info_v2\000device_get_supported_graphics_clocks\000device_get_thermal_settings\000device_get_vgpu_type_supported_placements\000device_reset_nvlink_error_counters\000device_set_default_fan_speed_v2\000device_set_power_management_limit\000event_set_wait_v2\000gpm_set_streaming_enabled\000gpu_instance_get_compute_instance_possible_placements\000gpm_mig_sample_get\000device_get_cpu_affinity_within_scope\000device_get_memory_affinity\000device_set_fan_control_policy\000device_set_fan_speed_v2\000device_set_gpu_locked_clocks\000device_set_memory_locked_clocks\000gpu_instance_get_compute_instance_profile_info_v\000get_excluded_device_info_by_index\000init_with_flags\000system_get_process_name\000system_set_conf_compute_gpus_ready_state\000system_set_nvlink_bw_mode\000vgpu_instance_clear_accounting_pids\000vgpu_instance_get_accounting_pids\000vgpu_instance_get_encoder_sessions\000vgpu_instance_get_encoder_stats\000vgpu_instance_get_fbc_sessions\000vgpu_instance_get_fbc_stats\000vgpu_instance_get_gpu_p
ci_id\000vgpu_instance_get_license_info_v2\000vgpu_instance_get_mdev_uuid\000vgpu_instance_get_placement_id\000vgpu_instance_get_runtime_state_size\000vgpu_instance_get_uuid\000vgpu_instance_get_vm_driver_version\000vgpu_type_get_bar1_info\000vgpu_type_get_class\000vgpu_type_get_device_id\000vgpu_type_get_name\000vgpu_type_get_license\000vgpu_instance_get_accounting_stats\000vgpu_instance_set_encoder_capacity\000vgpu_type_get_r""esolution\000device_get_gsp_firmware_version\000system_get_cuda_driver_version\000system_get_cuda_driver_version_v2\000device_get_accounting_mode\000device_get_brand\000device_get_compute_mode\000device_get_default_ecc_mode\000device_get_display_active\000device_get_display_mode\000device_get_gpc_clk_vf_offset\000device_get_host_vgpu_mode\000device_get_mem_clk_vf_offset\000device_get_performance_state\000device_get_persistence_mode\000device_get_power_state\000device_get_retired_pages_pending_status\000device_get_virtualization_mode\000device_query_drain_state\000device_get_api_restriction\000device_get_topology_common_ancestor\000device_on_same_board\000device_get_p2p_status\000device_get_nvlink_remote_device_type\000device_get_nvlink_state\000device_set_mig_mode\000vgpu_instance_get_accounting_mode\000vgpu_instance_get_ecc_mode\000device_get_handle_by_pci_bus_id_v2\000device_get_handle_by_serial\000device_get_handle_by_uuid\000event_set_create\000device_get_device_handle_from_mig_device_handle\000device_get_handle_by_uuidv\000device_create_gpu_instance\000device_get_gpu_instance_by_id\000device_get_mig_device_handle_by_index\000gpu_instance_create_compute_instance\000gpu_instance_get_compute_instance_by_id\000device_create_gpu_instance_with_placement\000gpu_instance_create_compute_instance_with_placement\000device_get_handle_by_index_v2\000unit_get_handle_by_index\000device_get_current_clocks_event_reasons\000device_get_supported_clocks_event_reasons\000device_get_supported_event_types\000device_get_total_energy_consumption\000device_get_t
otal_ecc_errors\000device_get_memory_error_counter\000device_get_nvlink_error_counter\000vgpu_instance_get_fb_usage\000vgpu_type_get_fb_reservation\000vgpu_type_get_framebuffer_size\000vgpu_type_get_gsp_heap_size\000device_get_count_v2\000get_excluded_device_count\000system_get_conf_compute_gpus_ready_state\000system_get_nvlink_bw_mode\000unit_get_count\000get_vgpu_driver_capabilities\000device_get_accounting_buffer_size\000devic""e_get_adaptive_clock_info_status\000device_get_architecture\000device_get_board_id\000device_get_bus_type\000device_get_compute_instance_id\000device_get_curr_pcie_link_generation\000device_get_curr_pcie_link_width\000device_get_enforced_power_limit\000device_get_fan_speed\000device_get_gpu_instance_id\000device_get_gpu_max_pcie_link_generation\000device_get_index\000device_get_inforom_configuration_checksum\000device_get_irq_num\000device_get_max_mig_device_count\000device_get_max_pcie_link_generation\000device_get_max_pcie_link_width\000device_get_memory_bus_width\000device_get_minor_number\000device_get_module_id\000device_get_multi_gpu_board\000device_get_num_fans\000device_get_num_gpu_cores\000device_get_numa_node_id\000device_get_pcie_link_max_speed\000device_get_pcie_replay_counter\000device_get_pcie_speed\000device_get_power_management_default_limit\000device_get_power_management_limit\000device_get_power_source\000device_get_power_usage\000device_is_mig_device_handle\000gpm_query_if_streaming_enabled\000device_get_clock_info\000device_get_encoder_capacity\000device_get_max_clock_info\000device_get_max_customer_boost_clock\000device_get_pcie_throughput\000device_get_temperature_threshold\000device_get_vgpu_capabilities\000device_get_clock\000device_get_fan_control_policy_v2\000device_get_fan_speed_v2\000device_get_gpu_instance_remaining_capacity\000device_get_nvlink_version\000device_get_target_fan_speed\000gpu_instance_get_compute_instance_remaining_capacity\000unit_get_temperature\000vgpu_type_get_max_instances\000device_get_nvli
nk_capability\000vgpu_instance_get_encoder_capacity\000vgpu_instance_get_frame_rate_limit\000vgpu_instance_get_gpu_instance_id\000vgpu_instance_get_license_status\000vgpu_instance_get_type\000vgpu_type_get_frame_rate_limit\000vgpu_type_get_gpu_instance_profile_id\000vgpu_type_get_max_instances_per_vm\000vgpu_type_get_num_display_heads\000vgpu_type_get_capabilities\000device_get_last_bbx_flush_timechar const *(nvmlReturn_t)\000nvmlR""eturn_t (char *, unsigned int)\000\000nvmlReturn_t (char const *, nvmlDevice_t *)\000\000\000nvmlReturn_t (int *)\000\000nvmlReturn_t (nvmlComputeInstance_t)\000nvmlReturn_t (nvmlComputeInstance_t, nvmlComputeInstanceInfo_t *)\000nvmlReturn_t (nvmlConfComputeGetKeyRotationThresholdInfo_t *)\000nvmlReturn_t (nvmlConfComputeSetKeyRotationThresholdInfo_t *)\000nvmlReturn_t (nvmlConfComputeSystemCaps_t *)\000nvmlReturn_t (nvmlConfComputeSystemState_t *)\000nvmlReturn_t (nvmlDevice_t)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, char *)\000nvmlReturn_t (nvmlDevice_t, char *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, char *, unsigned int)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, int *)\000\000nvmlReturn_t (nvmlDevice_t, int *, int *)\000\000\000nvmlReturn_t (nvmlDevice_t, int, nvmlFieldValue_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlBAR1Memory_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBrandType_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBridgeChipHierarchy_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBusType_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlC2cModeInfo_v1_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlClkMonStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockOffset_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, nvmlClockId_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, nvmlPstates_t, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, unsigned int *)\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlComputeMode_t *)\000nvmlReturn_t (nvmlDevice_t, 
nvmlComputeMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeGpuAttestationReport_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeGpuCertificate_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeMemSizeInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlCoolerInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceAddressingMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceArchitecture_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceAttributes_t *)\000nvmlReturn_t (nvmlDe""vice_t, nvmlDeviceCapabilities_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceCurrentClockFreqs_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevicePerfModes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceVgpuCapability_t, nvmlEnableState_t)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceVgpuCapability_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, int *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, nvmlGpuP2PCapsIndex_t, nvmlGpuP2PStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDramEncryptionInfo_t *, nvmlDramEncryptionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDramEncryptionInfo_t const *)\000nvmlReturn_t (nvmlDevice_t, nvmlDriverModel_t *, nvmlDriverModel_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDriverModel_t, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlEccCounterType_t)\000nvmlReturn_t (nvmlDevice_t, nvmlEccSramErrorStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlEccSramUniqueUncorrectedErrorCounts_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t *)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t)\000\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlEncoderType_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlFBCStats_t *)\000nvmlReturn_t 
(nvmlDevice_t, nvmlFanSpeedInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpmSample_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGpmSupport_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuDynamicPstatesInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuFabricInfoV_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuOperationMode_t *, nvmlGpuOperationMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuOperationMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuTopologyLevel_t, unsigned int *, nvmlDevice_t *)""\000nvmlReturn_t (nvmlDevice_t, nvmlGpuVirtualizationMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuVirtualizationMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGridLicensableFeatures_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlHostVgpuMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlHostname_v1_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlInforomObject_t, char *, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlMarginTemperature_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, nvmlMemoryLocation_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemory_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemory_v2_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvLinkInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvLinkPowerThres_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkGetBwMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkSetBwMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkSupportedBwModes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPRMTLV_v1_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlPciInfoExt_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPciInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPcieUtilCounter_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlPdi_t 
*)\000nvmlReturn_t (nvmlDevice_t, nvmlPlatformInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSmoothingProfile_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSmoothingState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSource_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerValue_v2_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlProcessDetailList_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlProcessUtilizationSample_t *, unsigned int *, unsigned PY_LONG_LONG)\000nvmlReturn_t (nvmlDevice_t, nvmlP""rocessesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPstates_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlPstates_t *, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlRepairStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t)\000nvmlReturn_t (nvmlDevice_t, nvmlRowRemapperHistogramValues_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlSamplingType_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlSample_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperatureThresholds_t, int *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperatureThresholds_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperature_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlUtilization_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuHeterogeneousMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuHeterogeneousMode_t const *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuInstancesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuPgpuMetadata_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuProcessesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerCapabilities_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerGetState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerLog_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerSetState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuTypeId_t, unsigned int 
*)\000nvmlReturn_t (nvmlDevice_t, nvmlWorkloadPowerProfileRequestedProfiles_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG *)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG *, unsigned long *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG, nvmlEventSet_t)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlVgpuInstanceUtilizationSample_t *)\000nvmlReturn_t (""nvmlDevice_t, unsigned PY_LONG_LONG, unsigned int *, nvmlVgpuProcessUtilizationSample_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlEncoderSessionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlFBCSessionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlProcessInfo_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlVgpuInstance_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *)\000\000\000\000\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlAccountingStats_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlEnableState_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpmSample_t)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, 
nvmlGpuInstancePlacement_t const *, nvmlGpuInstance_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstance_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstance_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuThermalSettings_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlIntNvLinkDeviceType_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlNvLinkCapability_t, unsigned i""nt *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlNvLinkErrorCounter_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlPciInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlReturn_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int)\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned long *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t)\000\000nvmlReturn_t (nvmlEventSet_t *)\000nvmlReturn_t (nvmlEventSet_t)\000nvmlReturn_t (nvmlEventSet_t, nvmlEventData_t *, unsigned int)\000nvmlReturn_t (nvmlGpuInstance_t)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlActiveVgpuInstanceInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlGpuInstanceInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuCreatablePlacementInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t const *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerLogInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerStateInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerState_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuTypeIdInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t *, unsigned int *)\000nvmlReturn_t 
(nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t const *, nvmlComputeInstance_t *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *)\000\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, unsigned int, nvmlComputeInstanceProfileInfo_v2_t *)\000nvmlReturn_t (nvmlPciInfo_t *)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlDetachGpuState""_t, nvmlPcieLinkState_t)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlEnableState_t *)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlEnableState_t)\000nvmlReturn_t (nvmlSystemConfComputeSettings_t *)\000nvmlReturn_t (nvmlSystemDriverBranchInfo_t *, unsigned int)\000nvmlReturn_t (nvmlSystemEventSetCreateRequest_t *)\000nvmlReturn_t (nvmlSystemEventSetFreeRequest_t *)\000nvmlReturn_t (nvmlSystemEventSetWaitRequest_t *)\000nvmlReturn_t (nvmlSystemRegisterEventRequest_t *)\000nvmlReturn_t (nvmlUUID_t const *, nvmlDevice_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlLedColor_t)\000nvmlReturn_t (nvmlUnit_t, nvmlLedState_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlPSUInfo_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlUnitFanSpeeds_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlUnitInfo_t *)\000nvmlReturn_t (nvmlUnit_t, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t (nvmlUnit_t, unsigned int, unsigned int *)\000nvmlReturn_t (nvmlVgpuDriverCapability_t, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t)\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int)\000\000\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int, nvmlVgpuVmIdType_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlEnableState_t *)\000\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlFBCStats_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuLicenseInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuMetadata_t *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, 
nvmlVgpuPlacementId_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuRuntimeState_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuTypeId_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, nvmlEncoderSessionInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, nvmlFBCSessionInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_""t, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int, nvmlAccountingStats_t *)\000nvmlReturn_t (nvmlVgpuMetadata_t *, nvmlVgpuPgpuMetadata_t *, nvmlVgpuPgpuCompatibility_t *)\000nvmlReturn_t (nvmlVgpuTypeId_t, char *, unsigned int *)\000\000nvmlReturn_t (nvmlVgpuTypeId_t, char *, unsigned int)\000nvmlReturn_t (nvmlVgpuTypeId_t, nvmlVgpuCapability_t, unsigned int *)\000nvmlReturn_t (nvmlVgpuTypeId_t, nvmlVgpuTypeBar1Info_t *)\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *)\000\000\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned int, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuTypeMaxInstance_t *)\000nvmlReturn_t (nvmlVgpuVersion_t *)\000nvmlReturn_t (nvmlVgpuVersion_t *, nvmlVgpuVersion_t *)\000nvmlReturn_t (unsigned int *)\000\000\000\000\000nvmlReturn_t (unsigned int *, nvmlHwbcEntry_t *)\000nvmlReturn_t (unsigned int)\000\000\000nvmlReturn_t (unsigned int, char *, unsigned int)\000nvmlReturn_t (unsigned int, nvmlDevice_t *)\000nvmlReturn_t (unsigned int, nvmlExcludedDeviceInfo_t *)\000nvmlReturn_t (unsigned int, nvmlUnit_t *)\000nvmlReturn_t (unsigned int, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t 
(void)\000\000nvmlErrorString\000nvmlSystemGetDriverVersion\000nvmlSystemGetNVMLVersion\000nvmlDeviceGetHandleByPciBusId_v2\000nvmlDeviceGetHandleBySerial\000nvmlDeviceGetHandleByUUID\000nvmlSystemGetCudaDriverVersion\000nvmlSystemGetCudaDriverVersion_v2\000nvmlComputeInstanceDestroy\000nvmlComputeInstanceGetInfo_v2\000nvmlSystemGetConfComputeKeyRotationThresholdInfo\000nvmlSystemSetConfComputeKeyRotationThresholdInfo\000nvmlSystemGetConfComputeCapabilities\000nvmlSystemGetConfComputeState\000nvmlDeviceClearAccountingPids\000nvmlDeviceClearCpuAffinity\000nvmlDeviceResetGpuLockedClocks\000nvmlDeviceRese""tMemoryLockedClocks\000nvmlDeviceSetCpuAffinity\000nvmlDeviceValidateInforom\000nvmlDeviceGetGspFirmwareVersion\000nvmlDeviceGetPgpuMetadataString\000nvmlDeviceGetBoardPartNumber\000nvmlDeviceGetInforomImageVersion\000nvmlDeviceGetName\000nvmlDeviceGetSerial\000nvmlDeviceGetUUID\000nvmlDeviceGetVbiosVersion\000nvmlDeviceGetGpcClkVfOffset\000nvmlDeviceGetMemClkVfOffset\000nvmlDeviceGetCudaComputeCapability\000nvmlDeviceGetGpcClkMinMaxVfOffset\000nvmlDeviceGetMemClkMinMaxVfOffset\000nvmlDeviceClearFieldValues\000nvmlDeviceGetFieldValues\000nvmlDeviceGetBAR1MemoryInfo\000nvmlDeviceGetBrand\000nvmlDeviceGetBridgeChipInfo\000nvmlDeviceGetBusType\000nvmlDeviceGetC2cModeInfoV\000nvmlDeviceGetClkMonStatus\000nvmlDeviceGetClockOffsets\000nvmlDeviceSetClockOffsets\000nvmlDeviceGetClock\000nvmlDeviceGetMinMaxClockOfPState\000nvmlDeviceGetClockInfo\000nvmlDeviceGetMaxClockInfo\000nvmlDeviceGetMaxCustomerBoostClock\000nvmlDeviceGetComputeMode\000nvmlDeviceSetComputeMode\000nvmlDeviceGetConfComputeGpuAttestationReport\000nvmlDeviceGetConfComputeGpuCertificate\000nvmlDeviceGetConfComputeMemSizeInfo\000nvmlDeviceGetCoolerInfo\000nvmlDeviceGetAddressingMode\000nvmlDeviceGetArchitecture\000nvmlDeviceGetAttributes_v2\000nvmlDeviceGetCapabilities\000nvmlDeviceGetCurrentClockFreqs\000nvmlDeviceGetPerformanceModes\000nvmlDeviceGetPowerMizerMode_v1\000nvmlDeviceSetPowerMizerMode_v1\000nvml
DeviceSetVgpuCapabilities\000nvmlDeviceGetVgpuCapabilities\000nvmlDeviceGetDeviceHandleFromMigDeviceHandle\000nvmlDeviceOnSameBoard\000nvmlDeviceGetP2PStatus\000nvmlDeviceGetTopologyCommonAncestor\000nvmlDeviceGetDramEncryptionMode\000nvmlDeviceSetDramEncryptionMode\000nvmlDeviceGetDriverModel_v2\000nvmlDeviceSetDriverModel\000nvmlDeviceClearEccErrorCounts\000nvmlDeviceGetSramEccErrorStatus\000nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts\000nvmlDeviceGetAccountingMode\000nvmlDeviceGetDefaultEccMode\000nvmlDeviceGetDisplayActive\000nvmlDeviceGetDisplayMode\000nvmlDeviceGetPersistenceMode\000nvmlDevice""GetRetiredPagesPendingStatus\000nvmlDeviceGetAutoBoostedClocksEnabled\000nvmlDeviceGetEccMode\000nvmlDeviceSetAccountingMode\000nvmlDeviceSetAutoBoostedClocksEnabled\000nvmlDeviceSetEccMode\000nvmlDeviceSetPersistenceMode\000nvmlDeviceSetDefaultAutoBoostedClocksEnabled\000nvmlDeviceGetEncoderCapacity\000nvmlDeviceGetFBCStats\000nvmlDeviceGetFanSpeedRPM\000nvmlGpmSampleGet\000nvmlGpmQueryDeviceSupport\000nvmlDeviceGetDynamicPstatesInfo\000nvmlDeviceGetGpuFabricInfoV\000nvmlDeviceGetGpuOperationMode\000nvmlDeviceSetGpuOperationMode\000nvmlDeviceGetTopologyNearestGpus\000nvmlDeviceGetVirtualizationMode\000nvmlDeviceSetVirtualizationMode\000nvmlDeviceGetGridLicensableFeatures_v4\000nvmlDeviceGetHostVgpuMode\000nvmlDeviceGetHostname_v1\000nvmlDeviceSetHostname_v1\000nvmlDeviceGetInforomVersion\000nvmlDeviceGetMarginTemperature\000nvmlDeviceGetMemoryErrorCounter\000nvmlDeviceGetTotalEccErrors\000nvmlDeviceGetConfComputeProtectedMemoryUsage\000nvmlDeviceGetMemoryInfo_v2\000nvmlDeviceGetNvLinkInfo\000nvmlDeviceSetNvLinkDeviceLowPowerThreshold\000nvmlDeviceGetNvlinkBwMode\000nvmlDeviceSetNvlinkBwMode\000nvmlDeviceGetNvlinkSupportedBwModes\000nvmlDeviceReadWritePRM_v1\000nvmlDeviceGetRetiredPages\000nvmlDeviceGetRetiredPages_v2\000nvmlDeviceGetPciInfoExt\000nvmlDeviceGetPciInfo_v3\000nvmlDeviceGetPcieThroughput\000nvmlDeviceGetPdi\000nvmlDeviceGetPlatformInfo\000nvmlDevicePo
werSmoothingActivatePresetProfile\000nvmlDevicePowerSmoothingUpdatePresetProfileParam\000nvmlDevicePowerSmoothingSetState\000nvmlDeviceGetPowerSource\000nvmlDeviceSetPowerManagementLimit_v2\000nvmlDeviceGetRunningProcessDetailList\000nvmlDeviceGetProcessUtilization\000nvmlDeviceGetProcessesUtilizationInfo\000nvmlDeviceGetPerformanceState\000nvmlDeviceGetPowerState\000nvmlDeviceGetSupportedPerformanceStates\000nvmlDeviceGetRepairStatus\000nvmlDeviceGetAPIRestriction\000nvmlDeviceSetAPIRestriction\000nvmlDeviceGetRowRemapperHistogram\000nvmlDeviceGetSamples\000nvmlDeviceSetTemperatureThreshold\000nvmlDeviceG""etTemperatureThreshold\000nvmlDeviceGetTemperatureV\000nvmlDeviceGetUtilizationRates\000nvmlDeviceGetVgpuHeterogeneousMode\000nvmlDeviceSetVgpuHeterogeneousMode\000nvmlDeviceGetVgpuInstancesUtilizationInfo\000nvmlDeviceGetVgpuMetadata\000nvmlDeviceGetVgpuProcessesUtilizationInfo\000nvmlDeviceGetVgpuSchedulerCapabilities\000nvmlDeviceGetVgpuSchedulerState\000nvmlDeviceGetVgpuSchedulerLog\000nvmlDeviceSetVgpuSchedulerState\000nvmlDeviceGetVgpuTypeCreatablePlacements\000nvmlDeviceGetVgpuTypeSupportedPlacements\000nvmlVgpuTypeGetMaxInstances\000nvmlDeviceWorkloadPowerProfileClearRequestedProfiles\000nvmlDeviceGetCurrentClocksEventReasons\000nvmlDeviceGetSupportedClocksEventReasons\000nvmlDeviceGetSupportedEventTypes\000nvmlDeviceGetTotalEnergyConsumption\000nvmlDeviceGetLastBBXFlushTime\000nvmlDeviceSetConfComputeUnprotectedMemSize\000nvmlDeviceRegisterEvents\000nvmlDeviceGetVgpuUtilization\000nvmlDeviceGetVgpuProcessUtilization\000nvmlDeviceGetAccountingBufferSize\000nvmlDeviceGetAdaptiveClockInfoStatus\000nvmlDeviceGetBoardId\000nvmlDeviceGetComputeInstanceId\000nvmlDeviceGetCurrPcieLinkGeneration\000nvmlDeviceGetCurrPcieLinkWidth\000nvmlDeviceGetEnforcedPowerLimit\000nvmlDeviceGetFanSpeed\000nvmlDeviceGetGpuInstanceId\000nvmlDeviceGetGpuMaxPcieLinkGeneration\000nvmlDeviceGetIndex\000nvmlDeviceGetInforomConfigurationChecksum\000nvmlDeviceGetIrqNum\000nvmlDeviceGetM
axMigDeviceCount\000nvmlDeviceGetMaxPcieLinkGeneration\000nvmlDeviceGetMaxPcieLinkWidth\000nvmlDeviceGetMemoryBusWidth\000nvmlDeviceGetMinorNumber\000nvmlDeviceGetModuleId\000nvmlDeviceGetMultiGpuBoard\000nvmlDeviceGetNumFans\000nvmlDeviceGetNumGpuCores\000nvmlDeviceGetNumaNodeId\000nvmlDeviceGetPcieLinkMaxSpeed\000nvmlDeviceGetPcieReplayCounter\000nvmlDeviceGetPcieSpeed\000nvmlDeviceGetPowerManagementDefaultLimit\000nvmlDeviceGetPowerManagementLimit\000nvmlDeviceGetPowerUsage\000nvmlDeviceIsMigDeviceHandle\000nvmlGpmQueryIfStreamingEnabled\000nvmlDeviceGetEncoderSessions\000nvmlDeviceGetFBCSessions\000nvmlD""eviceGetComputeRunningProcesses_v3\000nvmlDeviceGetMPSComputeRunningProcesses_v3\000nvmlDeviceGetActiveVgpus\000nvmlDeviceGetCreatableVgpus\000nvmlDeviceGetSupportedVgpus\000nvmlDeviceGetAccountingPids\000nvmlDeviceGetDecoderUtilization\000nvmlDeviceGetEncoderUtilization\000nvmlDeviceGetGspFirmwareMode\000nvmlDeviceGetJpgUtilization\000nvmlDeviceGetMigMode\000nvmlDeviceGetMinMaxFanSpeed\000nvmlDeviceGetOfaUtilization\000nvmlDeviceGetPowerManagementLimitConstraints\000nvmlDeviceGetSupportedMemoryClocks\000nvmlDeviceGetEncoderStats\000nvmlDeviceGetRemappedRows\000nvmlDeviceResetNvLinkErrorCounters\000nvmlDeviceSetDefaultFanSpeed_v2\000nvmlDeviceSetPowerManagementLimit\000nvmlGpmSetStreamingEnabled\000nvmlDeviceGetAccountingStats\000nvmlDeviceGetMigDeviceHandleByIndex\000nvmlDeviceGetNvLinkState\000nvmlDeviceGetFanControlPolicy_v2\000nvmlDeviceSetFanControlPolicy\000nvmlGpmMigSampleGet\000nvmlDeviceGetGpuInstancePossiblePlacements_v2\000nvmlDeviceCreateGpuInstanceWithPlacement\000nvmlDeviceGetGpuInstanceProfileInfoByIdV\000nvmlDeviceGetGpuInstanceProfileInfoV\000nvmlDeviceCreateGpuInstance\000nvmlDeviceGetGpuInstanceById\000nvmlDeviceGetGpuInstances\000nvmlDeviceGetThermalSettings\000nvmlDeviceGetNvLinkRemoteDeviceType\000nvmlDeviceGetNvLinkCapability\000nvmlDeviceGetNvLinkErrorCounter\000nvmlDeviceGetNvLinkRemotePciInfo_v2\000nvmlDeviceSetMigMode\000nvmlDeviceGet
FanSpeed_v2\000nvmlDeviceGetGpuInstanceRemainingCapacity\000nvmlDeviceGetNvLinkVersion\000nvmlDeviceGetTargetFanSpeed\000nvmlDeviceGetSupportedGraphicsClocks\000nvmlDeviceSetFanSpeed_v2\000nvmlDeviceSetGpuLockedClocks\000nvmlDeviceSetMemoryLockedClocks\000nvmlDeviceGetCpuAffinity\000nvmlDeviceGetCpuAffinityWithinScope\000nvmlDeviceGetMemoryAffinity\000nvmlEventSetCreate\000nvmlEventSetFree\000nvmlEventSetWait_v2\000nvmlGpuInstanceDestroy\000nvmlGpuInstanceGetActiveVgpus\000nvmlGpuInstanceGetInfo\000nvmlGpuInstanceGetVgpuTypeCreatablePlacements\000nvmlGpuInstanceGetVgpuHeterogeneousMode\000nvmlGpuInstanceSetVgp""uHeterogeneousMode\000nvmlGpuInstanceGetVgpuSchedulerLog\000nvmlGpuInstanceGetVgpuSchedulerState\000nvmlGpuInstanceSetVgpuSchedulerState\000nvmlGpuInstanceGetCreatableVgpus\000nvmlGpuInstanceGetComputeInstancePossiblePlacements\000nvmlGpuInstanceCreateComputeInstanceWithPlacement\000nvmlGpuInstanceCreateComputeInstance\000nvmlGpuInstanceGetComputeInstanceById\000nvmlGpuInstanceGetComputeInstances\000nvmlGpuInstanceGetComputeInstanceRemainingCapacity\000nvmlGpuInstanceGetComputeInstanceProfileInfoV\000nvmlDeviceDiscoverGpus\000nvmlDeviceRemoveGpu_v2\000nvmlDeviceQueryDrainState\000nvmlDeviceModifyDrainState\000nvmlSystemGetConfComputeSettings\000nvmlSystemGetDriverBranch\000nvmlSystemEventSetCreate\000nvmlSystemEventSetFree\000nvmlSystemEventSetWait\000nvmlSystemRegisterEvents\000nvmlDeviceGetHandleByUUIDV\000nvmlUnitSetLedState\000nvmlUnitGetLedState\000nvmlUnitGetPsuInfo\000nvmlUnitGetFanSpeedInfo\000nvmlUnitGetUnitInfo\000nvmlUnitGetDevices\000nvmlUnitGetTemperature\000nvmlGetVgpuDriverCapabilities\000nvmlVgpuInstanceClearAccountingPids\000nvmlVgpuInstanceGetGpuPciId\000nvmlVgpuInstanceGetMdevUUID\000nvmlVgpuInstanceGetUUID\000nvmlVgpuInstanceGetVmDriverVersion\000nvmlVgpuInstanceGetVmID\000nvmlVgpuInstanceGetAccountingMode\000nvmlVgpuInstanceGetEccMode\000nvmlVgpuInstanceGetFBCStats\000nvmlVgpuInstanceGetLicenseInfo_v2\000nvmlVgpuInstanceGetMetadata\000nvm
lVgpuInstanceGetPlacementId\000nvmlVgpuInstanceGetRuntimeStateSize\000nvmlVgpuInstanceGetType\000nvmlVgpuInstanceGetFbUsage\000nvmlVgpuInstanceGetEncoderCapacity\000nvmlVgpuInstanceGetFrameRateLimit\000nvmlVgpuInstanceGetGpuInstanceId\000nvmlVgpuInstanceGetLicenseStatus\000nvmlVgpuInstanceGetEncoderSessions\000nvmlVgpuInstanceGetFBCSessions\000nvmlVgpuInstanceGetAccountingPids\000nvmlVgpuInstanceGetEncoderStats\000nvmlVgpuInstanceSetEncoderCapacity\000nvmlVgpuInstanceGetAccountingStats\000nvmlGetVgpuCompatibility\000nvmlVgpuTypeGetClass\000nvmlVgpuTypeGetName\000nvmlVgpuTypeGetLicense\000nvmlVgpuTypeGetCapabilit""ies\000nvmlVgpuTypeGetBAR1Info\000nvmlVgpuTypeGetFbReservation\000nvmlVgpuTypeGetFramebufferSize\000nvmlVgpuTypeGetGspHeapSize\000nvmlVgpuTypeGetDeviceID\000nvmlVgpuTypeGetFrameRateLimit\000nvmlVgpuTypeGetGpuInstanceProfileId\000nvmlVgpuTypeGetMaxInstancesPerVm\000nvmlVgpuTypeGetNumDisplayHeads\000nvmlVgpuTypeGetResolution\000nvmlVgpuTypeGetMaxInstancesPerGpuInstance\000nvmlSetVgpuVersion\000nvmlGetVgpuVersion\000nvmlDeviceGetCount_v2\000nvmlGetExcludedDeviceCount\000nvmlSystemGetConfComputeGpusReadyState\000nvmlSystemGetNvlinkBwMode\000nvmlUnitGetCount\000nvmlSystemGetHicVersion\000nvmlInitWithFlags\000nvmlSystemSetConfComputeGpusReadyState\000nvmlSystemSetNvlinkBwMode\000nvmlSystemGetProcessName\000nvmlDeviceGetHandleByIndex_v2\000nvmlGetExcludedDeviceInfoByIndex\000nvmlUnitGetHandleByIndex\000nvmlSystemGetTopologyGpuSet\000nvmlInit_v2\000nvmlShutdownint (__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<char>  &, PyObject *, char *)\000int (__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<double>  &, PyObject *, double *)\000int (__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<float>  &, PyObject *, float *)\000int (__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<int32_t>  &, PyObject *, int32_t *)\000int (__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<int64_t>  &, PyObject *, int64_t *)\000int 
(__pyx_t_4cuda_8bindings_9_internal_5utils_nested_resource<int>  &, PyObject *, int *)\000void *(PyObject *, Py_ssize_t, struct __pyx_opt_args_4cuda_8bindings_9_internal_5utils_get_buffer_pointer *__pyx_optional_args)\000__pyx_fuse_3get_nested_resource_ptr\000__pyx_fuse_5get_nested_resource_ptr\000__pyx_fuse_4get_nested_resource_ptr\000__pyx_fuse_1get_nested_resource_ptr\000__pyx_fuse_2get_nested_resource_ptr\000__pyx_fuse_0get_nested_resource_ptr\000get_buffer_pointer\320\000@\300\001\360\030\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300!\330\004\020\220\001\220\021\330\004\013\2101\200\001\360 \000\n""\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300)\3101\310A\330\004\020\220\001\220\021\330\004\014\210E\220\031\230%\230q\320\000@\300\001\360\030\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300!\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\022\000\005\037\320\0360\260\001\260\021\330\004!\320!5\260W\270I\300Q\330\004$\240C\240q\250\001\330\t\n\330\010\025\320\025-\250Q\250h\260h\270m\3101\330\004\020\220\001\220\021\340\004\013\210:\220Y\230a\230z\250\025\250a\200\001\360\020\000\005\037\320\0360\260\001\260\021\330\004!\320!5\260W\270I\300Q\330\004$\240C\240q\250\001\340\t\n\330\010\025\320\025/\250q\260\010\270\010\300\r\310Q\330\004\020\220\001\220\021\200\001\360\024\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\320AQ\320Q[\320[i\320ij\330\004\020\220\001\220\021\200\001\360\030\000\0050\320/C\3001\330\004+\320+I\310\036\320W`\320`a\360\006\000\n\013\330\010\013\210;\320\0269\270\025\270a\330\010\013\2109\220A\330\010\013\320\013\033\2301\330\010\013\2108\2201\330\010\025\320\025=\270Q\270h\300h\320N`\320`n\320no\330\004\025\220Q\220a\340\004\007\200s\210'\220\023\220A\330\010\017\210q\340\004'\240v\250Q\250g\260S\270\n\300)\320Ka\320ah\320hm\320mr\320rs\330\004\022\320\022#\2401\340\t\n\330\010\025\320\025=\270Q\270h\300h\320N`\320`n\320no\330\004\020\220\001\220\021\340\004\013\
2101\200\001\360\022\000\005\"\240\021\240!\330\t\n\330\010\025\320\0250\260\001\260\033\270O\3107\320RS\330\004\025\220Q\220a\330\004\007\200u\210A\210S\220\003\2201\330\010\023\2206\230\021\230'\240\025\240i\320/A\300\027\310\005\310U\320RV\320VX\320XY\330\004&\240f\250A\250W\260E\270\021\270&\300\t\320I[\320[b\320bg\320gl\320lm\330\t\n\330\010\025\320\0250\260\001\260\033\270O\3107\320Rb\320bm\320mn\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005\"\240\021\240!\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\037\320PW\320WX\330\004\025\220Q\220a\330\004\007\200u\210A\210S\220\003\2201\330""\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004\037\230v\240Q\240g\250U\260!\2606\270\031\320BX\320X_\320_d\320di\320ij\330\004\"\320\"3\2604\260q\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\037\320PW\320WX\330\004\020\220\001\220\021\330\004\013\2101\200\001\360 \000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300+\310Q\310a\330\004\020\220\001\220\021\330\004\014\210K\220q\320\000\\\320\\]\360\032\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\034\310Q\310a\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\330\0040\260\001\260\026\260q\200\001\360 
\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300-\310q\320PQ\330\004\020\220\001\220\021\330\004\014\210M\230\021\200\001\360\026\000\0050\320/?\270q\330\0041\3201I\310\033\320Tf\320fo\320op\330\004\023\220;\320\036=\270U\300!\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\021\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\036\310q\330\004\020\220\001\220\021\200\001\360\026\000\0050\320/?\270q\330\0043\3203M\310[\320Xj\320js\320st\330\t\n\330\010\025\320\0252\260!\2608\2708\3001\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\016\000\005\031\320\0300\260\001\260\021\330\004\007\200s\210!\2107\220\"\220A\330\010\016\210j\230\001\230\021\360\006\000\005\013\210!\2108\220?\240(\250/\3209J\310!\3108\320SV\320VW\320WX\340\t\n\330\010\025\320\025-\250Q\250h\260h\270a\270q\330\004\020\220\001\220\021\200\001\330/0\330\004\007\200w\210o\230Q\330\010\017\210q\330\004\013\210<\220q\230\001\320\000@\300\001\360\030\000\n\013\330\010\025\320\025)\250\021\250(\260(\270!\2701\330\004\020\220\001\220\021\330\004\013\210>\230\021\200\001\360\026\000\0051\3200J\310!\330\0044\3204X\320Xc\320cl\320lu\320uv\330\t\n\330\010\025\320\0256\260a\260x\270x\300q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\n\013\330\010\025\320""\025;\2701\270A\330\004\020\220\001\220\021\200\001\360\024\000\005)\250\001\250\021\330\004+\2501\250A\330\t\n\330\010\025\320\025)\250\021\250(\260(\270/\310\026\320Oe\320er\360\000\000s\001D\002\360\000\000D\002S\002\360\000\000S\002a\002\360\000\000a\002b\002\330\004\025\220Q\220a\330\004\032\230&\240\001\240\034\250Q\250a\330\004%\320%5\260W\270I\300Q\330\004\007\200|\2201\220C\220s\230!\330\010\017\210q\330\t\n\330\010\025\320\025)\250\021\250(\260(\270/\310\026\320Oe\320er\360\000\000s\001D\002\360\000\000D\002S\002\360\000\000S\002a\002\360\000\000a\002b\002\330\004\020\220\001\220\021\330\004\014\210O\2301\230D\240\001\200\001\33
0\004;\2701\270F\300!\320\000]\320]^\360\034\000\n\013\330\010\025\320\025+\2501\250H\260I\270X\300Y\320N`\320`l\320lm\320mn\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\020\000\n\013\330\010\025\320\025+\2501\250M\270\021\330\004\020\220\001\220\021\200\001\360\020\000\005#\240!\2401\330\t\n\330\010\025\320\0250\260\001\3201E\300_\320TZ\320Zi\320ij\330\004\025\220Q\220a\330\004\007\200v\210Q\210c\220\023\220A\330\010\017\210q\330\004\037\230u\240A\240V\2501\250A\330\004\035\230Q\330\t\n\330\010\025\320\0250\260\001\3201E\300_\320Ta\320ap\320pq\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\320\000@\300\001\360\030\000\n\013\330\010\025\320\025)\250\021\250(\260(\270!\2701\330\004\020\220\001\220\021\330\004\013\2101\320\0001\260\021\360\022\000\n\013\330\010\025\320\025*\250!\2501\250A\330\004\020\220\001\220\021\330\004\013\2101\320\000@\300\001\360\030\000\n\013\330\010\025\320\0251\260\021\260(\270(\300!\3001\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\020\000\n\013\330\010\025\320\025+\2501\320,<\270A\330\004\020\220\001\220\021\320\000;\2701\360\030\000\n\013\330\010\025\320\025,\250A\250X\260X\270Q\270a\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\024\000\n\013\330\010\025\320\0251\260\021\260(\270(\320BV\320VW\330\004\020\220\001\220\021\200\001\360&""\000\n\013\330\010\025\320\0251\260\021\260(\270(\320BX\320XY\320Yj\320jk\360\000\000l\001I\002\360\000\000I\002J\002\360\000\000J\002K\002\330\004\020\220\001\220\021\330\004\014\210E\320\021\"\320\"?\270q\200\001\360\024\000\n\013\330\010\025\320\025+\2501\320,<\270J\320FW\320Wb\320br\320rs\330\004\020\220\001\220\021\200\001\360\020\000\n\013\330\010\025\320\025+\2501\320,N\310a\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0251\260\021\260(\270(\300.\320PQ\330\004\020\220\001\220\021\200\001\360\020\000\005*\250\021\250!\330\t\n\330\010\025\320\0251\260\021\260(\270(\300/\320Q`\320`a\330\004\025\220Q\220a\330
\004,\320,>\270a\270}\310A\310Q\330\0047\3207S\320S^\320^k\320kt\320tu\330\004\007\200}\220A\220S\230\003\2301\330\010\017\210q\330\t\n\330\010\025\320\0251\260\021\260(\270(\300/\320Q`\320`a\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\024\000\n\013\330\010\025\320\025+\2501\320,>\270n\310G\320Sm\320mn\330\004\020\220\001\220\021\200\001\360 \000\n\013\330\010\025\320\0251\260\021\260(\270(\300!\300<\310q\320PQ\330\004\020\220\001\220\021\330\004\014\210L\230\001\320\000;\2701\360\026\000\005\010\200t\210:\220Q\220h\230a\330\010\016\210i\220q\230\001\330\004 \240\005\240W\250G\2601\330\004\032\230!\340\t\n\330\010\025\320\0250\260\001\260\035\270j\310\001\310\021\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\020\000\0052\3201F\300a\330\0045\3205T\320T_\320_n\320nw\320wx\330\t\n\330\010\025\320\0259\270\021\270!\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\0052\3201J\310!\330\0045\3205X\320Xc\320cn\320nw\320wx\330\t\n\330\010\025\320\025;\2701\270H\300H\310A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\032\000\0052\3201N\310a\330\0045\3205\\\320\\g\320gn\320nw\320wx\330\004\010\210\013\320\023?\270u\300A\330\t\n\330\010\025\320\025B\300!\300=\320P^\320^g\320gt\320tu\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\0052\260\021\260!\330\t\n\330\010\025\320\0254""\260A\260X\270X\300V\310?\320Zq\320qr\330\004\025\220Q\220a\330\0040\3200H\310\001\320I^\320^_\320_`\330\004;\320;]\320]h\320hs\320s|\320|}\330\004\007\320\007\034\230A\230S\240\003\2401\330\010\017\210q\330\t\n\330\010\025\320\0254\260A\260X\270X\320EV\320Ve\320e|\320|}\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\0042\260!\2606\270\021\200\001\360\020\000\n\013\330\010\025\320\0252\260!\2608\2701\330\004\020\220\001\220\021\200\001\360 
\000\n\013\330\010\025\320\0252\260!\2608\2708\3001\300I\310Q\310a\330\004\020\220\001\220\021\330\004\014\210E\220\031\230%\230q\200\001\360\024\000\n\013\330\010\025\320\0252\260!\2608\2708\3005\320H`\320`a\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0252\260!\2608\2708\320CT\320TU\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0252\260!\2608\2708\320CV\320VW\330\004\020\220\001\220\021\200\001\360\024\000\n\013\330\010\025\320\0252\260!\2608\2708\320CZ\320Zf\320ft\320tu\330\004\020\220\001\220\021\320\000[\320[\\\360\030\000\n\013\330\010\025\320\0253\2601\3204F\300n\320TU\320UV\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005*\250\021\250!\330\t\n\330\010\025\320\0253\2601\3204H\310\017\320Wf\320fu\320uv\330\004\025\220Q\220a\330\004'\240~\260Q\260m\3001\300A\330\0042\3202J\310+\320Ua\320aj\320jk\330\004\007\200}\220A\220S\230\003\2301\330\010\017\210q\330\t\n\330\010\025\320\0253\2601\3204H\310\017\320Wf\320fu\320uv\330\004\020\220\001\220\021\330\004\013\2101\320\000(\250\001\360\022\000\n\013\330\010\025\320\0253\2601\260A\260Q\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\0043\2601\260F\270!\200\001\360\020\000\n\013\330\010\025\320\0253\2601\260H\270A\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0253\2601\260H\270H\320De\320ef\330\004\020\220\001\220\021\320\0003\2601\360\032\000\n\013\330\010\025\320\0255\260Q\260h\270h\300e\3101\310A\330\004\020\220\001\220\021\330\004\013\210>\230\021\200\001\360\026\000""\0054\3203F\300a\330\0047\3207T\320T_\320_r\320r{\320{|\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\021\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\0054\3203H\310\001\330\004,\320,K\320K\\\320\\e\320ef\330\0041\3201G\300}\320T]\320]^\330\0045\3205O\310}\320\\e\320ef\340\t\n\330\010\025\320\025-\250Q\320.A\320AT\320TU\330\004\020\220\001\220\021\340\004\013\2101\200\001\360$\000\n\013\330\010\025\320\0254\260A\3205I\3
10\037\320XY\320Yh\320hi\320iv\320vw\320wx\330\004\020\220\001\220\021\330\004\014\210O\230=\250\001\200\001\360\022\000\n\013\330\010\025\320\0254\260A\260X\270X\320E]\320]^\330\004\020\220\001\220\021\200\001\360\024\000\n\013\330\010\025\320\0254\260A\260X\270X\320EY\320YZ\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0254\260A\260X\270X\320Ec\320cd\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0254\260A\260X\270X\320Ef\320fg\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0254\260A\260X\270X\300Q\330\004\020\220\001\220\021\200\001\360 \000\n\013\330\010\025\320\0254\260A\260X\270X\300Q\300i\310q\320PQ\330\004\020\220\001\220\021\330\004\014\320\014)\250\031\3202O\310q\200\001\360 \000\n\013\330\010\025\320\0254\260A\260X\270X\300Q\300m\320ST\320TU\330\004\020\220\001\220\021\330\004\014\210M\230\021\200\001\360\030\000\0055\3204F\300a\330\0048\3208T\320T_\320_t\320t}\320}~\330\t\n\330\010\025\320\0251\260\021\260(\270(\300/\320QR\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\0055\3204H\310\001\330\0045\3205P\320P[\320[n\320nw\320wx\330\004\024\220K\320\037B\300%\300q\330\t\n\330\010\025\320\0253\2601\260H\270H\300A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\0055\3204H\310\001\330\0045\3205P\320P[\320[n\320nw\320wx\330\t\n\330\010\025\320\025=\270Q\270h\300h\320N`\320`n\320no\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\0055\3204J\310!\330\0045\3205R\320R]\320]n\320nw\320wx\330\004\022""\220+\320\035B\300%\300q\330\t\n\330\010\025\320\025>\270a\270q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\0055\3204M\310Q\330\0040\3200S\320Sa\320aj\320jk\340\t\n\330\010\013\210;\320\026>\270e\3001\330\010\013\210=\230\001\330\010\013\320\013\034\230A\330\010\025\320\0252\260!\260=\300\016\310a\330\004\025\220Q\220a\340\004\007\200s\210+\220S\230\001\330\010\017\210q\340\004(\250\006\250a\250w\260c\270\036\300y\320Pf\320fm\320m
r\320rw\320wx\330\004\022\320\022$\240A\340\t\n\330\010\025\320\0252\260!\260=\300\016\310a\330\004\020\220\001\220\021\340\004\013\2101\200\001\360\020\000\0055\3204P\320PQ\330\0045\3205X\320Xc\320cn\320nw\320wx\330\004\014\210K\320\027B\300%\300q\330\t\n\330\010\025\320\0255\260Q\260a\330\004\020\220\001\220\021\330\004\013\2101\320\0005\260Q\360\022\000\n\013\330\010\025\320\0256\260a\260q\270\001\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\024\000\005\"\240\021\240!\330\t\n\330\010\025\320\0255\260Q\330\014\024\220A\330\014$\240A\330\014\r\330\014\r\340\004\025\220Q\220a\330\004\007\200u\210A\210S\220\003\2201\330\010\023\2206\230\021\230'\240\025\240i\320/A\300\027\310\005\310U\320RV\320VX\320XY\330\004&\240f\250A\250W\260K\270q\300\006\300i\320Oa\320ah\320hm\320mr\320rs\330\t\n\330\010\025\320\0255\260Q\330\014\024\220A\330\014$\240A\330\014\r\330\014\034\230K\240q\340\004\020\220\001\220\021\330\004\013\2101\200\001\360\024\000\005%\320$5\260Q\330\004(\320(C\3006\310\031\320RS\360\006\000\n\013\330\010\013\210;\320\0266\260e\2701\330\010\013\210=\230\001\330\010\013\210?\230!\330\010\025\320\0255\260Q\260m\300>\320QR\330\004\025\220Q\220a\340\004\007\200s\210+\220S\230\001\330\010\017\210q\340\004&\240f\250A\250W\260C\260~\300Y\320Nd\320dk\320kp\320pu\320uv\330\004\n\320\n\033\2301\340\t\n\330\010\025\320\0255\260Q\260m\300>\320QR\330\004\020\220\001\220\021\340\004\013\2101\320\0005\260Q\360\030\000\n\013\330\010\025\320\025'\240q\250\010\260\010\270\001\270\021\330\004\020\220\001\220\021\330\004""\013\2105\220\001\200\001\360\022\000\n\013\330\010\025\320\0255\260Q\260h\270h\320Fb\320bc\330\004\020\220\001\220\021\200\001\360$\000\n\013\330\010\025\320\0255\260Q\260h\270h\300l\320RX\320Xb\320bj\320jk\320k{\320{|\320|}\330\004\020\220\001\220\021\330\004\014\320\014\034\230A\200\001\360\022\000\005\020\210{\320\032;\2705\300\001\330\t\n\330\010\025\320\025.\250a\250x\260x\320?W\320WX\330\004\020\220\001\220\021\200\001\360\024\000\005\t\210\013
\320\023;\2705\300\001\330\004\037\230q\330\t\n\330\010\025\320\025.\250a\250q\260\006\260a\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\004\250A\200\001\360\024\000\005=\320<[\320[\\\330\0046\3206_\320_o\320ox\320xy\340\t\n\330\010\013\210;\320\026D\300E\310\021\330\010\013\320\013\037\230q\330\010\013\320\013 \240\001\330\010\013\320\013 \240\001\330\010\025\320\025>\270a\270x\300x\310q\330\004\025\220Q\220a\340\004\007\200s\320\n\034\230C\230q\330\010\017\210q\340\004;\320;X\320XY\320Y\\\320\\]\330\004\024\320\024,\250A\340\t\n\330\010\025\320\025>\270a\270x\300x\310q\330\004\020\220\001\220\021\340\004\013\2101\200\001\360\020\000\005\"\240\021\240!\330\t\n\330\010\025\320\0256\260a\3207K\310?\320Zi\320ip\320pq\330\004\025\220Q\220a\330\004\007\200u\210A\210S\220\003\2201\330\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004\037\230v\240Q\240g\250U\260!\2606\270\031\320BX\320X_\320_d\320di\320ij\330\004\"\320\"3\2604\260q\330\t\n\330\010\025\320\0256\260a\3207K\310?\320Zi\320ip\320pq\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\0046\260a\260v\270Q\200\001\360\020\000\n\013\330\010\025\320\0256\260a\260x\270q\330\004\020\220\001\220\021\200\001\360\024\000\n\013\330\010\025\320\0256\260a\260x\270x\320G_\320_o\320ou\320uv\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0256\260a\260x\270x\300q\330\004\020\220\001\220\021\200\001\360 
\000\n\013\330\010\025\320\0256\260a\260x\270x\300q\310\014\320TU\320UV\330\004\020\220\001\220\021""\330\004\014\210L\230\001\200\001\360\026\000\0057\3206K\3101\330\004:\320:Y\320Yd\320dx\360\000\000y\001B\002\360\000\000B\002C\002\330\t\n\330\010\025\320\0254\260A\260X\270X\300Q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005$\240?\260!\330\004'\320'@\300\013\3107\320R[\320[\\\330\t\n\330\010\025\320\025+\2501\250M\270\036\300q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005\"\240\030\250\021\330\004%\320%7\260{\300,\310i\320WX\330\t\n\330\010\025\320\0250\260\001\3201E\300_\320TU\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005\"\240\030\250\021\330\004%\320%7\260{\300,\310i\320WX\330\t\n\330\010\025\320\025*\250!\2508\2608\2701\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\0257\260q\270\010\300\010\310\001\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0257\260q\3208L\310O\320[\\\330\004\020\220\001\220\021\200\001\360\020\000\005*\250\021\250!\330\t\n\330\010\025\320\0257\260q\3208L\310O\320[j\320jy\320yz\330\004\025\220Q\220a\330\004+\320+=\270Q\270m\3101\310A\330\0046\3206R\320R]\320]i\320ir\320rs\330\004\007\200}\220A\220S\230\003\2301\330\010\017\210q\330\t\n\330\010\025\320\0257\260q\3208L\310O\320[j\320jy\320yz\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\0257\260q\270\010\300\010\320Hl\320lm\330\004\020\220\001\220\021\200\001\360 
\000\n\013\330\010\025\320\0257\260q\270\010\300\010\310\001\310\027\320PQ\320QR\330\004\020\220\001\220\021\330\004\014\210G\2201\200\001\360\024\000\005\"\240\021\240!\330\t\n\330\010\025\320\0257\260q\270\r\300^\320S_\320_e\320ef\330\004\025\220Q\220a\340\004\007\200u\210A\210S\220\003\2201\330\014\022\220!\2207\230%\230y\320(:\270'\300\025\300e\3104\310r\320QR\340\004+\2506\260\021\260'\270\025\270a\270v\300Y\320N`\320`g\320gl\320lq\320qr\330\t\n\330\010\025\320\0257\260q\270\r\300^\320S_\320_x\360\000\000y\001I\002\360\000\000I\002P\002\360\000\000P\002Q""\002\330\004\020\220\001\220\021\340\004\013\2101\200\001\360\020\000\005\"\240\021\240!\330\t\n\330\010\025\320\0257\260q\270\010\300\010\310\017\320W^\320^_\330\004\025\220Q\220a\330\004\007\200u\210A\210S\220\003\2201\330\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004&\240f\250A\250W\260E\270\021\270&\300\t\320I_\320_f\320fk\320kp\320pq\330\004)\320):\270+\300Q\330\t\n\330\010\025\320\0257\260q\270\010\300\010\310\017\320W^\320^_\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\0047\260q\270\006\270a\320\0007\260q\360\022\000\n\013\330\010\025\320\025.\250a\250q\260\001\330\004\020\220\001\220\021\330\004\013\2101\320\0007\260q\360\022\000\n\013\330\010\025\320\025/\250q\260\001\260\021\330\004\020\220\001\220\021\330\004\013\2101\320\0007\260q\360\026\000\005\010\200t\210:\220Q\220f\230A\330\010\016\210i\220q\230\001\330\004\036\230e\2405\250\007\250q\330\004\030\230\001\340\t\n\330\010\025\320\025.\250a\250}\270H\300A\300Q\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\026\000\0058\3207P\320PQ\330\004;\320;^\320^i\320iz\360\000\000{\001D\002\360\000\000D\002E\002\330\t\n\330\010\025\320\025;\2701\270H\300H\310A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360 
\000\n\013\330\010\025\320\025:\270!\2708\3008\3101\310L\320XY\320YZ\330\004\020\220\001\220\021\330\004\014\210E\220\034\230U\240!\200\001\360\022\000\n\013\330\010\025\320\025:\270!\2708\3008\310>\320YZ\330\004\020\220\001\220\021\200\001\360\026\000\005#\240.\260\001\330\004#\320#8\270\013\3007\310)\320ST\330\004\010\210\013\320\0230\260\005\260Q\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000\\\320\\]\360\030\000\n\013\330\010\025\320\0258\270\001\3209K\310>\320YZ\320Z[\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\n\013\330\010\025\320\0258\270\001\3209M\310Q\330\004\020\220\001\220\021\200\001\360\026\000\005'\320&8\270\001\330\004*\320*F\300k\320QX\320Xa""\320ab\330\t\n\330\010\025\320\0255\260Q\260h\270a\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\"\000\n\013\330\010\025\320\0258\270\001\270\030\300\030\320I_\320_`\320`|\320|}\320}~\330\004\020\220\001\220\021\330\004\014\320\014(\250\001\320\000[\320[\\\360\032\000\n\013\330\010\025\320\0258\270\001\270\030\300\031\310(\320R[\320[\\\320\\]\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000\\\320\\]\360\032\000\n\013\330\010\025\320\0258\270\001\270\030\300\030\310\026\310q\320PQ\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\030\000\005\033\230'\240\021\330\004\036\320\036/\250{\270&\300\t\310\021\330\t\n\330\010\025\320\0258\270\001\270\030\300\030\310\026\310q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\0059\3208P\320PQ\330\004<\320<^\320^i\320i|\360\000\000}\001F\002\360\000\000F\002G\002\330\t\n\330\010\025\320\0253\2601\260H\270H\300A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\0059\3208S\320ST\330\004/\320/T\320Td\320dm\320mn\360\006\000\n\013\330\010\013\210;\320\026@\300\005\300Q\330\010\013\320\013\"\240!\330\010\013\320\013 
\240\001\330\010\013\320\013\034\230A\330\010\025\320\025:\270!\330\014\032\230(\240!\340\004\025\220Q\220a\340\004\007\200s\320\n\037\230s\240!\330\010\017\210q\340\0043\3203L\310A\310S\320PQ\330\004\024\320\024'\240q\340\t\n\330\010\025\320\025:\270!\330\014\032\230(\240!\340\004\020\220\001\220\021\340\004\013\2101\200\001\360\024\000\0059\3208W\320WX\330\0046\3206\\\320\\h\320hq\320qr\340\t\n\330\010\013\210;\320\026D\300E\310\021\330\010\013\320\013 \240\001\330\010\013\320\013\034\230A\330\010\025\320\025>\270a\270x\300x\310q\330\004\025\220Q\220a\340\004\007\200s\320\n\035\230S\240\001\330\010\017\210q\340\0048\3208V\320VW\320WZ\320Z[\330\004\020\320\020#\2401\340\t\n\330\010\025\320\025>\270a\270x\300x\310q\330\004\020\220\001\220\021\340\004\013\2101\200\001\360 \000\n\013\330\010\025\320\025)\250\021\250(\260(\270!\2709\300A\300Q\330\004\020""\220\001\220\021\330\004\014\210E\220\031\230%\230q\200\001\360\024\000\n\013\330\010\025\320\025(\250\001\250\030\260\030\3209J\310+\320UV\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\0259\270\021\270(\300(\320J`\320`a\330\004\020\220\001\220\021\200\001\360\022\000\005\"\240\021\240!\330\t\n\330\010\025\320\0259\270\021\270(\300(\320J]\320]l\320ls\320st\330\004\025\220Q\220a\330\004\007\200u\210A\210S\220\003\2201\330\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004&\240f\250A\250W\260E\270\021\270&\300\t\320I_\320_f\320fk\320kp\320pq\330\004)\320):\270+\300Q\330\t\n\330\010\025\320\0259\270\021\270(\300(\320J]\320]l\320ls\320st\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\0259\270\021\270-\300~\320Up\320pq\330\004\020\220\001\220\021\200\001\360\026\000\005*\320)9\270\021\330\004&\240a\240q\330\004'\320'A\300\034\310Y\320VW\340\t\n\330\010\025\320\025.\250a\250x\260x\270u\300A\330\004\025\220Q\220a\340\004\013\2101\200\001\330\004,\250A\250V\2601\200\001\330\004$\240A\240V\2501\200\001\330\004<\270A\270V\3001\
200\001\360\026\000\005\033\230'\240\021\330\004\036\320\036/\250{\270&\300\t\310\021\330\t\n\330\010\025\320\025,\250A\250X\260X\270Q\330\004\020\220\001\220\021\330\004\013\2101\200A\360\016\000\t \230{\250(\260!\2601\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t$\240?\260(\270!\2701\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t2\3201N\310h\320VW\320WX\330\010\013\2104\210z\230\021\230&\240""\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\320\000A\300\021\360\030\000\n\013\330\010\025\320\0252\260!\2608\2708\3001\300A\330\004\020\220\001\220\021\330\004\013\2105\220\001\200A\360\016\000\t3\3202P\320PX\320XY\320YZ\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t%\320$4\260H\270A\270Q\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t#\240.\260\010\270\001\270\021\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\
220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t\033\230&\240\010\250\001\250\021\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t\020\210{\230!\2306\320!@\320@]\320]^\200A\360\016\000\t\020\210{\230!\2306\320!1\260\036\270q\200A\360\016\000\t\020\210{\230!\2306\320!?\320?[\320[\\\200A\360\016\000\t\020\210{\230!\2306\320!3\3203C\3001\200A\360\016\000\t\020\210{\230!\2306\320!4\3204E\300Q\200A\360\016\000\t\020\210{\230!\2306\320!5\3205G\300q\200A\360\016\000\t\020\210{\230!\2306\320!6\3206I\310\021\200A\360\016\000""\t\020\210{\230!\2306\320!7\3207K\3101\200A\360\016\000\t\020\210{\230!\2306\320!8\3208M\310Q\200A\360\016\000\t\020\210{\230!\2306\320!9\3209O\310q\200A\360\016\000\t\020\210{\230!\2306\320!A\320A_\320_`\200A\360\016\000\t\020\210{\230!\2306\320!B\320Ba\320ab\200A\360\016\000\t\020\210{\230!\2306\320!C\320Cc\320cd\200A\360\016\000\t\020\210{\230!\2306\320!D\320De\320ef\200A\360\016\000\t\020\210{\230!\2306\320!E\320Eg\320gh\200A\360\016\000\t\020\210{\230!\2306\320!F\320Fi\320ij\200A\360\016\000\t\020\210{\230!\2306\320!G\320Gk\320kl\200A\360\016\000\t\020\210{\230!\2306\320!H\320Hm\320mn\200A\360\016\000\t\020\210{\230!\2306\320!J\320Jq\320qr\200A\360\016\000\t\020\210{\230!\2306\320!K\320Ks\320st\200A\360\016\000\t\020\210{\230!\2306\320!M\320Mw\320wx\200A\360\016\000\t\020\210{\230!\2306\320!N\320Ny\320yz\200A\360\016\000\t\020\210{\230!\2306\240\037\260\r\270Q\200A\360\016\000\t\020\210{\230!\2306\320!:\320:Q\320QR\200A\360\016\000\t\020\210{\230!\2306\320!;\320;S\320ST\200A\360\016\000\t\020\210{\230!\2306\320!<\320<U\320UV\200A\360\016\000\t\020\210{\230!\2306\320!V\360\000\000W\001J\002\360\000\000J\002K\002\200
A\360\016\000\t\020\210{\230!\2306\320!=\320=W\320WX\200A\360\016\000\t\020\210{\230!\2306\320!>\320>Y\320YZ\200A\360\016\000\t\020\210{\230!\2306\320!Z\360\000\000[\001R\002\360\000\000R\002S\002\320\000A\300\021\360\030\000\n\013\330\010\025\320\025*\250!\2508\2608\2701\270A\330\004\020\220\001\220\021\330\004\013\2101\200A\360\016\000\t'\320&8\270\010\300\001\300\021\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t:\3209^\320^f\320fg\320gh\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022""\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330\010\020\220\004\220A\220X\230T\240\021\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0100\3200L\310H\320TU\320UV\330\010\013\2106\220\023\220A\330\014\017\210x\320\027=\270V\3001\300A\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027=\270Q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0101\3201N\310h\320VW\320WX\330\010\013\2106\220\023\220A\330\014\017\210x\320\027>\270f\300A\300Q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027>\270a\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\013\2109\220A\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330
\014\022\220*\230A\230Q\330\0101\3201N\310h\320VW\320WX\330\010\013\2106\220\023\220A\330\014\017\210x\320\027>\270f\300A\300Q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027>\270a\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0101\3201N\310h\320VW\320WX\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030D\300B\300f\310A\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010#\240?\260(\270!\2701\330\010\013\2106\220""\023\220A\330\014\017\210x\320\0270\260\006\260a\260q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0270\260\001\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010'\320':\270(\300!\3001\330\010\013\2106\220\023\220A\330\014\017\210x\320\0274\260F\270!\2701\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0274\260A\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\037\230{\250(\260!\2601\330\010\013\2106\220\023\220A\330\014\017\210x\320\027,\250F\260!\2601\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\2
10z\230\021\340\014\017\210x\320\027,\250A\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\037\230{\250(\260!\2601\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\0302\260\"\260F\270!\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010#\240?\260(\270!\2701\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\0306\260b\270\006\270a\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0102""\3202P\320PX\320XY\320YZ\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030E\300R\300v\310Q\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0103\3203R\320RZ\320Z[\320[\\\330\010\013\2106\220\023\220A\330\014\017\210x\320\027@\300\006\300a\300q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027@\300\001\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0103\3203R\320RZ\320Z[\320[\\\330\010\013\2106\220\023\220A\330\014\017\210x\320\027@\300\006\300a\300q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027@\300\001\330\014\017\210z\230\021\330\014\017\210z\230\
021\330\010\013\210=\230\001\330\010\013\2109\220A\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010$\320$4\260H\270A\270Q\330\010\013\2106\220\023\220A\330\014\017\210x\320\0271\260\026\260q\270\001\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0271\260\021\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010$\320$4\260H\270A\270Q\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\0307\260r\270\026\270q\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017""\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010*\320*@\300\010\310\001\310\021\330\010\013\2106\220\023\220A\330\014\017\210x\320\0277\260v\270Q\270a\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0277\260q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\032\230&\240\010\250\001\250\021\330\010\013\2106\220\023\220A\330\014\017\210x\320\027'\240v\250Q\250a\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027'\240q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\"\240.\260\010\270\001\270\021\330\010\013\2106\220\023\220A\330\014\017\210x\320\027/\250v\260Q\26
0a\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027/\250q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010%\320%6\260h\270a\270q\330\010\013\2106\220\023\220A\330\014\017\210x\320\0272\260&\270\001\270\021\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0272\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\033\2307""\240(\250!\2501\330\010\013\2106\220\023\220A\330\014\017\210x\320\027(\250\006\250a\250q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027(\250\001\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010&\320&8\270\010\300\001\300\021\330\010\013\2106\220\023\220A\330\014\017\210x\320\0273\2606\270\021\270!\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0273\2601\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010&\320&8\270\010\300\001\300\021\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\0309\270\022\2706\300\021\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\22
0D\230\005\230Q\230f\240A\340\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\0109\3209^\320^f\320fg\320gh\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030L\310B\310f\320TU\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010+\320+B\300(\310!\3101\330\010\013\2106\220\023\220A\330\014\017\210x\320\0278\270\006\270a\270q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0278\270\001\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021""\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010,\320,D\300H\310A\310Q\330\010\013\2106\220\023\220A\330\014\017\210x\320\0279\270\026\270q\300\001\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0279\270\021\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010,\320,D\300H\310A\310Q\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030?\270r\300\026\300q\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010-\320-F\300h\310a\310q\330\010\013\2106\220\023\220A\330\014\017\210x\320\027:\270&\300\001\300\021\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\
027:\270!\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010-\320-F\300h\310a\310q\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030@\300\002\300&\310\001\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010(\320(<\270H\300A\300Q\330\010\013\2106\220\023\220A\330\014\017\210x\320\0275\260V\2701\270A\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0275\260Q\330\014\017\210z\230\021\330\014\017\210z\230\021\330""\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010(\320(<\270H\300A\300Q\330\010\013\2106\220\023\220A\330\014\017\210x\320\0275\260V\2701\270A\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0275\260Q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\013\2109\220A\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010 
\240\014\250H\260A\260Q\330\010\013\2106\220\023\220A\330\014\017\210x\320\027-\250V\2601\260A\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027-\250Q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010(\320(<\270H\300A\300Q\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030;\2702\270V\3001\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\034\230H\240H\250A\250Q\330\010\013\2106\220\023\220A\330\014\017\210x\320\027)\250\026\250q\260\001\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027)\250\021\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010.\320.H\310\010\320PQ\320QR\330\010\013\2106\220\023\220A\330\014\017\210x\320\027;\2706\300\021\300!\330\014\017\210s\220&\230""\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027;\2701\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010/\320/J\310(\320RS\320ST\330\010\013\2106\220\023\220A\330\014\017\210x\320\027<\270F\300!\3001\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\
017\210x\320\027<\270A\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\013\2109\220A\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\"\240.\260\010\270\001\270\021\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\0305\260R\260v\270Q\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\032\230&\240\010\250\001\250\021\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030-\250R\250v\260Q\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\035\230Y\240h\250a\250q\330\010\013\2106\220\023\220A\330\014\017\210x\320\027*\250&\260\001\260\021\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027*\250!\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\035\230Y\240h\250a\250q\330\010\024\220N""\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\0300\260\002\260&\270\001\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010=\320=f\320fn\320no\320op\330\010\013\2106\220\023\220A\330\014\017\210x\320\027J\310&\320PQ\320QR\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027J\310!\330\014\017\210z\230\021\330\01
4\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010)\320)>\270h\300a\300q\330\010\013\2106\220\023\220A\330\014\017\210x\320\0276\260f\270A\270Q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\0276\260a\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010!\240\035\250h\260a\260q\330\010\013\2106\220\023\220A\330\014\017\210x\220~\240V\2501\250A\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\220~\240Q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010!\240\035\250h\260a\260q\330\010\013\2106\220\023\220A\330\014\017\210x\320\027.\250f\260A\260Q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027.\250a\330\014\017\210z\230\021\330""\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\031\230\025\230h\240a\240q\330\010\013\2106\220\023\220A\330\014\017\210x\220\177\240f\250A\250Q\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\220\177\240a\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\33
0\010)\320)>\270h\300a\300q\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023\2205\320\030<\270B\270f\300A\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010:\320:`\320`h\320hi\320ij\330\010\013\2106\220\023\220A\330\014\017\210x\320\027G\300v\310Q\310a\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027G\300q\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\013\2109\220A\330\010\017\210q\200A\330-A\300\021\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\036\230j\250\010\260\001\260\021\330\010\013\2106\220\023\220A\330\014\017\210x\320\027+\2506\260\021\260!\330\014\017\210s\220&\230\003\2301\330\020\026\220k\240\021\240!\330\014\022\220!\2208\2303\230h\240g\250U\260!\330\014\017\210z\230\021\330\014\017\210z\230\021\340\014\017\210x\320\027+\2501\330\014\017\210z\230\021\330\014\017\210z\230\021\330\010\013\210=\230\001\330\010\017\210q\200A\330+<\270A\360\020\000\t\014\2104\210s\220!\330\014\022\220*\230A\230Q\330\010\036\230j\250\010\260\001\260\021\330\010\024\220N\240/\3201M\310Q\330\010,\320,D\300A\330\014\023""\2205\320\0301\260\022\2606\270\021\330\010\017\210v\220X\230Q\230f\240G\2505\260\006\260a\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\360\000\000\001A\002\360\000\000A\002B\002\360\032\000\n\013\330\010\025\320\025G\300q\310\r\320Uc\320co\320op\320pq\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\014\000\005\"\240\033\250A\330\004,\320,A\300\031\310)\320ST\330\004\037\230{\250!\330\004*\320*?\270w\300i\310q\340\t\n\330\010\025\320\025'\240q\250\017\260q\340\004\020\220\001\220\021\330\004\014\210K\220q\200\001\360\026\000\005(\240}\260A\330\004+\320+B\300+\310]\320Zc\320cd\
330\t\n\330\010\025\320\025,\250A\250V\2606\270\021\330\004\020\220\001\220\021\330\004\013\2101\200A\360\016\000\t-\320,D\300H\310A\310Q\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t.\320-F\300h\310a\310q\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t)\320(<\270H\300A\300Q\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\330\010\014\210J\220a\330\010\014\210L\230\001\230\021\330\010\r\210Q\210k\230\025\230i\240q\250\001\200\001\360\020\000\005,\320+A\300\021\330\004/\320/O\310{\320Zb\320bk\320kl\330\t\n\330\010\025\320\0252\260!\2601\330\004\020\220\001\220\021\330\004\013\2101\320\000,\250A\360\022\000\n\013\330\010\025\320\025%\240Q\240a\240q\330\004\020\220\001\220\021\330\004\013\2101\320\000<""\270A\360\030\000\n\013\330\010\025\320\025-\250Q\250h\260h\270a\270q\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000\177\360\000\000@\002A\002\360\034\000\n\013\330\010\025\320\025=\270Q\270h\300h\310l\320Z}\360\000\000~\001I\002\360\000\000I\002J\002\360\000\000J\002K\002\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360 \000\n\013\330\010\025\320\025,\250A\320-?\270~\310Q\310k\320YZ\320Z[\330\004\020\220\001\220\021\330\004\014\210K\220q\200\001\360 
\000\n\013\330\010\025\320\025A\300\021\300(\310(\320RS\320S^\320^_\320_`\330\004\020\220\001\220\021\330\004\014\210K\220q\200\001\360\024\000\n\013\330\010\025\320\025A\300\021\300(\310(\320R`\320`i\320ij\330\004\020\220\001\220\021\200\001\360\026\000\005A\001\320@V\320VW\330\004D\320Dd\320do\360\000\000p\001M\002\360\000\000M\002V\002\360\000\000V\002W\002\330\t\n\330\010\025\320\025;\2701\270H\300H\310A\330\004\020\220\001\220\021\330\004\013\2101\320\000\\\320\\]\360\030\000\n\013\330\010\025\320\025A\300\021\300(\310,\320VW\320WX\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\022\000\n\013\330\010\025\320\025<\270A\270]\310.\320X|\320|}\330\004\020\220\001\220\021\200A\360\016\000\t\036\230Y\240h\250a\250q\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t*\320)>\270h\300a\300q\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\200A\360\016\000\t\037\230j\250\010\260\001\260\021\330\010\013\2104\210z\230\021\230&\240\006\240a\330\014\022\220)\2301\230A\330\010\013\2104\210v\220S\230\001\330\014\022\220*\230A\230Q\330\010\013\2104\210w\220c""\230\021\330\014\022\220*\230A\230Q\330\010\013\2109\220D\230\005\230Q\230f\240A\340\010\017\210q\320\000A\300\021\360\030\000\n\013\330\010\025\320\025/\250q\3200C\3006\310\021\310!\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\026\000\005?\320>]\320]^\330\004B\320Bk\320kv\360\000\000w\001I\002\360\000\000I\002R\002\360\000\000R\002S\002\330\t\n\330\010\025\320\025A\300\021\300(\310(\320RS\330\004\020\220\001\220\021\330\004\013\2101\320\000B\300!\360\030\000\n
\013\330\010\025\320\025+\2501\250H\260H\270A\270Q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\005\"\240\021\240!\330\t\n\330\010\025\320\025B\300!\3008\3108\320S_\320_e\320et\320tu\330\004\025\220Q\220a\330\004+\320+?\270q\300\005\300Q\300a\330\0046\3206T\320T_\320_i\320ir\320rs\330\004\007\200u\210A\210S\220\003\2201\330\010\017\210q\330\t\n\330\010\025\320\025B\300!\3008\3108\320S_\320_o\320o~\320~\177\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\025B\300!\3008\3108\320Sq\320qr\330\004\020\220\001\220\021\200\001\360\026\000\005B\001\320A^\320^_\330\0044\3204[\320[r\320r{\320{|\360\006\000\n\013\330\010\013\210;\320\026B\300%\300q\330\010\013\2109\220A\330\010\013\320\013\033\2301\330\010\025\320\025B\300!\300=\320P^\320^_\330\004\025\220Q\220a\340\004\007\200s\210'\220\023\220A\330\010\017\210q\340\004'\240v\250Q\250g\260S\270\n\300)\320Ka\320ah\320hm\320mr\320rs\330\004\033\320\033,\250A\340\t\n\330\010\025\320\025B\300!\300=\320P^\320^_\330\004\020\220\001\220\021\340\004\013\2101\200\001\360\026\000\005,\320+B\300!\330\004,\320,J\310+\320U\\\320\\e\320ef\330\004\010\210\013\320\0239\270\025\270a\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\021\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\004C\3001\300F\310!\320\000C\3001\360\030\000\n\013\330\010\025\320\0254\260A\260X\270X\300Q\300a\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000C\3001\360\030\000\n\013\330\010\025\320\025,\250A\250X""\260X\270Q\270a\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\024\000\005\010\200}\220C\220q\330\010\023\2206\230\021\230'\240\025\240i\320/F\300g\310U\320RW\320W[\320[]\320]^\330\004\"\240&\250\001\250\027\3200@\300\t\320I`\320`g\320gl\320lq\320qr\330\004&\320&8\270\007\270q\330\t\n\330\010\025\320\0258\270\001\270\030\300\030\310\036\320Wd\320dy\320yz\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\005\010\200}\220C\220q\330\010\023\2206\230\021\230'
\240\025\240i\320/F\300g\310U\320RW\320W[\320[]\320]^\330\004\"\240&\250\001\250\027\3200@\300\t\320I`\320`g\320gl\320lq\320qr\330\004&\320&8\270\007\270q\330\t\n\330\010\025\320\025-\250Q\250h\260h\270n\310A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005/\320.D\300A\330\0042\3202R\320R]\320]h\320hq\320qr\330\t\n\330\010\025\320\0258\270\001\270\030\300\030\310\021\330\004\020\220\001\220\021\330\004\013\2101\320\000D\300A\360\030\000\n\013\330\010\025\320\0254\260A\260X\270X\300Q\300a\330\004\020\220\001\220\021\330\004\013\2101\320\000D\300A\360\030\000\n\013\330\010\025\320\025,\250A\250X\260X\270Q\270a\330\004\020\220\001\220\021\330\004\013\2101\320\000D\300A\360\030\000\n\013\330\010\025\320\025-\250Q\250h\260h\270a\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000D\300A\360\030\000\n\013\330\010\025\320\025.\250a\250x\260x\270q\300\001\330\004\020\220\001\220\021\330\004\013\210>\230\021\200\001\360\020\000\n\013\330\010\025\320\025E\300Q\320Fu\320uv\330\004\020\220\001\220\021\320\000E\300Q\360\030\000\n\013\330\010\025\320\025-\250Q\250h\260a\260q\330\004\020\220\001\220\021\330\004\013\210:\220Q\320\000E\300Q\360\030\000\n\013\330\010\025\320\025-\250Q\250h\260h\270a\270q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\025E\300Q\300h\310h\320Vt\320tu\330\004\020\220\001\220\021\200\001\360\026\000\005\035\230F\240!\330\004 \320 
0\260\013\2709\300I\310Q\330\t\n\330\010\025\320\025A\300\021\300(\310(\320RS\330\004""\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\005.\320-F\300a\330\0041\3201T\320T_\320_f\320fo\320op\330\004\010\210\013\320\023;\2705\300\001\330\t\n\330\010\025\320\0259\270\021\270(\300(\310)\320ST\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\005.\320-F\300a\330\0041\3201T\320T_\320_f\320fo\320op\330\004\010\210\013\320\023;\2705\300\001\330\t\n\330\010\025\320\025=\270Q\270h\300h\310l\320Z[\330\004\020\220\001\220\021\330\004\013\2101\320\000F\300a\360\022\000\n\013\330\010\025\320\025;\2701\270A\270Q\330\004\020\220\001\220\021\330\004\013\2101\320\000F\300a\360\026\000\005\010\200t\210:\220Q\220l\240!\330\010\016\210i\220q\230\001\330\004$\240E\250\033\260G\2701\330\004\036\230a\340\t\n\330\010\025\320\0255\260Q\260m\300>\320QR\320RS\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\026\000\005,\250>\270\021\330\004/\320/G\300{\320Rb\320bk\320kl\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000G\300q\360\030\000\n\013\330\010\025\320\025/\250q\260\010\270\010\300\001\300\021\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005\035\230H\240A\330\004 \320 
2\260+\270W\300I\310Q\330\t\n\330\010\025\320\025(\250\001\250\026\250v\260Q\330\004\020\220\001\220\021\330\004\013\2101\320\000H\310\001\360\030\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300!\330\004\020\220\001\220\021\330\004\013\2101\320\000H\310\001\360\030\000\n\013\330\010\025\320\0252\260!\3203J\310,\320VW\320WX\330\004\020\220\001\220\021\330\004\013\2101\360\000\000\001H\002\360\000\000H\002I\002\360\036\000\n\013\330\010\025\320\0254\260A\260X\270X\320EW\320Wc\320ct\360\000\000u\001C\002\360\000\000C\002T\002\360\000\000T\002c\002\360\000\000c\002d\002\360\000\000d\002e\002\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005(\320':\270!\330\004+\320+H\310\013\320SZ\320Zc\320cd\330\t\n\330\010\025\320\0252\260!\3203D\320DV\320VW\330\004\020\220\001\220""\021\330\004\013\2101\200\001\360\026\000\005,\320+@\300\001\330\004,\320,H\310\013\320S\\\320\\e\320ef\330\004\n\210+\320\0259\270\025\270a\330\t\n\330\010\025\320\0254\260A\260X\270X\300Q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005+\320*:\270!\330\004.\320.H\310\013\320S`\320`i\320ij\330\t\n\330\010\025\320\025/\250q\260\010\270\010\300\001\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005,\250?\270!\330\004/\320/H\310\013\320Sb\320bk\320kl\330\t\n\330\010\025\320\0256\260a\3207K\310?\320Z[\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\005\"\240\021\240!\330\t\n\330\010\025\320\025H\310\001\310\035\320Vd\320dp\320pv\360\000\000w\001F\002\360\000\000F\002G\002\330\004\025\220Q\220a\330\004/\320/G\300q\310\005\310Q\310a\330\004:\320:\\\320\\g\320gq\320qz\320z{\330\004\007\200u\210A\210S\220\003\2201\330\010\017\210q\330\t\n\330\010\025\320\025H\310\001\310\035\320Vd\320dp\360\000\000q\001A\002\360\000\000A\002P\002\360\000\000P\002Q\002\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\025I\310\021\310(\320RZ\360\000\000[\001I\002\360\000\000I\002J\002\330\004\020\220\00
1\220\021\200\001\360\020\000\005'\320&:\270!\330\004+\320+I\310\025\310i\320WX\360\006\000\n\013\330\010\013\210;\320\0269\270\025\270a\330\010\013\2108\2201\330\010\013\320\013\"\240!\330\010\013\210=\230\001\330\010\025\320\025:\270!\2708\3008\3101\330\004\025\220Q\220a\340\004\007\200s\320\n\037\230s\240!\330\010\017\210q\340\004\020\320\020 \240\001\240\023\240A\330\004\t\210\036\220q\340\t\n\330\010\025\320\025:\270!\2708\3008\3101\330\004\020\220\001\220\021\320\000J\310!\360\030\000\n\013\330\010\025\320\0252\260!\2608\2701\270A\330\004\020\220\001\220\021\330\004\013\210:\220Q\320\000J\310!\360\030\000\n\013\330\010\025\320\025/\250q\3200D\300O\320ST\320TU\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\026\000\005%\240J\250a\330\004(\320(<\270K\300}\320T]\320]^\330\t\n\330""\010\025\320\0250\260\001\260\030\270\030\300\021\330\004\020\220\001\220\021\330\004\013\2101\320\000K\3101\360\030\000\n\013\330\010\025\320\0252\260!\2608\2708\3001\300A\330\004\020\220\001\220\021\330\004\013\2101\320\000K\3101\360\030\000\n\013\330\010\025\320\0253\2601\260H\270H\300A\300Q\330\004\020\220\001\220\021\330\004\013\2101\320\000K\3101\360\032\000\n\013\330\010\025\320\025)\250\021\250(\260(\270&\300\001\300\021\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000K\3101\360\032\000\n\013\330\010\025\320\025*\250!\2508\2609\270H\300I\310Q\310a\330\004\020\220\001\220\021\330\004\013\2101\320\000L\310A\360\030\000\n\013\330\010\025\320\025;\2701\270H\300H\310A\310Q\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000L\310A\360\032\000\n\013\330\010\025\320\025+\2501\250H\260H\270L\310\006\310a\310q\330\004\020\220\001\220\021\330\004\013\2101\320\000L\310A\360\030\000\n\013\330\010\025\320\0253\2601\260H\270H\300A\300Q\330\004\020\220\001\220\021\330\004\013\2101\320\000L\310A\360\030\000\n\013\330\010\025\320\0254\260A\260X\270X\300Q\300a\330\004\020\220\001\220\021\330\004\013\2101\320\000M\310Q\360\032\000\n\013\330\010\025\320\0250\260
\001\260\030\270\030\320AQ\320Q[\320[\\\320\\]\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360 \000\n\013\330\010\025\320\025)\250\021\250(\260(\270!\270>\310\021\310!\330\004\020\220\001\220\021\330\004\014\210N\230!\200\001\360\020\000\005!\240\001\240\021\330\t\n\330\010\025\320\025)\250\021\320*<\270N\310&\320P_\320_`\330\004\025\220Q\220a\330\004\007\200t\2101\210C\210s\220!\330\010\017\210q\330\004#\2405\250\001\250\024\250Q\250a\330\004!\240\021\330\t\n\330\010\025\320\025)\250\021\320*<\270N\320J[\320[j\320jk\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\320\000N\310a\360\030\000\n\013\330\010\025\320\0256\260a\260x\270x\300q\310\001\330\004\020\220\001\220\021\330\004\013\2101\320\000N\310a\360\030\000\n\013\330\010\025\320\025,\250A\320-A\300\037\320PQ\320QR""\330\004\020\220\001\220\021\330\004\013\210>\230\021\200\001\360\030\000\005%\240O\2601\330\004(\320(A\300\033\310H\320T]\320]^\330\t\n\330\010\025\320\0251\260\021\260(\270(\300%\300q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\005%\240O\2601\330\004(\320(A\300\033\310H\320T]\320]^\330\t\n\330\010\025\320\0257\260q\3208L\310O\320[`\320`a\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005-\250O\2701\330\004-\320-C\300;\320N^\320^g\320gh\330\004\021\220\033\320\034:\270%\270q\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000O\310q\360\032\000\n\013\330\010\025\320\025-\250Q\250h\260h\270f\300A\300Q\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000P\320PQ\360\030\000\n\013\330\010\025\320\0257\260q\270\010\300\010\310\001\310\021\330\004\020\220\001\220\021\330\004\013\2101\320\000P\320PQ\360\032\000\n\013\330\010\025\320\025.\250a\250x\260x\270|\3106\320QR\320RS\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005<\320;P\320PQ\330\004?\320?^\320^i\360\000\000j\001C\002\360\000\000C\002L\002\360\000\000L\002M\002\330\t\n\330\010\025\320\0254\260A\260X\27
0X\300Q\330\004\020\220\001\220\021\330\004\013\2101\360\000\000\001P\002\360\000\000P\002Q\002\360\034\000\n\013\330\010\025\320\025F\300a\300}\320Tb\320bn\360\000\000o\001V\002\360\000\000V\002a\002\360\000\000a\002b\002\360\000\000b\002c\002\330\004\020\220\001\220\021\330\004\013\210:\220Q\320\000Q\320QR\360\030\000\n\013\330\010\025\320\0256\260a\3207K\310?\320Z[\320[\\\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000Q\320QR\360\030\000\n\013\330\010\025\320\0258\270\001\270\030\300\030\310\021\310!\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005\036\230Q\340\t\n\330\010\025\320\0250\260\001\3201E\300_\320T_\320_`\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\330\004\n\210+\220Q\200\001\360\026\000\005!\240\r\250Q\330\004!\320!5\260[\300""\006\300i\310q\330\004\007\200{\320\022.\250e\2601\330\t\n\330\010\025\320\025,\250A\250X\260X\270Q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005\036\230Q\340\t\n\330\010\025\320\025,\250A\320-A\300\037\320PV\320VW\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\022\000\005\036\230Q\360\006\000\n\013\330\010\025\320\025,\250A\320-A\300\037\320PV\320V\\\320\\]\330\004\020\220\001\220\021\330\004\014\210G\320\023(\250\001\250\027\260\010\270\001\270\021\200\001\360\026\000\005+\320*=\270Q\330\004+\320+E\300[\320PZ\320Zc\320cd\330\004\013\210;\320\0268\270\005\270Q\330\t\n\330\010\025\320\0258\270\001\3209M\310_\320\\]\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005\036\230Q\340\t\n\330\010\025\320\025(\250\001\320);\270>\320IY\320Yh\320hi\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\020\000\005Q\001\320Py\320yz\330\004Q\360\000\000R\001B\002\360\000\000B\002M\002\360\000\000M\002g\002\360\000\000g\002p\002\360\000\000p\002q\002\330\004\033\230;\320&^\320^c\320cd\330\t\n\330\010\025\320\025E\300Q\300a\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\n\013\330\0
10\025\320\025-\250Q\320.Q\320QR\330\004\020\220\001\220\021\200\001\360\020\000\n\013\330\010\025\320\025-\250Q\320.R\320RS\330\004\020\220\001\220\021\200\001\360\026\000\005)\250\r\260Q\330\004)\320)=\270[\310\016\320V_\320_`\330\004\017\210{\320\0326\260e\2701\330\t\n\330\010\025\320\025,\250A\250X\260X\270Q\330\004\020\220\001\220\021\330\004\013\2101\320\000=\270Q\360\030\000\n\013\330\010\025\320\025'\240q\250\010\260\010\270\001\270\021\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\005&\240Q\240a\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\033\310A\330\004\025\220Q\220a\330\004\007\200y\220\001\220\023\220C\220q\330\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004&\240f\250A\250W\260K\270q\300\006\300i\320Oe\320el\320lq""\320qv\320vw\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\033\320L`\320`k\320kl\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\005&\240Q\240a\330\t\n\330\010\025\320\025-\250Q\250h\260h\270k\310\021\330\004\025\220Q\220a\330\004\007\200y\220\001\220\023\220C\220q\330\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004(\250\006\250a\250w\260k\300\021\300&\310\t\320Qg\320gn\320ns\320sx\320xy\330\t\n\330\010\025\320\025-\250Q\250h\260h\270k\320I_\320_l\320lm\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\004-\250Q\250f\260A\200\001\360\020\000\n\013\330\010\025\320\025-\250Q\250h\260a\330\004\020\220\001\220\021\320\000*\250!\360\030\000\n\013\330\010\025\320\025-\250Q\250h\260h\270a\270q\330\004\020\220\001\220\021\330\004\013\210>\230\021\320\000\\\320\\]\360\030\000\n\013\330\010\025\320\025=\270Q\270h\300h\310a\310q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\024\000\n\013\330\010\025\320\025-\250Q\250h\260h\270a\270q\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\010\260\001\200\001\360\024\000\n\013\330\010\025\320\025-\250Q\250h\260h\270e\3001\330\004\020\220\001\220\
021\200\001\360\022\000\n\013\330\010\025\320\025%\240Q\240h\250h\260k\300\021\330\004\020\220\001\220\021\200\001\360\024\000\n\013\330\010\025\320\025-\250Q\250h\260h\270m\310:\320UV\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\025-\250Q\250h\260h\270n\310A\330\004\020\220\001\220\021\200\001\360\024\000\n\013\330\010\025\320\025-\250Q\250h\260h\270n\310N\320Z[\330\004\020\220\001\220\021\200\001\360\020\000\005*\250\021\250!\330\t\n\330\010\025\320\025-\250Q\250h\260h\270o\310_\320\\]\330\004\025\220Q\220a\330\004'\240~\260Q\260m\3001\300A\330\0042\3202J\310+\320Ua\320aj\320jk\330\004\007\200}\220A\220S\230\003\2301\330\010\017\210q\330\t\n\330\010\025\320\025-\250Q\250h\260h\270o\310_\320\\]\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\n\013\330\010""\025\320\025%\240Q\240j\260\001\330\004\020\220\001\220\021\320\000R\320RS\360\032\000\n\013\330\010\025\320\025+\2501\250F\260&\270\006\270a\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000R\320RS\360\030\000\n\013\330\010\025\320\0259\270\021\270(\300(\310!\3101\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005<\320;R\320RS\330\004<\320<Z\320Ze\320e|\360\000\000}\001F\002\360\000\000F\002G\002\330\004\030\230\013\320#I\310\025\310a\330\t\n\330\010\025\320\0257\260q\270\r\300^\320ST\330\004\020\220\001\220\021\330\004\013\2101\320\000S\320ST\360\030\000\n\013\330\010\025\320\0255\260Q\260h\270h\300a\300q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005;\320:S\320ST\330\004;\320;[\320[f\320fz\360\000\000{\001D\002\360\000\000D\002E\002\330\004\025\220[\320 
H\310\005\310Q\330\t\n\330\010\025\320\0258\270\001\270\030\300\030\310\021\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\024\000\005\010\200~\220S\230\001\330\010\023\2206\230\021\230'\240\025\240i\320/F\300g\310U\320RW\320W[\320[]\320]^\330\004#\2406\250\021\250'\3201B\300)\320Kb\320bi\320in\320ns\320st\330\004'\320'9\270\030\300\021\330\t\n\330\010\025\320\0250\260\001\260\030\270\030\300\037\320P^\320^s\320st\330\004\020\220\001\220\021\330\004\013\2101\320\000T\320TU\360\032\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\320AS\320S\\\320\\]\320]^\330\004\020\220\001\220\021\330\004\013\2101\320\000T\320TU\360\030\000\n\013\330\010\025\320\025:\270!\2708\3008\3101\310A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005=\320<T\320TU\330\004=\320=\\\320\\g\320g~\360\000\000\177\001H\002\360\000\000H\002I\002\330\004\030\230\013\320#J\310%\310q\330\t\n\330\010\025\320\0257\260q\270\010\300\010\310\001\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005=\320<T\320TU\330\004=\320=\\\320\\g\320g~\360\000\000\177\001H\002\360\000\000H\002I\002\330\004\030\230\013\320#J\310%\310q\330\t\n\330\010\025\320\025<\270A\270]""\310.\320XY\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\0170\260\004\260A\260W\270K\300w\310a\340\010\017\320\0170\260\004\260A\260W\270K\300q\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\0171\260\024\260Q\260g\270[\310\007\310q\340\010\017\320\0171\260\024\260Q\260g\270[\310\001\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\23
0\001\330\004\007\200q\330\010\017\320\0173\2604\260q\270\007\270{\310'\320QR\340\010\017\320\0173\2604\260q\270\007\270{\310!\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017+\2504\250q\260\007\260{\300'\310\021\340\010\017\320\017+\2504\250q\260\007\260{\300!\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\0175\260T\270\021\270'\300\033\310G\320ST\340\010\017\320\0175\260T\270\021\270'\300\033\310A\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\0176\260d\270!\2707\300+\310W\320TU\340\010\017\320\0176\260d\270!\2707\300+\310Q\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330""\004\007\200q\330\010\017\320\0179\270\024\270Q\270g\300[\320PW\320WX\340\010\017\320\0179\270\024\270Q\270g\300[\320PQ\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017,\250D\260\001\260\027\270\013\3007\310!\340\010\017\320\017,\250D\260\001\260\027\270\013\3001\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017F\300d\310!\3107\320R]\320]d\320de\340\010\017\320\017F\300d\310!\3107\320R]\320]^\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\
220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017:\270$\270a\270w\300k\320QX\320XY\340\010\017\320\017:\270$\270a\270w\300k\320QR\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017*\250$\250a\250w\260k\300\027\310\001\340\010\017\320\017*\250$\250a\250w\260k\300\021\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017>\270d\300!\3007\310+\320U\\\320\\]\340\010\017\320\017>\270d\300!\3007\310+\320UV\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017?\270t\3001\300G\310;\320V]\320]^\340\010\017\320\017?\270t\3001\300G\310;\320VW\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q""\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017/\250t\2601\260G\270;\300g\310Q\340\010\017\320\017/\250t\2601\260G\270;\300a\200\001\360\010\000\005\016\210T\220\021\330\004\014\210G\2201\220F\230,\240a\330\004\007\200v\210W\220E\230\024\230Q\330\010\022\220!\330\010\027\220q\340\010\030\230\001\330\004\007\200q\330\010\017\320\017'\240t\2501\250G\260;\270g\300Q\340\010\017\320\017'\240t\2501\250G\260;\270a\320\000U\320UV\360\030\000\n\013\330\010\025\320\0252\260!\3203E\300^\320ST\320TU\330\004\020\220\001\220\021\330\004\013\2101\320\000V\320VW\360\030\000\n\013\330\010\025\320\0253\2601\3204F\300n\320TU\320UV\330\004\020\220\001\220\021\330\004\013\2101\320\000V\320VW\360\030\000\n\013\330\010\025\320\0258\270\001\270\030\300\030\310\021\310!\330\004\020\220\0
01\220\021\330\004\013\2101\320\000V\320VW\360\030\000\n\013\330\010\025\320\025=\270Q\270h\300h\310a\310q\330\004\020\220\001\220\021\330\004\013\2101\320\000V\320VW\360\032\000\n\013\330\010\025\320\025-\250Q\250h\260h\270e\3001\300A\330\004\020\220\001\220\021\330\004\013\2101\320\000V\320VW\360\030\000\n\013\330\010\025\320\025>\270a\270x\300x\310q\320PQ\330\004\020\220\001\220\021\330\004\013\2101\320\000W\320WX\360\032\000\n\013\330\010\025\320\0251\260\021\260(\270(\300$\300a\300q\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\026\000\005>\320=W\320WX\330\004>\320>_\320_j\360\000\000k\001A\002\360\000\000A\002J\002\360\000\000J\002K\002\330\004\027\220{\320\"K\3105\320PQ\330\t\n\330\010\025\320\0253\2601\260H\270H\300A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005\036\230X\240Q\330\004!\320!3\260;\270h\300i\310q\330\t\n\330\010\025\320\025(\250\001\250\026\250v\260Q\330\004\020\220\001\220\021\330\004\013\2101\320\000X\320XY\360\030\000\n\013\330\010\025\320\0255\260Q\3206J\310/\320YZ\320Z[\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005@\001\320?X\320XY\330\004@\320@`\320`k\360\000\000l\001E\002""\360\000\000E\002N\002\360\000\000N\002O\002\330\004\032\230+\320%I\310\025\310a\330\t\n\330\010\025\320\0259\270\021\270-\300~\320UV\330\004\020\220\001\220\021\330\004\013\2101\320\000X\320XY\360\030\000\n\013\330\010\025\320\025/\250q\3200B\300.\320PQ\320QR\330\004\020\220\001\220\021\330\004\013\2101\320\000X\320XY\360\030\000\n\013\330\010\025\320\025/\250q\3200D\300O\320ST\320TU\330\004\020\220\001\220\021\330\004\013\2101\320\000Y\320YZ\360\030\000\n\013\330\010\025\320\0251\260\021\3202D\300N\320RS\320ST\330\004\020\220\001\220\021\330\004\013\2101\320\000Y\320YZ\360\032\000\n\013\330\010\025\320\0252\260!\2608\2708\320CZ\320Zf\320fg\320gh\330\004\020\220\001\220\021\330\004\013\2101\320\000Y\320YZ\360\030\000\n\013\330\010\025\320\0255\260Q\3206H\310\016\320VW\320WX\330\004\020\220\001\220\021\330\0
04\013\2101\320\000Y\320YZ\360\030\000\n\013\330\010\025\320\0255\260Q\3206J\310/\320YZ\320Z[\330\004\020\220\001\220\021\330\004\013\2101\320\000Y\320YZ\360\032\000\n\013\330\010\025\320\025/\250q\260\010\270\010\300\006\300a\300q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\030\000\005\036\230Y\240a\330\004!\320!4\260K\270w\300i\310q\330\t\n\330\010\025\320\025(\250\001\250\032\2605\270\006\270a\330\004\020\220\001\220\021\330\004\013\2101\320\000Z\320Z[\360\032\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\025\300a\300q\330\004\020\220\001\220\021\330\004\013\2101\320\000Z\320Z[\360\030\000\n\013\330\010\025\320\025;\2701\270H\300H\310A\310Q\330\004\020\220\001\220\021\330\004\013\2101\320\000Z\320Z[\360\030\000\n\013\330\010\025\320\0256\260a\3207K\310?\320Z[\320[\\\330\004\020\220\001\220\021\330\004\013\2101\320\000Z\320Z[\360\030\000\n\013\330\010\025\320\0257\260q\3208L\310O\320[\\\320\\]\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005&\240Z\250q\330\004)\320)=\270[\310\016\320V_\320_`\330\004\017\210{\230!\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\200\001""\360\n\000\n\013\330\010\025\220[\240\001\330\004\020\220\001\220\021\200\001\360\n\000\n\013\330\010\025\220\\\240\021\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\025)\250\021\250(\260(\270.\310\001\330\004\020\220\001\220\021\200\001\330\004)\250\021\250&\260\001\320\000`\320`a\360\032\000\n\013\330\010\025\320\0251\260\021\260(\270(\300.\320Pd\320de\320ef\330\004\020\220\001\220\021\330\004\013\2101\320\000`\320`a\360\032\000\n\013\330\010\025\320\0252\260!\2608\2708\320CX\320Xc\320cd\320de\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005\"\240\035\250a\330\004\"\320\"6\260k\300\027\310\t\320QR\330\004\010\210\013\320\023/\250u\260A\330\t\n\330\010\025\320\025,\250A\250X\260X\270Q\330\004\020\220\001\220\021\330\004\013\2101\320\000`\320`a\360\032\000\n\013\330
\010\025\320\0257\260q\270\010\300\010\310\014\320T`\320`a\320ab\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\"\000\n\013\330\010\025\320\025.\250a\320/A\300\036\320O_\320_`\320`f\320fg\320gh\330\004\020\220\001\220\021\330\004\014\210F\220!\200\001\360\026\000\005*\320)>\270a\330\004*\320*F\300k\320QX\320Xa\320ab\330\004\010\210\013\320\0237\260u\270A\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005,\320+>\270a\330\004,\320,F\300k\320Q\\\320\\e\320ef\330\004\014\210K\320\0279\270\025\270a\330\t\n\330\010\025\320\025,\250A\320-?\270~\310Q\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005-\320,>\270a\330\004-\320-F\300k\320Q^\320^g\320gh\330\004\016\210k\320\031:\270%\270q\330\t\n\330\010\025\320\0252\260!\2608\2708\3001\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005#\240,\250a\330\004&\320&<\270K\300y\320PY\320YZ\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000>\270a\360\030\000\n\013\330\010\025\320\025.\250a\250x\260x\270q\300\001\330\004\020\220\001\220\021\330\004\013\2105\220\001\320""\000>\270a\360\030\000\n\013\330\010\025\320\025/\250q\260\010\270\010\300\001\300\021\330\004\020\220\001\220\021\330\004\013\2105\220\001\320\000a\320ab\360\032\000\n\013\330\010\025\320\0250\260\001\3201C\300>\320Qb\320bn\320no\320op\330\004\020\220\001\220\021\330\004\013\2101\320\000a\320ab\360\032\000\n\013\330\010\025\320\0256\260a\260x\270x\320G_\320_o\320op\320pq\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\n\013\330\010\025\320\025&\240a\240q\330\004\020\220\001\220\021\200\001\360\024\000\005'\240a\240q\330\t\n\330\010\025\320\0251\260\021\260(\270(\320BX\320X_\320_n\320nz\360\000\000{\001A\002\360\000\000A\002B\002\330\004\025\220Q\220a\330\004\007\200z\220\021\220#\220S\230\001\330\010\t\330\014\020\220\006\220a\220w\230e\2409\320,H\310\007\310u\320TY\320Y]\320]_\320_`\330\020
\026\220a\220w\230e\2409\320,H\310\007\310u\320TY\320Y]\320]_\320_`\340\004$\240F\250!\2507\260*\270A\270V\3009\320Lh\320ho\320ot\320ty\320yz\330\004%\240V\2501\250G\260:\270Q\270f\300I\320Mi\320ip\320pu\320uz\320z{\330\t\n\330\010\025\320\0251\260\021\260(\270(\320BX\320X_\320_n\320nz\360\000\000{\001Q\002\360\000\000Q\002Z\002\360\000\000Z\002a\002\360\000\000a\002w\002\360\000\000w\002A\003\360\000\000A\003B\003\330\004\020\220\001\220\021\330\004\014\210K\220q\200\001\360\020\000\n\013\330\010\025\320\025.\250a\250q\330\004\020\220\001\220\021\200\001\360\026\000\005&\240\\\260\021\330\004&\240a\240q\330\004#\320#9\270\034\300Y\310a\340\t\n\330\010\025\320\0250\260\001\3201E\300_\320TY\320YZ\330\004\025\220Q\220a\340\004\013\2101\200\001\360\020\000\005'\240a\240q\330\t\n\330\010\025\320\025<\270A\270X\300X\310_\320\\h\320hi\330\004\025\220Q\220a\330\004\035\230[\250\001\250\032\2601\260A\330\004(\320(=\270[\310\005\310Y\320VW\330\004\007\200z\220\021\220#\220S\230\001\330\010\017\210q\330\t\n\330\010\025\320\025<\270A\270X\300X\310_\320\\h\320hi\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\n\000\005'\240a\240q\330\t\n\330\010\025\320\025,\250A""\250_\270L\310\001\330\004\025\220Q\220a\330\004\"\240)\2501\250J\260a\260q\330\004-\320-@\300\013\310<\320W`\320`a\330\004\007\200z\220\021\220#\220S\230\001\330\010\017\210q\330\t\n\330\010\025\320\025,\250A\250_\270L\310\001\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\005'\240a\240q\330\t\n\330\010\025\320\025.\250a\250x\260x\320?U\320U\\\320\\k\320kw\320wx\330\004\025\220Q\220a\330\004\007\200z\220\021\220#\220S\230\001\330\010\023\2206\230\021\230'\240\025\240i\320/K\3107\320RW\320W\\\320\\`\320`b\320bc\330\004$\240F\250!\2507\260*\270A\270V\3009\320Lh\320ho\320ot\320ty\320yz\330\004-\320-D\300I\310Q\330\t\n\330\010\025\320\025.\250a\250x\260x\320?U\320U\\\320\\k\320kw\320wx\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005'\240a\240q\330\t\n\330\010\025\320\025?\2
70q\300\010\310\010\320P_\320_k\320kl\330\004\025\220Q\220a\330\004\035\230[\250\001\250\032\2601\260A\330\004(\320(=\270[\310\005\310Y\320VW\330\004\007\200z\220\021\220#\220S\230\001\330\010\017\210q\330\t\n\330\010\025\320\025?\270q\300\010\310\010\320P_\320_k\320kl\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\004.\250a\250v\260Q\200\001\360\020\000\n\013\330\010\025\320\025.\250a\250x\260q\330\004\020\220\001\220\021\200\001\360\024\000\005\"\240\021\240!\330\t\n\330\010\025\320\025.\250a\250x\260x\270|\3106\320QR\330\004\025\220Q\220a\340\004\007\200u\210A\210S\220\003\2201\330\014\022\220!\2207\230%\230y\320(:\270'\300\025\300e\3104\310r\320QR\340\004'\240v\250Q\250g\260U\270!\2706\300\031\320J\\\320\\c\320ch\320hm\320mn\330\t\n\330\010\025\320\025.\250a\250x\260x\270|\320K`\320`l\320ls\320st\330\004\020\220\001\220\021\340\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\025.\250a\250x\260x\320?Q\320QR\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\025.\250a\250x\260x\320?S\320ST\330\004\020\220\001\220\021\200\001\360(\000\n\013\330\010\025\320\025.\250a\250x\260x\270q\300\013\3101\310J\320VW\320Wc\320cd\320de""\330\004\020\220\001\220\021\330\004\014\210K\220z\240\034\250Q\200\001\360$\000\n\013\330\010\025\320\025.\250a\250x\260x\270q\300\017\310q\320P]\320]^\320^_\330\004\020\220\001\220\021\330\004\014\210O\230=\250\001\200\001\360\022\000\005\020\210\177\230a\230y\250\001\330\004\013\2108\2207\230!\320\000c\320cd\360\032\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\320AS\320Sa\320ab\320bc\330\004\020\220\001\220\021\330\004\013\2101\320\000c\320cd\360\032\000\n\013\330\010\025\320\0258\270\001\270\030\300\030\310\030\320QR\320RS\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\024\000\005?\320>d\320de\330\004=\320=m\320mx\360\000\000y\001B\002\360\000\000B\002C\002\340\t\n\330\010\013\210;\320\026K\3105\320PQ\330\010\013\210>\230\021\330\010\013\210;\220a\330\010\025\320\025E\300Q\300h\310h
\320VW\330\004\025\220Q\220a\340\0049\3209^\320^_\320_b\320bc\330\004\017\210{\230!\340\004\007\200s\210,\220c\230\021\330\010\017\210q\340\t\n\330\010\025\320\025E\300Q\300h\310h\320VW\330\004\020\220\001\220\021\340\004\013\2101\320\000g\320gh\360\032\000\n\013\330\010\025\320\025:\270!\270=\310\016\320VZ\320Z[\320[\\\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\026\000\005'\240k\260\021\330\004*\320*?\270{\310.\320Xa\320ab\330\t\n\330\010\025\320\0252\260!\2608\2708\3001\330\004\020\220\001\220\021\330\004\013\2101\320\000l\320lm\360\034\000\n\013\330\010\025\320\0252\260!\2608\2708\3006\320I\\\320\\h\320hi\320ij\330\004\020\220\001\220\021\330\004\013\2101\320\000l\320lm\360\032\000\n\013\330\010\025\320\0259\270\021\270-\300~\320Ua\320ab\320bc\330\004\020\220\001\220\021\330\004\013\210:\220Q\320\000p\320pq\360\034\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\320AS\320S_\320_p\320p~\320~\177\360\000\000@\002A\002\330\004\020\220\001\220\021\330\004\013\2101\320\000p\320pq\360\032\000\n\013\330\010\025\320\025>\270a\270x\300x\310|\320[\\\320\\]\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\004(\250\001\250\026\250q""\200\001\360\020\000\n\013\330\010\025\320\025/\250q\3200A\300\021\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\025/\250q\3200@\300\n\310.\320XY\330\004\020\220\001\220\021\320\000?\270q\360\030\000\n\013\330\010\025\320\0250\260\001\260\030\270\030\300\021\300!\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\022\000\005 \230q\340\t\n\330\010\025\320\0250\260\001\260\030\270\030\320AQ\320QY\320Yb\320bc\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\0230\260\010\270\001\270\021\330\004\007\200|\2207\230!\330\010?\270q\320@`\320`n\320no\330\004\013\2101\200\001\340\004\037\230q\320 
0\260\013\270;\300k\320QR\330\004\023\220?\240(\250!\2501\330\004\007\200|\2207\230!\330\0101\260\021\3202D\300N\320RS\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\0231\260\030\270\021\270!\330\004\007\200|\2207\230!\330\010@\300\001\320Ab\320bp\320pq\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\2206\230\030\240\021\240!\330\004\007\200|\2207\230!\330\010(\250\001\250\031\260.\300\001\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\220>\240\030\250\021\250!\330\004\007\200|\2207\230!\330\0100\260\001\3201B\300.\320PQ\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\023(\250\010\260\001\260\021\330\004\007\200|\2207\230!\330\0107\260q\3208P\320P^\320^_\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\023#\2408\2501\250A\330\004\007\200|\2207\230!\330\0102\260!\3203F\300n\320TU\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\023+\2508\2601\260A\330\004\007\200|\2207\230!\330\010:\270!\320;V\320Vd\320de\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\0238\270\010\300\001\300\021\330\004\007\200|""\2207\230!\330\010G\300q\320Hp\320p~\320~\177\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\2209\230H\240A\240Q\330\004\007\200|\2207\230!\330\010+\2501\250L\270\016\300a\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\023,\250H\260A\260Q\330\004\007\200|\2207\230!\330\010;\2701\320<X\320Xf\320fg\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\023%\240X\250Q\250a\330\004\007\200|\2207\230!\330\0104\260A\3205J\310.\320XY\330\004\013\2101\200\001\340\004\037\230q\320 
0\260\013\270;\300k\320QR\330\004\023\220:\230X\240Q\240a\330\004\007\200|\2207\230!\330\010,\250A\250]\270.\310\001\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\220;\230h\240a\240q\330\004\007\200|\2207\230!\330\010-\250Q\250n\270N\310!\330\004\013\2101\200\001\340\004\037\230q\320 0\260\013\270;\300k\320QR\330\004\023\320\023'\240x\250q\260\001\330\004\007\200|\2207\230!\330\0106\260a\3207N\310n\320\\]\330\004\013\2101\320\000?\270q\360\030\000\n\013\330\010\025\320\025(\250\001\250\030\260\030\270\021\270!\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005 \230q\340\t\n\330\010\025\320\0251\260\021\260(\270(\300-\310q\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\020\000\n\013\330\010\025\320\025/\250q\260\010\270\001\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\025/\250q\260\010\270\010\300\001\330\004\020\220\001\220\021\200\001\360\020\000\005(\240q\250\001\330\t\n\330\010\025\320\0254\260A\260X\270X\300V\310?\320Z[\330\004\025\220Q\220a\330\004\007\200{\220!\2203\220c\230\021\330\010\017\210q\330\004!\240\025\240a\240{\260!\2601\330\004\037\230q\330\t\n\330\010\025\320\0254\260A\260X\270X\300_\320Tc\320cd\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\020\000\005 \230q\340\t\n\330\010\025\320\0255\260Q\260h\270h\300i\310q\330\004\020\220\001\220""\021\330\004\022\320\022'\240q\250\001\200\001\360\020\000\005 
\230q\340\t\n\330\010\025\320\0257\260q\3208L\310O\320[d\320de\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\020\000\n\013\330\010\025\320\025'\240q\320(<\270A\330\004\020\220\001\220\021\200\001\360\026\000\005.\320-?\270q\330\004.\320.G\300{\320R`\320`i\320ij\330\004\017\210{\320\032;\2705\300\001\330\t\n\330\010\025\320\0253\2601\3204H\310\017\320WX\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\026\000\005.\320-?\270q\330\004.\320.G\300{\320R`\320`i\320ij\330\004\017\210{\320\032;\2705\300\001\330\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\2101\320\000[\320[\\\360\034\000\n\013\330\010\025\320\025'\240q\250\010\260\010\270\014\300L\320PZ\320Zd\320de\320ef\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\022\000\n\013\330\010\025\320\025?\270q\300\010\310\010\320PQ\330\004\020\220\001\220\021\200\001\360\022\000\n\013\330\010\025\320\025?\270q\300\010\310\010\320Pi\320ij\330\004\020\220\001\220\021\320\000*\250!\360\022\000\n\013\330\010\025\320\025'\240q\250\001\250\021\330\004\020\220\001\220\021\330\004\013\210:\220Q\200\001\360\020\000\005 \230q\340\t\n\330\010\025\320\025-\250Q\250e\2606\270\021\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\n\000\005 \230q\340\t\n\330\010\025\320\025-\250Q\250i\260q\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\026\000\005)\250\017\260q\330\004)\320)?\270{\310,\320V_\320_`\330\004\r\210[\320\0306\260e\2701\330\t\n\330\010\025\320\025-\250Q\250h\260h\270a\330\004\020\220\001\220\021\330\004\013\2101\200\001\330\004'\240q\250\006\250a\320\000?\270q\360\030\000\n\013\330\010\025\320\025.\250a\320/?\270z\310\021\310!\330\004\020\220\001\220\021\330\004\013\2105\220\001\200\001\360\020\000\005 \230q\340\t\n\330\010\025\320\025&\240a\240x\250x\260v\270Q\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360""\020\000\005 
\230q\340\t\n\330\010\025\320\025.\250a\250x\260x\270y\310\001\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\026\000\005\033\230'\240\021\330\004\036\320\036/\250{\270&\300\t\310\021\330\t\n\330\010\025\320\025'\240q\250\006\250f\260A\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\020\000\005 \230q\340\t\n\330\010\025\320\025(\250\001\250\030\260\030\270\030\300\021\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\022\000\005(\240q\250\001\330\t\n\330\010\025\320\025'\240q\250\016\260f\270O\310=\320XY\330\004\025\220Q\220a\330\004\007\200{\220!\2203\220c\230\021\330\010\023\2206\230\021\230'\240\025\240i\320/A\300\027\310\005\310U\320RV\320VX\320XY\330\004&\240f\250A\250W\260K\270q\300\006\300i\320Oa\320ah\320hm\320mr\320rs\330\t\n\330\010\025\320\025'\240q\250\016\260f\270O\310=\320Xh\320hs\320st\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\n\000\005 \230q\340\t\n\330\010\025\320\025/\250q\260\t\270\021\330\004\020\220\001\220\021\330\004\022\320\022'\240q\250\001\200\001\360\030\000\005\020\210q\220\002\220+\320\035:\270%\270q\330\004\017\210q\220\002\220.\320 
:\270!\340\t\n\330\010\025\320\025.\250a\250x\260x\270q\330\004\020\220\001\220\021\330\004\013\210;\220a\200\001\360\026\000\005\032\230\026\230q\330\004\032\230-\240{\260&\270\t\300\021\330\004\007\200{\320\022'\240u\250A\330\t\n\330\010\025\320\025%\240Q\240h\250h\260a\330\004\020\220\001\220\021\330\004\013\2101\320\000r\320rs\360\034\000\n\013\330\010\025\320\0254\260A\260X\270X\300V\320K`\320`i\320ij\320jk\330\004\020\220\001\220\021\330\004\013\2101\200\001\360\016\000\005\010\200u\210C\210q\330\010\023\2206\230\021\230'\240\025\240i\320/E\300W\310E\320QV\320VZ\320Z\\\320\\]\330\004\"\240&\250\001\250\027\260\010\270\t\320AW\320W^\320^c\320ch\320hi\360\010\000\n\013\330\010\025\320\025<\270A\330\014\024\220A\330\014\035\230W\240A\330\014\021\220\022\2201\340\004\020\220\001\220\021\330\004\013\2101\200\001\360\022""\000\n\013\330\010\025\320\025(\250\001\250\026\250v\260[\300\001\330\004\020\220\001\220\021\200\001\330*+\330\004\007\200w\210c\220\021\330\r\016\330\014\022\320\022%\240Q\240a\330\004\013\2107\220#\220Q\200\001\360\026\000\005 \230y\250\001\330\004#\320#6\260k\300\031\310)\320ST\330\004\n\210+\320\025-\250U\260!\330\t\n\330\010\025\320\025/\250q\260\010\270\010\300\001\330\004\020\220\001\220\021\330\004\013\2101O";
    PyObject *data = NULL;
    CYTHON_UNUSED_VAR(__Pyx_DecompressString);
    #endif
    PyObject **stringtab = __pyx_mstate->__pyx_string_tab;
    Py_ssize_t pos = 0;
    for (int i = 0; i < 2902; i++) {
      Py_ssize_t bytes_length = index[i].length;
      PyObject *string = PyUnicode_DecodeUTF8(bytes + pos, bytes_length, NULL);
      if (likely(string) && i >= 478) PyUnicode_InternInPlace(&string);
      if (unlikely(!string)) {
        Py_XDECREF(data);
        __PYX_ERR(0, 1, __pyx_L1_error)
      }
      stringtab[i] = string;
      pos += bytes_length;
    }
    for (int i = 2902; i < 3377; i++) {
      Py_ssize_t bytes_length = index[i].length;
      PyObject *string = PyBytes_FromStringAndSize(bytes + pos, bytes_length);
      stringtab[i] = string;
      pos += bytes_length;
      if (unlikely(!string)) {
        Py_XDECREF(data);
        __PYX_ERR(0, 1, __pyx_L1_error)
      }
    }
    Py_XDECREF(data);
    for (Py_ssize_t i = 0; i < 3377; i++) {
      if (unlikely(PyObject_Hash(stringtab[i]) == -1)) {
        __PYX_ERR(0, 1, __pyx_L1_error)
      }
    }
    #if CYTHON_IMMORTAL_CONSTANTS
    {
      PyObject **table = stringtab + 2902;
      for (Py_ssize_t i=0; i<475; ++i) {
        #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
        Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL);
        #else
        Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT);
        #endif
      }
    }
    #endif
  }
  {
    PyObject **numbertab = __pyx_mstate->__pyx_number_tab + 0;
    int8_t const cint_constants_1[] = {0,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127};
    int16_t const cint_constants_2[] = {128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,4096,5120,8192};
    int32_t const cint_constants_4[] = {136983863L,175497610L};
    for (int i = 0; i < 281; i++) {
      numbertab[i] = PyLong_FromLong((i < 129 ? cint_constants_1[i - 0] : (i < 279 ? cint_constants_2[i - 129] : cint_constants_4[i - 279])));
      if (unlikely(!numbertab[i])) __PYX_ERR(0, 1, __pyx_L1_error)
    }
  }
  #if CYTHON_IMMORTAL_CONSTANTS
  {
    PyObject **table = __pyx_mstate->__pyx_number_tab;
    for (Py_ssize_t i=0; i<281; ++i) {
      #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
      Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL);
      #else
      Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT);
      #endif
    }
  }
  #endif
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* #### Code section: init_codeobjects ### */
/* Compact, bit-packed description of a Python code object to be created by
 * __Pyx_PyCode_New().  Packing into a small struct lets the generated
 * __Pyx_CreateCodeObjects() pass each function's metadata by value as a
 * single braced initializer (see the {argcount, posonly, kwonly, nlocals,
 * flags, first_line} literals below).
 *
 * Bit widths cap the representable values (e.g. argcount <= 7,
 * nlocals <= 7, first_line <= 32767); the generator only emits this
 * narrow layout when every function in the module fits — values in this
 * file stay well inside those limits. */
typedef struct {
    unsigned int argcount : 3;        /* total positional parameters */
    unsigned int num_posonly_args : 1; /* positional-only parameter count */
    unsigned int num_kwonly_args : 1;  /* keyword-only parameter count */
    unsigned int nlocals : 3;          /* number of local variables (incl. args) */
    unsigned int flags : 10;           /* CO_* code-object flags (CO_OPTIMIZED etc.) */
    unsigned int first_line : 15;      /* first source line of the function */
} __Pyx_PyCode_New_function_description;
/* NewCodeObj.proto */
/* Build a PyCodeObject (as PyObject*) from the packed description plus the
 * interned name/filename strings and the encoded line table.
 *
 * varnames:        array of `nlocals` interned unicode variable names
 *                  (borrowed references; array length implied by descr).
 * filename/funcname: interned unicode objects from the module string table.
 * line_table:      bytes object holding the encoded location table
 *                  (the __pyx_kp_b_iso88591_* constants used by callers).
 * tuple_dedup_map: dict used to share identical tuples across code objects
 *                  created in one __Pyx_CreateCodeObjects() run.
 * Returns a new reference, or NULL with an exception set on failure
 * (callers `goto bad` on NULL). */
static PyObject* __Pyx_PyCode_New(
        const __Pyx_PyCode_New_function_description descr,
        PyObject * const *varnames,
        PyObject *filename,
        PyObject *funcname,
        PyObject *line_table,
        PyObject *tuple_dedup_map
);


static int __Pyx_CreateCodeObjects(__pyx_mstatetype *__pyx_mstate) {
  PyObject* tuple_dedup_map = PyDict_New();
  if (unlikely(!tuple_dedup_map)) return -1;
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1143};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_status, __pyx_mstate->__pyx_n_u_s};
    __pyx_mstate_global->__pyx_codeobj_tab[0] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_init, __pyx_mstate->__pyx_kp_b_iso88591_A_Ja_L_Qk_iq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[0])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1148};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[1] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_reduce, __pyx_mstate->__pyx_kp_b_iso88591_A_AXT, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[1])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1313};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_status};
    __pyx_mstate_global->__pyx_codeobj_tab[2] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_check_status, __pyx_mstate->__pyx_kp_b_iso88591_wc_Qa_7_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[2])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1321};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_status};
    __pyx_mstate_global->__pyx_codeobj_tab[3] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_check_status_size, __pyx_mstate->__pyx_kp_b_iso88591_0_woQ_q_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[3])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1517};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[4] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_QQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[4])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1526};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[5] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[5])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[6] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[6])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[7] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[7])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1717};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[8] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_33C1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[8])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1726};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[9] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_7_1_6_A_x_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[9])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[10] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[10])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[11] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[11])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1849};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[12] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_66I, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[12])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1858};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[13] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_F_1_s_1_k_83hg, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[13])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[14] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[14])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[15] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[15])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1993};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[16] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_1_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[16])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2002};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[17] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[17])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[18] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[18])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[19] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[19])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2150};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[20] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_44EQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[20])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2159};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[21] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Yhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[21])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[22] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[22])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[23] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[23])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2294};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[24] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_66I, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[24])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2303};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[25] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_j_6_A_x_6_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[25])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[26] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[26])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[27] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[27])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2461};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[28] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_1_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[28])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2479};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[29] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_N_1MQ_DA_5_2_F_vXQfG, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[29])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[30] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_D_7_D_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[30])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[31] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591__9, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[31])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2643};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[32] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_4HAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[32])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2661};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[33] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_N_1MQ_DA_5_7r_q_v, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[33])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[34] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_1_Qg_q_1_Qg, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[34])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[35] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_avQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[35])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2862};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[36] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_UUV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[36])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2871};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[37] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_6_A_x_1_q_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[37])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[38] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[38])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[39] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[39])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2982};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[40] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_SST, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[40])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 2991};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[41] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_vQa_s_1_k_83hgU_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[41])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[42] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[42])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[43] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[43])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3150};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[44] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_HHmmn, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[44])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3159};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[45] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_H_PQQR_6_A_x_6_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[45])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[46] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[46])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[47] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[47])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3293};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[48] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[48])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3311};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[49] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_N_1MQ_DA_5_5RvQ_vXQfG5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[49])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[50] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_gQ_t1G_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[50])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[51] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_AV1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[51])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3483};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[52] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[52])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3492};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[53] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[53])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[54] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[54])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[55] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[55])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3651};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[56] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[56])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3660};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[57] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[57])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[58] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[58])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[59] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[59])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3807};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[60] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_99Oq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[60])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3816};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[61] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[61])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[62] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[62])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[63] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[63])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3939};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[64] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[64])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3948};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[65] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_5V1A_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[65])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[66] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[66])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[67] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[67])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4082};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[68] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_1_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[68])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4100};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[69] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_N_1MQ_DA_5_6b_a_vXQf, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[69])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[70] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_0_AWKwa_0_AWK, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[70])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[71] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_QfA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[71])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4265};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[72] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_QQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[72])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4274};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[73] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_vQa_s_1_k_83hgU_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[73])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[74] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[74])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[75] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[75])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4409};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[76] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_UUV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[76])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4418};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[77] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[77])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[78] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[78])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[79] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[79])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4545};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[80] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[80])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4554};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[81] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[81])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[82] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[82])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[83] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[83])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4681};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[84] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_HHmmn, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[84])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4690};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[85] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_H_PQQR_6_A_x_6_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[85])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[86] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[86])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[87] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[87])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4872};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[88] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_DHAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[88])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4890};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[89] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_DHAQ_N_1MQ_DA_5_r_q_vX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[89])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[90] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_9_Qg_PWWX_9_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[90])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[91] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_6avQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[91])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5090};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[92] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_Fhaq_4z_a_1A_4vS_AQ_4wc_AQ_9D, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[92])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5108};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[93] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_N_1MQ_DA_5_vXQfG5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[93])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[94] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_awkQXXY_awkQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[94])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[95] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_7q_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[95])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5357};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[96] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_CCccd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[96])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5366};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[97] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[97])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[98] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[98])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[99] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[99])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5569};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[100] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_SST, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[100])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5578};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[101] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[101])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[102] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[102])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[103] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[103])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5725};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[104] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[104])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5734};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[105] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[105])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[106] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[106])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[107] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[107])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5857};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[108] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_EEggh, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[108])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5866};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[109] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_DHAQ_6_A_x_9_q_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[109])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[110] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[110])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[111] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[111])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5989};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[112] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[112])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 5998};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[113] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[113])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[114] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[114])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[115] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[115])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6156};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[116] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_AA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[116])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6165};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[117] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_5V1A_s_1_k_8_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[117])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[118] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[118])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[119] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[119])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6289};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[120] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[120])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6298};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[121] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_4F_1_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[121])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[122] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[122])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[123] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[123])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6526};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[124] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_21NhVWWX_4z_a_1A_4vS_AQ_4wc_AQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[124])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6544};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[125] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_N_1MQ_DA_5_DB, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[125])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[126] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_d_7_U_d_7_UV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[126])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[127] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_1F, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[127])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6661};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[128] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[128])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6670};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[129] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_4F_1_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[129])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[130] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[130])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[131] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[131])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6793};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[132] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[132])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6802};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[133] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[133])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[134] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[134])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[135] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[135])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6913};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[136] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[136])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 6922};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[137] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[137])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[138] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[138])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[139] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[139])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7104};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[140] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_haq_4z_a_1A_4vS_AQ_4wc_AQ_9D_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[140])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7122};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[141] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_N_1MQ_DA_5_BfA_vXQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[141])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[142] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_6d_7_WTU_6d_7, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[142])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[143] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_31F, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[143])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7239};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[144] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[144])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7248};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[145] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[145])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[146] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[146])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[147] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[147])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7359};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[148] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[148])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7368};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[149] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[149])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[150] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[150])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[151] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[151])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7567};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[152] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_FFiij, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[152])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7576};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[153] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[153])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[154] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[154])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[155] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[155])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7759};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[156] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_YYZ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[156])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7768};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[157] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6haq_6_A_x_2_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[157])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[158] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[158])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[159] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[159])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7951};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[160] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_YYZ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[160])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 7960};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[161] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6haq_6_A_x_2_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[161])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[162] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[162])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[163] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[163])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8084};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[164] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[164])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8093};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[165] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6haq_6_A_x_2_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[165])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[166] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[166])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[167] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[167])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8228};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[168] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_DDeef, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[168])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8237};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[169] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_7vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[169])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[170] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[170])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[171] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[171])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8361};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[172] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_GGkkl, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[172])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8370};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[173] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[173])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[174] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[174])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[175] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[175])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8528};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[176] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_KKsst, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[176])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8537};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[177] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_6_A_x_fAQ_s_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[177])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[178] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[178])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[179] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[179])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8670};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[180] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_Yhaq_4z_a_1A_4vS_AQ_4wc_AQ_9D, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[180])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8688};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[181] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Yhaq_N_1MQ_DA_5_0_vXQf, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[181])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[182] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_awk_awk, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[182])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[183] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_q_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[183])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8809};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[184] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_44EQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[184])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8818};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[185] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HHAQ_6_A_x_q_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[185])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[186] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[186])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[187] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[187])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8981};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[188] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_44EQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[188])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 8990};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[189] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HHAQ_6_A_x_q_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[189])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[190] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[190])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[191] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[191])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9141};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[192] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_33C1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[192])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9150};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[193] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_7_1_6_A_x_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[193])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[194] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[194])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[195] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[195])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9284};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[196] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_1_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[196])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9302};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[197] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_N_1MQ_DA_5_2_F_vXQfG, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[197])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[198] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_D_7_D_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[198])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[199] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591__9, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[199])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9455};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[200] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_55Gq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[200])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9464};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[201] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Yhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[201])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[202] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[202])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[203] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[203])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9636};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[204] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_SST, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[204])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9645};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[205] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[205])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[206] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[206])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[207] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[207])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9851};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[208] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_8_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[208])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9869};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[209] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_N_1MQ_DA_5_9_6_vXQfG, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[209])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[210] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_34q_QR_34q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[210])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[211] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_0_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[211])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 9998};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[212] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_44EQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[212])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10007};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[213] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HHAQ_6_A_x_q_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[213])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[214] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[214])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[215] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[215])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10261};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[216] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[216])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10279};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[217] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_N_1MQ_DA_5_5RvQ_vXQfG5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[217])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[218] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_gQ_t1G_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[218])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[219] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_AV1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[219])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10396};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[220] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_CCccd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[220])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10405};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[221] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[221])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[222] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[222])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[223] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[223])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10540};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[224] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_DDeef, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[224])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10549};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[225] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_7vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[225])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[226] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[226])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[227] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[227])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10708};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[228] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_JJqqr, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[228])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10717};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[229] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_00LHTUUV_6_A_x_V1A_s_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[229])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[230] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[230])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[231] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[231])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10840};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[232] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_EEggh, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[232])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 10849};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[233] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_7vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[233])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[234] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[234])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[235] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[235])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11004};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[236] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_GGkkl, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[236])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11013};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[237] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[237])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[238] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[238])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[239] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[239])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11196};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[240] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_NNyyz, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[240])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11205};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[241] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_33RRZZ_6_A_x_aq_s_1_k, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[241])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[242] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[242])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[243] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[243])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11328};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[244] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_Z_R_R_S, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[244])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11337};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[245] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_ffnnoop_6_A_x_J_PQQR_s, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[245])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[246] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[246])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[247] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[247])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11476};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[248] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_GGkkl, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[248])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11485};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[249] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[249])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[250] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[250])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[251] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[251])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11620};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[252] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[252])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11629};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[253] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[253])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[254] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[254])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[255] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[255])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11764};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[256] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[256])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11773};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[257] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[257])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[258] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[258])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[259] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[259])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11896};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[260] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_77K1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[260])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 11905};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[261] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_F_1_s_1_k_83hg, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[261])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[262] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[262])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[263] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[263])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12125};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[264] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_88MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[264])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12134};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[265] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[265])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[266] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[266])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[267] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[267])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12257};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[268] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_BBaab, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[268])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12266};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[269] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[269])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[270] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[270])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[271] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[271])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12400};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[272] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_HAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[272])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12418};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[273] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_N_1MQ_DA_5_2V1_vXQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[273])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[274] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_5T_GST_5T_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[274])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[275] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_2_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[275])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12671};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[276] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_GGkkl, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[276])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12680};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[277] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[277])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[278] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[278])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[279] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[279])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12939};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[280] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_GGkkl, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[280])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 12948};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[281] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[281])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[282] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[282])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[283] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[283])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13082};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[284] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_DHAQ_4z_a_1A_4vS_AQ_4wc_AQ_9D, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[284])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13100};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[285] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_DHAQ_N_1MQ_DA_5_r_q_vX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[285])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[286] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_9_Qg_PWWX_9_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[286])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[287] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_6avQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[287])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13329};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[288] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_KKsst, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[288])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13338};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[289] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_6_A_x_fAQ_s_1_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[289])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[290] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[290])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[291] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[291])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13585};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[292] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_KKsst, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[292])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13594};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[293] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_11NhVWWX_6_A_x_fAQ_s_1_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[293])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[294] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[294])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[295] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[295])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13717};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[296] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_66I, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[296])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13726};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[297] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_j_6_A_x_6_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[297])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[298] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[298])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[299] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[299])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13849};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[300] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_AA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[300])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13858};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[301] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[301])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[302] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[302])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[303] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[303])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13981};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[304] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_DDeef, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[304])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 13990};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[305] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_B_1_6_A_x_8_aq_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[305])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[306] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[306])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[307] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[307])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14125};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[308] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_SST, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[308])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14134};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[309] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[309])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[310] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[310])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[311] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[311])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14257};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[312] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_1_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[312])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14266};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[313] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[313])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[314] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[314])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[315] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[315])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14401};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[316] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_FFiij, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[316])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14410};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[317] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_DHAQ_6_A_x_9_q_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[317])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[318] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[318])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[319] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[319])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14604};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[320] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_9_ffggh_4z_a_1A_4vS_AQ_4wc_AQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[320])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14622};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[321] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_99_ffggh_N_1MQ_DA_5_LB, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[321])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[322] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_Fd_7R_dde_Fd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[322])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[323] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_C1F, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[323])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14803};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[324] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_WWX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[324])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14812};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[325] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_6_A_x_1_q_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[325])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[326] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[326])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[327] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[327])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14959};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[328] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_BBaab, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[328])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 14968};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[329] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[329])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[330] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[330])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[331] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[331])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15096};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[332] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[332])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15105};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[333] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[333])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[334] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[334])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[335] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[335])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15248};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[336] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_AA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[336])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15257};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[337] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_5V1A_s_1_k_8_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[337])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[338] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[338])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[339] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[339])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15384};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[340] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[340])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15393};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[341] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_4F_1_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[341])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[342] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[342])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[343] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[343])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15525};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[344] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA_q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[344])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15543};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[345] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_N_1MQ_DA_5_RvQ_vXQfG5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[345])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[346] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_gQ_t1G_a_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[346])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[347] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_AV1_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[347])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15731};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[348] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_32PPXXYYZ_4z_a_1A_4vS_AQ_4wc_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[348])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15749};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[349] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_22PPXXYYZ_N_1MQ_DA_5_E, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[349])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[350] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_t1G_V_t1G_VW, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[350])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[351] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_AV1_3, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[351])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15935};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[352] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_j_4z_a_1A_4vS_AQ_4wc_AQ_9D_QfA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[352])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 15953};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[353] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_j_N_1MQ_DA_5_1_6_vXQfG, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[353])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[354] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_4q_4q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[354])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[355] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[355])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16073};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[356] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[356])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16082};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[357] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[357])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[358] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[358])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[359] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[359])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16220};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[360] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_99Oq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[360])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16229};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[361] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_HAQ_6_A_x_V1A_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[361])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[362] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[362])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[363] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[363])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16372};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[364] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_HHmmn, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[364])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16381};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[365] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_J_RSST_6_A_x_F_1_s_1_k, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[365])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[366] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[366])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[367] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[367])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16508};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[368] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_CCccd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[368])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16517};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[369] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[369])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[370] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[370])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[371] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[371])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16660};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[372] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_MMwwx, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[372])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16669};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[373] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_33RRZZ_6_A_x_aq_s_1_k_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[373])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[374] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[374])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[375] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[375])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16790};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[376] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[376])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16799};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[377] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_4F_1_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[377])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[378] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[378])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[379] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[379])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16919};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[380] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_DDeef, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[380])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16928};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[381] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_7vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[381])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[382] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[382])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[383] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[383])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17064};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[384] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_UUV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[384])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17073};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[385] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[385])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[386] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[386])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[387] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[387])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17249};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[388] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_haq_4z_a_1A_4vS_AQ_4wc_AQ_9D_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[388])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17267};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_size, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_obj, __pyx_mstate->__pyx_n_u_flag, __pyx_mstate->__pyx_n_u_buf, __pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[389] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_N_1MQ_DA_5_BfA_vXQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[389])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_state, __pyx_mstate->__pyx_n_u_dict_2, __pyx_mstate->__pyx_n_u_use_setstate};
    __pyx_mstate_global->__pyx_codeobj_tab[390] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_T_G1F_a_vWE_Q_q_q_6d_7_WTU_6d_7, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[390])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 16};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[391] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_31F, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[391])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17387};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[392] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_QQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[392])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17396};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[393] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[393])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[394] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[394])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[395] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[395])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17589};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[396] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_WWX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[396])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17598};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[397] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_6_A_x_1_q_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[397])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[398] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[398])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[399] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[399])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17746};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[400] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_UUV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[400])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17755};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[401] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_0_aq_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[401])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[402] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[402])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[403] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[403])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17915};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[404] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[404])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 17924};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[405] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_1_6_A_x_4F_1_s_1_k_83h, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[405])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[406] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[406])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[407] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[407])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18055};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[408] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_V_W_J_J_K, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[408])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18064};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[409] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_hhiij_6_A_x_GvQa_s_1_k, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[409])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[410] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[410])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[411] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[411])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18191};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[412] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[412])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18200};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[413] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_8_6_A_x_36_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[413])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[414] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[414])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[415] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[415])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18355};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[416] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_MMwwx, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[416])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18364};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[417] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_33RRZZ_6_A_x_aq_s_1_k_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[417])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[418] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[418])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[419] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[419])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18540};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[420] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_WWX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[420])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18549};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[421] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_4HAQ_6_A_x_1_q_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[421])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[422] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[422])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[423] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[423])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18685};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[424] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_CCccd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[424])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18694};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[425] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[425])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[426] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[426])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[427] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[427])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18854};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[428] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_GGkkl, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[428])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 18863};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[429] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_Fhaq_6_A_x_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[429])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[430] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[430])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[431] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[431])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19050};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[432] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_EEggh, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[432])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19059};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[433] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_B_1_6_A_x_8_aq_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[433])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[434] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[434])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[435] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[435])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19219};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[436] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_BBaab, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[436])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19228};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[437] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_6fAQ_s_1_k_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[437])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[438] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[438])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[439] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[439])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19366};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[440] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_CCccd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[440])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19375};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[441] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_6_A_x_7vQa_s_1_k_83hgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[441])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[442] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[442])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[443] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[443])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19511};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_data};
    __pyx_mstate_global->__pyx_codeobj_tab[444] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_data, __pyx_mstate->__pyx_kp_b_iso88591_A_6_QQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[444])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19520};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ptr, __pyx_mstate->__pyx_n_u_readonly, __pyx_mstate->__pyx_n_u_owner, __pyx_mstate->__pyx_n_u_obj};
    __pyx_mstate_global->__pyx_codeobj_tab[445] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_from_ptr, __pyx_mstate->__pyx_kp_b_iso88591_A_A_4s_AQ_haq_6_A_x_fAQ_s_1_k_83, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[445])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 1};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self};
    __pyx_mstate_global->__pyx_codeobj_tab[446] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_reduce_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[446])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 3};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_self, __pyx_mstate->__pyx_n_u_pyx_state};
    __pyx_mstate_global->__pyx_codeobj_tab[447] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_setstate_cython, __pyx_mstate->__pyx_kp_b_iso88591_Q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[447])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19548};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[448] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_init_v2, __pyx_mstate->__pyx_kp_b_iso88591__10, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[448])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19558};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_flags};
    __pyx_mstate_global->__pyx_codeobj_tab[449] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_init_with_flags, __pyx_mstate->__pyx_kp_b_iso88591_aq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[449])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19571};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[450] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_shutdown, __pyx_mstate->__pyx_kp_b_iso88591__11, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[450])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19581};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_result};
    __pyx_mstate_global->__pyx_codeobj_tab[451] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_error_string, __pyx_mstate->__pyx_kp_b_iso88591_ay_87, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[451])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19594};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[452] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_driver_version, __pyx_mstate->__pyx_kp_b_iso88591_q_q_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[452])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19607};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[453] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_nvml_version, __pyx_mstate->__pyx_kp_b_iso88591_q_Qiq_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[453])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19620};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[454] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_cuda_driver_version, __pyx_mstate->__pyx_kp_b_iso88591_31AQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[454])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19635};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[455] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_cuda_driver_version_v, __pyx_mstate->__pyx_kp_b_iso88591_5Q_6aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[455])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19650};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pid};
    __pyx_mstate_global->__pyx_codeobj_tab[456] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_process_name, __pyx_mstate->__pyx_kp_b_iso88591_q_Qe6_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[456])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19666};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[457] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_hic_version, __pyx_mstate->__pyx_kp_b_iso88591_aq_A_L_Qa_1Jaq_W_a_z_S_q_A_L_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[457])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19685};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[458] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_count, __pyx_mstate->__pyx_kp_b_iso88591_A_Qaq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[458])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19700};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[459] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_handle_by_index, __pyx_mstate->__pyx_kp_b_iso88591_EQ_Qhaq_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[459])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19718};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit};
    __pyx_mstate_global->__pyx_codeobj_tab[460] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_unit_info, __pyx_mstate->__pyx_kp_b_iso88591_HA_2_WIQ_vQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[460])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19737};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit};
    __pyx_mstate_global->__pyx_codeobj_tab[461] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_led_state, __pyx_mstate->__pyx_kp_b_iso88591_XQ_3_hiq_vQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[461])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19756};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit};
    __pyx_mstate_global->__pyx_codeobj_tab[462] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_psu_info, __pyx_mstate->__pyx_kp_b_iso88591_q_fA_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[462])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19775};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit, __pyx_mstate->__pyx_n_u_type};
    __pyx_mstate_global->__pyx_codeobj_tab[463] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_temperature, __pyx_mstate->__pyx_kp_b_iso88591_RRS_1F_aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[463])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19794};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit};
    __pyx_mstate_global->__pyx_codeobj_tab[464] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_fan_speed_info, __pyx_mstate->__pyx_kp_b_iso88591_A_B_Zccd_AV6_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[464])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19813};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[465] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_count_v2, __pyx_mstate->__pyx_kp_b_iso88591_1_1A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[465])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19828};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[466] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_attributes_v2, __pyx_mstate->__pyx_kp_b_iso88591_H_S_iij_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[466])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19847};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[467] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_handle_by_index_v2, __pyx_mstate->__pyx_kp_b_iso88591_J_2_81A_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[467])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19865};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_serial};
    __pyx_mstate_global->__pyx_codeobj_tab[468] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_handle_by_serial, __pyx_mstate->__pyx_kp_b_iso88591_1_t_Qha_iq_WG1_0_j_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[468])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19887};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_uuid};
    __pyx_mstate_global->__pyx_codeobj_tab[469] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_handle_by_uuid, __pyx_mstate->__pyx_kp_b_iso88591_7q_t_QfA_iq_e5_q_a_HAQ_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[469])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19909};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_uuid};
    __pyx_mstate_global->__pyx_codeobj_tab[470] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_handle_by_uuidv, __pyx_mstate->__pyx_kp_b_iso88591_A_q0C6_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[470])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19927};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pci_bus_id};
    __pyx_mstate_global->__pyx_codeobj_tab[471] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_handle_by_pci_bus_id, __pyx_mstate->__pyx_kp_b_iso88591_Fa_t_Ql_iq_E_G1_a_5Qm_QRRS_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[471])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19949};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[472] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_name, __pyx_mstate->__pyx_kp_b_iso88591_q_axxvQ_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[472])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19965};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[473] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_brand, __pyx_mstate->__pyx_kp_b_iso88591_5Q_q_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[473])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 19983};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[474] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_index, __pyx_mstate->__pyx_kp_b_iso88591_Q_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[474])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20001};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[475] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_serial, __pyx_mstate->__pyx_kp_b_iso88591_q_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[475])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20017};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[476] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_module_id, __pyx_mstate->__pyx_kp_b_iso88591_A_881A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[476])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20035};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[477] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_c2c_mode_info_v, __pyx_mstate->__pyx_kp_b_iso88591_G_Rbbkkl_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[477])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20054};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_node_set_size, __pyx_mstate->__pyx_n_u_scope};
    __pyx_mstate_global->__pyx_codeobj_tab[478] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_memory_affinity, __pyx_mstate->__pyx_kp_b_iso88591_S_6_i_FgURWW_6_1B_Kbbiinnsst_9, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[478])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20074};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_cpu_set_size, __pyx_mstate->__pyx_n_u_scope};
    __pyx_mstate_global->__pyx_codeobj_tab[479] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_cpu_affinity_within_s, __pyx_mstate->__pyx_kp_b_iso88591_Cq_6_i_FgURWW_0_I_ggllqqr_8_q_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[479])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20094};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_cpu_set_size};
    __pyx_mstate_global->__pyx_codeobj_tab[480] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_cpu_affinity, __pyx_mstate->__pyx_kp_b_iso88591_Cq_6_i_FgURWW_0_I_ggllqqr_8_q_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[480])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20113};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[481] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_cpu_affinity, __pyx_mstate->__pyx_kp_b_iso88591_Qha, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[481])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20126};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[482] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_clear_cpu_affinity, __pyx_mstate->__pyx_kp_b_iso88591_q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[482])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20139};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[483] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_numa_node_id, __pyx_mstate->__pyx_kp_b_iso88591_DA_AXXQa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[483])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20157};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device1, __pyx_mstate->__pyx_n_u_device2};
    __pyx_mstate_global->__pyx_codeobj_tab[484] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_topology_common_ances, __pyx_mstate->__pyx_kp_b_iso88591_8_R_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[484])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20176};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device1, __pyx_mstate->__pyx_n_u_device2, __pyx_mstate->__pyx_n_u_p2p_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[485] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_p2p_status, __pyx_mstate->__pyx_kp_b_iso88591_1HIXYN_llmmn_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[485])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20196};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[486] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_uuid, __pyx_mstate->__pyx_kp_b_iso88591_q_axxvQ_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[486])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20212};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[487] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_minor_number, __pyx_mstate->__pyx_kp_b_iso88591_DA_Qhhaq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[487])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20230};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[488] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_board_part_number, __pyx_mstate->__pyx_kp_b_iso88591_q_1_q_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[488])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20246};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_object_2};
    __pyx_mstate_global->__pyx_codeobj_tab[489] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_inforom_version, __pyx_mstate->__pyx_kp_b_iso88591_q_0_AQQYYbbc_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[489])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20263};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[490] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_inforom_image_version, __pyx_mstate->__pyx_kp_b_iso88591_q_5Qhhiq_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[490])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20279};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[491] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_inforom_configuration, __pyx_mstate->__pyx_kp_b_iso88591_VVW_axxqPQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[491])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20297};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[492] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_validate_inforom, __pyx_mstate->__pyx_kp_b_iso88591_axq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[492])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20310};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_timestamp};
    __pyx_mstate_global->__pyx_codeobj_tab[493] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_last_bbx_flush_time, __pyx_mstate->__pyx_kp_b_iso88591_a_2_88CXXccdde_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[493])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20329};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[494] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_display_mode, __pyx_mstate->__pyx_kp_b_iso88591_A_Qhhaq_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[494])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20347};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[495] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_display_active, __pyx_mstate->__pyx_kp_b_iso88591_a_q_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[495])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20365};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[496] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_persistence_mode, __pyx_mstate->__pyx_kp_b_iso88591_1_1_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[496])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20383};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[497] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pci_info_ext, __pyx_mstate->__pyx_kp_b_iso88591_Q_5_iq_e1_AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[497])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20403};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[498] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pci_info_v3, __pyx_mstate->__pyx_kp_b_iso88591_AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[498])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20422};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[499] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_max_pcie_link_generat, __pyx_mstate->__pyx_kp_b_iso88591_PPQ_7q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[499])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20440};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[500] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_max_pcie_link_gen, __pyx_mstate->__pyx_kp_b_iso88591_TTU_881A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[500])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20458};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[501] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_max_pcie_link_width, __pyx_mstate->__pyx_kp_b_iso88591_K1_2_881A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[501])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20476};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[502] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_curr_pcie_link_genera, __pyx_mstate->__pyx_kp_b_iso88591_QQR_8_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[502])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20494};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[503] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_curr_pcie_link_width, __pyx_mstate->__pyx_kp_b_iso88591_LA_31HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[503])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20512};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_counter};
    __pyx_mstate_global->__pyx_codeobj_tab[504] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pcie_throughput, __pyx_mstate->__pyx_kp_b_iso88591_TTU_0_ASS_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[504])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20531};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[505] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pcie_replay_counter, __pyx_mstate->__pyx_kp_b_iso88591_K1_31HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[505])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20549};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_type};
    __pyx_mstate_global->__pyx_codeobj_tab[506] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_clock_info, __pyx_mstate->__pyx_kp_b_iso88591_LA_1HHL_aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[506])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20568};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_type};
    __pyx_mstate_global->__pyx_codeobj_tab[507] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_max_clock_info, __pyx_mstate->__pyx_kp_b_iso88591_PPQ_axx_6QRRS_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[507])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20587};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[508] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpc_clk_vf_offset, __pyx_mstate->__pyx_kp_b_iso88591_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[508])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20605};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_clock_type, __pyx_mstate->__pyx_n_u_clock_id};
    __pyx_mstate_global->__pyx_codeobj_tab[509] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_clock, __pyx_mstate->__pyx_kp_b_iso88591_q_LPZZddeef_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[509])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20625};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_clock_type};
    __pyx_mstate_global->__pyx_codeobj_tab[510] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_max_customer_boost_cl, __pyx_mstate->__pyx_kp_b_iso88591_a_7q_T_aab_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[510])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20644};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[511] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_supported_memory_cloc, __pyx_mstate->__pyx_kp_b_iso88591_7q_W___Qa_uAS_1_6_i_EWEQVVZZ_fA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[511])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20666};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_memory_clock_m_hz};
    __pyx_mstate_global->__pyx_codeobj_tab[512] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_supported_graphics_cl, __pyx_mstate->__pyx_kp_b_iso88591_9_J_llsst_Qa_uAS_1_6_i_EWEQVVZZ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[512])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20689};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[513] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_auto_boosted_clocks_e, __pyx_mstate->__pyx_kp_b_iso88591_881LXYYZ_E_U, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[513])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20711};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[514] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_fan_speed, __pyx_mstate->__pyx_kp_b_iso88591_A_881A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[514])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20729};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_fan};
    __pyx_mstate_global->__pyx_codeobj_tab[515] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_fan_speed_v2, __pyx_mstate->__pyx_kp_b_iso88591_VVW_Qhhe1A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[515])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20748};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[516] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_fan_speed_rpm, __pyx_mstate->__pyx_kp_b_iso88591_q_V___6e1_Qhha_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[516])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20768};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_fan};
    __pyx_mstate_global->__pyx_codeobj_tab[517] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_target_fan_speed, __pyx_mstate->__pyx_kp_b_iso88591_ZZ_0_aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[517])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20787};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[518] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_min_max_fan_speed, __pyx_mstate->__pyx_kp_b_iso88591_0_Qa_Kq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[518])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20809};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_fan};
    __pyx_mstate_global->__pyx_codeobj_tab[519] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_fan_control_policy_v2, __pyx_mstate->__pyx_kp_b_iso88591_31_5Qhhe1A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[519])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20828};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[520] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_num_fans, __pyx_mstate->__pyx_kp_b_iso88591_1_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[520])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20846};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[521] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_cooler_info, __pyx_mstate->__pyx_kp_b_iso88591_Q_V___6e1_AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[521])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20866};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_threshold_type};
    __pyx_mstate_global->__pyx_codeobj_tab[522] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_temperature_threshold, __pyx_mstate->__pyx_kp_b_iso88591_aab_6axxG__ooppq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[522])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20885};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[523] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_margin_temperature, __pyx_mstate->__pyx_kp_b_iso88591_54H_55PP_nnwwx_K_B_q_31HHA_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[523])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20905};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_sensor_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[524] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_thermal_settings, __pyx_mstate->__pyx_kp_b_iso88591_54Fa_88TT__tt_1_QR_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[524])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20925};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[525] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_performance_state, __pyx_mstate->__pyx_kp_b_iso88591_A_2_881A_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[525])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20943};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[526] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_current_clocks_event, __pyx_mstate->__pyx_kp_b_iso88591_ZZ_1HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[526])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20961};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[527] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_supported_clocks_even, __pyx_mstate->__pyx_kp_b_iso88591_Qhhaq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[527])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20979};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[528] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_state, __pyx_mstate->__pyx_kp_b_iso88591_1_AXXQa_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[528])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 20997};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[529] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_dynamic_pstates_info, __pyx_mstate->__pyx_kp_b_iso88591_PPQ_i_j_C_C_L_L_M_4AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[529])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21016};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[530] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_mem_clk_vf_offset, __pyx_mstate->__pyx_kp_b_iso88591_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[530])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21034};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_type, __pyx_mstate->__pyx_n_u_pstate};
    __pyx_mstate_global->__pyx_codeobj_tab[531] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_min_max_clock_of_p_st, __pyx_mstate->__pyx_kp_b_iso88591_5QhhlRXXbbjjkk_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[531])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21058};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[532] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpc_clk_min_max_vf_of, __pyx_mstate->__pyx_kp_b_iso88591_6axxq_TUUV_L, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[532])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21080};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[533] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_mem_clk_min_max_vf_of, __pyx_mstate->__pyx_kp_b_iso88591_6axxq_TUUV_L, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[533])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21102};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[534] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_clock_offsets, __pyx_mstate->__pyx_kp_b_iso88591_8_7_ST_0_Q_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[534])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21122};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_info};
    __pyx_mstate_global->__pyx_codeobj_tab[535] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_clock_offsets, __pyx_mstate->__pyx_kp_b_iso88591_axx_SST, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[535])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21136};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[536] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_performance_modes, __pyx_mstate->__pyx_kp_b_iso88591_a_FkQ_ggh_k_q_2_881_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[536])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21156};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[537] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_current_clock_freqs, __pyx_mstate->__pyx_kp_b_iso88591_WWX___j_k_A_A_J_J_K_K5PQ_31HHA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[537])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21176};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[538] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_management_limi, __pyx_mstate->__pyx_kp_b_iso88591_Na_6axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[538])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21194};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[539] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_management_limi_2, __pyx_mstate->__pyx_kp_b_iso88591_A_RSS____Kq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[539])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21216};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[540] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_management_defa, __pyx_mstate->__pyx_kp_b_iso88591_VVW_Qhhaq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[540])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21234};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[541] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_usage, __pyx_mstate->__pyx_kp_b_iso88591_C1_AXXQa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[541])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21252};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[542] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_total_energy_consumpt, __pyx_mstate->__pyx_kp_b_iso88591_VVW_8_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[542])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21270};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[543] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_enforced_power_limit, __pyx_mstate->__pyx_kp_b_iso88591_LA_4AXXQa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[543])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21288};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[544] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_operation_mode, __pyx_mstate->__pyx_kp_b_iso88591_2_881IQa_E_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[544])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21310};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[545] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_memory_info_v2, __pyx_mstate->__pyx_kp_b_iso88591_y_6k_ST_U_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[545])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21330};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[546] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_compute_mode, __pyx_mstate->__pyx_kp_b_iso88591_A_Qhhaq_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[546])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21348};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[547] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_cuda_compute_capabili, __pyx_mstate->__pyx_kp_b_iso88591_7q_PQQR_G1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[547])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21370};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[548] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_dram_encryption_mode, __pyx_mstate->__pyx_kp_b_iso88591_4AXXQiqPQ_2Oq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[548])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21392};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_dram_encryption};
    __pyx_mstate_global->__pyx_codeobj_tab[549] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_dram_encryption_mode, __pyx_mstate->__pyx_kp_b_iso88591_4AXXEffg, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[549])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21406};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[550] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_ecc_mode, __pyx_mstate->__pyx_kp_b_iso88591_9AQ_E_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[550])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21428};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[551] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_default_ecc_mode, __pyx_mstate->__pyx_kp_b_iso88591_0_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[551])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21446};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[552] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_board_id, __pyx_mstate->__pyx_kp_b_iso88591_1_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[552])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21464};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[553] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_multi_gpu_board, __pyx_mstate->__pyx_kp_b_iso88591_Gq_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[553])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21482};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_error_type, __pyx_mstate->__pyx_n_u_counter_type};
    __pyx_mstate_global->__pyx_codeobj_tab[554] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_total_ecc_errors, __pyx_mstate->__pyx_kp_b_iso88591_ppq_0_ASS__pp_A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[554])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {4, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21502};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_error_type, __pyx_mstate->__pyx_n_u_counter_type, __pyx_mstate->__pyx_n_u_location_type};
    __pyx_mstate_global->__pyx_codeobj_tab[555] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_memory_error_counter, __pyx_mstate->__pyx_kp_b_iso88591_H_H_I_4AXXEWWcct_u_C_C_T_T_c_c, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[555])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21523};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[556] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_utilization_rates, __pyx_mstate->__pyx_kp_b_iso88591_k_Xaab_2_881_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[556])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21542};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[557] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_encoder_utilization, __pyx_mstate->__pyx_kp_b_iso88591_4AXXQmSTTU_M, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[557])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21564};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_encoder_query_type};
    __pyx_mstate_global->__pyx_codeobj_tab[558] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_encoder_capacity, __pyx_mstate->__pyx_kp_b_iso88591_a_1_Pddeef_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[558])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21583};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[559] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_encoder_stats, __pyx_mstate->__pyx_kp_b_iso88591_axxq_qP___O, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[559])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21607};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[560] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_encoder_sessions, __pyx_mstate->__pyx_kp_b_iso88591_1_Q_a_Qa_a_AQ_77SS_kkttu_AS_1_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[560])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21629};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[561] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_decoder_utilization, __pyx_mstate->__pyx_kp_b_iso88591_4AXXQmSTTU_M, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[561])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21651};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[562] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_jpg_utilization, __pyx_mstate->__pyx_kp_b_iso88591_0_qPQ_M, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[562])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21673};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[563] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_ofa_utilization, __pyx_mstate->__pyx_kp_b_iso88591_0_qPQ_M, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[563])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21695};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[564] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_fbc_stats, __pyx_mstate->__pyx_kp_b_iso88591_7_iWX_881_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[564])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21714};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[565] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_fbc_sessions, __pyx_mstate->__pyx_kp_b_iso88591_Qhho__Qa_Qm1A_22J_Uaajjk_AS_1_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[565])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21736};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[566] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_driver_model_v2, __pyx_mstate->__pyx_kp_b_iso88591_0_1A_E_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[566])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21758};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[567] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vbios_version, __pyx_mstate->__pyx_kp_b_iso88591_q_axxy_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[567])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21774};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[568] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_bridge_chip_info, __pyx_mstate->__pyx_kp_b_iso88591_43Fa_77TT__rr_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[568])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21793};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[569] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_compute_running_proce, __pyx_mstate->__pyx_kp_b_iso88591_aq_AXX__hhi_Qa_1A_YVW_z_S_q_AXX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[569])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21815};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[570] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_mps_compute_running_p, __pyx_mstate->__pyx_kp_b_iso88591_aq_q_P__kkl_Qa_1A_YVW_z_S_q_q_P, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[570])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21837};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device1, __pyx_mstate->__pyx_n_u_device2};
    __pyx_mstate_global->__pyx_codeobj_tab[571] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_on_same_board, __pyx_mstate->__pyx_kp_b_iso88591_K1_89HIQa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[571])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21856};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_api_type};
    __pyx_mstate_global->__pyx_codeobj_tab[572] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_api_restriction, __pyx_mstate->__pyx_kp_b_iso88591_MQ_0_AQQ_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[572])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21875};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[573] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_bar1_memory_info, __pyx_mstate->__pyx_kp_b_iso88591_Ja_K_T_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[573])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21894};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[574] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_irq_num, __pyx_mstate->__pyx_kp_b_iso88591_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[574])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21912};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[575] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_num_gpu_cores, __pyx_mstate->__pyx_kp_b_iso88591_EQ_Qhhaq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[575])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21930};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[576] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_source, __pyx_mstate->__pyx_kp_b_iso88591_Qhhaq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[576])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21948};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[577] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_memory_bus_width, __pyx_mstate->__pyx_kp_b_iso88591_H_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[577])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21966};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[578] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pcie_link_max_speed, __pyx_mstate->__pyx_kp_b_iso88591_K1_2_881A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[578])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 21984};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[579] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pcie_speed, __pyx_mstate->__pyx_kp_b_iso88591_B_1HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[579])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22002};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[580] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_adaptive_clock_info_s, __pyx_mstate->__pyx_kp_b_iso88591_RRS_9_1_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[580])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22020};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[581] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_bus_type, __pyx_mstate->__pyx_kp_b_iso88591_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[581])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22038};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[582] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_fabric_info_v, __pyx_mstate->__pyx_kp_b_iso88591_0_q_11I_Tffoop_U_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[582])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22058};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[583] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_conf_compute_capabili, __pyx_mstate->__pyx_kp_b_iso88591_21Fa_55TT__nnwwx_9_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[583])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22074};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[584] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_conf_compute_state, __pyx_mstate->__pyx_kp_b_iso88591_A_O_Zbbkkl_2_1_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[584])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22090};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[585] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_conf_compute_mem_size, __pyx_mstate->__pyx_kp_b_iso88591_DA_22RR_hhqqr_8_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[585])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22109};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[586] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_conf_compute_gpus_rea, __pyx_mstate->__pyx_kp_b_iso88591_Fa_1AQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[586])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22124};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[587] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_conf_compute_protecte, __pyx_mstate->__pyx_kp_b_iso88591_F_0_9IQ_A_RS_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[587])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22143};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[588] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_conf_compute_gpu_cert, __pyx_mstate->__pyx_kp_b_iso88591_21J_55XXccnnwwx_1HHA_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[588])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22162};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[589] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_conf_compute_gpu_atte, __pyx_mstate->__pyx_kp_b_iso88591_BBkkv_w_I_I_R_R_S_A_RS_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[589])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22181};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[590] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_conf_compute_key_rota, __pyx_mstate->__pyx_kp_b_iso88591_Q_Pyyz_Q_R_B_B_M_M_g_g_p_p_q_cc, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[590])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22198};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_size_ki_b};
    __pyx_mstate_global->__pyx_codeobj_tab[591] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_conf_compute_unprotec, __pyx_mstate->__pyx_kp_b_iso88591_q_PQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[591])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22212};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_is_accepting_work};
    __pyx_mstate_global->__pyx_codeobj_tab[592] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_set_conf_compute_gpus_rea, __pyx_mstate->__pyx_kp_b_iso88591_1A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[592])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22225};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_p_key_rotation_thr_info};
    __pyx_mstate_global->__pyx_codeobj_tab[593] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_set_conf_compute_key_rota, __pyx_mstate->__pyx_kp_b_iso88591_EQFuuv, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[593])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22238};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[594] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_conf_compute_settings, __pyx_mstate->__pyx_kp_b_iso88591_54PPQ_55XXccnnwwx_K_B_q_5Qa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[594])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22255};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[595] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gsp_firmware_version, __pyx_mstate->__pyx_kp_b_iso88591_DA_4AXXQa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[595])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22273};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[596] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gsp_firmware_mode, __pyx_mstate->__pyx_kp_b_iso88591_1_qPQ_L, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[596])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22295};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[597] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_sram_ecc_error_status, __pyx_mstate->__pyx_kp_b_iso88591_H_S_eef_9_a_4AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[597])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22315};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[598] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_accounting_mode, __pyx_mstate->__pyx_kp_b_iso88591_q_0_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[598])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22333};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_pid};
    __pyx_mstate_global->__pyx_codeobj_tab[599] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_accounting_stats, __pyx_mstate->__pyx_kp_b_iso88591_O1_A_HT_1_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[599])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22353};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[600] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_accounting_pids, __pyx_mstate->__pyx_kp_b_iso88591_0_PWWX_Qa_uAS_1_6_i_EWEQVVZZ_vQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[600])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22375};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[601] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_accounting_buffer_siz, __pyx_mstate->__pyx_kp_b_iso88591_Na_6axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[601])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22393};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_cause};
    __pyx_mstate_global->__pyx_codeobj_tab[602] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_retired_pages, __pyx_mstate->__pyx_kp_b_iso88591_aq_axx_UU_kkwwx_Qa_z_S_6_i_K7RW, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[602])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22416};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[603] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_retired_pages_pending, __pyx_mstate->__pyx_kp_b_iso88591_LA_1HHAQ_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[603])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22434};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[604] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_remapped_rows, __pyx_mstate->__pyx_kp_b_iso88591_axxq_1JVWWccdde_Kz_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[604])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22460};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[605] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_row_remapper_histogra, __pyx_mstate->__pyx_kp_b_iso88591_10J_44XXcclluuv_6axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[605])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22479};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[606] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_architecture, __pyx_mstate->__pyx_kp_b_iso88591_DA_axxq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[606])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22497};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[607] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_clk_mon_status, __pyx_mstate->__pyx_kp_b_iso88591_a_KyPYYZ_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[607])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22516};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_last_seen_time_stamp};
    __pyx_mstate_global->__pyx_codeobj_tab[608] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_process_utilization, __pyx_mstate->__pyx_kp_b_iso88591_2_4AXXV_Zqqr_Qa_00H_I____hhss_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[608])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22539};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[609] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_platform_info, __pyx_mstate->__pyx_kp_b_iso88591_O1_C_N_ggh_q_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[609])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22559};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit, __pyx_mstate->__pyx_n_u_color};
    __pyx_mstate_global->__pyx_codeobj_tab[610] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_set_led_state, __pyx_mstate->__pyx_kp_b_iso88591_v, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[610])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22573};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[611] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_persistence_mode, __pyx_mstate->__pyx_kp_b_iso88591_1_PQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[611])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22587};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[612] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_compute_mode, __pyx_mstate->__pyx_kp_b_iso88591_QhhnA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[612])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22601};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_ecc};
    __pyx_mstate_global->__pyx_codeobj_tab[613] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_ecc_mode, __pyx_mstate->__pyx_kp_b_iso88591__12, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[613])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22615};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_counter_type};
    __pyx_mstate_global->__pyx_codeobj_tab[614] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_clear_ecc_error_counts, __pyx_mstate->__pyx_kp_b_iso88591_2_88CTTU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[614])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22629};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_driver_model, __pyx_mstate->__pyx_n_u_flags};
    __pyx_mstate_global->__pyx_codeobj_tab[615] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_driver_model, __pyx_mstate->__pyx_kp_b_iso88591_QhhnNZ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[615])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22644};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_min_gpu_clock_m_hz, __pyx_mstate->__pyx_n_u_max_gpu_clock_m_hz};
    __pyx_mstate_global->__pyx_codeobj_tab[616] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_gpu_locked_clocks, __pyx_mstate->__pyx_kp_b_iso88591_1_BVVW, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[616])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22659};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[617] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_reset_gpu_locked_clocks, __pyx_mstate->__pyx_kp_b_iso88591_31HA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[617])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22672};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_min_mem_clock_m_hz, __pyx_mstate->__pyx_n_u_max_mem_clock_m_hz};
    __pyx_mstate_global->__pyx_codeobj_tab[618] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_memory_locked_clocks, __pyx_mstate->__pyx_kp_b_iso88591_4AXXEYYZ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[618])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22687};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[619] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_reset_memory_locked_clock, __pyx_mstate->__pyx_kp_b_iso88591_6axq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[619])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22700};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_enabled};
    __pyx_mstate_global->__pyx_codeobj_tab[620] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_auto_boosted_clocks_e, __pyx_mstate->__pyx_kp_b_iso88591_88_YZ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[620])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22714};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_enabled, __pyx_mstate->__pyx_n_u_flags};
    __pyx_mstate_global->__pyx_codeobj_tab[621] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_default_auto_boosted, __pyx_mstate->__pyx_kp_b_iso88591_A_R_iij, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[621])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22729};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_fan};
    __pyx_mstate_global->__pyx_codeobj_tab[622] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_default_fan_speed_v2, __pyx_mstate->__pyx_kp_b_iso88591_4AXXQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[622])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22743};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_fan, __pyx_mstate->__pyx_n_u_policy};
    __pyx_mstate_global->__pyx_codeobj_tab[623] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_fan_control_policy, __pyx_mstate->__pyx_kp_b_iso88591_2_885H_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[623])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22758};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_threshold_type, __pyx_mstate->__pyx_n_u_temp};
    __pyx_mstate_global->__pyx_codeobj_tab[624] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_temperature_threshold, __pyx_mstate->__pyx_kp_b_iso88591_6axxG__oouuv, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[624])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22773};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_limit};
    __pyx_mstate_global->__pyx_codeobj_tab[625] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_power_management_limi, __pyx_mstate->__pyx_kp_b_iso88591_6axxq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[625])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22787};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[626] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_gpu_operation_mode, __pyx_mstate->__pyx_kp_b_iso88591_2_88CVVW, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[626])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22801};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_api_type, __pyx_mstate->__pyx_n_u_is_restricted};
    __pyx_mstate_global->__pyx_codeobj_tab[627] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_api_restriction, __pyx_mstate->__pyx_kp_b_iso88591_0_AQQ_iij, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[627])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22816};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_fan, __pyx_mstate->__pyx_n_u_speed};
    __pyx_mstate_global->__pyx_codeobj_tab[628] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_fan_speed_v2, __pyx_mstate->__pyx_kp_b_iso88591_Qhhe1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[628])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22831};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[629] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_accounting_mode, __pyx_mstate->__pyx_kp_b_iso88591_0_q_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[629])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22845};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[630] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_clear_accounting_pids, __pyx_mstate->__pyx_kp_b_iso88591_2_81, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[630])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22858};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_power_value};
    __pyx_mstate_global->__pyx_codeobj_tab[631] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_power_management_limi_2, __pyx_mstate->__pyx_kp_b_iso88591_9_J_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[631])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22872};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link};
    __pyx_mstate_global->__pyx_codeobj_tab[632] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_state, __pyx_mstate->__pyx_kp_b_iso88591_Oq_QhhfAQ_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[632])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22891};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link};
    __pyx_mstate_global->__pyx_codeobj_tab[633] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_version, __pyx_mstate->__pyx_kp_b_iso88591_YYZ_q_aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[633])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22910};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link, __pyx_mstate->__pyx_n_u_capability};
    __pyx_mstate_global->__pyx_codeobj_tab[634] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_capability, __pyx_mstate->__pyx_kp_b_iso88591_llm_2_886I_hhiij_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[634])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22930};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link};
    __pyx_mstate_global->__pyx_codeobj_tab[635] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_remote_pci_inf, __pyx_mstate->__pyx_kp_b_iso88591_8_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[635])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22950};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link, __pyx_mstate->__pyx_n_u_counter};
    __pyx_mstate_global->__pyx_codeobj_tab[636] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_error_counter, __pyx_mstate->__pyx_kp_b_iso88591_rrs_4AXXVK_iijjk_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[636])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22970};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link};
    __pyx_mstate_global->__pyx_codeobj_tab[637] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_reset_nvlink_error_counte, __pyx_mstate->__pyx_kp_b_iso88591_7q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[637])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 22984};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_link};
    __pyx_mstate_global->__pyx_codeobj_tab[638] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_remote_device, __pyx_mstate->__pyx_kp_b_iso88591_8_qPQ_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[638])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23003};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_info};
    __pyx_mstate_global->__pyx_codeobj_tab[639] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_nvlink_device_low_pow, __pyx_mstate->__pyx_kp_b_iso88591_q_Piij, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[639])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23017};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_nvlink_bw_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[640] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_set_nvlink_bw_mode, __pyx_mstate->__pyx_kp_b_iso88591_aq_2, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[640])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23030};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[641] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_nvlink_bw_mode, __pyx_mstate->__pyx_kp_b_iso88591_7q_aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[641])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23045};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[642] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_supported_bw_m, __pyx_mstate->__pyx_kp_b_iso88591_SST_ffz_D_D_E_H_Q_8_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[642])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23065};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[643] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_bw_mode, __pyx_mstate->__pyx_kp_b_iso88591_q_G_R_iij_5_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[643])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23085};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_set_bw_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[644] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_nvlink_bw_mode, __pyx_mstate->__pyx_kp_b_iso88591_5_axx_WWX, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[644])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23100};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[645] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_event_set_create, __pyx_mstate->__pyx_kp_b_iso88591_q_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[645])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23115};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_event_types, __pyx_mstate->__pyx_n_u_set};
    __pyx_mstate_global->__pyx_codeobj_tab[646] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_register_events, __pyx_mstate->__pyx_kp_b_iso88591_Qhhm_UV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[646])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23130};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[647] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_supported_event_types, __pyx_mstate->__pyx_kp_b_iso88591_SST_5Qhhaq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[647])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23148};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_set, __pyx_mstate->__pyx_n_u_timeoutms};
    __pyx_mstate_global->__pyx_codeobj_tab[648] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_event_set_wait_v2, __pyx_mstate->__pyx_kp_b_iso88591_Ya_4Kwiq_5_a_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[648])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23168};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_set};
    __pyx_mstate_global->__pyx_codeobj_tab[649] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_event_set_free, __pyx_mstate->__pyx_kp_b_iso88591_Qj, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[649])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23181};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_request};
    __pyx_mstate_global->__pyx_codeobj_tab[650] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_event_set_create, __pyx_mstate->__pyx_kp_b_iso88591_Q_RRS, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[650])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23194};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_request};
    __pyx_mstate_global->__pyx_codeobj_tab[651] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_event_set_free, __pyx_mstate->__pyx_kp_b_iso88591_1_Na, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[651])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23207};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_request};
    __pyx_mstate_global->__pyx_codeobj_tab[652] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_register_events, __pyx_mstate->__pyx_kp_b_iso88591_Q_QQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[652])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23220};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_request};
    __pyx_mstate_global->__pyx_codeobj_tab[653] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_event_set_wait, __pyx_mstate->__pyx_kp_b_iso88591_1_Na, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[653])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23233};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pci_info, __pyx_mstate->__pyx_n_u_new_state};
    __pyx_mstate_global->__pyx_codeobj_tab[654] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_modify_drain_state, __pyx_mstate->__pyx_kp_b_iso88591_q0_XY, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[654])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23247};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pci_info};
    __pyx_mstate_global->__pyx_codeobj_tab[655] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_query_drain_state, __pyx_mstate->__pyx_kp_b_iso88591_q_a_z_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[655])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23265};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pci_info, __pyx_mstate->__pyx_n_u_gpu_state, __pyx_mstate->__pyx_n_u_link_state};
    __pyx_mstate_global->__pyx_codeobj_tab[656] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_remove_gpu_v2, __pyx_mstate->__pyx_kp_b_iso88591_1_JFWWbbrrs, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[656])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23280};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pci_info};
    __pyx_mstate_global->__pyx_codeobj_tab[657] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_discover_gpus, __pyx_mstate->__pyx_kp_b_iso88591_1_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[657])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23293};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[658] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_virtualization_mode, __pyx_mstate->__pyx_kp_b_iso88591_C1_4AXXQa_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[658])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23311};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[659] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_host_vgpu_mode, __pyx_mstate->__pyx_kp_b_iso88591_a_axxq_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[659])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23329};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_virtual_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[660] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_virtualization_mode, __pyx_mstate->__pyx_kp_b_iso88591_4AXXE, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[660])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23343};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[661] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_heterogeneous_mo, __pyx_mstate->__pyx_kp_b_iso88591_TTU_gg_H_H_I_J_q_7q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[661])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23363};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_p_heterogeneous_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[662] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_vgpu_heterogeneous_mo, __pyx_mstate->__pyx_kp_b_iso88591_7q_Hllm, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[662])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23377};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[663] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_placement_id, __pyx_mstate->__pyx_kp_b_iso88591_q_G_R_iij_5_314H_WX_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[663])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23397};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[664] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_type_supported_p, __pyx_mstate->__pyx_kp_b_iso88591_54H_55PP_nnwwx_QhhN_nno_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[664])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23417};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[665] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_gsp_heap_size, __pyx_mstate->__pyx_kp_b_iso88591_XXY_q0B_PQQR_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[665])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23435};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[666] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_fb_reservation, __pyx_mstate->__pyx_kp_b_iso88591_YYZ_1_2DNRSST_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[666])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23453};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[667] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_runtime_state, __pyx_mstate->__pyx_kp_b_iso88591_Q_E_PZZccd_8_Q_8_9M__1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[667])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23473};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_capability, __pyx_mstate->__pyx_n_u_state};
    __pyx_mstate_global->__pyx_codeobj_tab[668] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_vgpu_capabilities, __pyx_mstate->__pyx_kp_b_iso88591_2_88CZZffttu, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[668])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23488};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[669] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_grid_licensable_featu, __pyx_mstate->__pyx_kp_b_iso88591_A_VVW_DDddo_p_M_M_V_V_W_1HHA_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[669])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23507};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_capability};
    __pyx_mstate_global->__pyx_codeobj_tab[670] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_get_vgpu_driver_capabilities, __pyx_mstate->__pyx_kp_b_iso88591_H_2_3J_VWWX_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[670])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23525};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_capability};
    __pyx_mstate_global->__pyx_codeobj_tab[671] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_capabilities, __pyx_mstate->__pyx_kp_b_iso88591_YYZ_2_88CZZffggh_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[671])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23544};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[672] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_class, __pyx_mstate->__pyx_kp_b_iso88591_N_P___Qa_t1Cs_q_5_Qa_NJ_jjk_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[672])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23566};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[673] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_name, __pyx_mstate->__pyx_kp_b_iso88591_Q_IYYhhi_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[673])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23582};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[674] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_gpu_instance_profi, __pyx_mstate->__pyx_kp_b_iso88591_8_9K_YZZ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[674])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23600};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[675] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_device_id, __pyx_mstate->__pyx_kp_b_iso88591_A_QkYZZ_Kq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[675])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23622};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[676] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_framebuffer_size, __pyx_mstate->__pyx_kp_b_iso88591_314FnTUUV_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[676])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23640};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[677] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_num_display_heads, __pyx_mstate->__pyx_kp_b_iso88591_VVW_314FnTUUV_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[677])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23658};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id, __pyx_mstate->__pyx_n_u_display_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[678] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_resolution, __pyx_mstate->__pyx_kp_b_iso88591_a_A_O___ffggh_F, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[678])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23681};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id, __pyx_mstate->__pyx_n_u_vgpu_type_license_string, __pyx_mstate->__pyx_n_u_size};
    __pyx_mstate_global->__pyx_codeobj_tab[679] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_license, __pyx_mstate->__pyx_kp_b_iso88591_1_nGSmmn, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[679])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23696};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[680] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_frame_rate_limit, __pyx_mstate->__pyx_kp_b_iso88591_UUV_2_3E_STTU_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[680])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23714};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[681] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_max_instances, __pyx_mstate->__pyx_kp_b_iso88591_ccd_0_ASSaabbc_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[681])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23733};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[682] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_max_instances_per, __pyx_mstate->__pyx_kp_b_iso88591_YYZ_5Q6H_VWWX_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[682])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23751};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[683] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_bar1_info, __pyx_mstate->__pyx_kp_b_iso88591_a_FkQ_eef_K_9_a_A_Q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[683])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23771};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[684] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_uuid, __pyx_mstate->__pyx_kp_b_iso88591_Q_A_A_PVVW_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[684])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23787};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[685] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_vm_driver_vers, __pyx_mstate->__pyx_kp_b_iso88591_q_7q8LO_dde_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[685])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23803};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[686] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_fb_usage, __pyx_mstate->__pyx_kp_b_iso88591_XXY_q0DOSTTU_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[686])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23821};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[687] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_license_status, __pyx_mstate->__pyx_kp_b_iso88591_XXY_5Q6J_YZZ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[687])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23839};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[688] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_type, __pyx_mstate->__pyx_kp_b_iso88591_Na_A_A_PQQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[688])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23857};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[689] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_frame_rate_lim, __pyx_mstate->__pyx_kp_b_iso88591_ZZ_6a7K_Z_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[689])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23875};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[690] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_ecc_mode, __pyx_mstate->__pyx_kp_b_iso88591_J_q0DOSTTU_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[690])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23893};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[691] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_encoder_capaci, __pyx_mstate->__pyx_kp_b_iso88591_ZZ_7q8LO_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[691])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23911};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance, __pyx_mstate->__pyx_n_u_encoder_capacity};
    __pyx_mstate_global->__pyx_codeobj_tab[692] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_set_encoder_capaci, __pyx_mstate->__pyx_kp_b_iso88591_7q8LO, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[692])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23925};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[693] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_encoder_stats, __pyx_mstate->__pyx_kp_b_iso88591_4A5I_XYYhhiivvwwx_O, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[693])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23949};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[694] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_encoder_sessio, __pyx_mstate->__pyx_kp_b_iso88591_7q8LO_jjyyz_Qa_Qm1A_66RR_iirrs, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[694])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23971};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[695] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_fbc_stats, __pyx_mstate->__pyx_kp_b_iso88591_7_iWX_0_1E_TU_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[695])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 23990};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[696] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_fbc_sessions, __pyx_mstate->__pyx_kp_b_iso88591_314H_Wffuuv_Qa_Qm1A_22J_Uaajjk, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[696])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24012};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[697] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_gpu_instance_i, __pyx_mstate->__pyx_kp_b_iso88591_YYZ_5Q6J_YZZ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[697])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24030};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[698] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_gpu_pci_id, __pyx_mstate->__pyx_kp_b_iso88591_1_0_1E_TZZiij_Qa_vQc_A_q_uAV1A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[698])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24052};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_type_id, __pyx_mstate->__pyx_n_u_capability};
    __pyx_mstate_global->__pyx_codeobj_tab[699] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_capabilities, __pyx_mstate->__pyx_kp_b_iso88591_aab_0_1C_Qbbnnoop_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[699])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24071};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[700] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_mdev_uuid, __pyx_mstate->__pyx_kp_b_iso88591_Q_0_1E_T___q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[700])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24087};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[701] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_type_get_max_instances_per_2, __pyx_mstate->__pyx_kp_b_iso88591_54J_55RR_nnwwx_B_q_aq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[701])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24104};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_p_scheduler};
    __pyx_mstate_global->__pyx_codeobj_tab[702] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_set_vgpu_scheduler, __pyx_mstate->__pyx_kp_b_iso88591_9_Uppq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[702])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24118};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[703] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_vgpu_scheduler, __pyx_mstate->__pyx_kp_b_iso88591_XXY_k_l_E_E_N_N_O_I_a_9_UV_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[703])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24138};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[704] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_vgpu_scheduler_2, __pyx_mstate->__pyx_kp_b_iso88591_RRS_ZZee_F_F_G_I_a_7q_ST_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[704])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24158};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[705] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_vgpu_heterogene, __pyx_mstate->__pyx_kp_b_iso88591_TTU_gg_H_H_I_J_q_A_XY_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[705])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24178};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_p_heterogeneous_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[706] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_set_vgpu_heterogene, __pyx_mstate->__pyx_kp_b_iso88591_A_X, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[706])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24192};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[707] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pgpu_metadata_string, __pyx_mstate->__pyx_kp_b_iso88591_q_4AXXV_Z_Qa_3c_q_a_1_q_4AXX_Tc, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[707])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24214};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[708] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_scheduler_log, __pyx_mstate->__pyx_kp_b_iso88591_0_q_33M_Xjjsst_2_881_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[708])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24233};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[709] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_scheduler_state, __pyx_mstate->__pyx_kp_b_iso88591_76K1_YYddx_y_B_B_C_4AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[709])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24252};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[710] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_scheduler_capabi, __pyx_mstate->__pyx_kp_b_iso88591_87PPQ_iiz_D_D_E_1HHA_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[710])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24271};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_p_scheduler_state};
    __pyx_mstate_global->__pyx_codeobj_tab[711] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_vgpu_scheduler_state, __pyx_mstate->__pyx_kp_b_iso88591_4AXXEccd, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[711])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24285};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_version};
    __pyx_mstate_global->__pyx_codeobj_tab[712] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_set_vgpu_version, __pyx_mstate->__pyx_kp_b_iso88591_q_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[712])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24298};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_last_seen_time_stamp};
    __pyx_mstate_global->__pyx_codeobj_tab[713] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_utilization, __pyx_mstate->__pyx_kp_b_iso88591_1_BXXYYjjk_l_I_I_J_J_K_E_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[713])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24323};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_last_seen_time_stamp};
    __pyx_mstate_global->__pyx_codeobj_tab[714] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_process_utilizat, __pyx_mstate->__pyx_kp_b_iso88591_8_I, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[714])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24346};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[715] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_accounting_mod, __pyx_mstate->__pyx_kp_b_iso88591_QQR_6a7K_Z_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[715])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24364};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[716] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_accounting_pid, __pyx_mstate->__pyx_kp_b_iso88591_6a7K_Ziippq_Qa_uAS_1_6_i_EWEQVV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[716])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24386};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance, __pyx_mstate->__pyx_n_u_pid};
    __pyx_mstate_global->__pyx_codeobj_tab[717] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_accounting_sta, __pyx_mstate->__pyx_kp_b_iso88591_O1_A_HT_7q8LO_a_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[717])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24406};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[718] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_clear_accounting_p, __pyx_mstate->__pyx_kp_b_iso88591_8_9MQ, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[718])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24419};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[719] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_license_info_v, __pyx_mstate->__pyx_kp_b_iso88591_H_Sbbkkl_6a7K_Z_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[719])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24438};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[720] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_get_excluded_device_count, __pyx_mstate->__pyx_kp_b_iso88591_7q_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[720])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24453};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[721] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_get_excluded_device_info_by_inde, __pyx_mstate->__pyx_kp_b_iso88591_8_FkQXXaab_5Qha_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[721])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24472};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[722] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_mig_mode, __pyx_mstate->__pyx_kp_b_iso88591_K1_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[722])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24491};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[723] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_mig_mode, __pyx_mstate->__pyx_kp_b_iso88591_N, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[723])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24513};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile};
    __pyx_mstate_global->__pyx_codeobj_tab[724] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instance_profile, __pyx_mstate->__pyx_kp_b_iso88591_Fa_11TT__ffoop_5_9_ST_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[724])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24534};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[725] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instance_possible, __pyx_mstate->__pyx_kp_b_iso88591_B_88S__eettu_Qa_q_Qa_66TT__iirr, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[725])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24557};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[726] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instance_remainin, __pyx_mstate->__pyx_kp_b_iso88591_ppq_axx_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[726])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24576};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[727] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_create_gpu_instance, __pyx_mstate->__pyx_kp_b_iso88591_0_Qa_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[727])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24595};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile_id, __pyx_mstate->__pyx_n_u_placement};
    __pyx_mstate_global->__pyx_codeobj_tab[728] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_create_gpu_instance_with, __pyx_mstate->__pyx_kp_b_iso88591_A_QhhlZ_I_I_J_J_K_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[728])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24615};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[729] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_destroy, __pyx_mstate->__pyx_kp_b_iso88591_1M, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[729])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24628};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_id};
    __pyx_mstate_global->__pyx_codeobj_tab[730] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instance_by_id, __pyx_mstate->__pyx_kp_b_iso88591_WWX_1_aq_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[730])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24647};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[731] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_info, __pyx_mstate->__pyx_kp_b_iso88591_7R_1M_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[731])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24666};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_profile, __pyx_mstate->__pyx_n_u_eng_profile};
    __pyx_mstate_global->__pyx_codeobj_tab[732] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_compute_instanc, __pyx_mstate->__pyx_kp_b_iso88591_21Na_55_ggnnwwx_uA_B_P_ggttu_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[732])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24688};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[733] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_compute_instanc_2, __pyx_mstate->__pyx_kp_b_iso88591_A_A_B_Gq_Uccooppq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[733])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24707};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[734] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_compute_instanc_3, __pyx_mstate->__pyx_kp_b_iso88591_H_Vddppv_w_F_F_G_Qa_Gq_Qa_ggqqz, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[734])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24730};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[735] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_create_compute_inst, __pyx_mstate->__pyx_kp_b_iso88591_llm_9_Uaabbc_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[735])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24749};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_profile_id, __pyx_mstate->__pyx_n_u_placement};
    __pyx_mstate_global->__pyx_codeobj_tab[736] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_create_compute_inst_2, __pyx_mstate->__pyx_kp_b_iso88591_P_P_Q_Fa_Tbbn_o_V_V_a_a_b_b_c_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[736])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24769};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_compute_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[737] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_compute_instance_destroy, __pyx_mstate->__pyx_kp_b_iso88591_q0A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[737])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24782};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_id};
    __pyx_mstate_global->__pyx_codeobj_tab[738] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_compute_instanc_4, __pyx_mstate->__pyx_kp_b_iso88591_ggh_VZZ_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[738])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24801};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_compute_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[739] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_compute_instance_get_info_v2, __pyx_mstate->__pyx_kp_b_iso88591_H_SZZccd_2_3DDVVW_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[739])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24820};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[740] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_is_mig_device_handle, __pyx_mstate->__pyx_kp_b_iso88591_H_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[740])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24838};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[741] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instance_id, __pyx_mstate->__pyx_kp_b_iso88591_Gq_q_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[741])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24856};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[742] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_compute_instance_id, __pyx_mstate->__pyx_kp_b_iso88591_K1_31HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[742])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24874};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[743] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_max_mig_device_count, __pyx_mstate->__pyx_kp_b_iso88591_LA_31HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[743])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24892};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_ind_ex};
    __pyx_mstate_global->__pyx_codeobj_tab[744] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_mig_device_handle_by, __pyx_mstate->__pyx_kp_b_iso88591_ccd_8_QRRS_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[744])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24911};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_mig_device};
    __pyx_mstate_global->__pyx_codeobj_tab[745] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_device_handle_from_mi, __pyx_mstate->__pyx_kp_b_iso88591_A_VWWX_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[745])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24929};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_gpm_sample};
    __pyx_mstate_global->__pyx_codeobj_tab[746] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpm_sample_get, __pyx_mstate->__pyx_kp_b_iso88591_Qhhk, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[746])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24943};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_gpu_instance_id, __pyx_mstate->__pyx_n_u_gpm_sample};
    __pyx_mstate_global->__pyx_codeobj_tab[747] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpm_mig_sample_get, __pyx_mstate->__pyx_kp_b_iso88591_9J_UV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[747])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24958};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[748] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpm_query_device_support, __pyx_mstate->__pyx_kp_b_iso88591_Zq_V___axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[748])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24978};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[749] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpm_query_if_streaming_enabled, __pyx_mstate->__pyx_kp_b_iso88591_K1_31HHAQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[749])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 24996};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_state};
    __pyx_mstate_global->__pyx_codeobj_tab[750] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpm_set_streaming_enabled, __pyx_mstate->__pyx_kp_b_iso88591_q_3, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[750])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25010};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[751] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_capabilities, __pyx_mstate->__pyx_kp_b_iso88591_a_FkQXXaab_7uA_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[751])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25030};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_requested_profiles};
    __pyx_mstate_global->__pyx_codeobj_tab[752] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_workload_power_profile_cl, __pyx_mstate->__pyx_kp_b_iso88591_I_RZ_I_I_J, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[752])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25044};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile};
    __pyx_mstate_global->__pyx_codeobj_tab[753] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_power_smoothing_activate, __pyx_mstate->__pyx_kp_b_iso88591_B_88Sqqr, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[753])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25058};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile};
    __pyx_mstate_global->__pyx_codeobj_tab[754] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_power_smoothing_update_pr, __pyx_mstate->__pyx_kp_b_iso88591_EQhhVttu, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[754])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25072};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_state};
    __pyx_mstate_global->__pyx_codeobj_tab[755] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_power_smoothing_set_state, __pyx_mstate->__pyx_kp_b_iso88591_5QhhFbbc, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[755])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25086};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[756] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_addressing_mode, __pyx_mstate->__pyx_kp_b_iso88591_B_J_U_eef_9_a_0_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[756])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25106};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[757] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_repair_status, __pyx_mstate->__pyx_kp_b_iso88591_O1_C_N_ggh_q_axxq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[757])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25126};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[758] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_power_mizer_mode_v1, __pyx_mstate->__pyx_kp_b_iso88591_98PPQ_ii_F_F_G_31HHA_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[758])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25145};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_power_mizer_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[759] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_power_mizer_mode_v1, __pyx_mstate->__pyx_kp_b_iso88591_31HHDeef, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[759])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25159};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[760] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_pdi, __pyx_mstate->__pyx_kp_b_iso88591_q_uA_Qhha_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[760])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25179};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[761] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_nvlink_info, __pyx_mstate->__pyx_kp_b_iso88591_a_6k_QR_uA_AXXQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[761])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25199};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_buffer};
    __pyx_mstate_global->__pyx_codeobj_tab[762] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_read_write_prm_v1, __pyx_mstate->__pyx_kp_b_iso88591_axx_QQR, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[762])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25213};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[763] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instance_profile_2, __pyx_mstate->__pyx_kp_b_iso88591_Fa_11TT__ffoop_5_QhhlZ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[763])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25234};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_cpuNumber};
    __pyx_mstate_global->__pyx_codeobj_tab[764] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_topology_gpu_set, __pyx_mstate->__pyx_kp_b_iso88591_0_O7RS_Qa_uAS_1_6_i_A_URVVXXY_f, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[764])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25256};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[765] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_system_get_driver_branch, __pyx_mstate->__pyx_kp_b_iso88591_5_q_aq_a_q_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[765])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25274};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_unit};
    __pyx_mstate_global->__pyx_codeobj_tab[766] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_unit_get_devices, __pyx_mstate->__pyx_kp_b_iso88591_q_q_fO_XY_Qa_3c_6_i_A_URVVXXY_f, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[766])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25296};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_level};
    __pyx_mstate_global->__pyx_codeobj_tab[767] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_topology_nearest_gpus, __pyx_mstate->__pyx_kp_b_iso88591_5Q_A_A_Qa_uAS_1_6_i_A_URVVXXY_f, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[767])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25329};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_sensorType};
    __pyx_mstate_global->__pyx_codeobj_tab[768] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_temperature_v, __pyx_mstate->__pyx_kp_b_iso88591_q_q_q_axxq_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[768])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25350};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_size};
    __pyx_mstate_global->__pyx_codeobj_tab[769] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_supported_performance, __pyx_mstate->__pyx_kp_b_iso88591_uCq_6_i_EWEQVVZZ_AWW_cchhi_A_A, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[769])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25373};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[770] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_running_process_detai, __pyx_mstate->__pyx_kp_b_iso88591_I_iWX_9_a_81_881_Qa_s_s_q_A_q_8, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[770])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25404};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_type, __pyx_mstate->__pyx_n_u_last_seen_time_stamp};
    __pyx_mstate_global->__pyx_codeobj_tab[771] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_samples, __pyx_mstate->__pyx_kp_b_iso88591_1A_Oeer_s_D_D_S_S_a_a_b_Qa_Qa_5, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[771])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25429};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_cause};
    __pyx_mstate_global->__pyx_codeobj_tab[772] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_retired_pages_v2, __pyx_mstate->__pyx_kp_b_iso88591_aq_1_BXX__nnz_A_A_B_Qa_z_S_awe9, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[772])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25456};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_last_seen_time_stamp};
    __pyx_mstate_global->__pyx_codeobj_tab[773] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_processes_utilization, __pyx_mstate->__pyx_kp_b_iso88591_98SST_TTddmmn_Q_A_Qa_s_s_q_33LA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[773])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25497};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_hostname};
    __pyx_mstate_global->__pyx_codeobj_tab[774] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_set_hostname_v1, __pyx_mstate->__pyx_kp_b_iso88591_0_s_7_A_j_8_9J_8SVVWWX_Qhhaq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[774])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25516};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[775] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_hostname_v1, __pyx_mstate->__pyx_kp_b_iso88591_Qhhaq_q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[775])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25547};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_values};
    __pyx_mstate_global->__pyx_codeobj_tab[776] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_field_values, __pyx_mstate->__pyx_kp_b_iso88591_0_5WIQ_Cq_Qhhm1_Yaz_a, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[776])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25566};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_values};
    __pyx_mstate_global->__pyx_codeobj_tab[777] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_clear_field_values, __pyx_mstate->__pyx_kp_b_iso88591_0_5WIQ_Cq_q_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[777])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25583};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[778] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_supported_vgpus, __pyx_mstate->__pyx_kp_b_iso88591_Qa_0_A_Qa_y_Cq_6_i_EWEQVVZZ_fAW, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[778])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25605};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[779] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_creatable_vgpus, __pyx_mstate->__pyx_kp_b_iso88591_Qa_0_A_Qa_y_Cq_6_i_EWEQVVZZ_fAW, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[779])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25627};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[780] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_active_vgpus, __pyx_mstate->__pyx_kp_b_iso88591_Qa_Qhhk_Qa_y_Cq_6_i_EWEQVVZZ_aw, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[780])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25649};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[781] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_vm_id, __pyx_mstate->__pyx_kp_b_iso88591_Q_A_A_PVV_G, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[781])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25667};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[782] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_creatable_vgpus, __pyx_mstate->__pyx_kp_b_iso88591_5Q_C6_RS_6e1_5Qm_QR_Qa_s_S_q_fA, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[782])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25701};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[783] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_active_vgpus, __pyx_mstate->__pyx_kp_b_iso88591_54MQ_00SSaajjk_e1_A_2_a_Qa_s_S, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[783])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25733};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_vgpu_type_id};
    __pyx_mstate_global->__pyx_codeobj_tab[784] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_vgpu_type_creat, __pyx_mstate->__pyx_kp_b_iso88591_B_A___44_rr_B_q_9A_1_B_P___Qa_s, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[784])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 3, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25768};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_vgpu_type_id, __pyx_mstate->__pyx_n_u_mode};
    __pyx_mstate_global->__pyx_codeobj_tab[785] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_type_creatable_p, __pyx_mstate->__pyx_kp_b_iso88591_0_C1_I_W_a_9_a_9A_1_81_QhhN_nno, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[785])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25805};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_instance};
    __pyx_mstate_global->__pyx_codeobj_tab[786] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_vgpu_instance_get_metadata, __pyx_mstate->__pyx_kp_b_iso88591_aq_9_Ya_0_1E_TYYZ_Qa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[786])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25827};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[787] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_metadata, __pyx_mstate->__pyx_kp_b_iso88591_9_aq_A_YVW_axxuA_Qa_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[787])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25849};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_vgpu_metadata, __pyx_mstate->__pyx_n_u_pgpu_metadata};
    __pyx_mstate_global->__pyx_codeobj_tab[788] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_get_vgpu_compatibility, __pyx_mstate->__pyx_kp_b_iso88591_43H_KK_eef_11G_T_55O_eef_Q_AATT, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[788])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {0, 0, 0, 0, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25873};
    PyObject* const varnames[] = {0};
    __pyx_mstate_global->__pyx_codeobj_tab[789] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_get_vgpu_version, __pyx_mstate->__pyx_kp_b_iso88591_A_A_ST_wiq_q_q_Kq, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[789])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25891};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[790] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_instances_utiliz, __pyx_mstate->__pyx_kp_b_iso88591_98WWX_66_hhqqr_DE_A_axxq_Qa_s_S, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[790])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25924};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_last_seen_time_stamp};
    __pyx_mstate_global->__pyx_codeobj_tab[791] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_vgpu_processes_utiliz, __pyx_mstate->__pyx_kp_b_iso88591_66__ooxxy_DE_q_axxq_Qa_s_Cq_q_X, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[791])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25958};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[792] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_gpu_instances, __pyx_mstate->__pyx_kp_b_iso88591_axx_6QR_Qa_uAS_1_7_y_e4rQR_vQgU, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[792])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {2, 0, 0, 2, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 25984};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_gpu_instance, __pyx_mstate->__pyx_n_u_profile_id};
    __pyx_mstate_global->__pyx_codeobj_tab[793] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_gpu_instance_get_compute_instanc_5, __pyx_mstate->__pyx_kp_b_iso88591_7q_S__eef_Qa_uAS_1_7_y_e4rQR_6, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[793])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {1, 0, 0, 1, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 26010};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_device_2};
    __pyx_mstate_global->__pyx_codeobj_tab[794] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_cuda_bindings__nvml_pyx, __pyx_mstate->__pyx_n_u_device_get_sram_unique_uncorrect, __pyx_mstate->__pyx_kp_b_iso88591_dde_mmx_y_B_B_C_K5PQ_a_EQhhVW_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[794])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[795] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_ProcessInfo, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_haq_7_QnN_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[795])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[796] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_ProcessDetail_v1, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_81A_7_2_3FnTU_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[796])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[797] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_BridgeChipInfo, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_7_0_1B_PQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[797])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[798] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_ClkMonFaultInfo, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_1_7_1_2DNRS_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[798])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[799] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_ProcessUtilizatio, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_81A_7_VVdde_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[799])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[800] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_ProcessUtilizatio_2, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_HAQ_7_1_XXffg_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[800])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[801] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_VgpuProcessUtiliz, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_0_7_q_nno_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[801])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[802] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_VgpuSchedulerLogE, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_7_7q8PP___1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[802])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[803] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_HwbcEntry, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_9HAQ_7_1L_a_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[803])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[804] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_UnitFanInfo, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_haq_7_QnN_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[804])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[805] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_EncoderSessionInf, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_XQa_7_4A5J_XY_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[805])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[806] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_FBCSessionInfo, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_7_0_1B_PQ_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[806])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[807] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_GpuInstancePlacem, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_xq_7_6a7Nn_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[807])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[808] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_ComputeInstancePl, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_81A_7_VVdde_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[808])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[809] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_EccSramUniqueUnco, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_8_7_GqHpp_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[809])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[810] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_Sample, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_6_7_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[810])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[811] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_VgpuInstanceUtili, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_1_7_Abbppq_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[811])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[812] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_FieldValue, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_XQa_7_A_1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[812])) goto bad;
  }
  {
    const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 4, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 4};
    PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_pyx_type, __pyx_mstate->__pyx_n_u_pyx_checksum, __pyx_mstate->__pyx_n_u_pyx_state, __pyx_mstate->__pyx_n_u_pyx_result};
    __pyx_mstate_global->__pyx_codeobj_tab[813] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_stringsource, __pyx_mstate->__pyx_n_u_pyx_unpickle_GridLicensableFea, __pyx_mstate->__pyx_kp_b_iso88591_q_0_kQR_7_7q8PP___1, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[813])) goto bad;
  }
  Py_DECREF(tuple_dedup_map);
  return 0;
  bad:
  Py_DECREF(tuple_dedup_map);
  return -1;
}
/* #### Code section: init_globals ### */

/* One-time initialisation of the utility-code subsystems this module uses.
 * Each step follows the generated idiom
 *     if (likely(init() == 0)); else
 *     if (unlikely(PyErr_Occurred())) __PYX_ERR(...)
 * i.e. the PyErr_Occurred() check is the "else" branch and only runs when
 * the init call returned non-zero.  Returns 0 on success, -1 on error. */
static int __Pyx_InitGlobals(void) {
  /* PythonCompatibility.init */
  if (likely(__Pyx_init_co_variables() == 0)); else
  
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)

  /* AssertionsEnabled.init */
  if (likely(__Pyx_init_assertions_enabled() == 0)); else
  
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)

  /* CommonTypesMetaclass.init */
  if (likely(__pyx_CommonTypesMetaclass_init(__pyx_m) == 0)); else
  
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)

  /* CachedMethodType.init */
  #if CYTHON_COMPILING_IN_LIMITED_API
  /* Limited API has no PyMethod_Type; cache types.MethodType instead.
   * An import/getattr failure here is surfaced by the PyErr_Occurred()
   * check below rather than being handled inline. */
  {
      PyObject *typesModule=NULL;
      typesModule = PyImport_ImportModule("types");
      if (typesModule) {
          __pyx_mstate_global->__Pyx_CachedMethodType = PyObject_GetAttrString(typesModule, "MethodType");
          Py_DECREF(typesModule);
      }
  } // error handling follows
  #endif
  
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)

  /* CythonFunctionShared.init */
  if (likely(__pyx_CyFunction_init(__pyx_m) == 0)); else
  
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)

  return 0;
  __pyx_L1_error:;
  return -1;
}
/* #### Code section: cleanup_globals ### */
/* #### Code section: cleanup_module ### */
/* #### Code section: main_method ### */
/* #### Code section: utility_code_pragmas ### */
#ifdef _MSC_VER
#pragma warning( push )
/* Warning 4127: conditional expression is constant
 * Cython uses constant conditional expressions to allow in inline functions to be optimized at
 * compile-time, so this warning is not useful
 */
#pragma warning( disable : 4127 )
#endif



/* #### Code section: utility_code_def ### */

/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Import module "modname" and decode its "RefNannyAPI" attribute (an int
 * holding a pointer) into the RefNanny API struct.  Returns NULL on any
 * failure, leaving the import/getattr error set. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    void *api_ptr = NULL;
    PyObject *module = PyImport_ImportModule(modname);
    if (module) {
        PyObject *api_obj = PyObject_GetAttrString(module, "RefNannyAPI");
        if (api_obj) {
            api_ptr = PyLong_AsVoidPtr(api_obj);
            Py_DECREF(api_obj);
        }
        Py_DECREF(module);
    }
    return (__Pyx_RefNannyAPIStruct *)api_ptr;
}
#endif

/* PyErrExceptionMatches (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
/* Return 1 if exc_type matches any entry of "tuple".  A cheap pointer-identity
 * pass runs first; only then is the subclass-aware matcher consulted. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* Fast PyErr_ExceptionMatches() reading the pending exception straight off
 * "tstate".  Returns 1 on match, 0 otherwise (including no pending error). */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    int result;
    PyObject *exc_type;
#if PY_VERSION_HEX >= 0x030C00A6
    /* 3.12+: thread state stores only the exception value; derive its type. */
    PyObject *current_exception = tstate->current_exception;
    if (unlikely(!current_exception)) return 0;
    exc_type = (PyObject*) Py_TYPE(current_exception);
    if (exc_type == err) return 1;
#else
    exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
#endif
    #if CYTHON_AVOID_BORROWED_REFS
    /* exc_type above is borrowed from the thread state; pin it while matching. */
    Py_INCREF(exc_type);
    #endif
    if (unlikely(PyTuple_Check(err))) {
        result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    } else {
        result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
    }
    #if CYTHON_AVOID_BORROWED_REFS
    Py_DECREF(exc_type);
    #endif
    return result;
}
#endif

/* PyErrFetchRestore (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
/* Restore a previously fetched (type, value, tb) triple into "tstate",
 * stealing all three references.  On 3.12+ only the value is stored, so the
 * type and traceback references are released after attaching the traceback
 * to the value. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
#if PY_VERSION_HEX >= 0x030C00A6
    PyObject *tmp_value;
    assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value)));
    if (value) {
        #if CYTHON_COMPILING_IN_CPYTHON
        /* Skip the call when the traceback is already attached. */
        if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb))
        #endif
            PyException_SetTraceback(value, tb);
    }
    tmp_value = tstate->current_exception;
    tstate->current_exception = value;
    Py_XDECREF(tmp_value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
#else
    /* Pre-3.12: swap in the new triple, then drop the old one.  The swap
     * happens before the decrefs so destructors cannot observe a
     * half-updated thread state. */
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#endif
}
/* Fetch and clear the pending exception from "tstate", returning owned
 * references (any of which may come back NULL). */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if PY_VERSION_HEX >= 0x030C00A6
    PyObject* exc_value;
    exc_value = tstate->current_exception;
    tstate->current_exception = 0;
    *value = exc_value;
    *type = NULL;
    *tb = NULL;
    if (exc_value) {
        /* Reconstruct the legacy (type, value, tb) triple from the value. */
        *type = (PyObject*) Py_TYPE(exc_value);
        Py_INCREF(*type);
        #if CYTHON_COMPILING_IN_CPYTHON
        *tb = ((PyBaseExceptionObject*) exc_value)->traceback;
        Py_XINCREF(*tb);
        #else
        *tb = PyException_GetTraceback(exc_value);
        #endif
    }
#else
    /* Pre-3.12: take over the stored references directly. */
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#endif
}
#endif

/* PyObjectGetAttrStr (used by PyObjectGetAttrStrNoError) */
#if CYTHON_USE_TYPE_SLOTS
/* Attribute lookup that dispatches straight through the type's tp_getattro
 * slot when available, falling back to the generic PyObject_GetAttr. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    getattrofunc getter = Py_TYPE(obj)->tp_getattro;
    if (unlikely(!getter))
        return PyObject_GetAttr(obj, attr_name);
    return getter(obj, attr_name);
}
#endif

/* PyObjectGetAttrStrNoError (used by GetBuiltinName) */
#if __PYX_LIMITED_VERSION_HEX < 0x030d0000
/* Clear a pending AttributeError (only); any other exception is left set. */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        __Pyx_PyErr_Clear();
}
#endif
/* getattr() that returns NULL without raising when the attribute is missing
 * (AttributeError is swallowed; other errors propagate). */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
    PyObject *result;
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    /* 3.13+ provides this exact semantic natively. */
    (void) PyObject_GetOptionalAttr(obj, attr_name, &result);
    return result;
#else
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
        /* Fast path: suppress_missing=1 avoids raising AttributeError at all. */
        return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
    }
#endif
    result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
    if (unlikely(!result)) {
        __Pyx_PyObject_GetAttrStr_ClearAttributeError();
    }
    return result;
#endif
}

/* GetBuiltinName */
/* Look "name" up on the cached builtins object; raise NameError if absent
 * (unless the lookup itself already set a different error). */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject *builtins = __pyx_mstate_global->__pyx_b;
    PyObject *value = __Pyx_PyObject_GetAttrStrNoError(builtins, name);
    if (unlikely(!value) && !PyErr_Occurred()) {
        PyErr_Format(PyExc_NameError,
            "name '%U' is not defined", name);
    }
    return value;
}

/* TupleAndListFromArray (used by fastcall) */
#if !CYTHON_COMPILING_IN_CPYTHON && CYTHON_METH_FASTCALL
/* Build a tuple from a C array of object pointers (non-CPython build:
 * must go through the SET_ITEM API, which steals a reference on success —
 * hence the compensating Py_INCREF after each successful store). */
static CYTHON_INLINE PyObject *
__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n)
{
    PyObject *res;
    Py_ssize_t i;
    if (n <= 0) {
        /* Reuse the cached empty tuple rather than allocating. */
        return __Pyx_NewRef(__pyx_mstate_global->__pyx_empty_tuple);
    }
    res = PyTuple_New(n);
    if (unlikely(res == NULL)) return NULL;
    for (i = 0; i < n; i++) {
        if (unlikely(__Pyx_PyTuple_SET_ITEM(res, i, src[i]) < (0))) {
            Py_DECREF(res);
            return NULL;
        }
        Py_INCREF(src[i]);
    }
    return res;
}
#elif CYTHON_COMPILING_IN_CPYTHON
/* Copy "length" pointers into "dest", increfing each (CPython-only: callers
 * write directly into the tuple/list item arrays). */
static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) {
    PyObject *v;
    Py_ssize_t i;
    for (i = 0; i < length; i++) {
        v = dest[i] = src[i];
        Py_INCREF(v);
    }
}
/* CPython fast path: allocate and memcpy-style fill the tuple's ob_item. */
static CYTHON_INLINE PyObject *
__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n)
{
    PyObject *res;
    if (n <= 0) {
        return __Pyx_NewRef(__pyx_mstate_global->__pyx_empty_tuple);
    }
    res = PyTuple_New(n);
    if (unlikely(res == NULL)) return NULL;
    __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n);
    return res;
}
/* CPython fast path: same as above for lists. */
static CYTHON_INLINE PyObject *
__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n)
{
    PyObject *res;
    if (n <= 0) {
        return PyList_New(0);
    }
    res = PyList_New(n);
    if (unlikely(res == NULL)) return NULL;
    __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n);
    return res;
}
#endif

/* BytesEquals (used by UnicodeEquals) */
/* Compare s1 and s2 for equality ("equals" is Py_EQ or Py_NE).
 * Returns 1/0 like PyObject_RichCompareBool, or -1 on error.  On CPython
 * this short-circuits via identity, length, first byte, and cached hashes
 * before falling back to memcmp; mixed-type operands defer to the generic
 * rich comparison. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_GRAAL ||\
        !(CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS)
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        /* Identical objects are trivially equal. */
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {
            /* First-byte mismatch: decide without touching the rest. */
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result;
#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000)
            /* Differing cached hashes (-1 means "not yet computed") prove
             * inequality without a byte comparison. */
            Py_hash_t hash1, hash2;
            hash1 = ((PyBytesObject*)s1)->ob_shash;
            hash2 = ((PyBytesObject*)s2)->ob_shash;
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                return (equals == Py_NE);
            }
#endif
            result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        /* Mixed or subclassed types: defer to the full protocol. */
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}

/* UnicodeEquals (used by fastcall) */
/* Compare unicode objects s1 and s2 ("equals" is Py_EQ or Py_NE).
 * Returns 1/0 like PyObject_RichCompareBool, or -1 on error.  Fast paths:
 * identity, length, cached hash, storage kind, first character — then a raw
 * memcmp of the canonical character data.  Mixed/subclassed operands fall
 * back to the generic rich comparison. */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_GRAAL
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length, length2;
        int kind;
        void *data1, *data2;
        #if !CYTHON_COMPILING_IN_LIMITED_API
        /* Force the canonical (compact) representation before peeking inside. */
        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
            return -1;
        #endif
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        #if !CYTHON_ASSUME_SAFE_SIZE
        if (unlikely(length < 0)) return -1;
        #endif
        length2 = __Pyx_PyUnicode_GET_LENGTH(s2);
        #if !CYTHON_ASSUME_SAFE_SIZE
        if (unlikely(length2 < 0)) return -1;
        #endif
        if (length != length2) {
            goto return_ne;
        }
#if CYTHON_USE_UNICODE_INTERNALS
        {
            /* Differing cached hashes (-1 = uncomputed) prove inequality. */
            Py_hash_t hash1, hash2;
            hash1 = ((PyASCIIObject*)s1)->hash;
            hash2 = ((PyASCIIObject*)s2)->hash;
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                goto return_ne;
            }
        }
#endif
        /* Different storage widths (1/2/4 bytes per char) with equal length
         * means different character content for canonical strings. */
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            int result = memcmp(data1, data2, (size_t)(length * kind));
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        /* Mixed or subclassed types: defer to the full protocol. */
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    return (equals == Py_EQ);
return_ne:
    return (equals == Py_NE);
#endif
}

/* fastcall */
#if CYTHON_METH_FASTCALL
/* Find keyword "s" in the vectorcall kwnames tuple and return the matching
 * (borrowed) value from kwvalues, or NULL if absent or on error.  Interned
 * names make the pointer-identity pass hit almost always; the string-equality
 * pass is the slow fallback. */
static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s)
{
    Py_ssize_t i, n = __Pyx_PyTuple_GET_SIZE(kwnames);
    #if !CYTHON_ASSUME_SAFE_SIZE
    if (unlikely(n == -1)) return NULL;
    #endif
    for (i = 0; i < n; i++)
    {
        PyObject *namei = __Pyx_PyTuple_GET_ITEM(kwnames, i);
        #if !CYTHON_ASSUME_SAFE_MACROS
        if (unlikely(!namei)) return NULL;
        #endif
        if (s == namei) return kwvalues[i];
    }
    for (i = 0; i < n; i++)
    {
        PyObject *namei = __Pyx_PyTuple_GET_ITEM(kwnames, i);
        #if !CYTHON_ASSUME_SAFE_MACROS
        if (unlikely(!namei)) return NULL;
        #endif
        int eq = __Pyx_PyUnicode_Equals(s, namei, Py_EQ);
        if (unlikely(eq != 0)) {
            /* eq < 0 is a comparison error; eq == 1 is a match. */
            if (unlikely(eq < 0)) return NULL;
            return kwvalues[i];
        }
    }
    return NULL;
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 || CYTHON_COMPILING_IN_LIMITED_API
/* Materialise vectorcall-style (kwnames, kwvalues) into a new dict.
 * Returns a new reference, or NULL with an error set. */
CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues) {
    Py_ssize_t i, nkwargs;
    PyObject *dict;
#if !CYTHON_ASSUME_SAFE_SIZE
    nkwargs = PyTuple_Size(kwnames);
    if (unlikely(nkwargs < 0)) return NULL;
#else
    nkwargs = PyTuple_GET_SIZE(kwnames);
#endif
    dict = PyDict_New();
    if (unlikely(!dict))
        return NULL;
    for (i=0; i<nkwargs; i++) {
#if !CYTHON_ASSUME_SAFE_MACROS
        PyObject *key = PyTuple_GetItem(kwnames, i);
        if (!key) goto bad;
#else
        PyObject *key = PyTuple_GET_ITEM(kwnames, i);
#endif
        if (unlikely(PyDict_SetItem(dict, key, kwvalues[i]) < 0))
            goto bad;
    }
    return dict;
bad:
    Py_DECREF(dict);
    return NULL;
}
#endif
#endif

/* PyObjectCall (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
/* PyObject_Call() equivalent that invokes the tp_call slot directly, with
 * the usual recursion-depth guard and a safety net for C callables that
 * return NULL without setting an exception. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *ret;
    ternaryfunc call_slot = Py_TYPE(func)->tp_call;
    if (unlikely(!call_slot))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall(" while calling a Python object")))
        return NULL;
    ret = call_slot(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(!ret) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return ret;
}
#endif

/* PyObjectCallMethO (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call a METH_O / METH_NOARGS builtin-function object directly through its
 * stored PyCFunction pointer, with recursion checking and a guard against
 * NULL-without-error results. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *ret;
    PyCFunction meth = __Pyx_CyOrPyCFunction_GET_FUNCTION(func);
    PyObject *bound_self = __Pyx_CyOrPyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall(" while calling a Python object")))
        return NULL;
    ret = meth(bound_self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!ret) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return ret;
}
#endif

/* PyObjectFastCall (used by PyObjectCallOneArg) */
#if PY_VERSION_HEX < 0x03090000 || CYTHON_COMPILING_IN_LIMITED_API
/* Fallback for platforms without PyObject_VectorcallDict: pack the C array
 * of arguments into a tuple and go through the tp_call path. */
static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject * const*args, size_t nargs, PyObject *kwargs) {
    PyObject *argstuple;
    PyObject *result = 0;
    size_t i;
    argstuple = PyTuple_New((Py_ssize_t)nargs);
    if (unlikely(!argstuple)) return NULL;
    for (i = 0; i < nargs; i++) {
        /* INCREF first: SET_ITEM steals the reference on success. */
        Py_INCREF(args[i]);
        if (__Pyx_PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]) != (0)) goto bad;
    }
    result = __Pyx_PyObject_Call(func, argstuple, kwargs);
  bad:
    Py_DECREF(argstuple);
    return result;
}
#endif
#if CYTHON_VECTORCALL && !CYTHON_COMPILING_IN_LIMITED_API
  #if PY_VERSION_HEX < 0x03090000
    #define __Pyx_PyVectorcall_Function(callable) _PyVectorcall_Function(callable)
  #elif CYTHON_COMPILING_IN_CPYTHON
/* Return the callable's vectorcall slot, or NULL if it has none.
 * Cython's own function type is special-cased before the generic flag test. */
static CYTHON_INLINE vectorcallfunc __Pyx_PyVectorcall_Function(PyObject *callable) {
    PyTypeObject *tp = Py_TYPE(callable);
    #if defined(__Pyx_CyFunction_USED)
    if (__Pyx_CyFunction_CheckExact(callable)) {
        return __Pyx_CyFunction_func_vectorcall(callable);
    }
    #endif
    if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) {
        return NULL;
    }
    assert(PyCallable_Check(callable));
    Py_ssize_t offset = tp->tp_vectorcall_offset;
    assert(offset > 0);
    /* memcpy instead of a cast-and-deref keeps this strict-aliasing clean. */
    vectorcallfunc ptr;
    memcpy(&ptr, (char *) callable + offset, sizeof(ptr));
    return ptr;
}
  #else
    #define __Pyx_PyVectorcall_Function(callable) PyVectorcall_Function(callable)
  #endif
#endif
/* Call "func" with "args"/"kwargs", choosing the fastest available protocol:
 * direct METH_NOARGS/METH_O dispatch, then vectorcall, then tuple-based
 * tp_call.  "_nargs" may carry PY_VECTORCALL_ARGUMENTS_OFFSET. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject *const *args, size_t _nargs, PyObject *kwargs) {
    Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs);
#if CYTHON_COMPILING_IN_CPYTHON
    if (nargs == 0 && kwargs == NULL) {
        if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_NOARGS))
            return __Pyx_PyObject_CallMethO(func, NULL);
    }
    else if (nargs == 1 && kwargs == NULL) {
        if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_O))
            return __Pyx_PyObject_CallMethO(func, args[0]);
    }
#endif
    if (kwargs == NULL) {
        #if CYTHON_VECTORCALL
          #if CYTHON_COMPILING_IN_LIMITED_API
            return PyObject_Vectorcall(func, args, _nargs, NULL);
          #else
            vectorcallfunc f = __Pyx_PyVectorcall_Function(func);
            if (f) {
                return f(func, args, _nargs, NULL);
            }
          #endif
        #endif
    }
    if (nargs == 0) {
        return __Pyx_PyObject_Call(func, __pyx_mstate_global->__pyx_empty_tuple, kwargs);
    }
    #if PY_VERSION_HEX >= 0x03090000 && !CYTHON_COMPILING_IN_LIMITED_API
    return PyObject_VectorcallDict(func, args, (size_t)nargs, kwargs);
    #else
    return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs);
    #endif
}

/* PyObjectCallOneArg (used by CallUnboundCMethod0) */
/* Vectorcall "func" with exactly one argument.  Slot [0] is reserved scratch
 * space so that ARGUMENTS_OFFSET lets callees prepend a self argument without
 * copying the array. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *callargs[2];
    callargs[0] = NULL;
    callargs[1] = arg;
    return __Pyx_PyObject_FastCall(func, &callargs[1], 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET);
}

/* UnpackUnboundCMethod (used by CallUnboundCMethod0) */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030C0000
/* Wrapper used to turn a bound builtin method back into an "unbound" one:
 * drops args[0] (the redundant self) and forwards the rest. */
static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) {
    PyObject *result;
    PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args));
    if (unlikely(!selfless_args)) return NULL;
    result = PyObject_Call(method, selfless_args, kwargs);
    Py_DECREF(selfless_args);
    return result;
}
#elif CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03090000
/* Same idea, fastcall flavour (old-PyPy spelling of vectorcall). */
static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject **args, Py_ssize_t nargs, PyObject *kwnames) {
        return _PyObject_Vectorcall
            (method, args ? args+1 : NULL, nargs ? nargs-1 : 0, kwnames);
}
#else
/* Same idea, vectorcall flavour: shift the argument window right by one. */
static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) {
    return
#if PY_VERSION_HEX < 0x03090000
    _PyObject_Vectorcall
#else
    PyObject_Vectorcall
#endif
        (method, args ? args+1 : NULL, nargs ? (size_t) nargs-1 : 0, kwnames);
}
#endif
/* Method definition backing the self-dropping wrapper above; its calling
 * convention must match whichever __Pyx_SelflessCall variant was compiled. */
static PyMethodDef __Pyx_UnboundCMethod_Def = {
     "CythonUnboundCMethod",
     __PYX_REINTERPRET_FUNCION(PyCFunction, __Pyx_SelflessCall),
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030C0000
     METH_VARARGS | METH_KEYWORDS,
#else
     METH_FASTCALL | METH_KEYWORDS,
#endif
     NULL
};
/* Resolve target->method_name on target->type and cache the result in
 * "target" for fast repeated calls.  On CPython, a method descriptor is
 * unpacked into its raw C function pointer + flags; a builtin function that
 * is already bound to an object gets wrapped so the cached callable ignores
 * the duplicated self.  Returns 0 on success, -1 with an error set. */
static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) {
    PyObject *method, *result=NULL;
    method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name);
    if (unlikely(!method))
        return -1;
    result = method;
#if CYTHON_COMPILING_IN_CPYTHON
    if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type)))
    {
        /* Unbound descriptor: cache the raw ml_meth pointer for direct calls. */
        PyMethodDescrObject *descr = (PyMethodDescrObject*) method;
        target->func = descr->d_method->ml_meth;
        target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS);
    } else
#endif
#if CYTHON_COMPILING_IN_PYPY
#else
    if (PyCFunction_Check(method))
#endif
    {
        PyObject *self;
        int self_found;
#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY
        self = PyObject_GetAttrString(method, "__self__");
        if (!self) {
            PyErr_Clear();
        }
#else
        self = PyCFunction_GET_SELF(method);
#endif
        self_found = (self && self != Py_None);
#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY
        Py_XDECREF(self);
#endif
        if (self_found) {
            /* Already-bound builtin: wrap so the explicit self arg is dropped. */
            PyObject *unbound_method = PyCFunction_New(&__Pyx_UnboundCMethod_Def, method);
            if (unlikely(!unbound_method)) return -1;
            Py_DECREF(method);
            result = unbound_method;
        }
    }
#if !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    /* Another thread may have filled the cache meanwhile; keep the winner. */
    if (unlikely(target->method)) {
        Py_DECREF(result);
    } else
#endif
    target->method = result;
    return 0;
}

/* CallUnboundCMethod0 */
/* CallUnboundCMethod0 */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call the cached unbound C method with no arguments beyond "self".
 * When the cache is fully initialised (state 2) the raw C function pointer
 * is invoked directly per its METH_* convention; otherwise we fall through
 * to the generic slow path and then mark initialisation complete. */
static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) {
    int was_initialized = __Pyx_CachedCFunction_GetAndSetInitializing(cfunc);
    if (likely(was_initialized == 2 && cfunc->func)) {
        if (likely(cfunc->flag == METH_NOARGS))
            return __Pyx_CallCFunction(cfunc, self, NULL);
        if (likely(cfunc->flag == METH_FASTCALL))
            return __Pyx_CallCFunctionFast(cfunc, self, NULL, 0);
        if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS))
            return __Pyx_CallCFunctionFastWithKeywords(cfunc, self, NULL, 0, NULL);
        if (likely(cfunc->flag == (METH_VARARGS | METH_KEYWORDS)))
            return __Pyx_CallCFunctionWithKeywords(cfunc, self, __pyx_mstate_global->__pyx_empty_tuple, NULL);
        if (cfunc->flag == METH_VARARGS)
            return __Pyx_CallCFunction(cfunc, self, __pyx_mstate_global->__pyx_empty_tuple);
        return __Pyx__CallUnboundCMethod0(cfunc, self);
    }
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    else if (unlikely(was_initialized == 1)) {
        /* Another thread is mid-initialisation: use a private throwaway
         * cache so we neither race on it nor block. */
        __Pyx_CachedCFunction tmp_cfunc = {
#ifndef __cplusplus
            0
#endif
        };
        tmp_cfunc.type = cfunc->type;
        tmp_cfunc.method_name = cfunc->method_name;
        return __Pyx__CallUnboundCMethod0(&tmp_cfunc, self);
    }
#endif
    PyObject *result = __Pyx__CallUnboundCMethod0(cfunc, self);
    __Pyx_CachedCFunction_SetFinishedInitializing(cfunc);
    return result;
}
#endif
/* Slow path: resolve/cache the method if needed, then call it with "self"
 * as the single positional argument. */
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) {
    PyObject *result;
    if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
    result = __Pyx_PyObject_CallOneArg(cfunc->method, self);
    return result;
}

/* py_dict_items (used by OwnedDictNext) */
/* Call d.items() through the cached unbound dict.items fast path. */
static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) {
    __Pyx_CachedCFunction *cached_items = &__pyx_mstate_global->__pyx_umethod_PyDict_Type_items;
    return __Pyx_CallUnboundCMethod0(cached_items, d);
}

/* py_dict_values (used by OwnedDictNext) */
/* Call d.values() through the cached unbound dict.values fast path. */
static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) {
    __Pyx_CachedCFunction *cached_values = &__pyx_mstate_global->__pyx_umethod_PyDict_Type_values;
    return __Pyx_CallUnboundCMethod0(cached_values, d);
}

/* OwnedDictNext (used by ParseKeywordsImpl) */
#if CYTHON_AVOID_BORROWED_REFS
/* PyDict_Next() replacement that hands back new (owned) references.
 * "*ppos" holds an owned iterator object (created lazily on the first call)
 * instead of an integer position.  Returns 1 while items remain, 0 when the
 * iteration is exhausted.  Errors are reported as unraisable and mapped to
 * "exhausted", mirroring PyDict_Next()'s inability to signal failure. */
static int __Pyx_PyDict_NextRef(PyObject *p, PyObject **ppos, PyObject **pkey, PyObject **pvalue) {
    PyObject *next = NULL;
    if (!*ppos) {
        /* First call: iterate items()/values() depending on what the caller
         * asked for; key-only iteration uses the dict itself. */
        if (pvalue) {
            PyObject *dictview = pkey ? __Pyx_PyDict_Items(p) : __Pyx_PyDict_Values(p);
            if (unlikely(!dictview)) goto bad;
            *ppos = PyObject_GetIter(dictview);
            Py_DECREF(dictview);
        } else {
            *ppos = PyObject_GetIter(p);
        }
        if (unlikely(!*ppos)) goto bad;
    }
    next = PyIter_Next(*ppos);
    if (!next) {
        if (PyErr_Occurred()) goto bad;
        return 0;
    }
    if (pkey && pvalue) {
        /* "next" is a (key, value) pair; unpack into owned references.
         * BUG FIX: these NULL checks were inverted ("if (*pkey) goto bad;"),
         * which jumped to the error path on success and continued with a
         * NULL key/value after a failed extraction. */
        *pkey = __Pyx_PySequence_ITEM(next, 0);
        if (unlikely(!*pkey)) goto bad;
        *pvalue = __Pyx_PySequence_ITEM(next, 1);
        if (unlikely(!*pvalue)) {
            Py_CLEAR(*pkey);  /* don't leak the already-extracted key */
            goto bad;
        }
        Py_DECREF(next);
    } else if (pkey) {
        *pkey = next;
    } else {
        assert(pvalue);
        *pvalue = next;
    }
    return 1;
  bad:
    Py_XDECREF(next);
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d0000
    PyErr_FormatUnraisable("Exception ignored in __Pyx_PyDict_NextRef");
#else
    PyErr_WriteUnraisable(__pyx_mstate_global->__pyx_n_u_Pyx_PyDict_NextRef);
#endif
    if (pkey) *pkey = NULL;
    if (pvalue) *pvalue = NULL;
    return 0;
}
#else // !CYTHON_AVOID_BORROWED_REFS
/* Borrowed-refs build: plain PyDict_Next() plus INCREFs so that callers own
 * the returned key/value references in either build mode. */
static int __Pyx_PyDict_NextRef(PyObject *p, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue) {
    int result = PyDict_Next(p, ppos, pkey, pvalue);
    if (likely(result == 1)) {
        if (pkey) Py_INCREF(*pkey);
        if (pvalue) Py_INCREF(*pvalue);
    }
    return result;
}
#endif

/* RaiseDoubleKeywords (used by ParseKeywordsImpl) */
/* Raise TypeError: argument "kw_name" of func_name() was supplied both
 * positionally and by keyword. */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
}

/* CallUnboundCMethod2 */
/* CallUnboundCMethod2 */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call the cached unbound C method with "self" plus two arguments.
 * Fully initialised caches (state 2) dispatch straight through the raw
 * C function pointer for the fastcall conventions; everything else falls
 * through to the generic slow path. */
static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) {
    int was_initialized = __Pyx_CachedCFunction_GetAndSetInitializing(cfunc);
    if (likely(was_initialized == 2 && cfunc->func)) {
        PyObject *args[2] = {arg1, arg2};
        if (cfunc->flag == METH_FASTCALL) {
            return __Pyx_CallCFunctionFast(cfunc, self, args, 2);
        }
        if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS))
            return __Pyx_CallCFunctionFastWithKeywords(cfunc, self, args, 2, NULL);
    }
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    else if (unlikely(was_initialized == 1)) {
        /* Another thread is mid-initialisation: use a private throwaway
         * cache so we neither race on it nor block. */
        __Pyx_CachedCFunction tmp_cfunc = {
#ifndef __cplusplus
            0
#endif
        };
        tmp_cfunc.type = cfunc->type;
        tmp_cfunc.method_name = cfunc->method_name;
        return __Pyx__CallUnboundCMethod2(&tmp_cfunc, self, arg1, arg2);
    }
#endif
    PyObject *result = __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2);
    __Pyx_CachedCFunction_SetFinishedInitializing(cfunc);
    return result;
}
#endif
static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){
    if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
#if CYTHON_COMPILING_IN_CPYTHON
    if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
        PyObject *result = NULL;
        PyObject *args = PyTuple_New(2);
        if (unlikely(!args)) return NULL;
        Py_INCREF(arg1);
        PyTuple_SET_ITEM(args, 0, arg1);
        Py_INCREF(arg2);
        PyTuple_SET_ITEM(args, 1, arg2);
        if (cfunc->flag & METH_KEYWORDS)
            result = __Pyx_CallCFunctionWithKeywords(cfunc, self, args, NULL);
        else
            result = __Pyx_CallCFunction(cfunc, self, args);
        Py_DECREF(args);
        return result;
    }
#endif
    {
        PyObject *args[4] = {NULL, self, arg1, arg2};
        return __Pyx_PyObject_FastCall(cfunc->method, args+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET);
    }
}

/* ParseKeywordsImpl (used by ParseKeywords) */
/* Check that none of the positional-argument names (argnames[0]..first_kw_arg)
 * also appears as a key in `kwds`.  Returns 0 if there is no clash, -1 with
 * a TypeError (or a propagated error from PyDict_Contains) otherwise. */
static int __Pyx_ValidateDuplicatePosArgs(
    PyObject *kwds,
    PyObject ** const argnames[],
    PyObject ** const *first_kw_arg,
    const char* function_name)
{
    for (PyObject ** const *name = argnames; name != first_kw_arg; name++) {
        PyObject *key = **name;
        int found = PyDict_Contains(kwds, key);
        if (unlikely(found)) {
            /* found == 1: duplicate value for a positional arg;
             * found == -1: PyDict_Contains already set an error. */
            if (found == 1) __Pyx_RaiseDoubleKeywordsError(function_name, key);
            return -1;
        }
    }
    return 0;
}
#if CYTHON_USE_UNICODE_INTERNALS
/* Exact equality of two unicode objects via direct buffer comparison.
 * Requires both strings to be "ready" canonical unicode (true for interned
 * keyword names); compares length, kind, then raw character data. */
static CYTHON_INLINE int __Pyx_UnicodeKeywordsEqual(PyObject *s1, PyObject *s2) {
    int kind;
    Py_ssize_t len = PyUnicode_GET_LENGTH(s1);
    if (len != PyUnicode_GET_LENGTH(s2)) return 0;
    kind = PyUnicode_KIND(s1);
    /* Canonical representations of equal strings share the same kind. */
    if (kind != PyUnicode_KIND(s2)) return 0;
    const void *data1 = PyUnicode_DATA(s1);
    const void *data2 = PyUnicode_DATA(s2);
    return (memcmp(data1, data2, (size_t) len * (size_t) kind) == 0);
}
#endif
/* Match an exact-unicode keyword `key` against the declared argument names.
 * Returns 1 and sets *index_found (index into argnames) on a match against a
 * keyword-capable argument, 0 if the name is unknown, -1 on error — including
 * the case where `key` names a positional-only slot (double-keywords error).
 * With unicode internals enabled the cached hash is compared first as a
 * cheap pre-filter before the buffer comparison. */
static int __Pyx_MatchKeywordArg_str(
    PyObject *key,
    PyObject ** const argnames[],
    PyObject ** const *first_kw_arg,
    size_t *index_found,
    const char *function_name)
{
    PyObject ** const *name;
    #if CYTHON_USE_UNICODE_INTERNALS
    Py_hash_t key_hash = ((PyASCIIObject*)key)->hash;
    if (unlikely(key_hash == -1)) {
        /* Hash not cached yet (-1 is the "unset" marker): compute it. */
        key_hash = PyObject_Hash(key);
        if (unlikely(key_hash == -1))
            goto bad;
    }
    #endif
    /* Pass 1: arguments that may legally be passed by keyword. */
    name = first_kw_arg;
    while (*name) {
        PyObject *name_str = **name;
        #if CYTHON_USE_UNICODE_INTERNALS
        if (key_hash == ((PyASCIIObject*)name_str)->hash && __Pyx_UnicodeKeywordsEqual(name_str, key)) {
            *index_found = (size_t) (name - argnames);
            return 1;
        }
        #else
        #if CYTHON_ASSUME_SAFE_SIZE
        if (PyUnicode_GET_LENGTH(name_str) == PyUnicode_GET_LENGTH(key))
        #endif
        {
            int cmp = PyUnicode_Compare(name_str, key);
            /* PyUnicode_Compare reports errors via -1 + exception. */
            if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
            if (cmp == 0) {
                *index_found = (size_t) (name - argnames);
                return 1;
            }
        }
        #endif
        name++;
    }
    /* Pass 2: positional-only arguments — a keyword match here means the
     * caller passed the same argument twice. */
    name = argnames;
    while (name != first_kw_arg) {
        PyObject *name_str = **name;
        #if CYTHON_USE_UNICODE_INTERNALS
        if (unlikely(key_hash == ((PyASCIIObject*)name_str)->hash)) {
            if (__Pyx_UnicodeKeywordsEqual(name_str, key))
                goto arg_passed_twice;
        }
        #else
        #if CYTHON_ASSUME_SAFE_SIZE
        if (PyUnicode_GET_LENGTH(name_str) == PyUnicode_GET_LENGTH(key))
        #endif
        {
            if (unlikely(name_str == key)) goto arg_passed_twice;
            int cmp = PyUnicode_Compare(name_str, key);
            if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
            if (cmp == 0) goto arg_passed_twice;
        }
        #endif
        name++;
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
bad:
    return -1;
}
/* Match a keyword `key` that is NOT an exact unicode object (e.g. a unicode
 * subclass) against the declared argument names, using rich comparison.
 * Returns 1 (match, *index_found set), 0 (unknown name), or -1 with an error
 * set (non-string keyword, comparison failure, or duplicate of a positional
 * argument). */
static int __Pyx_MatchKeywordArg_nostr(
    PyObject *key,
    PyObject ** const argnames[],
    PyObject ** const *first_kw_arg,
    size_t *index_found,
    const char *function_name)
{
    if (unlikely(!PyUnicode_Check(key))) {
        PyErr_Format(PyExc_TypeError,
            "%.200s() keywords must be strings", function_name);
        return -1;
    }
    /* Pass 1: keyword-capable arguments. */
    for (PyObject ** const *arg = first_kw_arg; *arg; arg++) {
        int cmp = PyObject_RichCompareBool(**arg, key, Py_EQ);
        if (cmp == 1) {
            *index_found = (size_t) (arg - argnames);
            return 1;
        }
        if (unlikely(cmp == -1)) return -1;
    }
    /* Pass 2: positional-only slots — a match here is a duplicate. */
    for (PyObject ** const *arg = argnames; arg != first_kw_arg; arg++) {
        int cmp = PyObject_RichCompareBool(**arg, key, Py_EQ);
        if (unlikely(cmp != 0)) {
            if (cmp == 1)
                __Pyx_RaiseDoubleKeywordsError(function_name, key);
            return -1;
        }
    }
    return 0;
}
/* Dispatch keyword matching on the key's concrete type: exact `str` objects
 * take the optimized comparison path, anything else the generic one.
 * Return value contract is that of the two workers: 1 match / 0 unknown /
 * -1 error. */
static CYTHON_INLINE int __Pyx_MatchKeywordArg(
    PyObject *key,
    PyObject ** const argnames[],
    PyObject ** const *first_kw_arg,
    size_t *index_found,
    const char *function_name)
{
    if (likely(PyUnicode_CheckExact(key))) {
        return __Pyx_MatchKeywordArg_str(key, argnames, first_kw_arg, index_found, function_name);
    }
    return __Pyx_MatchKeywordArg_nostr(key, argnames, first_kw_arg, index_found, function_name);
}
/* Scan `kwds` (a dict) for the first key that is not a declared argument and
 * raise the appropriate TypeError ("unexpected keyword argument" or "multiple
 * values").  Only ever called when an error is known to exist; the trailing
 * assert documents that an exception must be set on exit. */
static void __Pyx_RejectUnknownKeyword(
    PyObject *kwds,
    PyObject ** const argnames[],
    PyObject ** const *first_kw_arg,
    const char *function_name)
{
    #if CYTHON_AVOID_BORROWED_REFS
    PyObject *pos = NULL;
    #else
    Py_ssize_t pos = 0;
    #endif
    PyObject *key = NULL;
    /* Critical section keeps dict iteration coherent on free-threaded builds. */
    __Pyx_BEGIN_CRITICAL_SECTION(kwds);
    while (
        #if CYTHON_AVOID_BORROWED_REFS
        __Pyx_PyDict_NextRef(kwds, &pos, &key, NULL)
        #else
        PyDict_Next(kwds, &pos, &key, NULL)
        #endif
    ) {
        /* Cheap identity scan over the keyword-capable names first. */
        PyObject** const *name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (!*name) {
            size_t index_found = 0;
            int cmp = __Pyx_MatchKeywordArg(key, argnames, first_kw_arg, &index_found, function_name);
            if (cmp != 1) {
                /* cmp == 0: genuinely unknown keyword; cmp == -1: an error
                 * (incl. double-keywords) is already set by the matcher. */
                if (cmp == 0) {
                    PyErr_Format(PyExc_TypeError,
                        "%s() got an unexpected keyword argument '%U'",
                        function_name, key);
                }
                #if CYTHON_AVOID_BORROWED_REFS
                Py_DECREF(key);
                #endif
                break;
            }
        }
        #if CYTHON_AVOID_BORROWED_REFS
        Py_DECREF(key);
        #endif
    }
    __Pyx_END_CRITICAL_SECTION();
    #if CYTHON_AVOID_BORROWED_REFS
    Py_XDECREF(pos);
    #endif
    assert(PyErr_Occurred());
}
/* Extract declared keyword arguments from the dict `kwds` into `values`
 * (indexed like `argnames`), without a **kwargs catch-all.  Any leftover
 * keys are either tolerated (ignore_unknown_kwargs, after checking for
 * positional duplicates) or rejected with a TypeError.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_ParseKeywordDict(
    PyObject *kwds,
    PyObject ** const argnames[],
    PyObject *values[],
    Py_ssize_t num_pos_args,
    Py_ssize_t num_kwargs,
    const char* function_name,
    int ignore_unknown_kwargs)
{
    PyObject** const *name;
    PyObject** const *first_kw_arg = argnames + num_pos_args;
    Py_ssize_t extracted = 0;
#if !CYTHON_COMPILING_IN_PYPY || defined(PyArg_ValidateKeywordArguments)
    if (unlikely(!PyArg_ValidateKeywordArguments(kwds))) return -1;
#endif
    name = first_kw_arg;
    /* Stop early once every key present in kwds has been claimed. */
    while (*name && num_kwargs > extracted) {
        PyObject * key = **name;
        PyObject *value;
        int found = 0;
        #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
        /* Py 3.13+: single-call lookup returning a new reference. */
        found = PyDict_GetItemRef(kwds, key, &value);
        #else
        value = PyDict_GetItemWithError(kwds, key);
        if (value) {
            Py_INCREF(value);
            found = 1;
        } else {
            if (unlikely(PyErr_Occurred())) goto bad;
        }
        #endif
        if (found) {
            if (unlikely(found < 0)) goto bad;
            /* Ownership of `value` transfers to the values[] slot. */
            values[name-argnames] = value;
            extracted++;
        }
        name++;
    }
    if (num_kwargs > extracted) {
        /* Some keys in kwds did not match any keyword-capable argument. */
        if (ignore_unknown_kwargs) {
            if (unlikely(__Pyx_ValidateDuplicatePosArgs(kwds, argnames, first_kw_arg, function_name) == -1))
                goto bad;
        } else {
            __Pyx_RejectUnknownKeyword(kwds, argnames, first_kw_arg, function_name);
            goto bad;
        }
    }
    return 0;
bad:
    return -1;
}
/* Like __Pyx_ParseKeywordDict, but the function has a **kwargs parameter:
 * copy all of `kwds` into `kwds2`, then pop the declared keyword arguments
 * out of `kwds2` into `values`, leaving only the extras behind.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_ParseKeywordDictToDict(
    PyObject *kwds,
    PyObject ** const argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject** const *name;
    PyObject** const *first_kw_arg = argnames + num_pos_args;
    Py_ssize_t len;
#if !CYTHON_COMPILING_IN_PYPY || defined(PyArg_ValidateKeywordArguments)
    if (unlikely(!PyArg_ValidateKeywordArguments(kwds))) return -1;
#endif
    if (PyDict_Update(kwds2, kwds) < 0) goto bad;
    name = first_kw_arg;
    while (*name) {
        PyObject *key = **name;
        PyObject *value;
#if !CYTHON_COMPILING_IN_LIMITED_API && (PY_VERSION_HEX >= 0x030d00A2 || defined(PyDict_Pop))
        /* Py 3.13+: atomic pop returning a new reference. */
        int found = PyDict_Pop(kwds2, key, &value);
        if (found) {
            if (unlikely(found < 0)) goto bad;
            values[name-argnames] = value;
        }
#elif __PYX_LIMITED_VERSION_HEX >= 0x030d0000
        /* Limited API: emulate pop with get-ref + delete. */
        int found = PyDict_GetItemRef(kwds2, key, &value);
        if (found) {
            if (unlikely(found < 0)) goto bad;
            values[name-argnames] = value;
            if (unlikely(PyDict_DelItem(kwds2, key) < 0)) goto bad;
        }
#else
    #if CYTHON_COMPILING_IN_CPYTHON
        /* `kwds2` doubles as the "missing" sentinel, so a NULL return is
         * always a real error and `value == kwds2` means "not present". */
        value = _PyDict_Pop(kwds2, key, kwds2);
    #else
        value = __Pyx_CallUnboundCMethod2(&__pyx_mstate_global->__pyx_umethod_PyDict_Type_pop, kwds2, key, kwds2);
    #endif
        if (value == kwds2) {
            Py_DECREF(value);
        } else {
            if (unlikely(!value)) goto bad;
            values[name-argnames] = value;
        }
#endif
        name++;
    }
    len = PyDict_Size(kwds2);
    if (len > 0) {
        /* Extras remain for **kwargs: only positional duplicates are errors. */
        return __Pyx_ValidateDuplicatePosArgs(kwds, argnames, first_kw_arg, function_name);
    } else if (unlikely(len == -1)) {
        goto bad;
    }
    return 0;
bad:
    return -1;
}
/* Fastcall variant: `kwds` is a tuple of keyword names and `kwvalues` the
 * parallel array of values.  Match each name against the declared arguments
 * (identity scan first, then full comparison), filling `values` and routing
 * extras into the optional **kwargs dict `kwds2`.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_ParseKeywordsTuple(
    PyObject *kwds,
    PyObject * const *kwvalues,
    PyObject ** const argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    Py_ssize_t num_kwargs,
    const char* function_name,
    int ignore_unknown_kwargs)
{
    PyObject *key = NULL;
    PyObject** const * name;
    PyObject** const *first_kw_arg = argnames + num_pos_args;
    for (Py_ssize_t pos = 0; pos < num_kwargs; pos++) {
#if CYTHON_AVOID_BORROWED_REFS
        key = __Pyx_PySequence_ITEM(kwds, pos);
#else
        key = __Pyx_PyTuple_GET_ITEM(kwds, pos);
#endif
#if !CYTHON_ASSUME_SAFE_MACROS
        if (unlikely(!key)) goto bad;
#endif
        /* Identity scan: keyword names are usually interned, so pointer
         * comparison finds the match without any unicode comparison. */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            PyObject *value = kwvalues[pos];
            values[name-argnames] = __Pyx_NewRef(value);
        } else {
            size_t index_found = 0;
            int cmp = __Pyx_MatchKeywordArg(key, argnames, first_kw_arg, &index_found, function_name);
            if (cmp == 1) {
                PyObject *value = kwvalues[pos];
                values[index_found] = __Pyx_NewRef(value);
            } else {
                if (unlikely(cmp == -1)) goto bad;
                /* Unknown keyword: stash into **kwargs, ignore, or raise. */
                if (kwds2) {
                    PyObject *value = kwvalues[pos];
                    if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
                } else if (!ignore_unknown_kwargs) {
                    goto invalid_keyword;
                }
            }
        }
        #if CYTHON_AVOID_BORROWED_REFS
        Py_DECREF(key);
        key = NULL;
        #endif
    }
    return 0;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    goto bad;
bad:
    #if CYTHON_AVOID_BORROWED_REFS
    Py_XDECREF(key);
    #endif
    return -1;
}

/* ParseKeywords */
/* Entry point for keyword-argument parsing: route to the tuple-based
 * fastcall parser, the dict-with-**kwargs parser, or the plain dict parser,
 * preserving each worker's 0 / -1 return contract. */
static int __Pyx_ParseKeywords(
    PyObject *kwds,
    PyObject * const *kwvalues,
    PyObject ** const argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    Py_ssize_t num_kwargs,
    const char* function_name,
    int ignore_unknown_kwargs)
{
    if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds))) {
        /* Fastcall protocol: names in a tuple, values in `kwvalues`. */
        return __Pyx_ParseKeywordsTuple(kwds, kwvalues, argnames, kwds2, values, num_pos_args, num_kwargs, function_name, ignore_unknown_kwargs);
    }
    if (kwds2) {
        /* Function declares **kwargs: extras are collected, not rejected. */
        return __Pyx_ParseKeywordDictToDict(kwds, argnames, kwds2, values, num_pos_args, function_name);
    }
    return __Pyx_ParseKeywordDict(kwds, argnames, values, num_pos_args, num_kwargs, function_name, ignore_unknown_kwargs);
}

/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}

/* ArgTypeTestFunc (used by ArgTypeTest) */
/* Slow path of the argument type check: returns 1 if `obj` is acceptable,
 * otherwise raises TypeError and returns 0.
 * exact == 0: allow subclasses; exact == 2: exact type required, but if the
 * object is a subclass, append an explanatory note to the error.
 * NOTE(review): for exact == 1 no check is done here — presumably the exact
 * check already failed in the calling macro; confirm against the
 * __Pyx_ArgTypeTest wrapper. */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
    __Pyx_TypeName type_name;
    __Pyx_TypeName obj_type_name;
    PyObject *extra_info = __pyx_mstate_global->__pyx_empty_unicode;
    int from_annotation_subclass = 0;
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    else if (!exact) {
        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
    } else if (exact == 2) {
        if (__Pyx_TypeCheck(obj, type)) {
            /* Subclass of the annotated type: still an error, but explain. */
            from_annotation_subclass = 1;
            extra_info = __pyx_mstate_global->__pyx_kp_u_Note_that_Cython_is_deliberately;
        }
    }
    type_name = __Pyx_PyType_GetFullyQualifiedName(type);
    obj_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(obj));
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected " __Pyx_FMT_TYPENAME
        ", got " __Pyx_FMT_TYPENAME ")"
#if __PYX_LIMITED_VERSION_HEX < 0x030C0000
        "%s%U"
#endif
        , name, type_name, obj_type_name
#if __PYX_LIMITED_VERSION_HEX < 0x030C0000
        , (from_annotation_subclass ? ". " : ""), extra_info
#endif
        );
#if __PYX_LIMITED_VERSION_HEX >= 0x030C0000
    /* Py 3.12+: attach the explanation via BaseException.add_note(). */
    if (exact == 2 && from_annotation_subclass) {
        PyObject *res;
        PyObject *vargs[2];
        vargs[0] = PyErr_GetRaisedException();
        vargs[1] = extra_info;
        res = PyObject_VectorcallMethod(__pyx_mstate_global->__pyx_kp_u_add_note, vargs, 2, NULL);
        Py_XDECREF(res);
        PyErr_SetRaisedException(vargs[0]);
    }
#endif
    __Pyx_DECREF_TypeName(type_name);
    __Pyx_DECREF_TypeName(obj_type_name);
    return 0;
}

/* RaiseException */
/* Implement Python's `raise type, value, tb from cause` semantics:
 * normalize (type, value), instantiate exception classes, validate the
 * traceback, attach the cause, and set the resulting exception.
 * Always "fails" in the sense that an exception is set on return. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* `raise instance`: no separate value allowed. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            /* `raise Class, instance`: keep the instance if it is compatible
             * with (a subclass of) the given class. */
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            /* Instantiate the class with `value` as constructor argument(s). */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        /* `raise ... from cause`: accept None, a class (instantiated here),
         * or an instance. */
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        /* PyException_SetCause steals the reference to fixed_cause. */
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* Attach the explicit traceback, using the cheapest available API. */
#if PY_VERSION_HEX >= 0x030C00A6
        PyException_SetTraceback(value, tb);
#elif CYTHON_FAST_THREAD_STATE
        PyThreadState *tstate = __Pyx_PyThreadState_Current;
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#else
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}

/* PyObjectFastCallMethod */
#if !CYTHON_VECTORCALL || PY_VERSION_HEX < 0x03090000
/* Fallback for PyObject_VectorcallMethod(): args[0] is the receiver, the
 * remaining nargsf-1 entries are the call arguments.  Looks the method up
 * as a bound attribute and fast-calls it. */
static PyObject *__Pyx_PyObject_FastCallMethod(PyObject *name, PyObject *const *args, size_t nargsf) {
    PyObject *method = PyObject_GetAttr(args[0], name);
    if (unlikely(!method))
        return NULL;
    PyObject *ret = __Pyx_PyObject_FastCall(method, args + 1, nargsf - 1);
    Py_DECREF(method);
    return ret;
}
#endif

/* RaiseUnexpectedTypeError */
/* Raise: TypeError: Expected <expected>, got <type of obj>.
 * Always returns 0 so callers can `return __Pyx_RaiseUnexpectedTypeError(...)`. */
static int
__Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj)
{
    __Pyx_TypeName obj_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(obj));
    PyErr_Format(PyExc_TypeError, "Expected %s, got " __Pyx_FMT_TYPENAME,
                 expected, obj_type_name);
    __Pyx_DECREF_TypeName(obj_type_name);
    return 0;
}

/* CIntToDigits (used by CIntToPyUnicode) */
static const char DIGIT_PAIRS_10[2*10*10+1] = {
    "00010203040506070809"
    "10111213141516171819"
    "20212223242526272829"
    "30313233343536373839"
    "40414243444546474849"
    "50515253545556575859"
    "60616263646566676869"
    "70717273747576777879"
    "80818283848586878889"
    "90919293949596979899"
};
static const char DIGIT_PAIRS_8[2*8*8+1] = {
    "0001020304050607"
    "1011121314151617"
    "2021222324252627"
    "3031323334353637"
    "4041424344454647"
    "5051525354555657"
    "6061626364656667"
    "7071727374757677"
};
static const char DIGITS_HEX[2*16+1] = {
    "0123456789abcdef"
    "0123456789ABCDEF"
};

/* BuildPyUnicode (used by COrdinalToPyUnicode) */
/* Build a unicode string of total length `ulength` from `clength` ASCII
 * chars, right-aligned, padded on the left with `padding_char` and an
 * optional leading '-' sign.  Two implementations: direct buffer writes
 * when unicode internals are available, otherwise composed from public
 * C-API calls (repeat + concat). */
static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, const char* chars, int clength,
                                                int prepend_sign, char padding_char) {
    PyObject *uval;
    Py_ssize_t uoffset = ulength - clength;   // number of sign+padding cells
#if CYTHON_USE_UNICODE_INTERNALS
    Py_ssize_t i;
    void *udata;
    uval = PyUnicode_New(ulength, 127);       // maxchar 127: pure ASCII
    if (unlikely(!uval)) return NULL;
    udata = PyUnicode_DATA(uval);
    if (uoffset > 0) {
        i = 0;
        if (prepend_sign) {
            __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-');
            i++;
        }
        for (; i < uoffset; i++) {
            __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char);
        }
    }
    for (i=0; i < clength; i++) {
        __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]);
    }
#else
    {
        PyObject *sign = NULL, *padding = NULL;
        uval = NULL;
        if (uoffset > 0) {
            prepend_sign = !!prepend_sign;
            if (uoffset > prepend_sign) {
                /* Build the padding string (repeated if longer than one). */
                padding = PyUnicode_FromOrdinal(padding_char);
                if (likely(padding) && uoffset > prepend_sign + 1) {
                    PyObject *tmp = PySequence_Repeat(padding, uoffset - prepend_sign);
                    Py_DECREF(padding);
                    padding = tmp;
                }
                if (unlikely(!padding)) goto done_or_error;
            }
            if (prepend_sign) {
                sign = PyUnicode_FromOrdinal('-');
                if (unlikely(!sign)) goto done_or_error;
            }
        }
        uval = PyUnicode_DecodeASCII(chars, clength, NULL);
        /* Assemble as sign + padding + digits via concatenation. */
        if (likely(uval) && padding) {
            PyObject *tmp = PyUnicode_Concat(padding, uval);
            Py_DECREF(uval);
            uval = tmp;
        }
        if (likely(uval) && sign) {
            PyObject *tmp = PyUnicode_Concat(sign, uval);
            Py_DECREF(uval);
            uval = tmp;
        }
done_or_error:
        Py_XDECREF(padding);
        Py_XDECREF(sign);
    }
#endif
    return uval;
}

/* COrdinalToPyUnicode (used by CIntToPyUnicode) */
/* True iff `value` does not exceed the maximum Unicode code point U+10FFFF. */
static CYTHON_INLINE int __Pyx_CheckUnicodeValue(int value) {
    return value <= 0x10FFFF;  /* 0x10FFFF == 1114111 */
}
/* Build a unicode string of length `ulength`: (ulength-1) copies of
 * `padding_char` followed by the single code point `value`.  Fast paths
 * encode into a stack buffer (Latin-1 for values <= 255, hand-rolled UTF-8
 * otherwise); surrogates (U+D800..U+DFFF) and very wide padding fall through
 * to the generic C-API path. */
static PyObject* __Pyx_PyUnicode_FromOrdinal_Padded(int value, Py_ssize_t ulength, char padding_char) {
    Py_ssize_t padding_length = ulength - 1;
    /* 250 keeps padding + up to 4 UTF-8 bytes within the 256-byte buffer. */
    if (likely((padding_length <= 250) && (value < 0xD800 || value > 0xDFFF))) {
        char chars[256];
        if (value <= 255) {
            memset(chars, padding_char, (size_t) padding_length);
            chars[ulength-1] = (char) value;
            return PyUnicode_DecodeLatin1(chars, ulength, NULL);
        }
        /* Encode `value` as UTF-8 backwards from the end of the buffer. */
        char *cpos = chars + sizeof(chars);
        if (value < 0x800) {
            /* 2-byte sequence */
            *--cpos = (char) (0x80 | (value & 0x3f));
            value >>= 6;
            *--cpos = (char) (0xc0 | (value & 0x1f));
        } else if (value < 0x10000) {
            /* 3-byte sequence */
            *--cpos = (char) (0x80 | (value & 0x3f));
            value >>= 6;
            *--cpos = (char) (0x80 | (value & 0x3f));
            value >>= 6;
            *--cpos = (char) (0xe0 | (value & 0x0f));
        } else {
            /* 4-byte sequence */
            *--cpos = (char) (0x80 | (value & 0x3f));
            value >>= 6;
            *--cpos = (char) (0x80 | (value & 0x3f));
            value >>= 6;
            *--cpos = (char) (0x80 | (value & 0x3f));
            value >>= 6;
            *--cpos = (char) (0xf0 | (value & 0x07));
        }
        cpos -= padding_length;
        memset(cpos, padding_char, (size_t) padding_length);
        return PyUnicode_DecodeUTF8(cpos, chars + sizeof(chars) - cpos, NULL);
    }
    if (value <= 127 && CYTHON_USE_UNICODE_INTERNALS) {
        /* ASCII value with very long padding: let the builder do the work. */
        const char chars[1] = {(char) value};
        return __Pyx_PyUnicode_BuildFromAscii(ulength, chars, 1, 0, padding_char);
    }
    {
        /* Generic path: padding * (ulength-1) + chr(value) via the C-API. */
        PyObject *uchar, *padding_uchar, *padding, *result;
        padding_uchar = PyUnicode_FromOrdinal(padding_char);
        if (unlikely(!padding_uchar)) return NULL;
        padding = PySequence_Repeat(padding_uchar, padding_length);
        Py_DECREF(padding_uchar);
        if (unlikely(!padding)) return NULL;
        uchar = PyUnicode_FromOrdinal(value);
        if (unlikely(!uchar)) {
            Py_DECREF(padding);
            return NULL;
        }
        result = PyUnicode_Concat(padding, uchar);
        Py_DECREF(padding);
        Py_DECREF(uchar);
        return result;
    }
}

/* CIntToPyUnicode */
/* Format an int as a single padded character (%c-style): range-check the
 * value, then delegate to PyUnicode_FromOrdinal / the padded builder.
 * NOTE(review): the high-bits clause (`value & ~0x01fffff`) deliberately
 * skips the explicit range check for very large values — presumably relying
 * on PyUnicode_FromOrdinal to raise its own range error; confirm against the
 * Cython utility-code template. */
static CYTHON_INLINE PyObject* __Pyx_uchar___Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time signedness probe for the (templated) value type. */
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!(is_unsigned || value == 0 || value > 0) ||
                    !(sizeof(value) <= 2 || value & ~ (int) 0x01fffff || __Pyx_CheckUnicodeValue((int) value)))) {
        PyErr_SetString(PyExc_OverflowError, "%c arg not in range(0x110000)");
        return NULL;
    }
    if (width <= 1) {
        return PyUnicode_FromOrdinal((int) value);
    }
    return __Pyx_PyUnicode_FromOrdinal_Padded((int) value, width, padding_char);
}
/* Format an int as a unicode string in decimal ('d'), octal ('o') or hex
 * ('x'/'X'), right-aligned to `width` with `padding_char`, handling the sign
 * for negative values.  Digits are generated backwards into a stack buffer,
 * two at a time for 'd'/'o' via the DIGIT_PAIRS tables. */
static CYTHON_INLINE PyObject* __Pyx____Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char) {
    char digits[sizeof(int)*3+2];              // worst-case digits + sign
    char *dpos, *end = digits + sizeof(int)*3+2;
    const char *hex_digits = DIGITS_HEX;
    Py_ssize_t length, ulength;
    int prepend_sign, last_one_off;
    int remaining;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (format_char == 'X') {
        hex_digits += 16;                      // switch to uppercase table
        format_char = 'x';
    }
    remaining = value;
    last_one_off = 0;
    dpos = end;
    do {
        /* abs() guards against negative remainders for negative values. */
        int digit_pos;
        switch (format_char) {
        case 'o':
            digit_pos = abs((int)(remaining % (8*8)));
            remaining = (int) (remaining / (8*8));
            dpos -= 2;
            memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2);
            /* A pair < 8 wrote a spurious leading '0' if it was the last. */
            last_one_off = (digit_pos < 8);
            break;
        case 'd':
            digit_pos = abs((int)(remaining % (10*10)));
            remaining = (int) (remaining / (10*10));
            dpos -= 2;
            memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2);
            last_one_off = (digit_pos < 10);
            break;
        case 'x':
            *(--dpos) = hex_digits[abs((int)(remaining % 16))];
            remaining = (int) (remaining / 16);
            break;
        default:
            assert(0);
            break;
        }
    } while (unlikely(remaining != 0));
    assert(!last_one_off || *dpos == '0');
    dpos += last_one_off;                      // drop the spurious '0'
    length = end - dpos;
    ulength = length;
    prepend_sign = 0;
    if (!is_unsigned && value <= neg_one) {
        /* Negative: '-' goes into the buffer, unless zero-padding should
         * insert the padding between sign and digits. */
        if (padding_char == ' ' || width <= length + 1) {
            *(--dpos) = '-';
            ++length;
        } else {
            prepend_sign = 1;
        }
        ++ulength;
    }
    if (width > ulength) {
        ulength = width;
    }
    if (ulength == 1) {
        return PyUnicode_FromOrdinal(*dpos);
    }
    return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char);
}

/* CIntToPyUnicode */
/* Py_ssize_t instantiation of the padded %c formatter; see the int variant
 * above for the logic and the hedged note about the high-bits clause. */
static CYTHON_INLINE PyObject* __Pyx_uchar___Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const Py_ssize_t neg_one = (Py_ssize_t) -1, const_zero = (Py_ssize_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time signedness probe for the (templated) value type. */
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!(is_unsigned || value == 0 || value > 0) ||
                    !(sizeof(value) <= 2 || value & ~ (Py_ssize_t) 0x01fffff || __Pyx_CheckUnicodeValue((int) value)))) {
        PyErr_SetString(PyExc_OverflowError, "%c arg not in range(0x110000)");
        return NULL;
    }
    if (width <= 1) {
        return PyUnicode_FromOrdinal((int) value);
    }
    return __Pyx_PyUnicode_FromOrdinal_Padded((int) value, width, padding_char);
}
/* Py_ssize_t instantiation of the padded integer formatter ('d'/'o'/'x'/'X');
 * identical structure to the int variant above. */
static CYTHON_INLINE PyObject* __Pyx____Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char) {
    char digits[sizeof(Py_ssize_t)*3+2];       // worst-case digits + sign
    char *dpos, *end = digits + sizeof(Py_ssize_t)*3+2;
    const char *hex_digits = DIGITS_HEX;
    Py_ssize_t length, ulength;
    int prepend_sign, last_one_off;
    Py_ssize_t remaining;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const Py_ssize_t neg_one = (Py_ssize_t) -1, const_zero = (Py_ssize_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (format_char == 'X') {
        hex_digits += 16;                      // switch to uppercase table
        format_char = 'x';
    }
    remaining = value;
    last_one_off = 0;
    dpos = end;
    do {
        /* abs() guards against negative remainders for negative values. */
        int digit_pos;
        switch (format_char) {
        case 'o':
            digit_pos = abs((int)(remaining % (8*8)));
            remaining = (Py_ssize_t) (remaining / (8*8));
            dpos -= 2;
            memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2);
            /* A pair < 8 wrote a spurious leading '0' if it was the last. */
            last_one_off = (digit_pos < 8);
            break;
        case 'd':
            digit_pos = abs((int)(remaining % (10*10)));
            remaining = (Py_ssize_t) (remaining / (10*10));
            dpos -= 2;
            memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2);
            last_one_off = (digit_pos < 10);
            break;
        case 'x':
            *(--dpos) = hex_digits[abs((int)(remaining % 16))];
            remaining = (Py_ssize_t) (remaining / 16);
            break;
        default:
            assert(0);
            break;
        }
    } while (unlikely(remaining != 0));
    assert(!last_one_off || *dpos == '0');
    dpos += last_one_off;                      // drop the spurious '0'
    length = end - dpos;
    ulength = length;
    prepend_sign = 0;
    if (!is_unsigned && value <= neg_one) {
        /* Negative: '-' goes into the buffer, unless zero-padding should
         * insert the padding between sign and digits. */
        if (padding_char == ' ' || width <= length + 1) {
            *(--dpos) = '-';
            ++length;
        } else {
            prepend_sign = 1;
        }
        ++ulength;
    }
    if (width > ulength) {
        ulength = width;
    }
    if (ulength == 1) {
        return PyUnicode_FromOrdinal(*dpos);
    }
    return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char);
}

/* JoinPyUnicode */
/* Concatenate 'value_count' unicode objects from 'values' into one new
 * unicode string.  'result_ulength' (total length) and 'max_char' (widest
 * code point) were precomputed by the caller and size the fast-path
 * allocation.  Returns a new reference, or NULL with an exception set. */
static PyObject* __Pyx_PyUnicode_Join(PyObject** values, Py_ssize_t value_count, Py_ssize_t result_ulength,
                                      Py_UCS4 max_char) {
#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    /* Fast path: allocate the result once, then copy raw character data. */
    PyObject *result_uval;
    int result_ukind, kind_shift;
    Py_ssize_t i, char_pos;
    void *result_udata;
    if (max_char > 1114111) max_char = 1114111;  /* clamp to the Unicode maximum (U+10FFFF) */
    result_uval = PyUnicode_New(result_ulength, max_char);
    if (unlikely(!result_uval)) return NULL;
    /* 'kind' is bytes per character of the result; 'kind_shift' turns a
     * character count into a byte count via '<<'. */
    result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND;
    kind_shift = (result_ukind == PyUnicode_4BYTE_KIND) ? 2 : result_ukind - 1;
    result_udata = PyUnicode_DATA(result_uval);
    assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0);
    /* Guard the byte-size computations below against Py_ssize_t overflow. */
    if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - result_ulength < 0))
        goto overflow;
    char_pos = 0;
    for (i=0; i < value_count; i++) {
        int ukind;
        Py_ssize_t ulength;
        void *udata;
        PyObject *uval = values[i];
        #if !CYTHON_COMPILING_IN_LIMITED_API
        if (__Pyx_PyUnicode_READY(uval) == (-1))
            goto bad;
        #endif
        ulength = __Pyx_PyUnicode_GET_LENGTH(uval);
        #if !CYTHON_ASSUME_SAFE_SIZE
        if (unlikely(ulength < 0)) goto bad;
        #endif
        if (unlikely(!ulength))
            continue;  /* nothing to copy for empty strings */
        if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos))
            goto overflow;
        ukind = __Pyx_PyUnicode_KIND(uval);
        udata = __Pyx_PyUnicode_DATA(uval);
        if (ukind == result_ukind) {
            /* Same character width: plain memcpy of the raw buffers. */
            memcpy((char *)result_udata + (char_pos << kind_shift), udata, (size_t) (ulength << kind_shift));
        } else {
            /* Width mismatch: copy with per-character widening. */
            #if PY_VERSION_HEX >= 0x030d0000
            if (unlikely(PyUnicode_CopyCharacters(result_uval, char_pos, uval, 0, ulength) < 0)) goto bad;
            #elif CYTHON_COMPILING_IN_CPYTHON || defined(_PyUnicode_FastCopyCharacters)
            _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength);
            #else
            Py_ssize_t j;
            for (j=0; j < ulength; j++) {
                Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j);
                __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar);
            }
            #endif
        }
        char_pos += ulength;
    }
    return result_uval;
overflow:
    PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string");
bad:
    Py_DECREF(result_uval);
    return NULL;
#else
    /* Portable path: pack the values into a tuple and delegate to
     * "".join(tuple); the precomputed length/max_char are unused here. */
    Py_ssize_t i;
    PyObject *result = NULL;
    PyObject *value_tuple = PyTuple_New(value_count);
    if (unlikely(!value_tuple)) return NULL;
    CYTHON_UNUSED_VAR(max_char);
    CYTHON_UNUSED_VAR(result_ulength);
    for (i=0; i<value_count; i++) {
        if (__Pyx_PyTuple_SET_ITEM(value_tuple, i, values[i]) != (0)) goto bad;
        Py_INCREF(values[i]);
    }
    result = PyUnicode_Join(__pyx_mstate_global->__pyx_empty_unicode, value_tuple);
bad:
    Py_DECREF(value_tuple);
    return result;
#endif
}

/* GetAttr */
/* Fetch attribute 'name' from 'obj'.  When type slots are usable and the
 * name is a unicode object, dispatch through the specialised
 * __Pyx_PyObject_GetAttrStr helper; otherwise use the generic
 * PyObject_GetAttr API.  Returns a new reference, or NULL on error. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *obj, PyObject *name) {
#if CYTHON_USE_TYPE_SLOTS
    if (likely(PyUnicode_Check(name))) {
        return __Pyx_PyObject_GetAttrStr(obj, name);
    }
#endif
    return PyObject_GetAttr(obj, name);
}

/* GetItemInt */
/* Generic fallback for integer indexing: consumes (steals) the reference to
 * the boxed index 'key' and returns obj[key] as a new reference.  Returns
 * NULL if the boxed index could not be created or the lookup failed. */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *obj, PyObject* key) {
    PyObject *result;
    if (unlikely(!key))
        return NULL;
    result = PyObject_GetItem(obj, key);
    Py_DECREF(key);
    return result;
}
/* List fast path for o[i]: resolves negative indices when 'wraparound' is
 * set, bounds-checks when 'boundscheck' is set, and defers out-of-range
 * indices to the generic protocol (which raises the proper IndexError).
 * 'unsafe_shared' is passed through to the ref-fetching helper.
 * Note: 'wraparound & unlikely(...)' uses bitwise AND deliberately; both
 * operands are 0/1 flags. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck, int unsafe_shared) {
    CYTHON_MAYBE_UNUSED_VAR(unsafe_shared);
#if CYTHON_ASSUME_SAFE_SIZE
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);  /* Python-style negative indexing */
    }
    if ((CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS || !CYTHON_ASSUME_SAFE_MACROS)) {
        /* Configurations that must not rely on borrowed references. */
        return __Pyx_PyList_GetItemRefFast(o, wrapped_i, unsafe_shared);
    } else
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        return __Pyx_NewRef(PyList_GET_ITEM(o, wrapped_i));
    }
    /* Out of range: fall back to the generic path with the ORIGINAL index. */
    return __Pyx_GetItemInt_Generic(o, PyLong_FromSsize_t(i));
#else
    (void)wraparound;
    (void)boundscheck;
    return PySequence_GetItem(o, i);
#endif
}
/* Tuple fast path for o[i]: same contract as the list variant above, but
 * tuples are immutable so a borrowed-ref read is used whenever the build
 * configuration allows it; otherwise fall back to PySequence_GetItem. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck, int unsafe_shared) {
    CYTHON_MAYBE_UNUSED_VAR(unsafe_shared);
#if CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);  /* Python-style negative indexing */
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        return __Pyx_NewRef(PyTuple_GET_ITEM(o, wrapped_i));
    }
    /* Out of range: generic path raises the proper IndexError. */
    return __Pyx_GetItemInt_Generic(o, PyLong_FromSsize_t(i));
#else
    (void)wraparound;
    (void)boundscheck;
    return PySequence_GetItem(o, i);
#endif
}
/* General o[i] for an integer index: specialises on exact list/tuple, then
 * dispatches through the mapping (mp_subscript) or sequence (sq_item) type
 * slots, and finally falls back to the generic protocol.  'is_list' is a
 * compile-time promise from Cython that 'o' is a list. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     int wraparound, int boundscheck, int unsafe_shared) {
    CYTHON_MAYBE_UNUSED_VAR(unsafe_shared);
#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
    if (is_list || PyList_CheckExact(o)) {
        /* Bitwise '|' on 0/1 flags, as in the helpers above. */
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS)) {
            return __Pyx_PyList_GetItemRefFast(o, n, unsafe_shared);
        } else if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            return __Pyx_NewRef(PyList_GET_ITEM(o, n));
        }
    } else
    #if !CYTHON_AVOID_BORROWED_REFS
    if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
            return __Pyx_NewRef(PyTuple_GET_ITEM(o, n));
        }
    } else
    #endif
#endif
#if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY
    {
        PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping;
        PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence;
        if (!is_list && mm && mm->mp_subscript) {
            /* Mapping protocol: box the index and subscript directly. */
            PyObject *r, *key = PyLong_FromSsize_t(i);
            if (unlikely(!key)) return NULL;
            r = mm->mp_subscript(o, key);
            Py_DECREF(key);
            return r;
        }
        if (is_list || likely(sm && sm->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) {
                Py_ssize_t l = sm->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* A length overflow is ignored here; sq_item below will
                     * report any real indexing error. */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return sm->sq_item(o, i);
        }
    }
#else
    if (is_list || !PyMapping_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    (void)wraparound;
    (void)boundscheck;
    return __Pyx_GetItemInt_Generic(o, PyLong_FromSsize_t(i));
}

/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
/* obj[index] for objects that only implement the sequence protocol: convert
 * 'index' to Py_ssize_t via __index__ and use the integer fast path.
 * An OverflowError from the conversion is re-raised as IndexError naming
 * the index's type; other conversion errors propagate as NULL. */
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) {
    PyObject *runerr = NULL;
    Py_ssize_t key_value;
    key_value = __Pyx_PyIndex_AsSsize_t(index);
    /* -1 is ambiguous (valid index or error marker): check PyErr_Occurred. */
    if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1, 1);
    }
    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
        __Pyx_TypeName index_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(index));
        PyErr_Clear();
        PyErr_Format(PyExc_IndexError,
            "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name);
        __Pyx_DECREF_TypeName(index_type_name);
    }
    return NULL;
}
/* Slow path for obj[key]: supports class subscription via __class_getitem__
 * (e.g. List[int]); otherwise raises "'X' object is not subscriptable". */
static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) {
    __Pyx_TypeName obj_type_name;
    if (likely(PyType_Check(obj))) {
        PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_mstate_global->__pyx_n_u_class_getitem);
        if (!meth) {
            PyErr_Clear();  /* no __class_getitem__: fall through to TypeError */
        } else {
            PyObject *result = __Pyx_PyObject_CallOneArg(meth, key);
            Py_DECREF(meth);
            return result;
        }
    }
    obj_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(obj));
    PyErr_Format(PyExc_TypeError,
        "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name);
    __Pyx_DECREF_TypeName(obj_type_name);
    return NULL;
}
/* General obj[key]: prefer the mapping slot, then the sequence slot (via the
 * integer-index helper above), then the slow path. */
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) {
    PyTypeObject *tp = Py_TYPE(obj);
    PyMappingMethods *mm = tp->tp_as_mapping;
    PySequenceMethods *sm = tp->tp_as_sequence;
    if (likely(mm && mm->mp_subscript)) {
        return mm->mp_subscript(obj, key);
    }
    if (likely(sm && sm->sq_item)) {
        return __Pyx_PyObject_GetIndex(obj, key);
    }
    return __Pyx_PyObject_GetItem_Slow(obj, key);
}
#endif

/* RejectKeywords */
/* Raise TypeError naming the first keyword argument in 'kwds'; used by
 * functions that accept no keyword arguments.  'kwds' is either a tuple of
 * keyword names (fastcall convention) or a dict.  Does nothing if 'kwds'
 * turns out to contain no key (or validation already set an error). */
static void __Pyx_RejectKeywords(const char* function_name, PyObject *kwds) {
    PyObject *key = NULL;
    if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds))) {
        key = __Pyx_PySequence_ITEM(kwds, 0);
    } else {
#if CYTHON_AVOID_BORROWED_REFS
        PyObject *pos = NULL;
#else
        Py_ssize_t pos = 0;
#endif
#if !CYTHON_COMPILING_IN_PYPY || defined(PyArg_ValidateKeywordArguments)
        /* Ensure all keys are strings before formatting one with '%U'. */
        if (unlikely(!PyArg_ValidateKeywordArguments(kwds))) return;
#endif
        __Pyx_PyDict_NextRef(kwds, &pos, &key, NULL);
#if CYTHON_AVOID_BORROWED_REFS
        Py_XDECREF(pos);
#endif
    }
    if (likely(key)) {
        PyErr_Format(PyExc_TypeError,
            "%s() got an unexpected keyword argument '%U'",
            function_name, key);
        Py_DECREF(key);
    }
}

/* DivInt[Py_ssize_t] */
/* Floor division of 'a' by 'b' with Python semantics: the quotient rounds
 * towards negative infinity, unlike C's '/' which truncates towards zero.
 * 'b_is_constant' selects between two equivalent sign tests so the compiler
 * can fold the correction when the divisor is a compile-time constant. */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b, int b_is_constant) {
    Py_ssize_t quotient = a / b;
    Py_ssize_t remainder = a - quotient * b;
    /* Subtract one when the remainder is non-zero and the operands have
     * opposite signs (i.e. C truncation rounded the wrong way). */
    Py_ssize_t needs_correction;
    if (b_is_constant) {
        needs_correction = (remainder != 0) & ((remainder < 0) ^ (b < 0));
    } else {
        needs_correction = (remainder != 0) & ((remainder ^ b) < 0);
    }
    return quotient - needs_correction;
}

/* GetAttr3 */
#if __PYX_LIMITED_VERSION_HEX < 0x030d0000
/* Fallback for getattr(o, n, d): a pending AttributeError from the lookup
 * is cleared and the default 'd' returned (new reference); any other
 * pending exception propagates as NULL. */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        return NULL;
    __Pyx_PyErr_Clear();
    Py_INCREF(d);
    return d;
}
#endif
/* getattr(o, n, d): returns a new reference to the attribute, or to 'd'
 * when the attribute is missing; NULL with an exception set on error. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r;
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    /* Python 3.13+: PyObject_GetOptionalAttr distinguishes "missing" (0)
     * from "error" (-1) without raising AttributeError; on error r is NULL. */
    int res = PyObject_GetOptionalAttr(o, n, &r);
    return (res != 0) ? r : __Pyx_NewRef(d);
#else
  #if CYTHON_USE_TYPE_SLOTS
    if (likely(PyUnicode_Check(n))) {
        r = __Pyx_PyObject_GetAttrStrNoError(o, n);
        if (unlikely(!r) && likely(!PyErr_Occurred())) {
            r = __Pyx_NewRef(d);  /* attribute missing, no error: use default */
        }
        return r;
    }
  #endif
    r = PyObject_GetAttr(o, n);
    return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
#endif
}

/* PyDictVersioning (used by GetModuleGlobalName) */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
/* Version tag of a type's tp_dict (0 when absent); used to validate cached
 * dict lookups without repeating the lookup. */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
/* Version tag of the instance __dict__ (0 when the object has none). */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
    PyObject **dictptr = NULL;
    Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
    if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
        /* A positive tp_dictoffset locates the dict slot directly inside the
         * object; otherwise defer to _PyObject_GetDictPtr. */
        dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
        dictptr = _PyObject_GetDictPtr(obj);
#endif
    }
    return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
/* True when both cached version tags still match the object's dicts,
 * i.e. a previously cached attribute lookup is still valid. */
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
        return 0;
    return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif

/* GetModuleGlobalName */
/* Look up a module-level name: first in this module's globals dict (with a
 * dict-version cache when enabled), then via the builtins helper.  Returns
 * a new reference or NULL with an exception set. */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
    PyObject *result;
#if CYTHON_COMPILING_IN_LIMITED_API
    /* Limited API: no direct dict access; go through module attributes. */
    if (unlikely(!__pyx_m)) {
        if (!PyErr_Occurred())
            PyErr_SetNone(PyExc_NameError);
        return NULL;
    }
    result = PyObject_GetAttr(__pyx_m, name);
    if (likely(result)) {
        return result;
    }
    PyErr_Clear();
#elif CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
    /* GetItemRef already returns a new reference. */
    if (unlikely(__Pyx_PyDict_GetItemRef(__pyx_mstate_global->__pyx_d, name, &result) == -1)) PyErr_Clear();
    __PYX_UPDATE_DICT_CACHE(__pyx_mstate_global->__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return result;
    }
#else
    /* CPython fast path: reuse the interned name's precomputed hash. */
    result = _PyDict_GetItem_KnownHash(__pyx_mstate_global->__pyx_d, name, ((PyASCIIObject *) name)->hash);
    __PYX_UPDATE_DICT_CACHE(__pyx_mstate_global->__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
    PyErr_Clear();
#endif
    /* Not in module globals: fall back to the builtins lookup helper. */
    return __Pyx_GetBuiltinName(name);
}

/* RaiseTooManyValuesToUnpack */
/* Raise ValueError: an unpacking target expected exactly 'expected' values
 * but the iterable produced more. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(
        PyExc_ValueError,
        "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)",
        expected);
}

/* RaiseNeedMoreValuesToUnpack */
/* Raise ValueError: the iterable was exhausted after 'index' values but the
 * unpacking target needed more ("%.1s" selects "" vs "s" for pluralisation). */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    const char *plural_suffix = (index == 1) ? "" : "s";
    PyErr_Format(
        PyExc_ValueError,
        "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
        index, plural_suffix);
}

/* RaiseNoneIterError */
/* Raise the TypeError produced when None is unpacked or iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError,
                    "'NoneType' object is not iterable");
}

/* ExtTypeTest */
/* Return 1 if 'obj' is an instance of 'type' (or a subclass).  Otherwise
 * raise TypeError naming both types and return 0.  A NULL 'type' raises
 * SystemError and returns 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    {
        /* Mismatch: build a descriptive error with fully qualified names. */
        __Pyx_TypeName obj_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(obj));
        __Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(type);
        PyErr_Format(PyExc_TypeError,
                     "Cannot convert " __Pyx_FMT_TYPENAME " to " __Pyx_FMT_TYPENAME,
                     obj_type_name, type_name);
        __Pyx_DECREF_TypeName(obj_type_name);
        __Pyx_DECREF_TypeName(type_name);
    }
    return 0;
}

/* GetTopmostException (used by SaveResetException) */
#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE
/* Walk the thread state's exception-info stack downwards, skipping entries
 * whose exc_value is NULL or None, and return the first entry holding a
 * live exception (or the bottom entry if none does). */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *item = tstate->exc_info;
    for (;;) {
        int has_live_exc = (item->exc_value != NULL) && (item->exc_value != Py_None);
        if (has_live_exc || item->previous_item == NULL)
            break;
        item = item->previous_item;
    }
    return item;
}
#endif

/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Capture the currently *handled* exception (sys.exc_info()) into
 * *type/*value/*tb as new references; all three are set to NULL when no
 * exception is being handled.  Layout of the thread state differs per
 * Python version, hence the three branches. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
  #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4
    /* 3.11+: only exc_value is stored; type and traceback are derived. */
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    PyObject *exc_value = exc_info->exc_value;
    if (exc_value == NULL || exc_value == Py_None) {
        *value = NULL;
        *type = NULL;
        *tb = NULL;
    } else {
        *value = exc_value;
        Py_INCREF(*value);
        *type = (PyObject*) Py_TYPE(exc_value);
        Py_INCREF(*type);
        *tb = PyException_GetTraceback(exc_value);
    }
  #elif CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
  #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
  #endif
}
/* Restore a previously saved handled exception, stealing the references to
 * 'type', 'value' and 'tb' and releasing whatever was stored before. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
  #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4
    /* 3.11+: only the value slot exists; drop the now-unused type/tb refs. */
    _PyErr_StackItem *exc_info = tstate->exc_info;
    PyObject *tmp_value = exc_info->exc_value;
    exc_info->exc_value = value;
    Py_XDECREF(tmp_value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
  #else
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    #endif
    /* Release the old state only after the new one is installed. */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
  #endif
}
#endif

/* GetException */
/* Fetch and normalise the *pending* exception into *type/*value/*tb (new
 * references) and install it as the currently *handled* exception, as
 * entering an 'except' block does.  Returns 0 on success, -1 on failure
 * (with *type/*value/*tb zeroed). */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
    PyObject *local_type = NULL, *local_value, *local_tb = NULL;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
  #if PY_VERSION_HEX >= 0x030C0000
    /* 3.12+: the pending exception is a single object; steal it. */
    local_value = tstate->current_exception;
    tstate->current_exception = 0;
  #else
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
  #endif
#elif __PYX_LIMITED_VERSION_HEX > 0x030C0000
    local_value = PyErr_GetRaisedException();
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
#if __PYX_LIMITED_VERSION_HEX > 0x030C0000
    /* Modern path: derive type and traceback from the exception object. */
    if (likely(local_value)) {
        local_type = (PyObject*) Py_TYPE(local_value);
        Py_INCREF(local_type);
        local_tb = PyException_GetTraceback(local_value);
    }
#else
    /* Legacy path: normalisation may itself fail and set a new exception. */
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif // __PYX_LIMITED_VERSION_HEX > 0x030C0000
    /* Extra references for the caller; the locals below are donated to the
     * thread state (or released) depending on the configuration. */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    #if CYTHON_USE_EXC_INFO_STACK
    {
        _PyErr_StackItem *exc_info = tstate->exc_info;
      #if PY_VERSION_HEX >= 0x030B00a4
        /* 3.11+: only exc_value is stored in the exc_info stack. */
        tmp_value = exc_info->exc_value;
        exc_info->exc_value = local_value;
        tmp_type = NULL;
        tmp_tb = NULL;
        Py_XDECREF(local_type);
        Py_XDECREF(local_tb);
      #else
        tmp_type = exc_info->exc_type;
        tmp_value = exc_info->exc_value;
        tmp_tb = exc_info->exc_traceback;
        exc_info->exc_type = local_type;
        exc_info->exc_value = local_value;
        exc_info->exc_traceback = local_tb;
      #endif
    }
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    #endif
    /* Release the previously handled exception. */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#elif __PYX_LIMITED_VERSION_HEX >= 0x030b0000
    PyErr_SetHandledException(local_value);
    Py_XDECREF(local_value);
    Py_XDECREF(local_type);
    Py_XDECREF(local_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
#if __PYX_LIMITED_VERSION_HEX <= 0x030C0000
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
#endif
}

/* SwapException */
/* Exchange the currently *handled* exception with *type/*value/*tb: the
 * passed-in references are installed (stolen) and the previously handled
 * exception is returned through the same pointers. */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
  #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4
    /* 3.11+: only exc_value is stored; reconstruct type/tb for the caller. */
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_value = exc_info->exc_value;
    exc_info->exc_value = *value;
    if (tmp_value == NULL || tmp_value == Py_None) {
        Py_XDECREF(tmp_value);
        tmp_value = NULL;
        tmp_type = NULL;
        tmp_tb = NULL;
    } else {
        tmp_type = (PyObject*) Py_TYPE(tmp_value);
        Py_INCREF(tmp_type);
        #if CYTHON_COMPILING_IN_CPYTHON
        /* Direct struct access avoids the PyException_GetTraceback call. */
        tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback;
        Py_XINCREF(tmp_tb);
        #else
        tmp_tb = PyException_GetTraceback(tmp_value);
        #endif
    }
  #elif CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = *type;
    exc_info->exc_value = *value;
    exc_info->exc_traceback = *tb;
  #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
  #endif
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable variant using the public exc-info API. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif

/* HasAttr (used by ImportImpl) */
#if __PYX_LIMITED_VERSION_HEX < 0x030d0000
/* hasattr() equivalent with error reporting: returns 1 if attribute 'n'
 * exists on 'o', 0 if it does not, and -1 with an exception set on error
 * (including a non-string attribute name). */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *attr;
    if (unlikely(!PyUnicode_Check(n))) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    attr = __Pyx_PyObject_GetAttrStrNoError(o, n);
    if (attr) {
        Py_DECREF(attr);
        return 1;
    }
    /* NULL from the NoError lookup: distinguish "missing" from real error. */
    return (unlikely(PyErr_Occurred())) ? -1 : 0;
}
#endif

/* ImportImpl (used by Import) */
/* Look 'qualname' up among the already-imported modules.  On success writes
 * a new reference into '*module' and returns 1.  Returns 0 with *module set
 * to NULL when the module is absent, or -1 (also NULL) if the lookup itself
 * raised. */
static int __Pyx__Import_GetModule(PyObject *qualname, PyObject **module) {
    PyObject *found = PyImport_GetModule(qualname);
    if (likely(found)) {
        *module = found;
        return 1;
    }
    *module = NULL;
    return PyErr_Occurred() ? -1 : 0;
}
/* Try to satisfy an import from the already-imported modules.  When
 * 'imported_names' is given ("from x import a, b"), the module must expose
 * every listed name.  For plain dotted imports ("import a.b") the result is
 * the top-level package, matching Python's binding semantics.  Returns 1
 * and sets *module (new reference) on success; 0 (with *module NULL) when a
 * fresh import is required; -1 on error. */
static int __Pyx__Import_Lookup(PyObject *qualname, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject **module) {
    PyObject *imported_module;
    PyObject *top_level_package_name;
    Py_ssize_t i;
    int status, module_found;
    Py_ssize_t dot_index;
    module_found = __Pyx__Import_GetModule(qualname, &imported_module);
    if (unlikely(!module_found || module_found == -1)) {
        /* Not imported yet (0) or lookup error (-1): pass it through. */
        *module = NULL;
        return module_found;
    }
    if (imported_names) {
        /* "from x import ..." form: all requested names must be present,
         * otherwise a real import must run (e.g. a partially initialised
         * module or submodules not yet loaded). */
        for (i = 0; i < len_imported_names; i++) {
            PyObject *imported_name = imported_names[i];
#if __PYX_LIMITED_VERSION_HEX < 0x030d0000
            int has_imported_attribute = PyObject_HasAttr(imported_module, imported_name);
#else
            int has_imported_attribute = PyObject_HasAttrWithError(imported_module, imported_name);
            if (unlikely(has_imported_attribute == -1)) goto error;
#endif
            if (!has_imported_attribute) {
                goto not_found;
            }
        }
        *module = imported_module;
        return 1;
    }
    /* "import a.b" form: the bound name is the top-level package. */
    dot_index = PyUnicode_FindChar(qualname, '.', 0, PY_SSIZE_T_MAX, 1);
    if (dot_index == -1) {
        /* No dot: the module itself is the result. */
        *module = imported_module;
        return 1;
    }
    if (unlikely(dot_index == -2)) goto error;  /* PyUnicode_FindChar failed */
    top_level_package_name = PyUnicode_Substring(qualname, 0, dot_index);
    if (unlikely(!top_level_package_name)) goto error;
    Py_DECREF(imported_module);
    status = __Pyx__Import_GetModule(top_level_package_name, module);
    Py_DECREF(top_level_package_name);
    return status;
error:
    Py_DECREF(imported_module);
    *module = NULL;
    return -1;
not_found:
    Py_DECREF(imported_module);
    *module = NULL;
    return 0;
}
static PyObject *__Pyx__Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, PyObject *moddict, int level) {
    PyObject *module = 0;
    PyObject *empty_dict = 0;
    PyObject *from_list = 0;
    int module_found;
    if (!qualname) {
        qualname = name;
    }
    module_found = __Pyx__Import_Lookup(qualname, imported_names, len_imported_names, &module);
    if (likely(module_found == 1)) {
        return module;
    } else if (unlikely(module_found == -1)) {
        return NULL;
    }
    empty_dict = PyDict_New();
    if (unlikely(!empty_dict))
        goto bad;
    if (imported_names) {
#if CYTHON_COMPILING_IN_CPYTHON
        from_list = __Pyx_PyList_FromArray(imported_names, len_imported_names);
        if (unlikely(!from_list))
            goto bad;
#else
        from_list = PyList_New(len_imported_names);
        if (unlikely(!from_list)) goto bad;
        for (Py_ssize_t i=0; i<len_imported_names; ++i) {
            if (PyList_SetItem(from_list, i, __Pyx_NewRef(imported_names[i])) < 0) goto bad;
        }
#endif
    }
    if (level == -1) {
        const char* package_sep = strchr(__Pyx_MODULE_NAME, '.');
        if (package_sep != (0)) {
            module = PyImport_ImportModuleLevelObject(
                name, moddict, empty_dict, from_list, 1);
            if (unlikely(!module)) {
                if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError)))
                    goto bad;
                PyErr_Clear();
            }
        }
        level = 0;
    }
    if (!module) {
        module = PyImport_ImportModuleLevelObject(
            name, moddict, empty_dict, from_list, level);
    }
bad:
    Py_XDECREF(from_list);
    Py_XDECREF(empty_dict);
    return module;
}

/* Import */
/* Public import entry point: forwards to __Pyx__Import, supplying this
 * module's globals dict as the importing namespace. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, int level) {
    PyObject *module_globals = __pyx_mstate_global->__pyx_d;
    return __Pyx__Import(name, imported_names, len_imported_names, qualname, module_globals, level);
}

/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Subclass test along the tp_base chain only (used when no MRO tuple is
 * available).  Everything ultimately inherits from 'object'. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    while (a) {
        a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*);
        if (a == b)
            return 1;
    }
    return b == &PyBaseObject_Type;
}
/* issubclass(a, b) via a linear scan of a's MRO tuple, falling back to the
 * tp_base walk when the MRO has not been initialised. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (a == b) return 1;
    mro = a->tp_mro;
    if (likely(mro)) {
        Py_ssize_t i, n;
        n = PyTuple_GET_SIZE(mro);
        for (i = 0; i < n; i++) {
            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
                return 1;
        }
        return 0;
    }
    return __Pyx_InBases(a, b);
}
/* issubclass(cls, a) or issubclass(cls, b) in a single MRO scan. */
static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (cls == a || cls == b) return 1;
    mro = cls->tp_mro;
    if (likely(mro)) {
        Py_ssize_t i, n;
        n = PyTuple_GET_SIZE(mro);
        for (i = 0; i < n; i++) {
            PyObject *base = PyTuple_GET_ITEM(mro, i);
            if (base == (PyObject *)a || base == (PyObject *)b)
                return 1;
        }
        return 0;
    }
    return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b);
}
/* Match 'err' against one or two exception classes; exc_type1 may be NULL,
 * in which case only exc_type2 is tested. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    if (exc_type1) {
        return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2);
    } else {
        return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
    }
}
/* Match 'exc_type' against a tuple of exception classes.  Identity is
 * checked for the whole tuple first (the common case for cached tuples),
 * then subclass relations; non-class entries are silently skipped. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    assert(PyExceptionClass_Check(exc_type));
    n = PyTuple_GET_SIZE(tuple);
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
    for (i=0; i<n; i++) {
        PyObject *t = PyTuple_GET_ITEM(tuple, i);
        if (likely(PyExceptionClass_Check(t))) {
            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        } else {
        }
    }
    return 0;
}
/* Fast PyErr_GivenExceptionMatches: handles a single class or a tuple of
 * classes inline, deferring odd inputs to the CPython implementation. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    if (likely(err == exc_type)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        if (likely(PyExceptionClass_Check(exc_type))) {
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        } else if (likely(PyTuple_Check(exc_type))) {
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
        } else {
        }
    }
    return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Two-class variant: both exc_type1 and exc_type2 must be exception classes. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
    }
    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif

/* PySequenceMultiply */
#if CYTHON_USE_TYPE_SLOTS
/* Fallback: box 'mul' as a Python int and dispatch through the generic
 * number protocol (PyNumber_Multiply). */
static PyObject* __Pyx_PySequence_Multiply_Generic(PyObject *seq, Py_ssize_t mul) {
    PyObject *product;
    PyObject *boxed_mul = PyLong_FromSsize_t(mul);
    if (unlikely(!boxed_mul))
        return NULL;
    product = PyNumber_Multiply(seq, boxed_mul);
    Py_DECREF(boxed_mul);
    return product;
}
/* Sequence repetition (seq * mul): call the sq_repeat slot directly when
 * the type provides one, otherwise use the generic fallback above. */
static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul) {
    PySequenceMethods *sq = Py_TYPE(seq)->tp_as_sequence;
    if (likely(sq && sq->sq_repeat)) {
        return sq->sq_repeat(seq, mul);
    }
    return __Pyx_PySequence_Multiply_Generic(seq, mul);
}
#endif

/* PyObjectFormatAndDecref */
/* Format 's' with spec 'f', consuming the reference to 's'.  The "Simple"
 * variant returns exact unicode objects unchanged (still owning the ref). */
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) {
    if (unlikely(!s))
        return NULL;
    if (likely(PyUnicode_CheckExact(s)))
        return s;
    return __Pyx_PyObject_FormatAndDecref(s, f);
}
/* Unconditional variant: returns format(s, f) and drops the reference to
 * 's'; NULL if 's' is NULL or PyObject_Format fails. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) {
    PyObject *formatted;
    if (unlikely(!s))
        return NULL;
    formatted = PyObject_Format(s, f);
    Py_DECREF(s);
    return formatted;
}

/* PyObjectFormat */
#if CYTHON_USE_UNICODE_WRITER
/* format(obj, format_spec) with a CPython fast path: exact float and int
 * objects are formatted straight into a _PyUnicodeWriter, bypassing the
 * generic __format__ dispatch; all other types go through PyObject_Format. */
static PyObject* __Pyx_PyObject_Format(PyObject* obj, PyObject* format_spec) {
    int ret;
    _PyUnicodeWriter writer;
    if (likely(PyFloat_CheckExact(obj))) {
        _PyUnicodeWriter_Init(&writer);
        ret = _PyFloat_FormatAdvancedWriter(
            &writer,
            obj,
            format_spec, 0, PyUnicode_GET_LENGTH(format_spec));
    } else if (likely(PyLong_CheckExact(obj))) {
        _PyUnicodeWriter_Init(&writer);
        ret = _PyLong_FormatAdvancedWriter(
            &writer,
            obj,
            format_spec, 0, PyUnicode_GET_LENGTH(format_spec));
    } else {
        return PyObject_Format(obj, format_spec);
    }
    if (unlikely(ret == -1)) {
        /* Formatting failed: release the writer's partial buffer. */
        _PyUnicodeWriter_Dealloc(&writer);
        return NULL;
    }
    return _PyUnicodeWriter_Finish(&writer);
}
#endif

/* SetItemInt */
/* Generic o[j] = v where `j` is an owned boxed index (consumed here).
 * A NULL index means boxing already failed; propagate the error. */
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
    if (unlikely(j == NULL))
        return -1;
    {
        int result = PyObject_SetItem(o, j, v);
        Py_DECREF(j);
        return result;
    }
}
/* Set o[i] = v for a C integer index, choosing the fastest safe path.
 * is_list/wraparound/boundscheck are compile-time flags injected by the
 * Cython translator; unsafe_shared marks objects that may be visible to
 * other threads (relevant with borrowed-ref avoidance enabled).
 * Returns 0 on success, -1 with an exception set. */
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list,
                                               int wraparound, int boundscheck, int unsafe_shared) {
    CYTHON_MAYBE_UNUSED_VAR(unsafe_shared);
#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS
    if (is_list || PyList_CheckExact(o)) {
        /* Normalise a negative index only when wraparound is requested. */
        Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
        if ((CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS && !__Pyx_IS_UNIQUELY_REFERENCED(o, unsafe_shared))) {
            /* Possibly-shared list: let PyList_SetItem do the checked store
               (it steals the reference, hence the INCREF). */
            Py_INCREF(v);
            return PyList_SetItem(o, n, v);
        } else if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) {
            /* Uniquely-referenced list: swap the slot via the fast macros. */
            PyObject* old;
            Py_INCREF(v);
            old = PyList_GET_ITEM(o, n);
            PyList_SET_ITEM(o, n, v);
            Py_DECREF(old);
            return 0;
        }
    } else
#endif
#if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY
    {
        PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping;
        PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence;
        /* Mapping protocol takes precedence when available. */
        if (!is_list && mm && mm->mp_ass_subscript) {
            int r;
            PyObject *key = PyLong_FromSsize_t(i);
            if (unlikely(!key)) return -1;
            r = mm->mp_ass_subscript(o, key, v);
            Py_DECREF(key);
            return r;
        }
        if (is_list || likely(sm && sm->sq_ass_item)) {
            if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) {
                Py_ssize_t l = sm->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* Only an OverflowError from __len__ is tolerated:
                       clear it and let sq_ass_item judge the raw index. */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return -1;
                    PyErr_Clear();
                }
            }
            return sm->sq_ass_item(o, i, v);
        }
    }
#else
    if (is_list || !PyMapping_Check(o)) {
        return PySequence_SetItem(o, i, v);
    }
#endif
    (void)wraparound;
    (void)boundscheck;
    /* Last resort: boxed index + generic PyObject_SetItem. */
    return __Pyx_SetItemInt_Generic(o, PyLong_FromSsize_t(i), v);
}

/* RaiseUnboundLocalError */
/* Raise UnboundLocalError for a local variable read before assignment. */
static void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError,
                 "local variable '%s' referenced before assignment", varname);
}

/* DivInt[long] */
/* Python-semantics floor division for long.  C division truncates toward
 * zero, so when a nonzero remainder has a sign differing from the divisor
 * the quotient must be decremented by one.  The b_is_constant variant uses
 * separate sign comparisons, which fold well for literal divisors. */
static CYTHON_INLINE long __Pyx_div_long(long a, long b, int b_is_constant) {
    long quotient = a / b;
    long remainder = a - quotient * b;
    long needs_adjust;
    if (b_is_constant) {
        needs_adjust = (remainder != 0) & ((remainder < 0) ^ (b < 0));
    } else {
        needs_adjust = (remainder != 0) & ((remainder ^ b) < 0);
    }
    return quotient - needs_adjust;
}

/* PyLongCompare */
/* Boolean `op1 != intval` for a compile-time C integer constant.
 * op2 appears to be the boxed form of intval (identity check + generic
 * fallback) -- inferred from usage, confirm against call sites.
 * Exact ints are compared digit-by-digit against CPython's PyLong
 * internals; exact floats compare numerically; anything else goes
 * through rich comparison.  Returns a C truth value (may be -1 via the
 * IsTrue fallback on error). */
static CYTHON_INLINE int __Pyx_PyLong_BoolNeObjC(PyObject *op1, PyObject *op2, long intval, long inplace) {
    CYTHON_MAYBE_UNUSED_VAR(intval);
    CYTHON_UNUSED_VAR(inplace);
    if (op1 == op2) {
        return 0;
    }
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        int unequal;
        unsigned long uintval;
        Py_ssize_t size = __Pyx_PyLong_DigitCount(op1);
        const digit* digits = __Pyx_PyLong_Digits(op1);
        if (intval == 0) {
            return (__Pyx_PyLong_IsZero(op1) != 1);
        } else if (intval < 0) {
            /* Different signs can never be equal. */
            if (__Pyx_PyLong_IsNonNeg(op1))
                return 1;
            intval = -intval;
        } else {
            if (__Pyx_PyLong_IsNeg(op1))
                return 1;
        }
        uintval = (unsigned long) intval;
        /* Compare magnitudes over 1..5 PyLong digits, selecting the widest
           digit count the constant actually occupies. */
#if PyLong_SHIFT * 4 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 4)) {
            unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
#if PyLong_SHIFT * 3 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 3)) {
            unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
#if PyLong_SHIFT * 2 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 2)) {
            unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
#if PyLong_SHIFT * 1 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 1)) {
            unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
            unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));
        return (unequal != 0);
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = __Pyx_PyFloat_AS_DOUBLE(op1);
        return ((double)a != (double)b);
    }
    /* Generic fallback: rich compare, then truth-test the result. */
    return __Pyx_PyObject_IsTrueAndDecref(
        PyObject_RichCompare(op1, op2, Py_NE));
}

/* PyObjectSetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
/* setattr(obj, attr_name, value) via the type's tp_setattro slot when one
 * exists, bypassing the generic lookup; falls back to PyObject_SetAttr. */
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
    setattrofunc setter = Py_TYPE(obj)->tp_setattro;
    if (unlikely(!setter))
        return PyObject_SetAttr(obj, attr_name, value);
    return setter(obj, attr_name, value);
}
#endif

/* PyUnicode_Unicode */
/* str(obj) for an already-unicode argument: None maps to the interned
 * "None" string, everything else is returned with a fresh reference. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj) {
    PyObject *result = likely(obj != Py_None) ? obj : __pyx_mstate_global->__pyx_kp_u_None;
    return __Pyx_NewRef(result);
}

/* PyLongCompare */
/* Boolean `op1 == intval` for a compile-time C integer constant; exact
 * mirror of __Pyx_PyLong_BoolNeObjC with the result sense inverted.
 * op2 appears to be the boxed intval (identity check + fallback) --
 * inferred from usage, confirm against call sites. */
static CYTHON_INLINE int __Pyx_PyLong_BoolEqObjC(PyObject *op1, PyObject *op2, long intval, long inplace) {
    CYTHON_MAYBE_UNUSED_VAR(intval);
    CYTHON_UNUSED_VAR(inplace);
    if (op1 == op2) {
        return 1;
    }
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        int unequal;
        unsigned long uintval;
        Py_ssize_t size = __Pyx_PyLong_DigitCount(op1);
        const digit* digits = __Pyx_PyLong_Digits(op1);
        if (intval == 0) {
            return (__Pyx_PyLong_IsZero(op1) == 1);
        } else if (intval < 0) {
            /* Different signs can never be equal. */
            if (__Pyx_PyLong_IsNonNeg(op1))
                return 0;
            intval = -intval;
        } else {
            if (__Pyx_PyLong_IsNeg(op1))
                return 0;
        }
        uintval = (unsigned long) intval;
        /* Compare magnitudes over 1..5 PyLong digits, selecting the widest
           digit count the constant actually occupies. */
#if PyLong_SHIFT * 4 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 4)) {
            unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
#if PyLong_SHIFT * 3 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 3)) {
            unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
#if PyLong_SHIFT * 2 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 2)) {
            unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
#if PyLong_SHIFT * 1 < SIZEOF_LONG*8
        if (uintval >> (PyLong_SHIFT * 1)) {
            unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
        } else
#endif
            unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));
        return (unequal == 0);
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = __Pyx_PyFloat_AS_DOUBLE(op1);
        return ((double)a == (double)b);
    }
    /* Generic fallback: rich compare, then truth-test the result. */
    return __Pyx_PyObject_IsTrueAndDecref(
        PyObject_RichCompare(op1, op2, Py_EQ));
}

/* PyObjectVectorCallKwBuilder */
#if CYTHON_VECTORCALL
/* Append one keyword argument to a vectorcall under construction:
 * `builder` is the kwnames tuple being filled, `args` the argument stack,
 * `n` the slot index.  NOTE(review): SET_ITEM appears to steal the key
 * reference, compensated by the INCREF afterwards; `value` looks borrowed
 * (kept alive by the call site) -- confirm against the vectorcall caller. */
static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
    (void)__Pyx_PyObject_FastCallDict;
    if (__Pyx_PyTuple_SET_ITEM(builder, n, key) != (0)) return -1;
    Py_INCREF(key);
    args[n] = value;
    return 0;
}
/* Same as AddArg, but first validates that the key is a unicode object
 * (required for **kwargs expansion of arbitrary mappings). */
CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
    (void)__Pyx_VectorcallBuilder_AddArgStr;
    if (unlikely(!PyUnicode_Check(key))) {
        PyErr_SetString(PyExc_TypeError, "keywords must be strings");
        return -1;
    }
    return __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n);
}
/* Convenience wrapper taking a C string key. */
static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
    PyObject *pyKey = PyUnicode_FromString(key);
    if (!pyKey) return -1;
    return __Pyx_VectorcallBuilder_AddArg(pyKey, value, builder, args, n);
}
#else // CYTHON_VECTORCALL
/* Non-vectorcall build: `builder` is a plain kwargs dict. */
CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, CYTHON_UNUSED PyObject **args, CYTHON_UNUSED int n) {
    if (unlikely(!PyUnicode_Check(key))) {
        PyErr_SetString(PyExc_TypeError, "keywords must be strings");
        return -1;
    }
    return PyDict_SetItem(builder, key, value);
}
#endif

/* CIntToPyUnicode */
/* '%c'-style formatting of a size_t: produce a one-character unicode
 * string (optionally padded to `width`).  The error message text names
 * the format code literally; do not "fix" the %c in the string. */
static CYTHON_INLINE PyObject* __Pyx_uchar___Pyx_PyUnicode_From_size_t(size_t value, Py_ssize_t width, char padding_char) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const size_t neg_one = (size_t) -1, const_zero = (size_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Validate the ordinal; __Pyx_CheckUnicodeValue's exact contract is
       defined elsewhere -- presumably it validates the code-point range. */
    if (unlikely(!(is_unsigned || value == 0 || value > 0) ||
                    !(sizeof(value) <= 2 || value & ~ (size_t) 0x01fffff || __Pyx_CheckUnicodeValue((int) value)))) {
        PyErr_SetString(PyExc_OverflowError, "%c arg not in range(0x110000)");
        return NULL;
    }
    if (width <= 1) {
        return PyUnicode_FromOrdinal((int) value);
    }
    return __Pyx_PyUnicode_FromOrdinal_Padded((int) value, width, padding_char);
}
/* Format a size_t as a unicode string in octal/decimal/hex ('o'/'d'/'x'/'X'),
 * with optional width padding and sign handling.  Digits are generated
 * backwards into a stack buffer, two at a time for 'o'/'d' via lookup
 * tables, which may leave one leading '0' that is trimmed afterwards. */
static CYTHON_INLINE PyObject* __Pyx____Pyx_PyUnicode_From_size_t(size_t value, Py_ssize_t width, char padding_char, char format_char) {
    char digits[sizeof(size_t)*3+2];
    char *dpos, *end = digits + sizeof(size_t)*3+2;
    const char *hex_digits = DIGITS_HEX;
    Py_ssize_t length, ulength;
    int prepend_sign, last_one_off;
    size_t remaining;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const size_t neg_one = (size_t) -1, const_zero = (size_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (format_char == 'X') {
        /* Uppercase hex: second half of the DIGITS_HEX table. */
        hex_digits += 16;
        format_char = 'x';
    }
    remaining = value;
    last_one_off = 0;
    dpos = end;
    do {
        int digit_pos;
        switch (format_char) {
        case 'o':
            /* Emit two octal digits per iteration from the pair table. */
            digit_pos = abs((int)(remaining % (8*8)));
            remaining = (size_t) (remaining / (8*8));
            dpos -= 2;
            memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2);
            last_one_off = (digit_pos < 8);
            break;
        case 'd':
            /* Emit two decimal digits per iteration from the pair table. */
            digit_pos = abs((int)(remaining % (10*10)));
            remaining = (size_t) (remaining / (10*10));
            dpos -= 2;
            memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2);
            last_one_off = (digit_pos < 10);
            break;
        case 'x':
            *(--dpos) = hex_digits[abs((int)(remaining % 16))];
            remaining = (size_t) (remaining / 16);
            break;
        default:
            assert(0);
            break;
        }
    } while (unlikely(remaining != 0));
    /* Pair tables may have written a spurious leading zero; skip it. */
    assert(!last_one_off || *dpos == '0');
    dpos += last_one_off;
    length = end - dpos;
    ulength = length;
    prepend_sign = 0;
    if (!is_unsigned && value <= neg_one) {
        /* Negative value of a signed instantiation: place the '-' either
           inline or ahead of zero-padding, depending on padding_char. */
        if (padding_char == ' ' || width <= length + 1) {
            *(--dpos) = '-';
            ++length;
        } else {
            prepend_sign = 1;
        }
        ++ulength;
    }
    if (width > ulength) {
        ulength = width;
    }
    if (ulength == 1) {
        return PyUnicode_FromOrdinal(*dpos);
    }
    return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char);
}

/* SliceObject */
/* obj[start:stop] with every component optionally precomputed.
 * _py_slice/_py_start/_py_stop, when non-NULL, point at cached boxed
 * objects supplied by the caller (borrowed here); otherwise the C values
 * cstart/cstop are boxed on demand (has_cstart/has_cstop gate whether a
 * bound exists at all -- absent bounds become None).  Returns a new
 * reference or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
        Py_ssize_t cstart, Py_ssize_t cstop,
        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
    __Pyx_TypeName obj_type_name;
#if CYTHON_USE_TYPE_SLOTS
    PyMappingMethods* mp = Py_TYPE(obj)->tp_as_mapping;
    if (likely(mp && mp->mp_subscript))
#endif
    {
        PyObject* result;
        PyObject *py_slice, *py_start, *py_stop;
        if (_py_slice) {
            py_slice = *_py_slice;
        } else {
            /* owned_* track boxed bounds created here so they can be
               released after PySlice_New (or on error). */
            PyObject* owned_start = NULL;
            PyObject* owned_stop = NULL;
            if (_py_start) {
                py_start = *_py_start;
            } else {
                if (has_cstart) {
                    owned_start = py_start = PyLong_FromSsize_t(cstart);
                    if (unlikely(!py_start)) goto bad;
                } else
                    py_start = Py_None;
            }
            if (_py_stop) {
                py_stop = *_py_stop;
            } else {
                if (has_cstop) {
                    owned_stop = py_stop = PyLong_FromSsize_t(cstop);
                    if (unlikely(!py_stop)) {
                        Py_XDECREF(owned_start);
                        goto bad;
                    }
                } else
                    py_stop = Py_None;
            }
            py_slice = PySlice_New(py_start, py_stop, Py_None);
            Py_XDECREF(owned_start);
            Py_XDECREF(owned_stop);
            if (unlikely(!py_slice)) goto bad;
        }
#if CYTHON_USE_TYPE_SLOTS
        result = mp->mp_subscript(obj, py_slice);
#else
        result = PyObject_GetItem(obj, py_slice);
#endif
        if (!_py_slice) {
            /* Slice was built here, not caller-cached: release it. */
            Py_DECREF(py_slice);
        }
        return result;
    }
    /* No mapping subscript slot: report the type as unsliceable. */
    obj_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(obj));
    PyErr_Format(PyExc_TypeError,
        "'" __Pyx_FMT_TYPENAME "' object is unsliceable", obj_type_name);
    __Pyx_DECREF_TypeName(obj_type_name);
bad:
    return NULL;
}

/* SliceObject */
/* obj[start:stop] = value (or `del obj[start:stop]` when value is NULL).
 * Mirrors __Pyx_PyObject_GetSlice: caller-cached boxed bounds come in via
 * _py_slice/_py_start/_py_stop (borrowed), otherwise cstart/cstop are
 * boxed on demand and absent bounds become None.  Returns 0 on success,
 * -1 with an exception set. */
static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value,
        Py_ssize_t cstart, Py_ssize_t cstop,
        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
    __Pyx_TypeName obj_type_name;
#if CYTHON_USE_TYPE_SLOTS
    PyMappingMethods* mp = Py_TYPE(obj)->tp_as_mapping;
    if (likely(mp && mp->mp_ass_subscript))
#endif
    {
        int result;
        PyObject *py_slice, *py_start, *py_stop;
        if (_py_slice) {
            py_slice = *_py_slice;
        } else {
            /* owned_* track boxed bounds created here so they can be
               released after PySlice_New (or on error). */
            PyObject* owned_start = NULL;
            PyObject* owned_stop = NULL;
            if (_py_start) {
                py_start = *_py_start;
            } else {
                if (has_cstart) {
                    owned_start = py_start = PyLong_FromSsize_t(cstart);
                    if (unlikely(!py_start)) goto bad;
                } else
                    py_start = Py_None;
            }
            if (_py_stop) {
                py_stop = *_py_stop;
            } else {
                if (has_cstop) {
                    owned_stop = py_stop = PyLong_FromSsize_t(cstop);
                    if (unlikely(!py_stop)) {
                        Py_XDECREF(owned_start);
                        goto bad;
                    }
                } else
                    py_stop = Py_None;
            }
            py_slice = PySlice_New(py_start, py_stop, Py_None);
            Py_XDECREF(owned_start);
            Py_XDECREF(owned_stop);
            if (unlikely(!py_slice)) goto bad;
        }
#if CYTHON_USE_TYPE_SLOTS
        result = mp->mp_ass_subscript(obj, py_slice, value);
#else
        result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice);
#endif
        if (!_py_slice) {
            /* Slice was built here, not caller-cached: release it. */
            Py_DECREF(py_slice);
        }
        return result;
    }
    /* No assign-subscript slot: report assignment vs. deletion precisely. */
    obj_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(obj));
    PyErr_Format(PyExc_TypeError,
        "'" __Pyx_FMT_TYPENAME "' object does not support slice %.10s",
        obj_type_name, value ? "assignment" : "deletion");
    __Pyx_DECREF_TypeName(obj_type_name);
bad:
    return -1;
}

/* ErrOccurredWithGIL */
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
  int err;
  PyGILState_STATE _save = PyGILState_Ensure();
  err = !!PyErr_Occurred();
  PyGILState_Release(_save);
  return err;
}

/* decode_c_bytes (used by decode_bytes) */
/* Decode cstring[start:stop] to unicode with Python slice semantics:
 * negative bounds wrap against `length`, start clamps at 0, stop clamps
 * at length, and an empty slice yields the shared empty string.  Uses the
 * specialised `decode_func` when given, else PyUnicode_Decode(encoding). */
static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes(
         const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t slice_len;
    if (start < 0) {
        start += length;
        if (start < 0)
            start = 0;
    }
    if (stop < 0)
        stop += length;
    if (stop > length)
        stop = length;
    if (unlikely(stop <= start))
        return __Pyx_NewRef(__pyx_mstate_global->__pyx_empty_unicode);
    slice_len = stop - start;
    if (decode_func)
        return decode_func(cstring + start, slice_len, errors);
    return PyUnicode_Decode(cstring + start, slice_len, encoding, errors);
}

/* PyLongBinop */
#if !CYTHON_COMPILING_IN_PYPY
/* Generic fallback for op1 + intval: defer to the number protocol. */
static PyObject* __Pyx_Fallback___Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, int inplace) {
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#if CYTHON_USE_PYLONG_INTERNALS
/* op1 + intval for exact ints, unpacking up to 4 PyLong digits into a
 * native long (or long long) so the addition runs in C.  Sizes beyond
 * what fits natively fall back to PyLong's own nb_add.
 * NOTE: the size==1 branch falls through into the calculate_long label
 * without a goto -- intentional. */
static PyObject* __Pyx_Unpacked___Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) {
    CYTHON_MAYBE_UNUSED_VAR(inplace);
    CYTHON_UNUSED_VAR(zerodivision_check);
    const long b = intval;
    long a;
    const PY_LONG_LONG llb = intval;
    PY_LONG_LONG lla;
    if (unlikely(__Pyx_PyLong_IsZero(op1))) {
        /* 0 + x == x: return the boxed constant directly. */
        return __Pyx_NewRef(op2);
    }
    const int is_positive = __Pyx_PyLong_IsPos(op1);
    const digit* digits = __Pyx_PyLong_Digits(op1);
    const Py_ssize_t size = __Pyx_PyLong_DigitCount(op1);
    if (likely(size == 1)) {
        a = (long) digits[0];
        if (!is_positive) a *= -1;
    } else {
        switch (size) {
            case 2:
                if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                    a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                    if (!is_positive) a *= -1;
                    goto calculate_long;
                } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                    lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                    if (!is_positive) lla *= -1;
                    goto calculate_long_long;
                }
                break;
            case 3:
                if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                    a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                    if (!is_positive) a *= -1;
                    goto calculate_long;
                } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                    lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                    if (!is_positive) lla *= -1;
                    goto calculate_long_long;
                }
                break;
            case 4:
                if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                    a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                    if (!is_positive) a *= -1;
                    goto calculate_long;
                } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                    lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                    if (!is_positive) lla *= -1;
                    goto calculate_long_long;
                }
                break;
        }
        /* Too many digits for native arithmetic: use PyLong's slot. */
        return PyLong_Type.tp_as_number->nb_add(op1, op2);
    }
    calculate_long:
        {
            long x;
            x = a + b;
            return PyLong_FromLong(x);
        }
    calculate_long_long:
        {
            PY_LONG_LONG llx;
            llx = lla + llb;
            return PyLong_FromLongLong(llx);
        }
    
}
#endif
/* float + intval: plain double addition. */
static PyObject* __Pyx_Float___Pyx_PyLong_AddObjC(PyObject *float_val, long intval, int zerodivision_check) {
    CYTHON_UNUSED_VAR(zerodivision_check);
    const long b = intval;
    double a = __Pyx_PyFloat_AS_DOUBLE(float_val);
        double result;
        
        result = ((double)a) + (double)b;
        return PyFloat_FromDouble(result);
}
/* Entry point: op1 + intval where intval is a compile-time constant and
 * op2 its boxed form.  Dispatches by the concrete type of op1. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) {
    CYTHON_MAYBE_UNUSED_VAR(intval);
    CYTHON_UNUSED_VAR(zerodivision_check);
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        return __Pyx_Unpacked___Pyx_PyLong_AddObjC(op1, op2, intval, inplace, zerodivision_check);
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        return __Pyx_Float___Pyx_PyLong_AddObjC(op1, intval, zerodivision_check);
    }
    return __Pyx_Fallback___Pyx_PyLong_AddObjC(op1, op2, inplace);
}
#endif

/* AllocateExtensionType */
/* Allocate an instance of extension type `t`.  Final or non-abstract
 * types use tp_alloc directly; abstract types are routed through
 * object.__new__ so CPython raises the usual TypeError for them. */
static PyObject *__Pyx_AllocateExtensionType(PyTypeObject *t, int is_final) {
    if (is_final || likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) {
        allocfunc alloc_func = __Pyx_PyType_GetSlot(t, tp_alloc, allocfunc);
        return alloc_func(t, 0);
    } else {
        newfunc tp_new = __Pyx_PyType_TryGetSlot(&PyBaseObject_Type, tp_new, newfunc);
    #if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
        /* Pre-3.10 Limited API cannot read the slot: call __new__ by name. */
        if (!tp_new) {
            PyObject *new_str = PyUnicode_FromString("__new__");
            if (likely(new_str)) {
                PyObject *o = PyObject_CallMethodObjArgs((PyObject *)&PyBaseObject_Type, new_str, t, NULL);
                Py_DECREF(new_str);
                return o;
            } else
                return NULL;
        } else
    #endif
        return tp_new(t, __pyx_mstate_global->__pyx_empty_tuple, 0);
    }
}

/* CallTypeTraverse */
/* Heap types created from specs on Python >= 3.9 must visit their own
 * type object in tp_traverse.  Older/non-spec builds need no helper at
 * all, hence the empty first preprocessor branch. */
#if !CYTHON_USE_TYPE_SPECS || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x03090000)
#else
static int __Pyx_call_type_traverse(PyObject *o, int always_call, visitproc visit, void *arg) {
    #if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x03090000
    /* Runtime check: only needed when actually running on >= 3.9. */
    if (__Pyx_get_runtime_version() < 0x03090000) return 0;
    #endif
    if (!always_call) {
        /* If the base is itself a heap type, it already visits Py_TYPE(o);
           avoid visiting it twice. */
        PyTypeObject *base = __Pyx_PyObject_GetSlot(o, tp_base, PyTypeObject*);
        unsigned long flags = PyType_GetFlags(base);
        if (flags & Py_TPFLAGS_HEAPTYPE) {
            return 0;
        }
    }
    Py_VISIT((PyObject*)Py_TYPE(o));
    return 0;
}
#endif

/* FunctionExport */
/* Export a C function pointer into `api_dict` (the module's __pyx_capi__)
 * as a PyCapsule tagged with its C signature string.
 * Returns 0 on success, -1 with an exception set. */
static int __Pyx_ExportFunction(PyObject *api_dict, const char *name, void (*f)(void), const char *sig) {
    union {
        void (*fp)(void);
        void *p;
    } caster;
    PyObject *capsule;
    int status;
    /* Function pointers cannot be portably cast to void* directly;
       round-trip through a union instead. */
    caster.fp = f;
    capsule = PyCapsule_New(caster.p, sig, 0);
    if (unlikely(!capsule))
        return -1;
    status = PyDict_SetItemString(api_dict, name, capsule);
    Py_DECREF(capsule);
    return (status < 0) ? -1 : 0;
}

/* GetApiDict */
/* Return the module's __pyx_capi__ dict (new reference), creating and
 * attaching it to the module on first use.  NULL with an exception set
 * on failure. */
static PyObject *__Pyx_ApiExport_GetApiDict(void) {
    PyObject *api_dict;
    if (__Pyx_PyDict_GetItemRef(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_capi, &api_dict) == -1)
        return NULL;
    if (api_dict)
        return api_dict;
    /* Not present yet: create it and publish it as a module attribute. */
    api_dict = PyDict_New();
    if (api_dict && PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_pyx_capi, api_dict) == 0)
        return api_dict;
    Py_XDECREF(api_dict);
    return NULL;
}

/* LimitedApiGetTypeDict (used by SetItemOnTypeDict) */
#if CYTHON_COMPILING_IN_LIMITED_API
/* Limited API cannot touch PyTypeObject fields directly; recover
 * type.__dictoffset__ via attribute access instead. Returns the offset
 * or -1 with an exception set. */
static Py_ssize_t __Pyx_GetTypeDictOffset(void) {
    PyObject *tp_dictoffset_o;
    Py_ssize_t tp_dictoffset;
    tp_dictoffset_o = PyObject_GetAttrString((PyObject*)(&PyType_Type), "__dictoffset__");
    if (unlikely(!tp_dictoffset_o)) return -1;
    tp_dictoffset = PyLong_AsSsize_t(tp_dictoffset_o);
    Py_DECREF(tp_dictoffset_o);
    if (unlikely(tp_dictoffset == 0)) {
        PyErr_SetString(
            PyExc_TypeError,
            "'type' doesn't have a dictoffset");
        return -1;
    } else if (unlikely(tp_dictoffset < 0)) {
        PyErr_SetString(
            PyExc_TypeError,
            "'type' has an unexpected negative dictoffset. "
            "Please report this as Cython bug");
        return -1;
    }
    return tp_dictoffset;
}
/* Read tp->tp_dict through the cached dictoffset (borrowed reference).
 * The offset is cached in a function-static; 0 doubles as "not yet
 * computed" since a zero dictoffset is rejected above. */
static PyObject *__Pyx_GetTypeDict(PyTypeObject *tp) {
    static Py_ssize_t tp_dictoffset = 0;
    if (unlikely(tp_dictoffset == 0)) {
        tp_dictoffset = __Pyx_GetTypeDictOffset();
        if (unlikely(tp_dictoffset == -1 && PyErr_Occurred())) {
            tp_dictoffset = 0; // try again next time?
            return NULL;
        }
    }
    return *(PyObject**)((char*)tp + tp_dictoffset);
}
#endif

/* SetItemOnTypeDict (used by FixUpExtensionType) */
/* Store k -> v in a type's __dict__, invalidate the type's method cache,
 * and honour the __set_name__ descriptor protocol on the stored value.
 * NOTE(review): PyObject_HasAttr swallows lookup errors here, matching
 * hasattr() semantics.  Returns 0 on success, -1 with an exception set. */
static int __Pyx__SetItemOnTypeDict(PyTypeObject *tp, PyObject *k, PyObject *v) {
    int result;
    PyObject *tp_dict;
#if CYTHON_COMPILING_IN_LIMITED_API
    tp_dict = __Pyx_GetTypeDict(tp);
    if (unlikely(!tp_dict)) return -1;
#else
    tp_dict = tp->tp_dict;
#endif
    result = PyDict_SetItem(tp_dict, k, v);
    if (likely(!result)) {
        PyType_Modified(tp);
        if (unlikely(PyObject_HasAttr(v, __pyx_mstate_global->__pyx_n_u_set_name))) {
            PyObject *setNameResult = PyObject_CallMethodObjArgs(v, __pyx_mstate_global->__pyx_n_u_set_name,  (PyObject *) tp, k, NULL);
            if (!setNameResult) return -1;
            Py_DECREF(setNameResult);
        }
    }
    return result;
}

/* FixUpExtensionType */
/* Post-process a type created from a PyType_Spec on Python < 3.9.0b1:
 * older CPython ignores some special members/getsets in specs, so the
 * corresponding PyTypeObject fields (weaklist/dict/vectorcall offsets)
 * and the __module__ descriptor are patched in manually here.  On newer
 * Pythons this is a no-op.  Returns 0 on success, -1 on error. */
static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) {
#if __PYX_LIMITED_VERSION_HEX > 0x030900B1
    CYTHON_UNUSED_VAR(spec);
    CYTHON_UNUSED_VAR(type);
    CYTHON_UNUSED_VAR(__Pyx__SetItemOnTypeDict);
#else
    const PyType_Slot *slot = spec->slots;
    int changed = 0;
#if !CYTHON_COMPILING_IN_LIMITED_API
    /* Pass 1: scan Py_tp_members for dunder pseudo-members. */
    while (slot && slot->slot && slot->slot != Py_tp_members)
        slot++;
    if (slot && slot->slot == Py_tp_members) {
#if !CYTHON_COMPILING_IN_CPYTHON
        const
#endif  // !CYTHON_COMPILING_IN_CPYTHON)
            PyMemberDef *memb = (PyMemberDef*) slot->pfunc;
        while (memb && memb->name) {
            if (memb->name[0] == '_' && memb->name[1] == '_') {
                if (strcmp(memb->name, "__weaklistoffset__") == 0) {
                    assert(memb->type == T_PYSSIZET);
                    assert(memb->flags == READONLY);
                    type->tp_weaklistoffset = memb->offset;
                    changed = 1;
                }
                else if (strcmp(memb->name, "__dictoffset__") == 0) {
                    assert(memb->type == T_PYSSIZET);
                    assert(memb->flags == READONLY);
                    type->tp_dictoffset = memb->offset;
                    changed = 1;
                }
#if CYTHON_METH_FASTCALL
                else if (strcmp(memb->name, "__vectorcalloffset__") == 0) {
                    assert(memb->type == T_PYSSIZET);
                    assert(memb->flags == READONLY);
                    type->tp_vectorcall_offset = memb->offset;
                    changed = 1;
                }
#endif  // CYTHON_METH_FASTCALL
#if !CYTHON_COMPILING_IN_PYPY
                else if (strcmp(memb->name, "__module__") == 0) {
                    /* Install __module__ as a real member descriptor. */
                    PyObject *descr;
                    assert(memb->type == T_OBJECT);
                    assert(memb->flags == 0 || memb->flags == READONLY);
                    descr = PyDescr_NewMember(type, memb);
                    if (unlikely(!descr))
                        return -1;
                    int set_item_result = PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr);
                    Py_DECREF(descr);
                    if (unlikely(set_item_result < 0)) {
                        return -1;
                    }
                    changed = 1;
                }
#endif  // !CYTHON_COMPILING_IN_PYPY
            }
            memb++;
        }
    }
#endif  // !CYTHON_COMPILING_IN_LIMITED_API
#if !CYTHON_COMPILING_IN_PYPY
    /* Pass 2: scan Py_tp_getset for a __module__ getset descriptor. */
    slot = spec->slots;
    while (slot && slot->slot && slot->slot != Py_tp_getset)
        slot++;
    if (slot && slot->slot == Py_tp_getset) {
        PyGetSetDef *getset = (PyGetSetDef*) slot->pfunc;
        while (getset && getset->name) {
            if (getset->name[0] == '_' && getset->name[1] == '_' && strcmp(getset->name, "__module__") == 0) {
                PyObject *descr = PyDescr_NewGetSet(type, getset);
                if (unlikely(!descr))
                    return -1;
                #if CYTHON_COMPILING_IN_LIMITED_API
                PyObject *pyname = PyUnicode_FromString(getset->name);
                if (unlikely(!pyname)) {
                    Py_DECREF(descr);
                    return -1;
                }
                int set_item_result = __Pyx_SetItemOnTypeDict(type, pyname, descr);
                Py_DECREF(pyname);
                #else
                CYTHON_UNUSED_VAR(__Pyx__SetItemOnTypeDict);
                int set_item_result = PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr);
                #endif
                Py_DECREF(descr);
                if (unlikely(set_item_result < 0)) {
                    return -1;
                }
                changed = 1;
            }
            ++getset;
        }
    }
#else
    CYTHON_UNUSED_VAR(__Pyx__SetItemOnTypeDict);
#endif  // !CYTHON_COMPILING_IN_PYPY
    if (changed)
        PyType_Modified(type);
#endif  // PY_VERSION_HEX > 0x030900B1
    return 0;
}

/* PyObjectCallNoArg (used by PyObjectCallMethod0) */
/* Invoke `func` with zero arguments via the fastcall protocol. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
    /* A spare leading NULL slot lets the callee use the vectorcall
       ARGUMENTS_OFFSET convention (prepend self in place). */
    PyObject *args[2] = {NULL, NULL};
    return __Pyx_PyObject_FastCall(func, &args[1], 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET);
}

/* PyObjectGetMethod (used by PyObjectCallMethod0) */
#if !(CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000)))
/* Resolve obj.name, mirroring CPython's _PyObject_GetMethod.
 * Returns 1 when *method holds an unbound callable that the caller must
 * invoke with obj prepended as the first argument (avoids allocating a
 * bound-method object); returns 0 when *method holds the already-bound
 * attribute value, or NULL with an exception set on failure. */
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
    PyObject *attr;
#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
    __Pyx_TypeName type_name;
    PyTypeObject *tp = Py_TYPE(obj);
    PyObject *descr;
    descrgetfunc f = NULL;
    PyObject **dictptr, *dict;
    int meth_found = 0;
    assert (*method == NULL);
    /* Types with a custom tp_getattro get the plain attribute lookup. */
    if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
        attr = __Pyx_PyObject_GetAttrStr(obj, name);
        goto try_unpack;
    }
    if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
        return 0;
    }
    /* _PyType_Lookup returns a borrowed reference; INCREF below. */
    descr = _PyType_Lookup(tp, name);
    if (likely(descr != NULL)) {
        Py_INCREF(descr);
#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR
        if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR))
#else
        #ifdef __Pyx_CyFunction_USED
        if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
        #else
        if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type)))
        #endif
#endif
        {
            meth_found = 1;
        } else {
            /* Data descriptors (e.g. properties) take precedence over the
             * instance __dict__, so resolve them right away. */
            f = Py_TYPE(descr)->tp_descr_get;
            if (f != NULL && PyDescr_IsData(descr)) {
                attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
                Py_DECREF(descr);
                goto try_unpack;
            }
        }
    }
    /* Instance __dict__ shadows any non-data descriptor found above. */
    dictptr = _PyObject_GetDictPtr(obj);
    if (dictptr != NULL && (dict = *dictptr) != NULL) {
        Py_INCREF(dict);
        attr = __Pyx_PyDict_GetItemStr(dict, name);
        if (attr != NULL) {
            Py_INCREF(attr);
            Py_DECREF(dict);
            Py_XDECREF(descr);
            goto try_unpack;
        }
        Py_DECREF(dict);
    }
    if (meth_found) {
        /* Hand the unbound method descriptor to the caller (new ref). */
        *method = descr;
        return 1;
    }
    /* Non-data descriptor: invoke its __get__ now. */
    if (f != NULL) {
        attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
        Py_DECREF(descr);
        goto try_unpack;
    }
    if (likely(descr != NULL)) {
        *method = descr;
        return 0;
    }
    /* Nothing found anywhere: raise AttributeError. */
    type_name = __Pyx_PyType_GetFullyQualifiedName(tp);
    PyErr_Format(PyExc_AttributeError,
                 "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'",
                 type_name, name);
    __Pyx_DECREF_TypeName(type_name);
    return 0;
#else
    attr = __Pyx_PyObject_GetAttrStr(obj, name);
    goto try_unpack;
#endif
try_unpack:
#if CYTHON_UNPACK_METHODS
    /* A bound method whose self is exactly obj can be unpacked into its
     * underlying function so the fast "prepend self" call path applies. */
    if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
        PyObject *function = PyMethod_GET_FUNCTION(attr);
        Py_INCREF(function);
        Py_DECREF(attr);
        *method = function;
        return 1;
    }
#endif
    *method = attr;
    return 0;
}
#endif

/* PyObjectCallMethod0 (used by PyType_Ready) */
/* Call obj.method_name() with no arguments; returns a new reference or
 * NULL with an exception set. */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) {
#if CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000))
    PyObject *args[1] = {obj};
    /* Reference the slow-path helpers so they don't warn as unused. */
    (void) __Pyx_PyObject_CallOneArg;
    (void) __Pyx_PyObject_CallNoArg;
    return PyObject_VectorcallMethod(method_name, args, 1 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
#else
    PyObject *method = NULL, *result = NULL;
    /* is_method == 1: *method is unbound and expects obj as first argument. */
    int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
    if (likely(is_method)) {
        result = __Pyx_PyObject_CallOneArg(method, obj);
        Py_DECREF(method);
        return result;
    }
    if (unlikely(!method)) goto bad;
    result = __Pyx_PyObject_CallNoArg(method);
    Py_DECREF(method);
bad:
    return result;
#endif
}

/* ValidateBasesTuple (used by PyType_Ready) */
#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
/* Validate the extra bases of an extension type (everything after the
 * primary base at index 0): each must be a heap type, and when the
 * extension type itself has no __dict__ slot (dictoffset == 0) no extra
 * base may provide one.  Returns 0 if valid, -1 with TypeError set. */
static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) {
    Py_ssize_t i, n;
#if CYTHON_ASSUME_SAFE_SIZE
    n = PyTuple_GET_SIZE(bases);
#else
    n = PyTuple_Size(bases);
    if (unlikely(n < 0)) return -1;
#endif
    /* Start at 1: the first base is the tp_base that PyType_Ready handles. */
    for (i = 1; i < n; i++)
    {
        PyTypeObject *b;
#if CYTHON_AVOID_BORROWED_REFS
        PyObject *b0 = PySequence_GetItem(bases, i);
        if (!b0) return -1;
#elif CYTHON_ASSUME_SAFE_MACROS
        PyObject *b0 = PyTuple_GET_ITEM(bases, i);
#else
        PyObject *b0 = PyTuple_GetItem(bases, i);
        if (!b0) return -1;
#endif
        b = (PyTypeObject*) b0;
        if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE))
        {
            __Pyx_TypeName b_name = __Pyx_PyType_GetFullyQualifiedName(b);
            PyErr_Format(PyExc_TypeError,
                "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name);
            __Pyx_DECREF_TypeName(b_name);
#if CYTHON_AVOID_BORROWED_REFS
            Py_DECREF(b0);
#endif
            return -1;
        }
        if (dictoffset == 0)
        {
            Py_ssize_t b_dictoffset = 0;
#if CYTHON_USE_TYPE_SLOTS
            b_dictoffset = b->tp_dictoffset;
#else
            /* No direct slot access: read the base's __dictoffset__ attribute. */
            PyObject *py_b_dictoffset = PyObject_GetAttrString((PyObject*)b, "__dictoffset__");
            if (!py_b_dictoffset) goto dictoffset_return;
            b_dictoffset = PyLong_AsSsize_t(py_b_dictoffset);
            Py_DECREF(py_b_dictoffset);
            if (b_dictoffset == -1 && PyErr_Occurred()) goto dictoffset_return;
#endif
            if (b_dictoffset) {
                {
                    __Pyx_TypeName b_name = __Pyx_PyType_GetFullyQualifiedName(b);
                    PyErr_Format(PyExc_TypeError,
                        "extension type '%.200s' has no __dict__ slot, "
                        "but base type '" __Pyx_FMT_TYPENAME "' has: "
                        "either add 'cdef dict __dict__' to the extension type "
                        "or add '__slots__ = [...]' to the base type",
                        type_name, b_name);
                    __Pyx_DECREF_TypeName(b_name);
                }
                /* The label only exists in configurations that jump to it. */
#if !CYTHON_USE_TYPE_SLOTS
              dictoffset_return:
#endif
#if CYTHON_AVOID_BORROWED_REFS
                Py_DECREF(b0);
#endif
                return -1;
            }
        }
#if CYTHON_AVOID_BORROWED_REFS
        Py_DECREF(b0);
#endif
    }
    return 0;
}
#endif

/* PyType_Ready */
/* Walk the tp_base chain; report 1 as soon as any type in the chain
 * carries a tp_bases tuple, 0 if the chain ends without one. */
CYTHON_UNUSED static int __Pyx_PyType_HasMultipleInheritance(PyTypeObject *t) {
    PyTypeObject *cur;
    for (cur = t; cur != NULL; cur = __Pyx_PyType_GetSlot(cur, tp_base, PyTypeObject*)) {
        if (__Pyx_PyType_GetSlot(cur, tp_bases, PyObject*) != NULL)
            return 1;
    }
    return 0;
}
/* Finalize a statically allocated extension type.  For single inheritance
 * this is plain PyType_Ready(); for multiple inheritance the bases are
 * validated first and the HEAPTYPE flag is set temporarily around the
 * PyType_Ready call (with the GC disabled so the collector never observes
 * the static type marked as a heap type — NOTE(review): rationale inferred
 * from the flag save/restore pattern; confirm against upstream Cython). */
static int __Pyx_PyType_Ready(PyTypeObject *t) {
#if CYTHON_USE_TYPE_SPECS || !CYTHON_COMPILING_IN_CPYTHON || defined(PYSTON_MAJOR_VERSION)
    /* Silence unused-function warnings in configurations that skip the
     * multiple-inheritance handling below. */
    (void)__Pyx_PyObject_CallMethod0;
#if CYTHON_USE_TYPE_SPECS
    (void)__Pyx_validate_bases_tuple;
#endif
    return PyType_Ready(t);
#else
    int r;
    if (!__Pyx_PyType_HasMultipleInheritance(t)) {
        return PyType_Ready(t);
    }
    PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*);
    if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1))
        return -1;
#if !defined(PYSTON_MAJOR_VERSION)
    {
        int gc_was_enabled;
    #if PY_VERSION_HEX >= 0x030A00b1
        /* 3.10+: disable the GC directly at the C level. */
        gc_was_enabled = PyGC_Disable();
        (void)__Pyx_PyObject_CallMethod0;
    #else
        /* Pre-3.10: no PyGC_Disable(), so drive the Python-level gc module
         * (gc.isenabled()/gc.disable()/gc.enable()) instead. */
        PyObject *ret, *py_status;
        PyObject *gc = NULL;
        #if (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400) &&\
                !CYTHON_COMPILING_IN_GRAAL
        gc = PyImport_GetModule(__pyx_mstate_global->__pyx_kp_u_gc);
        #endif
        if (unlikely(!gc)) gc = PyImport_Import(__pyx_mstate_global->__pyx_kp_u_gc);
        if (unlikely(!gc)) return -1;
        py_status = __Pyx_PyObject_CallMethod0(gc, __pyx_mstate_global->__pyx_kp_u_isenabled);
        if (unlikely(!py_status)) {
            Py_DECREF(gc);
            return -1;
        }
        gc_was_enabled = __Pyx_PyObject_IsTrue(py_status);
        Py_DECREF(py_status);
        if (gc_was_enabled > 0) {
            ret = __Pyx_PyObject_CallMethod0(gc, __pyx_mstate_global->__pyx_kp_u_disable);
            if (unlikely(!ret)) {
                Py_DECREF(gc);
                return -1;
            }
            Py_DECREF(ret);
        } else if (unlikely(gc_was_enabled == -1)) {
            Py_DECREF(gc);
            return -1;
        }
    #endif
        /* Temporarily pretend to be a heap type while readying. */
        t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
#if PY_VERSION_HEX >= 0x030A0000
        t->tp_flags |= Py_TPFLAGS_IMMUTABLETYPE;
#endif
#else
        (void)__Pyx_PyObject_CallMethod0;
#endif
    r = PyType_Ready(t);
#if !defined(PYSTON_MAJOR_VERSION)
        /* Restore the static-type flag and the GC state. */
        t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;
    #if PY_VERSION_HEX >= 0x030A00b1
        if (gc_was_enabled)
            PyGC_Enable();
    #else
        if (gc_was_enabled) {
            PyObject *tp, *v, *tb;
            /* Preserve any pending exception across the gc.enable() call. */
            PyErr_Fetch(&tp, &v, &tb);
            ret = __Pyx_PyObject_CallMethod0(gc, __pyx_mstate_global->__pyx_kp_u_enable);
            if (likely(ret || r == -1)) {
                Py_XDECREF(ret);
                PyErr_Restore(tp, v, tb);
            } else {
                /* gc.enable() failed and there was no earlier error: report it. */
                Py_XDECREF(tp);
                Py_XDECREF(v);
                Py_XDECREF(tb);
                r = -1;
            }
        }
        Py_DECREF(gc);
    #endif
    }
#endif
    return r;
#endif
}

/* SetVTable */
/* Publish the C vtable pointer on the type (wrapped in an unnamed capsule)
 * under the module-interned vtable key.  Returns 0 on success, -1 on error. */
static int __Pyx_SetVtable(PyTypeObject *type, void *vtable) {
    int rc;
    PyObject *capsule = PyCapsule_New(vtable, 0, 0);
    if (unlikely(!capsule))
        return -1;
#if CYTHON_COMPILING_IN_LIMITED_API
    rc = PyObject_SetAttr((PyObject *) type, __pyx_mstate_global->__pyx_n_u_pyx_vtable, capsule);
#else
    rc = PyDict_SetItem(type->tp_dict, __pyx_mstate_global->__pyx_n_u_pyx_vtable, capsule);
#endif
    Py_DECREF(capsule);
    return unlikely(rc < 0) ? -1 : 0;
}

/* GetVTable (used by MergeVTables) */
/* Fetch the C vtable pointer previously stored on `type` by SetVTable.
 * Returns NULL with an exception set when absent or invalid. */
static void* __Pyx_GetVtable(PyTypeObject *type) {
#if CYTHON_COMPILING_IN_LIMITED_API
    PyObject *capsule = PyObject_GetAttr((PyObject *)type, __pyx_mstate_global->__pyx_n_u_pyx_vtable);
#else
    PyObject *capsule = PyObject_GetItem(type->tp_dict, __pyx_mstate_global->__pyx_n_u_pyx_vtable);
#endif
    void *ptr;
    if (!capsule)
        return NULL;
    ptr = PyCapsule_GetPointer(capsule, 0);
    if (!ptr && !PyErr_Occurred())
        PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type");
    Py_DECREF(capsule);
    return ptr;
}

/* MergeVTables */
/* Check that every non-primary base's vtable already appears in the vtable
 * chain inherited via tp_base; a base vtable outside that chain means the
 * C-level method tables cannot be merged and multiple inheritance must be
 * rejected.  Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_MergeVtables(PyTypeObject *type) {
    int i=0;
    Py_ssize_t size;
    void** base_vtables;
    __Pyx_TypeName tp_base_name = NULL;
    __Pyx_TypeName base_name = NULL;
    /* Sentinel marking cache slots whose vtable has not been fetched yet. */
    void* unknown = (void*)-1;
    PyObject* bases = __Pyx_PyType_GetSlot(type, tp_bases, PyObject*);
    int base_depth = 0;
    {
        PyTypeObject* base = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
        while (base) {
            base_depth += 1;
            base = __Pyx_PyType_GetSlot(base, tp_base, PyTypeObject*);
        }
    }
    base_vtables = (void**) PyMem_Malloc(sizeof(void*) * (size_t)(base_depth + 1));
    /* Fix: the allocation result was previously used unchecked; report OOM
     * instead of writing through a NULL pointer. */
    if (unlikely(!base_vtables)) {
        PyErr_NoMemory();
        return -1;
    }
    base_vtables[0] = unknown;
#if CYTHON_COMPILING_IN_LIMITED_API
    size = PyTuple_Size(bases);
    if (size < 0) goto other_failure;
#else
    size = PyTuple_GET_SIZE(bases);
#endif
    for (i = 1; i < size; i++) {
        PyObject *basei;
        void* base_vtable;
#if CYTHON_AVOID_BORROWED_REFS
        basei = PySequence_GetItem(bases, i);
        if (unlikely(!basei)) goto other_failure;
#elif !CYTHON_ASSUME_SAFE_MACROS
        basei = PyTuple_GetItem(bases, i);
        if (unlikely(!basei)) goto other_failure;
#else
        basei = PyTuple_GET_ITEM(bases, i);
#endif
        base_vtable = __Pyx_GetVtable((PyTypeObject*)basei);
#if CYTHON_AVOID_BORROWED_REFS
        Py_DECREF(basei);
#endif
        if (base_vtable != NULL) {
            int j;
            /* Lazily fill the cache of vtables along the tp_base chain and
             * check that this base's vtable occurs in it. */
            PyTypeObject* base = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
            for (j = 0; j < base_depth; j++) {
                if (base_vtables[j] == unknown) {
                    base_vtables[j] = __Pyx_GetVtable(base);
                    base_vtables[j + 1] = unknown;
                }
                if (base_vtables[j] == base_vtable) {
                    break;
                } else if (base_vtables[j] == NULL) {
                    /* Chain exhausted without a match: conflict. */
                    goto bad;
                }
                base = __Pyx_PyType_GetSlot(base, tp_base, PyTypeObject*);
            }
        }
    }
    /* Clear any error left behind by the __Pyx_GetVtable probes. */
    PyErr_Clear();
    PyMem_Free(base_vtables);
    return 0;
bad:
    {
        PyTypeObject* basei = NULL;
        PyTypeObject* tp_base = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
        tp_base_name = __Pyx_PyType_GetFullyQualifiedName(tp_base);
#if CYTHON_AVOID_BORROWED_REFS
        basei = (PyTypeObject*)PySequence_GetItem(bases, i);
        if (unlikely(!basei)) goto really_bad;
#elif !CYTHON_ASSUME_SAFE_MACROS
        basei = (PyTypeObject*)PyTuple_GetItem(bases, i);
        if (unlikely(!basei)) goto really_bad;
#else
        basei = (PyTypeObject*)PyTuple_GET_ITEM(bases, i);
#endif
        base_name = __Pyx_PyType_GetFullyQualifiedName(basei);
#if CYTHON_AVOID_BORROWED_REFS
        Py_DECREF(basei);
#endif
    }
    PyErr_Format(PyExc_TypeError,
        "multiple bases have vtable conflict: '" __Pyx_FMT_TYPENAME "' and '" __Pyx_FMT_TYPENAME "'", tp_base_name, base_name);
#if CYTHON_AVOID_BORROWED_REFS || !CYTHON_ASSUME_SAFE_MACROS
really_bad: // bad has failed!
#endif
    __Pyx_DECREF_TypeName(tp_base_name);
    __Pyx_DECREF_TypeName(base_name);
#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_AVOID_BORROWED_REFS || !CYTHON_ASSUME_SAFE_MACROS
other_failure:
#endif
    PyMem_Free(base_vtables);
    return -1;
}

/* DelItemOnTypeDict (used by SetupReduce) */
/* Remove key `k` from the type's dict, invalidating the type's method
 * cache on success.  Returns the PyDict_DelItem result (0 or -1). */
static int __Pyx__DelItemOnTypeDict(PyTypeObject *tp, PyObject *k) {
    PyObject *dict;
    int rc;
#if CYTHON_COMPILING_IN_LIMITED_API
    dict = __Pyx_GetTypeDict(tp);
    if (unlikely(!dict)) return -1;
#else
    dict = tp->tp_dict;
#endif
    rc = PyDict_DelItem(dict, k);
    if (likely(rc == 0))
        PyType_Modified(tp);
    return rc;
}

/* SetupReduce */
/* Return 1 iff `meth` carries a name attribute equal to `name`
 * (presumably its __name__ — key interned as __pyx_n_u_name_2).
 * Any lookup/comparison error is swallowed and reported as 0. */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    PyObject *name_attr = __Pyx_PyObject_GetAttrStrNoError(meth, __pyx_mstate_global->__pyx_n_u_name_2);
    int ret = name_attr ? PyObject_RichCompareBool(name_attr, name, Py_EQ) : -1;
    if (unlikely(ret < 0)) {
        PyErr_Clear();
        ret = 0;
    }
    Py_XDECREF(name_attr);
    return ret;
}
/* Wire up pickling for an extension type: when the type still uses the
 * default object.__reduce_ex__/__reduce__ (and has no custom __getstate__),
 * install the generated __reduce_cython__/__setstate_cython__ helpers as
 * __reduce__/__setstate__ on the type dict.  Returns 0 on success, -1 with
 * an exception set on failure.
 * Note on refcounts: with CYTHON_USE_PYTYPE_LOOKUP, _PyType_Lookup returns
 * borrowed references, hence the conditional DECREFs at __PYX_GOOD. */
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_getstate = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
    PyObject *getstate = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
    getstate = _PyType_Lookup((PyTypeObject*)type_obj, __pyx_mstate_global->__pyx_n_u_getstate);
#else
    getstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_mstate_global->__pyx_n_u_getstate);
    if (!getstate && PyErr_Occurred()) {
        goto __PYX_BAD;
    }
#endif
    if (getstate) {
#if CYTHON_USE_PYTYPE_LOOKUP
        object_getstate = _PyType_Lookup(&PyBaseObject_Type, __pyx_mstate_global->__pyx_n_u_getstate);
#else
        object_getstate = __Pyx_PyObject_GetAttrStrNoError((PyObject*)&PyBaseObject_Type, __pyx_mstate_global->__pyx_n_u_getstate);
        if (!object_getstate && PyErr_Occurred()) {
            goto __PYX_BAD;
        }
#endif
        /* A user-defined __getstate__ (different from object's): leave the
         * type's pickling machinery untouched. */
        if (object_getstate != getstate) {
            goto __PYX_GOOD;
        }
    }
#if CYTHON_USE_PYTYPE_LOOKUP
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_mstate_global->__pyx_n_u_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_mstate_global->__pyx_n_u_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_mstate_global->__pyx_n_u_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
    /* Only act when the type inherits object's default __reduce_ex__. */
    if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_mstate_global->__pyx_n_u_reduce); if (!object_reduce) goto __PYX_BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_mstate_global->__pyx_n_u_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_mstate_global->__pyx_n_u_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
        /* Either __reduce__ is still object's default, or it is already the
         * generated __reduce_cython__ (e.g. inherited from a Cython base). */
        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_mstate_global->__pyx_n_u_reduce_cython)) {
            reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_mstate_global->__pyx_n_u_reduce_cython);
            if (likely(reduce_cython)) {
                /* Install the generated helper as __reduce__ and drop the
                 * temporary __reduce_cython__ entry. */
                ret = __Pyx_SetItemOnTypeDict((PyTypeObject*)type_obj, __pyx_mstate_global->__pyx_n_u_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                ret = __Pyx_DelItemOnTypeDict((PyTypeObject*)type_obj, __pyx_mstate_global->__pyx_n_u_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
            } else if (reduce == object_reduce || PyErr_Occurred()) {
                goto __PYX_BAD;
            }
            setstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_mstate_global->__pyx_n_u_setstate);
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_mstate_global->__pyx_n_u_setstate_cython)) {
                setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_mstate_global->__pyx_n_u_setstate_cython);
                if (likely(setstate_cython)) {
                    ret = __Pyx_SetItemOnTypeDict((PyTypeObject*)type_obj, __pyx_mstate_global->__pyx_n_u_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                    ret = __Pyx_DelItemOnTypeDict((PyTypeObject*)type_obj, __pyx_mstate_global->__pyx_n_u_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                } else if (!setstate || PyErr_Occurred()) {
                    goto __PYX_BAD;
                }
            }
            PyType_Modified((PyTypeObject*)type_obj);
        }
    }
    goto __PYX_GOOD;
__PYX_BAD:
    if (!PyErr_Occurred()) {
        __Pyx_TypeName type_obj_name =
            __Pyx_PyType_GetFullyQualifiedName((PyTypeObject*)type_obj);
        PyErr_Format(PyExc_RuntimeError,
            "Unable to initialize pickling for " __Pyx_FMT_TYPENAME, type_obj_name);
        __Pyx_DECREF_TypeName(type_obj_name);
    }
    ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    /* These hold new references only in the GetAttr configuration. */
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
    Py_XDECREF(object_getstate);
    Py_XDECREF(getstate);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}

/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType_3_2_2
#define __PYX_HAVE_RT_ImportType_3_2_2
/* Import `module_name.class_name` and verify that the runtime object layout
 * is compatible with the struct size this module was compiled against.
 * `size` is the expected struct size, `alignment` its alignment, and
 * `check_size` selects whether a mismatch is an error, a warning, or
 * ignored.  Returns a new reference to the type, or NULL with an exception
 * set. */
static PyTypeObject *__Pyx_ImportType_3_2_2(PyObject *module, const char *module_name, const char *class_name,
    size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_3_2_2 check_size)
{
    PyObject *result = 0;
    Py_ssize_t basicsize;
    Py_ssize_t itemsize;
#if defined(Py_LIMITED_API) || (defined(CYTHON_COMPILING_IN_LIMITED_API) && CYTHON_COMPILING_IN_LIMITED_API)
    PyObject *py_basicsize;
    PyObject *py_itemsize;
#endif
    result = PyObject_GetAttrString(module, class_name);
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
#if !( defined(Py_LIMITED_API) || (defined(CYTHON_COMPILING_IN_LIMITED_API) && CYTHON_COMPILING_IN_LIMITED_API) )
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
    itemsize = ((PyTypeObject *)result)->tp_itemsize;
#else
    /* size == 0 means no size check was requested. */
    if (size == 0) {
        return (PyTypeObject *)result;
    }
    /* Limited API: no direct slot access; read the sizes as attributes. */
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
    py_itemsize = PyObject_GetAttrString(result, "__itemsize__");
    if (!py_itemsize)
        goto bad;
    itemsize = PyLong_AsSsize_t(py_itemsize);
    Py_DECREF(py_itemsize);
    py_itemsize = 0;
    if (itemsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    if (itemsize) {
        /* Variable-size object: account for trailing padding of the last
         * item.  NOTE(review): reducing `alignment` to `size % alignment`
         * mirrors upstream Cython; confirm intent there. */
        if (size % alignment) {
            alignment = size % alignment;
        }
        if (itemsize < (Py_ssize_t)alignment)
            itemsize = (Py_ssize_t)alignment;
    }
    /* The runtime object must be at least as large as this module expects. */
    if ((size_t)(basicsize + itemsize) < size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize+itemsize);
        goto bad;
    }
    /* Strict mode: the expected size must lie within [basicsize, basicsize+itemsize]. */
    if (check_size == __Pyx_ImportType_CheckSize_Error_3_2_2 &&
            ((size_t)basicsize > size || (size_t)(basicsize + itemsize) < size)) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd-%zd from PyObject",
            module_name, class_name, size, basicsize, basicsize+itemsize);
        goto bad;
    }
    else if (check_size == __Pyx_ImportType_CheckSize_Warn_3_2_2 && (size_t)basicsize > size) {
        if (PyErr_WarnFormat(NULL, 0,
                "%.200s.%.200s size changed, may indicate binary incompatibility. "
                "Expected %zd from C header, got %zd from PyObject",
                module_name, class_name, size, basicsize) < 0) {
            goto bad;
        }
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(result);
    return NULL;
}
#endif

/* PxdImportShared (used by FunctionImport) */
#ifndef __PYX_HAVE_RT_ImportFromPxd_3_2_2
#define __PYX_HAVE_RT_ImportFromPxd_3_2_2
/* Look up `name` in the module's __pyx_capi__ dict and unpack the capsule
 * into *p after verifying its signature string.  `what` names the kind of
 * C object for error messages.  Returns 0 on success, -1 on failure. */
static int __Pyx_ImportFromPxd_3_2_2(PyObject *module, const char *name, void **p, const char *sig, const char *what) {
    int ret = -1;
    PyObject *cobj = 0;
    PyObject *capi = PyObject_GetAttrString(module, "__pyx_capi__");
    if (!capi)
        goto done;
#if (defined(Py_LIMITED_API) && Py_LIMITED_API >= 0x030d0000) || (!defined(Py_LIMITED_API) && PY_VERSION_HEX >= 0x030d0000)
    PyDict_GetItemStringRef(capi, name, &cobj);
#else
    cobj = PyDict_GetItemString(capi, name);
    Py_XINCREF(cobj);
#endif
    if (!cobj) {
        PyErr_Format(PyExc_ImportError,
            "%.200s does not export expected C %.8s %.200s",
                PyModule_GetName(module), what, name);
        goto done;
    }
    if (!PyCapsule_IsValid(cobj, sig)) {
        PyErr_Format(PyExc_TypeError,
            "C %.8s %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
             what, PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj));
        goto done;
    }
    *p = PyCapsule_GetPointer(cobj, sig);
    if (*p)
        ret = 0;
done:
    Py_XDECREF(capi);
    Py_XDECREF(cobj);
    return ret;
}
#endif

/* FunctionImport */
#ifndef __PYX_HAVE_RT_ImportFunction_3_2_2
#define __PYX_HAVE_RT_ImportFunction_3_2_2
/* Import a C function pointer exported through __pyx_capi__.  The union
 * converts the capsule's data pointer to a function pointer without
 * invoking undefined object-pointer/function-pointer casts. */
static int __Pyx_ImportFunction_3_2_2(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
    union {
        void (*fp)(void);
        void *p;
    } converter;
    int rc = __Pyx_ImportFromPxd_3_2_2(module, funcname, &converter.p, sig, "function");
    if (rc == 0)
        *f = converter.fp;
    return rc;
}
#endif

/* ImportFrom */
/* Implement `from module import name`: first a plain attribute lookup; if
 * that raises AttributeError, fall back to looking up the submodule
 * "<module>.<name>" in sys.modules (a partially initialized package may not
 * have bound the submodule attribute yet).  Returns a new reference or NULL
 * with ImportError set. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        const char* mod_name_str;
        PyObject* mod_name_obj = 0;
        PyObject* dotted = 0;
        PyObject* qualified = 0;
        PyErr_Clear();
        mod_name_str = PyModule_GetName(module);
        if (unlikely(!mod_name_str)) { goto cleanup; }
        mod_name_obj = PyUnicode_FromString(mod_name_str);
        if (unlikely(!mod_name_obj)) { goto cleanup; }
        dotted = PyUnicode_Concat(mod_name_obj, __pyx_mstate_global->__pyx_kp_u__2);
        if (unlikely(!dotted)) { goto cleanup; }
        qualified = PyUnicode_Concat(dotted, name);
        if (unlikely(!qualified)) { goto cleanup; }
        #if (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM  < 0x07030400) ||\
                CYTHON_COMPILING_IN_GRAAL
        {
            PyObject *modules = PyImport_GetModuleDict();
            if (unlikely(!modules))
                goto cleanup;
            value = PyObject_GetItem(modules, qualified);
        }
        #else
        value = PyImport_GetModule(qualified);
        #endif
      cleanup:
        Py_XDECREF(qualified);
        Py_XDECREF(dotted);
        Py_XDECREF(mod_name_obj);
    }
    if (unlikely(!value)) {
        PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
    }
    return value;
}

/* Py3UpdateBases */
/* PEP 560: build a new bases tuple in which every non-type entry providing
 * __mro_entries__ is replaced by the tuple that hook returns.  Returns a
 * new reference — a fresh tuple, or `bases` itself (incref'd) when nothing
 * needed substitution — or NULL with an exception set.
 * Fixes vs. the generated original: `new_base` is NULL-initialized and
 * released on the error path (it previously leaked when PyList_Size /
 * PyList_SetSlice / the prefix-copy loop failed), and `base` is released
 * on the no-substitution early return under CYTHON_AVOID_BORROWED_REFS. */
static PyObject*
__Pyx_PEP560_update_bases(PyObject *bases)
{
    Py_ssize_t i, j, size_bases;
    PyObject *base = NULL, *meth, *new_base = NULL, *result, *new_bases = NULL;
#if CYTHON_ASSUME_SAFE_SIZE
    size_bases = PyTuple_GET_SIZE(bases);
#else
    size_bases = PyTuple_Size(bases);
    if (size_bases < 0) return NULL;
#endif
    for (i = 0; i < size_bases; i++) {
#if CYTHON_AVOID_BORROWED_REFS
        Py_CLEAR(base);
#endif
#if CYTHON_ASSUME_SAFE_MACROS
        base = PyTuple_GET_ITEM(bases, i);
#else
        base = PyTuple_GetItem(bases, i);
        if (!base) goto error;
#endif
#if CYTHON_AVOID_BORROWED_REFS
        Py_INCREF(base);
#endif
        if (PyType_Check(base)) {
            /* Real types pass through untouched. */
            if (new_bases) {
                if (PyList_Append(new_bases, base) < 0) {
                    goto error;
                }
            }
            continue;
        }
        meth = __Pyx_PyObject_GetAttrStrNoError(base, __pyx_mstate_global->__pyx_n_u_mro_entries);
        if (!meth && PyErr_Occurred()) {
            goto error;
        }
        if (!meth) {
            /* No __mro_entries__: keep the entry as-is. */
            if (new_bases) {
                if (PyList_Append(new_bases, base) < 0) {
                    goto error;
                }
            }
            continue;
        }
        new_base = __Pyx_PyObject_CallOneArg(meth, bases);
        Py_DECREF(meth);
        if (!new_base) {
            goto error;
        }
        if (!PyTuple_Check(new_base)) {
            PyErr_SetString(PyExc_TypeError,
                            "__mro_entries__ must return a tuple");
            goto error;
        }
        if (!new_bases) {
            /* First substitution: copy the already-processed prefix. */
            if (!(new_bases = PyList_New(i))) {
                goto error;
            }
            for (j = 0; j < i; j++) {
                PyObject *base_from_list;
#if CYTHON_ASSUME_SAFE_MACROS
                base_from_list = PyTuple_GET_ITEM(bases, j);
                PyList_SET_ITEM(new_bases, j, base_from_list);
                Py_INCREF(base_from_list);
#else
                base_from_list = PyTuple_GetItem(bases, j);
                if (!base_from_list) goto error;
                Py_INCREF(base_from_list);
                if (PyList_SetItem(new_bases, j, base_from_list) < 0) goto error;
#endif
            }
        }
#if CYTHON_ASSUME_SAFE_SIZE
        j = PyList_GET_SIZE(new_bases);
#else
        j = PyList_Size(new_bases);
        if (j < 0) goto error;
#endif
        /* Splice the resolved entries at the end of the list. */
        if (PyList_SetSlice(new_bases, j, j, new_base) < 0) {
            goto error;
        }
        Py_CLEAR(new_base);
    }
    if (!new_bases) {
        /* Nothing substituted: hand back the original tuple. */
        Py_INCREF(bases);
#if CYTHON_AVOID_BORROWED_REFS
        Py_XDECREF(base);
#endif
        return bases;
    }
    result = PyList_AsTuple(new_bases);
    Py_DECREF(new_bases);
#if CYTHON_AVOID_BORROWED_REFS
    Py_XDECREF(base);
#endif
    return result;
error:
    Py_XDECREF(new_base);
    Py_XDECREF(new_bases);
#if CYTHON_AVOID_BORROWED_REFS
    Py_XDECREF(base);
#endif
    return NULL;
}

/* CalculateMetaclass */
/* Determine the most derived metaclass among `metaclass` (may be NULL) and
 * the types of all entries in `bases`.  Returns a new reference, or NULL
 * with TypeError set when the candidate metaclasses are incompatible. */
static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) {
    Py_ssize_t idx, n;
#if CYTHON_ASSUME_SAFE_SIZE
    n = PyTuple_GET_SIZE(bases);
#else
    n = PyTuple_Size(bases);
    if (n < 0) return NULL;
#endif
    for (idx = 0; idx < n; idx++) {
        PyTypeObject *candidate;
#if CYTHON_ASSUME_SAFE_MACROS
        PyObject *entry = PyTuple_GET_ITEM(bases, idx);
#else
        PyObject *entry = PyTuple_GetItem(bases, idx);
        if (!entry) return NULL;
#endif
        candidate = Py_TYPE(entry);
        if (!metaclass) {
            metaclass = candidate;
            continue;
        }
        /* Keep whichever of the two is the more derived metaclass. */
        if (PyType_IsSubtype(metaclass, candidate))
            continue;
        if (PyType_IsSubtype(candidate, metaclass)) {
            metaclass = candidate;
            continue;
        }
        /* Neither is a subtype of the other: unresolvable conflict. */
        PyErr_SetString(PyExc_TypeError,
                        "metaclass conflict: "
                        "the metaclass of a derived class "
                        "must be a (non-strict) subclass "
                        "of the metaclasses of all its bases");
        return NULL;
    }
    if (!metaclass) {
        metaclass = &PyType_Type;
    }
    Py_INCREF((PyObject*) metaclass);
    return (PyObject*) metaclass;
}

/* PyObjectCall2Args (used by Py3ClassCreate) */
/* Invoke `function(arg1, arg2)` via the fastcall protocol. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
    /* Spare leading NULL slot enables the vectorcall ARGUMENTS_OFFSET trick. */
    PyObject *callargs[3] = {NULL, arg1, arg2};
    return __Pyx_PyObject_FastCall(function, &callargs[1], 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET);
}

/* PyObjectLookupSpecial (used by Py3ClassCreate) */
#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
/* Look up `attr_name` on the TYPE of `obj` (special-method lookup semantics:
 * bypasses the instance dict).  Runs the descriptor protocol on a hit.
 * Returns a new reference; on a miss returns NULL, raising AttributeError
 * only when `with_error` is true. */
static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error) {
    PyObject *res;
    PyTypeObject *tp = Py_TYPE(obj);
    res = _PyType_Lookup(tp, attr_name);  /* borrowed reference */
    if (likely(res)) {
        descrgetfunc f = Py_TYPE(res)->tp_descr_get;
        if (!f) {
            /* Plain attribute: upgrade the borrowed ref to a new one. */
            Py_INCREF(res);
        } else {
            /* Descriptor: bind it to the instance (returns a new ref). */
            res = f(res, obj, (PyObject *)tp);
        }
    } else if (with_error) {
        PyErr_SetObject(PyExc_AttributeError, attr_name);
    }
    return res;
}
#endif

/* Py3ClassCreate */
/* Build the class namespace for a Python-3 class statement: call
 * metaclass.__prepare__(name, bases, **mkw) when available, otherwise use a
 * plain dict.  Pre-populates __module__, __qualname__ and (optionally)
 * __doc__.  Returns a new reference to the namespace, or NULL on error. */
static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name,
                                           PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) {
    PyObject *ns;
    if (metaclass) {
        PyObject *prep = __Pyx_PyObject_GetAttrStrNoError(metaclass, __pyx_mstate_global->__pyx_n_u_prepare);
        if (prep) {
            PyObject *pargs[3] = {NULL, name, bases};
            ns = __Pyx_PyObject_FastCallDict(prep, pargs+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, mkw);
            Py_DECREF(prep);
        } else {
            /* No __prepare__: fall back to a fresh dict (unless the lookup
             * failed with a real error rather than a missing attribute). */
            if (unlikely(PyErr_Occurred()))
                return NULL;
            ns = PyDict_New();
        }
    } else {
        ns = PyDict_New();
    }
    if (unlikely(!ns))
        return NULL;
    if (unlikely(PyObject_SetItem(ns, __pyx_mstate_global->__pyx_n_u_module, modname) < 0)) goto bad;
    if (unlikely(PyObject_SetItem(ns, __pyx_mstate_global->__pyx_n_u_qualname, qualname) < 0)) goto bad;
    if (unlikely(doc && PyObject_SetItem(ns, __pyx_mstate_global->__pyx_n_u_doc, doc) < 0)) goto bad;
    return ns;
bad:
    Py_DECREF(ns);
    return NULL;
}
/* Create a Python-3 class: resolve the effective metaclass (optionally
 * honouring a Py2-style __metaclass__ entry in `dict` and/or computing the
 * most-derived metaclass from `bases`), then call
 * metaclass(name, bases, dict, **mkw).  Returns a new reference. */
static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases,
                                      PyObject *dict, PyObject *mkw,
                                      int calculate_metaclass, int allow_py2_metaclass) {
    PyObject *result;
    PyObject *owned_metaclass = NULL;  /* holds our ref when metaclass came from dict/calculation */
    PyObject *margs[4] = {NULL, name, bases, dict};
    if (allow_py2_metaclass) {
        owned_metaclass = PyObject_GetItem(dict, __pyx_mstate_global->__pyx_n_u_metaclass);
        if (owned_metaclass) {
            metaclass = owned_metaclass;
        } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) {
            /* No __metaclass__ key: not an error. */
            PyErr_Clear();
        } else {
            return NULL;
        }
    }
    if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) {
        metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases);
        Py_XDECREF(owned_metaclass);
        if (unlikely(!metaclass))
            return NULL;
        owned_metaclass = metaclass;
    }
    result = __Pyx_PyObject_FastCallDict(metaclass, margs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, mkw);
    Py_XDECREF(owned_metaclass);
    return result;
}

/* dict_setdefault (used by FetchCommonType) */
/* dict.setdefault(key, default_value) returning a NEW reference to the
 * stored value.  Four implementations depending on API level:
 * vectorcall method (Limited API >= 3.12), generic method call (older
 * Limited API), PyDict_SetDefaultRef (CPython >= 3.13, returns a new ref
 * directly), or PyDict_SetDefault + INCREF (older CPython). */
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value) {
    PyObject* value;
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX >= 0x030C0000
    PyObject *args[] = {d, key, default_value};
    value = PyObject_VectorcallMethod(__pyx_mstate_global->__pyx_n_u_setdefault, args, 3 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
#elif CYTHON_COMPILING_IN_LIMITED_API
    value = PyObject_CallMethodObjArgs(d, __pyx_mstate_global->__pyx_n_u_setdefault, key, default_value, NULL);
#elif PY_VERSION_HEX >= 0x030d0000
    PyDict_SetDefaultRef(d, key, default_value, &value);
#else
    value = PyDict_SetDefault(d, key, default_value);  /* borrowed reference */
    if (unlikely(!value)) return NULL;
    Py_INCREF(value);
#endif
    return value;
}

/* AddModuleRef (used by FetchSharedCythonModule) */
/* Return a NEW reference to the named module from sys.modules, creating and
 * registering an empty module on first use.  Three variants:
 * - free-threaded CPython: race-safe re-implementation using
 *   PyDict_SetDefaultRef so concurrent creators agree on one module object;
 * - Limited API >= 3.13: PyImport_AddModuleRef does exactly this;
 * - otherwise: PyImport_AddModule (borrowed ref) + INCREF. */
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
  static PyObject *__Pyx_PyImport_AddModuleObjectRef(PyObject *name) {
      PyObject *module_dict = PyImport_GetModuleDict();
      PyObject *m;
      if (PyMapping_GetOptionalItem(module_dict, name, &m) < 0) {
          return NULL;
      }
      if (m != NULL && PyModule_Check(m)) {
          return m;
      }
      /* Missing or not a module: create a fresh one. */
      Py_XDECREF(m);
      m = PyModule_NewObject(name);
      if (m == NULL)
          return NULL;
      if (PyDict_CheckExact(module_dict)) {
          PyObject *new_m;
          /* Atomic insert: if another thread won the race, adopt its module. */
          (void)PyDict_SetDefaultRef(module_dict, name, m, &new_m);
          Py_DECREF(m);
          return new_m;
      } else {
           if (PyObject_SetItem(module_dict, name, m) != 0) {
                Py_DECREF(m);
                return NULL;
            }
            return m;
      }
  }
  static PyObject *__Pyx_PyImport_AddModuleRef(const char *name) {
      PyObject *py_name = PyUnicode_FromString(name);
      if (!py_name) return NULL;
      PyObject *module = __Pyx_PyImport_AddModuleObjectRef(py_name);
      Py_DECREF(py_name);
      return module;
  }
#elif __PYX_LIMITED_VERSION_HEX >= 0x030d0000
  #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name)
#else
  static PyObject *__Pyx_PyImport_AddModuleRef(const char *name) {
      PyObject *module = PyImport_AddModule(name);  /* borrowed reference */
      Py_XINCREF(module);
      return module;
  }
#endif

/* FetchSharedCythonModule (used by FetchCommonType) */
/* Return a new reference to the shared Cython ABI module (created in
 * sys.modules on first use); NULL on failure. */
static PyObject *__Pyx_FetchSharedCythonABIModule(void) {
    PyObject *abi_module = __Pyx_PyImport_AddModuleRef(__PYX_ABI_MODULE_NAME);
    return abi_module;
}

/* FetchCommonType (used by CommonTypesMetaclass) */
/* PyType_FromMetaclass() backfill for targets older than 3.12: create the
 * type from its spec, then retroactively swap in the requested metaclass
 * (dropping the ref held on the old type).  On 3.12+ the real API is used. */
#if __PYX_LIMITED_VERSION_HEX < 0x030C0000
static PyObject* __Pyx_PyType_FromMetaclass(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases) {
    PyObject *result = __Pyx_PyType_FromModuleAndSpec(module, spec, bases);
    if (result && metaclass) {
        PyObject *old_tp = (PyObject*)Py_TYPE(result);
    Py_INCREF((PyObject*)metaclass);
#if __PYX_LIMITED_VERSION_HEX >= 0x03090000
        Py_SET_TYPE(result, metaclass);
#else
        result->ob_type = metaclass;
#endif
        Py_DECREF(old_tp);
    }
    return result;
}
#else
#define __Pyx_PyType_FromMetaclass(me, mo, s, b) PyType_FromMetaclass(me, mo, s, b)
#endif
/* Sanity-check a type found in the shared ABI module cache: it must be a
 * type object of the expected basicsize (an ABI mismatch means the cached
 * type was built by an incompatible Cython build).  Returns 0 if OK,
 * -1 with TypeError set otherwise. */
static int __Pyx_VerifyCachedType(PyObject *cached_type,
                               const char *name,
                               Py_ssize_t expected_basicsize) {
    Py_ssize_t basicsize;
    if (!PyType_Check(cached_type)) {
        PyErr_Format(PyExc_TypeError,
            "Shared Cython type %.200s is not a type object", name);
        return -1;
    }
    if (expected_basicsize == 0) {
        return 0; // size is inherited, nothing useful to check
    }
#if CYTHON_COMPILING_IN_LIMITED_API
    /* No direct struct access under the Limited API: read __basicsize__. */
    PyObject *py_basicsize;
    py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__");
    if (unlikely(!py_basicsize)) return -1;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = NULL;
    if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) return -1;
#else
    basicsize = ((PyTypeObject*) cached_type)->tp_basicsize;
#endif
    if (basicsize != expected_basicsize) {
        PyErr_Format(PyExc_TypeError,
            "Shared Cython type %.200s has the wrong size, try recompiling",
            name);
        return -1;
    }
    return 0;
}
/* Fetch (or create and publish) a type shared across all Cython modules of
 * the same ABI.  Looks the type up by its unqualified name in the shared ABI
 * module; on a miss, builds it from `spec` and inserts it with setdefault
 * semantics so a concurrent creator wins cleanly.  Any type obtained from
 * the cache is verified against the expected basicsize.
 * Returns a NEW reference, or NULL on error. */
static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases) {
    PyObject *abi_module = NULL, *cached_type = NULL, *abi_module_dict, *new_cached_type, *py_object_name;
    int get_item_ref_result;
    /* Use only the last dotted component of the spec name as the cache key. */
    const char* object_name = strrchr(spec->name, '.');
    object_name = object_name ? object_name+1 : spec->name;
    py_object_name = PyUnicode_FromString(object_name);
    if (!py_object_name) return NULL;
    abi_module = __Pyx_FetchSharedCythonABIModule();
    if (!abi_module) goto done;
    abi_module_dict = PyModule_GetDict(abi_module);  /* borrowed reference */
    if (!abi_module_dict) goto done;
    get_item_ref_result = __Pyx_PyDict_GetItemRef(abi_module_dict, py_object_name, &cached_type);
    if (get_item_ref_result == 1) {
        /* Cache hit: validate and return it. */
        if (__Pyx_VerifyCachedType(
              cached_type,
              object_name,
              spec->basicsize) < 0) {
            goto bad;
        }
        goto done;
    } else if (unlikely(get_item_ref_result == -1)) {
        goto bad;
    }
    /* Cache miss: build the type from its spec. */
    cached_type = __Pyx_PyType_FromMetaclass(
        metaclass,
        CYTHON_USE_MODULE_STATE ? module : abi_module,
        spec, bases);
    if (unlikely(!cached_type)) goto bad;
    if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad;
    new_cached_type = __Pyx_PyDict_SetDefault(abi_module_dict, py_object_name, cached_type);
    if (unlikely(new_cached_type != cached_type)) {
        /* Someone else published a type first (or setdefault failed):
         * discard ours and adopt (after verifying) the published one. */
        if (unlikely(!new_cached_type)) goto bad;
        Py_DECREF(cached_type);
        cached_type = new_cached_type;
        if (__Pyx_VerifyCachedType(
                cached_type,
                object_name,
                spec->basicsize) < 0) {
            goto bad;
        }
        goto done;
    } else {
        /* setdefault returned our own type with an extra ref; drop it. */
        Py_DECREF(new_cached_type);
    }
done:
    Py_XDECREF(abi_module);
    Py_DECREF(py_object_name);
    assert(cached_type == NULL || PyType_Check(cached_type));
    return (PyTypeObject *) cached_type;
bad:
    Py_XDECREF(cached_type);
    cached_type = NULL;
    goto done;
}

/* CommonTypesMetaclass (used by CythonFunctionShared) */
/* __module__ getter for the common-types metaclass: always reports the
 * shared Cython ABI module name. */
static PyObject* __pyx_CommonTypesMetaclass_get_module(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED void* context) {
    const char *abi_module_name = __PYX_ABI_MODULE_NAME;
    return PyUnicode_FromString(abi_module_name);
}
/* Pre-3.10 targets lack Py_TPFLAGS_DISALLOW_INSTANTIATION /
 * Py_TPFLAGS_IMMUTABLETYPE, so enforce both by hand via slot functions
 * that always raise. */
#if __PYX_LIMITED_VERSION_HEX < 0x030A0000
static PyObject* __pyx_CommonTypesMetaclass_call(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *args, CYTHON_UNUSED PyObject *kwds) {
    PyErr_SetString(PyExc_TypeError, "Cannot instantiate Cython internal types");
    return NULL;
}
static int __pyx_CommonTypesMetaclass_setattr(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *attr, CYTHON_UNUSED PyObject *value) {
    PyErr_SetString(PyExc_TypeError, "Cython internal types are immutable");
    return -1;
}
#endif
/* Property table for the common-types metaclass: only __module__. */
static PyGetSetDef __pyx_CommonTypesMetaclass_getset[] = {
    {"__module__", __pyx_CommonTypesMetaclass_get_module, NULL, NULL, NULL},
    {0, 0, 0, 0, 0}
};
/* Slot table for the common-types metaclass; the manual call/new/setattro
 * blockers are only needed where the 3.10+ type flags are unavailable. */
static PyType_Slot __pyx_CommonTypesMetaclass_slots[] = {
    {Py_tp_getset, (void *)__pyx_CommonTypesMetaclass_getset},
    #if __PYX_LIMITED_VERSION_HEX < 0x030A0000
    {Py_tp_call, (void*)__pyx_CommonTypesMetaclass_call},
    {Py_tp_new, (void*)__pyx_CommonTypesMetaclass_call},
    {Py_tp_setattro, (void*)__pyx_CommonTypesMetaclass_setattr},
    #endif
    {0, 0}
};
/* Type spec for the common-types metaclass: immutable, non-instantiable,
 * basicsize/itemsize 0 (inherited from the base, i.e. `type`). */
static PyType_Spec __pyx_CommonTypesMetaclass_spec = {
    __PYX_TYPE_MODULE_PREFIX "_common_types_metatype",
    0,
    0,
    Py_TPFLAGS_IMMUTABLETYPE |
    Py_TPFLAGS_DISALLOW_INSTANTIATION |
    Py_TPFLAGS_DEFAULT,
    __pyx_CommonTypesMetaclass_slots
};
/* Create (or fetch from the shared ABI cache) the common-types metaclass,
 * derived from `type`, and store it in this module's state.
 * Returns 0 on success, -1 on error. */
static int __pyx_CommonTypesMetaclass_init(PyObject *module) {
    __pyx_mstatetype *mstate = __Pyx_PyModule_GetState(module);
    PyObject *bases = PyTuple_Pack(1, &PyType_Type);
    if (unlikely(!bases)) {
        return -1;
    }
    mstate->__pyx_CommonTypesMetaclassType = __Pyx_FetchCommonTypeFromSpec(NULL, module, &__pyx_CommonTypesMetaclass_spec, bases);
    Py_DECREF(bases);
    if (unlikely(mstate->__pyx_CommonTypesMetaclassType == NULL)) {
        return -1;
    }
    return 0;
}

/* PyMethodNew (used by CythonFunctionShared) */
/* Bind `func` to `self` as a method object (classic __get__ behaviour);
 * with self == NULL the plain function is returned.  Under the Limited API
 * the cached `types.MethodType` is called instead of PyMethod_New. */
#if CYTHON_COMPILING_IN_LIMITED_API
static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) {
    PyObject *result;
    CYTHON_UNUSED_VAR(typ);
    if (!self)
        return __Pyx_NewRef(func);
    #if __PYX_LIMITED_VERSION_HEX >= 0x030C0000
    {
        PyObject *args[] = {func, self};
        result = PyObject_Vectorcall(__pyx_mstate_global->__Pyx_CachedMethodType, args, 2, NULL);
    }
    #else
    result = PyObject_CallFunctionObjArgs(__pyx_mstate_global->__Pyx_CachedMethodType, func, self, NULL);
    #endif
    return result;
}
#else
static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) {
    CYTHON_UNUSED_VAR(typ);
    if (!self)
        return __Pyx_NewRef(func);
    return PyMethod_New(func, self);
}
#endif

/* PyVectorcallFastCallDict (used by CythonFunctionShared) */
/* Adapter from the (args, kwdict) calling convention to a vectorcall
 * function `vc`: positional args plus a keyword DICT are repacked into a
 * flat argument array and a kwnames tuple. */
#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL
static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw)
{
    PyObject *res = NULL;
    PyObject *kwnames;
    PyObject **newargs;
    PyObject **kwvalues;
    Py_ssize_t i;
    #if CYTHON_AVOID_BORROWED_REFS
    PyObject *pos;
    #else
    Py_ssize_t pos;
    #endif
    size_t j;
    PyObject *key, *value;
    unsigned long keys_are_strings;
    #if !CYTHON_ASSUME_SAFE_SIZE
    Py_ssize_t nkw = PyDict_Size(kw);
    if (unlikely(nkw == -1)) return NULL;
    #else
    Py_ssize_t nkw = PyDict_GET_SIZE(kw);
    #endif
    /* One flat buffer: positional args first, keyword values appended. */
    newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0]));
    if (unlikely(newargs == NULL)) {
        PyErr_NoMemory();
        return NULL;
    }
    for (j = 0; j < nargs; j++) newargs[j] = args[j];
    kwnames = PyTuple_New(nkw);
    if (unlikely(kwnames == NULL)) {
        PyMem_Free(newargs);
        return NULL;
    }
    kwvalues = newargs + nargs;
    pos = 0;
    i = 0;
    /* AND together all key type flags; stays non-zero only if every key is
     * (a subclass of) str, checked once after the loop. */
    keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS;
    while (__Pyx_PyDict_NextRef(kw, &pos, &key, &value)) {
        keys_are_strings &=
        #if CYTHON_COMPILING_IN_LIMITED_API
            PyType_GetFlags(Py_TYPE(key));
        #else
            Py_TYPE(key)->tp_flags;
        #endif
        #if !CYTHON_ASSUME_SAFE_MACROS
        if (unlikely(PyTuple_SetItem(kwnames, i, key) < 0)) goto cleanup;
        #else
        PyTuple_SET_ITEM(kwnames, i, key);  /* steals the key reference */
        #endif
        kwvalues[i] = value;
        i++;
    }
    if (unlikely(!keys_are_strings)) {
        PyErr_SetString(PyExc_TypeError, "keywords must be strings");
        goto cleanup;
    }
    res = vc(func, newargs, nargs, kwnames);
cleanup:
    #if CYTHON_AVOID_BORROWED_REFS
    Py_DECREF(pos);
    #endif
    Py_DECREF(kwnames);
    /* kwvalues hold references from __Pyx_PyDict_NextRef; release them. */
    for (i = 0; i < nkw; i++)
        Py_DECREF(kwvalues[i]);
    PyMem_Free(newargs);
    return res;
}
/* Fast path: with no (or an empty) keyword dict, call `vc` directly;
 * otherwise fall through to the repacking helper above. */
static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw)
{
    Py_ssize_t kw_size =
        likely(kw == NULL) ?
        0 :
#if !CYTHON_ASSUME_SAFE_SIZE
        PyDict_Size(kw);
#else
        PyDict_GET_SIZE(kw);
#endif
    if (kw_size == 0) {
        return vc(func, args, nargs, NULL);
    }
#if !CYTHON_ASSUME_SAFE_SIZE
    else if (unlikely(kw_size == -1)) {
        return NULL;
    }
#endif
    return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw);
}
#endif

/* CythonFunctionShared (used by CythonFunction) */
/* Test whether `func` (possibly wrapped in a bound method) is the CyFunction
 * or PyCFunction whose underlying C implementation is `cfunc`.  Used as an
 * identity optimization; failures are treated as "no". */
#if CYTHON_COMPILING_IN_LIMITED_API
static CYTHON_INLINE int __Pyx__IsSameCyOrCFunctionNoMethod(PyObject *func, void (*cfunc)(void)) {
    if (__Pyx_CyFunction_Check(func)) {
        return PyCFunction_GetFunction(((__pyx_CyFunctionObject*)func)->func) == (PyCFunction) cfunc;
    } else if (PyCFunction_Check(func)) {
        return PyCFunction_GetFunction(func) == (PyCFunction) cfunc;
    }
    return 0;
}
static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void (*cfunc)(void)) {
    /* Unwrap a bound method (cached types.MethodType) to its __func__. */
    if ((PyObject*)Py_TYPE(func) == __pyx_mstate_global->__Pyx_CachedMethodType) {
        int result;
        PyObject *newFunc = PyObject_GetAttr(func, __pyx_mstate_global->__pyx_n_u_func);
        if (unlikely(!newFunc)) {
            PyErr_Clear(); // It's only an optimization, so don't throw an error
            return 0;
        }
        result = __Pyx__IsSameCyOrCFunctionNoMethod(newFunc, cfunc);
        Py_DECREF(newFunc);
        return result;
    }
    return __Pyx__IsSameCyOrCFunctionNoMethod(func, cfunc);
}
#else
static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void (*cfunc)(void)) {
    if (PyMethod_Check(func)) {
        func = PyMethod_GET_FUNCTION(func);
    }
    return __Pyx_CyOrPyCFunction_Check(func) && __Pyx_CyOrPyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc;
}
#endif
/* Replace the class object a CyFunction is defined on, taking a new
 * reference and releasing the previous one.  On 3.9+ (non-Limited API) the
 * slot lives in the PyCMethodObject layout instead of our own field. */
static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) {
#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
    __Pyx_Py_XDECREF_SET(
        __Pyx_CyFunction_GetClassObj(f),
            ((classobj) ? __Pyx_NewRef(classobj) : NULL));
#else
    __Pyx_Py_XDECREF_SET(
        ((PyCMethodObject *) (f))->mm_class,
        (PyTypeObject*)((classobj) ? __Pyx_NewRef(classobj) : NULL));
#endif
}
/* __doc__ getter body (caller holds the per-object critical section):
 * lazily materializes func_doc from the C method definition's ml_doc.
 * Returns a new reference, or NULL on error. */
static PyObject *
__Pyx_CyFunction_get_doc_locked(__pyx_CyFunctionObject *op)
{
    if (unlikely(op->func_doc == NULL)) {
#if CYTHON_COMPILING_IN_LIMITED_API
        op->func_doc = PyObject_GetAttrString(op->func, "__doc__");
        if (unlikely(!op->func_doc)) return NULL;
#else
        if (((PyCFunctionObject*)op)->m_ml->ml_doc) {
            op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc);
            if (unlikely(op->func_doc == NULL))
                return NULL;
        } else {
            /* No doc string at all: report None without caching it. */
            Py_INCREF(Py_None);
            return Py_None;
        }
#endif
    }
    Py_INCREF(op->func_doc);
    return op->func_doc;
}
/* __doc__ getter: thread-safe wrapper around the locked body above. */
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) {
    PyObject *result;
    CYTHON_UNUSED_VAR(closure);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    result = __Pyx_CyFunction_get_doc_locked(op);
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* __doc__ setter: any value is accepted; deletion stores None. */
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context)
{
    CYTHON_UNUSED_VAR(context);
    if (value == NULL) {
        value = Py_None;
    }
    Py_INCREF(value);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    __Pyx_Py_XDECREF_SET(op->func_doc, value);
    __Pyx_END_CRITICAL_SECTION();
    return 0;
}
/* __name__ getter body (caller holds the critical section): lazily interns
 * the name from the C method definition.  Returns a new reference. */
static PyObject *
__Pyx_CyFunction_get_name_locked(__pyx_CyFunctionObject *op)
{
    if (unlikely(op->func_name == NULL)) {
#if CYTHON_COMPILING_IN_LIMITED_API
        op->func_name = PyObject_GetAttrString(op->func, "__name__");
#else
        op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name);
#endif
        if (unlikely(op->func_name == NULL))
            return NULL;
    }
    Py_INCREF(op->func_name);
    return op->func_name;
}
/* __name__ getter: thread-safe wrapper around the locked body above. */
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context)
{
    PyObject *result = NULL;
    CYTHON_UNUSED_VAR(context);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    result = __Pyx_CyFunction_get_name_locked(op);
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* __name__ setter: requires a str; deletion is rejected. */
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context)
{
    CYTHON_UNUSED_VAR(context);
    if (unlikely(value == NULL || !PyUnicode_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__name__ must be set to a string object");
        return -1;
    }
    Py_INCREF(value);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    __Pyx_Py_XDECREF_SET(op->func_name, value);
    __Pyx_END_CRITICAL_SECTION();
    return 0;
}
/* __qualname__ getter: func_qualname is always set, so just return a new
 * reference under the critical section. */
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context)
{
    CYTHON_UNUSED_VAR(context);
    PyObject *result;
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    Py_INCREF(op->func_qualname);
    result = op->func_qualname;
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* __qualname__ setter: requires a str; deletion is rejected. */
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context)
{
    CYTHON_UNUSED_VAR(context);
    if (unlikely(value == NULL || !PyUnicode_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__qualname__ must be set to a string object");
        return -1;
    }
    Py_INCREF(value);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    __Pyx_Py_XDECREF_SET(op->func_qualname, value);
    __Pyx_END_CRITICAL_SECTION();
    return 0;
}
/* __dict__ getter for old Limited API targets (no PyObject_GenericGetDict):
 * lazily creates the dict.  Returns a new reference. */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context)
{
    CYTHON_UNUSED_VAR(context);
    if (unlikely(op->func_dict == NULL)) {
        op->func_dict = PyDict_New();
        if (unlikely(op->func_dict == NULL))
            return NULL;
    }
    Py_INCREF(op->func_dict);
    return op->func_dict;
}
#endif
/* __globals__ getter: returns a new reference to the stored globals object
 * (always set at construction time). */
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context)
{
    PyObject *globals = op->func_globals;
    CYTHON_UNUSED_VAR(context);
    Py_INCREF(globals);
    return globals;
}
/* __closure__ getter: CyFunctions expose no cell-based closure, so this
 * always reports None. */
static PyObject *
__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context)
{
    PyObject *result = Py_None;
    CYTHON_UNUSED_VAR(op);
    CYTHON_UNUSED_VAR(context);
    Py_INCREF(result);
    return result;
}
/* __code__ getter: returns the stored code object, or None when the
 * function has no code object attached. */
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context)
{
    PyObject *code;
    CYTHON_UNUSED_VAR(context);
    code = op->func_code ? op->func_code : Py_None;
    Py_INCREF(code);
    return code;
}
/* Populate defaults_tuple and defaults_kwdict from the generated
 * defaults_getter, which returns a 2-tuple (defaults, kwdefaults).
 * Returns 0 on success, -1 on error. */
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
    int result = 0;
    PyObject *res = op->defaults_getter((PyObject *) op);
    if (unlikely(!res))
        return -1;
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
    Py_INCREF(op->defaults_tuple);
    op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
    Py_INCREF(op->defaults_kwdict);
    #else
    op->defaults_tuple = __Pyx_PySequence_ITEM(res, 0);
    if (unlikely(!op->defaults_tuple)) result = -1;
    else {
        op->defaults_kwdict = __Pyx_PySequence_ITEM(res, 1);
        if (unlikely(!op->defaults_kwdict)) result = -1;
    }
    #endif
    Py_DECREF(res);
    return result;
}
/* __defaults__ setter: accepts None or a tuple (deletion stores None).
 * Warns because the generated argument-unpacking code keeps using the
 * original compiled-in defaults regardless of this attribute. */
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) {
    CYTHON_UNUSED_VAR(context);
    if (!value) {
        value = Py_None;
    } else if (unlikely(value != Py_None && !PyTuple_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__defaults__ must be set to a tuple object");
        return -1;
    }
    PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not "
                 "currently affect the values used in function calls", 1);
    Py_INCREF(value);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    __Pyx_Py_XDECREF_SET(op->defaults_tuple, value);
    __Pyx_END_CRITICAL_SECTION();
    return 0;
}
/* __defaults__ getter body (caller holds the critical section): lazily
 * initializes the defaults via the getter; None when there are none.
 * Returns a new reference. */
static PyObject *
__Pyx_CyFunction_get_defaults_locked(__pyx_CyFunctionObject *op) {
    PyObject* result = op->defaults_tuple;
    if (unlikely(!result)) {
        if (op->defaults_getter) {
            if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL;
            result = op->defaults_tuple;
        } else {
            result = Py_None;
        }
    }
    Py_INCREF(result);
    return result;
}
/* __defaults__ getter: thread-safe wrapper around the locked body above. */
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) {
    PyObject* result = NULL;
    CYTHON_UNUSED_VAR(context);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    result = __Pyx_CyFunction_get_defaults_locked(op);
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* __kwdefaults__ setter: accepts None or a dict (deletion stores None).
 * Warns for the same reason as __defaults__: calls keep using the
 * compiled-in defaults. */
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) {
    CYTHON_UNUSED_VAR(context);
    if (!value) {
        value = Py_None;
    } else if (unlikely(value != Py_None && !PyDict_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__kwdefaults__ must be set to a dict object");
        return -1;
    }
    PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not "
                 "currently affect the values used in function calls", 1);
    Py_INCREF(value);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value);
    __Pyx_END_CRITICAL_SECTION();
    return 0;
}
/* __kwdefaults__ getter body (caller holds the critical section): lazily
 * initializes via the defaults getter; None when there are none.
 * Returns a new reference. */
static PyObject *
__Pyx_CyFunction_get_kwdefaults_locked(__pyx_CyFunctionObject *op) {
    PyObject* result = op->defaults_kwdict;
    if (unlikely(!result)) {
        if (op->defaults_getter) {
            if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL;
            result = op->defaults_kwdict;
        } else {
            result = Py_None;
        }
    }
    Py_INCREF(result);
    return result;
}
/* __kwdefaults__ getter: thread-safe wrapper around the locked body above. */
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) {
    PyObject* result;
    CYTHON_UNUSED_VAR(context);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    result = __Pyx_CyFunction_get_kwdefaults_locked(op);
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* __annotations__ setter: a dict is stored; None or deletion clears the
 * slot (the getter then recreates an empty dict on demand). */
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) {
    CYTHON_UNUSED_VAR(context);
    if (!value || value == Py_None) {
        value = NULL;
    } else if (unlikely(!PyDict_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__annotations__ must be set to a dict object");
        return -1;
    }
    Py_XINCREF(value);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    __Pyx_Py_XDECREF_SET(op->func_annotations, value);
    __Pyx_END_CRITICAL_SECTION();
    return 0;
}
/* __annotations__ getter body (caller holds the critical section): lazily
 * creates and caches an empty dict, matching CPython function semantics.
 * Returns a new reference. */
static PyObject *
__Pyx_CyFunction_get_annotations_locked(__pyx_CyFunctionObject *op) {
    PyObject* result = op->func_annotations;
    if (unlikely(!result)) {
        result = PyDict_New();
        if (unlikely(!result)) return NULL;
        op->func_annotations = result;
    }
    Py_INCREF(result);
    return result;
}
/* __annotations__ getter: thread-safe wrapper around the locked body above. */
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) {
    PyObject *result;
    CYTHON_UNUSED_VAR(context);
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    result = __Pyx_CyFunction_get_annotations_locked(op);
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* Compute the value of the `_is_coroutine` marker attribute.  For coroutine
 * functions, try to return asyncio.coroutines._is_coroutine so that
 * asyncio.iscoroutinefunction() recognizes the function; if that import or
 * lookup fails, fall back to a plain bool.  Returns a new reference. */
static PyObject *
__Pyx_CyFunction_get_is_coroutine_value(__pyx_CyFunctionObject *op) {
    int is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE;
    if (is_coroutine) {
        PyObject *is_coroutine_value, *module, *fromlist, *marker = __pyx_mstate_global->__pyx_n_u_is_coroutine;
        fromlist = PyList_New(1);
        if (unlikely(!fromlist)) return NULL;
        Py_INCREF(marker);
#if CYTHON_ASSUME_SAFE_MACROS
        PyList_SET_ITEM(fromlist, 0, marker);  /* steals the marker reference */
#else
        if (unlikely(PyList_SetItem(fromlist, 0, marker) < 0)) {
            Py_DECREF(marker);
            Py_DECREF(fromlist);
            return NULL;
        }
#endif
        module = PyImport_ImportModuleLevelObject(__pyx_mstate_global->__pyx_n_u_asyncio_coroutines, NULL, NULL, fromlist, 0);
        Py_DECREF(fromlist);
        if (unlikely(!module)) goto ignore;
        is_coroutine_value = __Pyx_PyObject_GetAttrStr(module, marker);
        Py_DECREF(module);
        if (likely(is_coroutine_value)) {
            return is_coroutine_value;
        }
ignore:
        /* Best-effort only: swallow the import/lookup error. */
        PyErr_Clear();
    }
    return __Pyx_PyBool_FromLong(is_coroutine);
}
/* `_is_coroutine` getter: caches the computed marker on first access.
 * The value is computed outside the critical section; if another thread
 * cached one meanwhile, that cached value wins.  Returns a new reference. */
static PyObject *
__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) {
    PyObject *result;
    CYTHON_UNUSED_VAR(context);
    if (op->func_is_coroutine) {
        return __Pyx_NewRef(op->func_is_coroutine);
    }
    result = __Pyx_CyFunction_get_is_coroutine_value(op);
    if (unlikely(!result))
        return NULL;
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    if (op->func_is_coroutine) {
        Py_DECREF(result);
        result = __Pyx_NewRef(op->func_is_coroutine);
    } else {
        op->func_is_coroutine = __Pyx_NewRef(result);
    }
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* Raise TypeError "<name>() <message> (<size> given)" for wrong-arity calls.
 * Under the Limited API the function name must be fetched as an object. */
static void __Pyx_CyFunction_raise_argument_count_error(__pyx_CyFunctionObject *func, const char* message, Py_ssize_t size) {
#if CYTHON_COMPILING_IN_LIMITED_API
    PyObject *py_name = __Pyx_CyFunction_get_name(func, NULL);
    if (!py_name) return;
    PyErr_Format(PyExc_TypeError,
        "%.200S() %s (%" CYTHON_FORMAT_SSIZE_T "d given)",
        py_name, message, size);
    Py_DECREF(py_name);
#else
    const char* name = ((PyCFunctionObject*)func)->m_ml->ml_name;
    PyErr_Format(PyExc_TypeError,
        "%.200s() %s (%" CYTHON_FORMAT_SSIZE_T "d given)",
        name, message, size);
#endif
}
/* Raise TypeError "<name>() <message>" for invalid calls. */
static void __Pyx_CyFunction_raise_type_error(__pyx_CyFunctionObject *func, const char* message) {
#if CYTHON_COMPILING_IN_LIMITED_API
    PyObject *py_name = __Pyx_CyFunction_get_name(func, NULL);
    if (!py_name) return;
    PyErr_Format(PyExc_TypeError,
        "%.200S() %s",
        py_name, message);
    Py_DECREF(py_name);
#else
    const char* name = ((PyCFunctionObject*)func)->m_ml->ml_name;
    PyErr_Format(PyExc_TypeError,
        "%.200s() %s",
        name, message);
#endif
}
/* __module__ accessors under the Limited API: delegate to the wrapped
 * PyCFunction object, since its struct layout is not accessible. */
#if CYTHON_COMPILING_IN_LIMITED_API
static PyObject *
__Pyx_CyFunction_get_module(__pyx_CyFunctionObject *op, void *context) {
    CYTHON_UNUSED_VAR(context);
    return PyObject_GetAttrString(op->func, "__module__");
}
static int
__Pyx_CyFunction_set_module(__pyx_CyFunctionObject *op, PyObject* value, void *context) {
    CYTHON_UNUSED_VAR(context);
    return PyObject_SetAttrString(op->func, "__module__", value);
}
#endif
/* Property table for CyFunction; both the legacy func_* aliases and the
 * dunder names map to the same accessors, mimicking Python functions. */
static PyGetSetDef __pyx_CyFunction_getsets[] = {
    {"func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
    {"__doc__",  (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
    {"func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
    {"__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
    {"__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
    {"func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)PyObject_GenericSetDict, 0, 0},
    {"__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)PyObject_GenericSetDict, 0, 0},
#else
    {"func_dict", (getter)PyObject_GenericGetDict, (setter)PyObject_GenericSetDict, 0, 0},
    {"__dict__", (getter)PyObject_GenericGetDict, (setter)PyObject_GenericSetDict, 0, 0},
#endif
    {"func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
    {"__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
    {"func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
    {"__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
    {"func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
    {"__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
    {"func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
    {"__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
    {"__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
    {"__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
    {"_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0},
#if CYTHON_COMPILING_IN_LIMITED_API
    {"__module__", (getter)__Pyx_CyFunction_get_module, (setter)__Pyx_CyFunction_set_module, 0, 0},
#endif
    {0, 0, 0, 0, 0}
};
/* Member table for CyFunction.  The __*offset__ entries advertise the
 * dict/vectorcall/weaklist struct offsets to the type machinery; their
 * location differs between the Limited API layout (our own fields) and the
 * native PyCFunctionObject layout. */
static PyMemberDef __pyx_CyFunction_members[] = {
#if !CYTHON_COMPILING_IN_LIMITED_API
    {"__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0},
#endif
#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API
    {"__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0},
#endif
#if CYTHON_METH_FASTCALL
#if CYTHON_COMPILING_IN_LIMITED_API
    {"__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0},
#else
    {"__vectorcalloffset__", T_PYSSIZET, offsetof(PyCFunctionObject, vectorcall), READONLY, 0},
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
    {"__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0},
#else
    {"__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0},
#endif
#endif
    {0, 0, 0,  0, 0}
};
/* __reduce__ implementation: returns the function's qualified name (a str),
   so pickling resolves the function by name.  The critical section guards the
   read of func_qualname on free-threaded builds. */
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args)
{
    PyObject *result = NULL;
    CYTHON_UNUSED_VAR(args);
    __Pyx_BEGIN_CRITICAL_SECTION(m);
    Py_INCREF(m->func_qualname);
    result = m->func_qualname;
    __Pyx_END_CRITICAL_SECTION();
    return result;
}
/* Regular method table for the CyFunction type (only __reduce__). */
static PyMethodDef __pyx_CyFunction_methods[] = {
    {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
    {0, 0, 0, 0}
};
#if CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist)
#endif
/* Initialize a freshly allocated CyFunction object in place.
   Steals nothing: closure/module/code are X-increfed, qualname and globals
   are increfed.  Returns (PyObject*)op on success, NULL on failure (the
   object is decrefed on the bad-flags error path).  Under the Limited API a
   real PyCFunction is created and stored in op->func; otherwise the embedded
   PyCFunctionObject header fields are filled directly. */
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname,
                                       PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
#if !CYTHON_COMPILING_IN_LIMITED_API
    PyCFunctionObject *cf = (PyCFunctionObject*) op;
#endif
    if (unlikely(op == NULL))
        return NULL;
#if CYTHON_COMPILING_IN_LIMITED_API
    op->func = PyCFunction_NewEx(ml, (PyObject*)op, module);
    if (unlikely(!op->func)) return NULL;
#endif
    op->flags = flags;
    __Pyx_CyFunction_weakreflist(op) = NULL;
#if !CYTHON_COMPILING_IN_LIMITED_API
    cf->m_ml = ml;
    cf->m_self = (PyObject *) op;  /* self points back at the CyFunction itself */
#endif
    Py_XINCREF(closure);
    op->func_closure = closure;
#if !CYTHON_COMPILING_IN_LIMITED_API
    Py_XINCREF(module);
    cf->m_module = module;
#endif
#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API
    op->func_dict = NULL;
#endif
    op->func_name = NULL;  /* computed lazily from the method def */
    Py_INCREF(qualname);
    op->func_qualname = qualname;
    op->func_doc = NULL;
#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
    op->func_classobj = NULL;
#else
    ((PyCMethodObject*)op)->mm_class = NULL;
#endif
    op->func_globals = globals;
    Py_INCREF(op->func_globals);
    Py_XINCREF(code);
    op->func_code = code;
    /* Lazily-populated defaults/annotations state. */
    op->defaults = NULL;
    op->defaults_tuple = NULL;
    op->defaults_kwdict = NULL;
    op->defaults_getter = NULL;
    op->func_annotations = NULL;
    op->func_is_coroutine = NULL;
#if CYTHON_METH_FASTCALL
    /* Select the vectorcall fast path matching the method's calling
       convention; METH_VARARGS|METH_KEYWORDS has no fast path (NULL). */
    switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) {
    case METH_NOARGS:
        __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS;
        break;
    case METH_O:
        __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O;
        break;
    case METH_METHOD | METH_FASTCALL | METH_KEYWORDS:
        __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD;
        break;
    case METH_FASTCALL | METH_KEYWORDS:
        __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS;
        break;
    case METH_VARARGS | METH_KEYWORDS:
        __Pyx_CyFunction_func_vectorcall(op) = NULL;
        break;
    default:
        PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction");
        Py_DECREF(op);
        return NULL;
    }
#endif
    return (PyObject *) op;
}
/* tp_clear: drop all owned references so the GC can break cycles.
   Always returns 0. */
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
    Py_CLEAR(m->func_closure);
#if CYTHON_COMPILING_IN_LIMITED_API
    Py_CLEAR(m->func);
#else
    Py_CLEAR(((PyCFunctionObject*)m)->m_module);
#endif
#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API
    Py_CLEAR(m->func_dict);
#elif PY_VERSION_HEX < 0x030d0000
    /* 3.12: managed-dict clearing only has the private spelling. */
    _PyObject_ClearManagedDict((PyObject*)m);
#else
    PyObject_ClearManagedDict((PyObject*)m);
#endif
    Py_CLEAR(m->func_name);
    Py_CLEAR(m->func_qualname);
    Py_CLEAR(m->func_doc);
    Py_CLEAR(m->func_globals);
    Py_CLEAR(m->func_code);
#if !CYTHON_COMPILING_IN_LIMITED_API
#if PY_VERSION_HEX < 0x030900B1
    Py_CLEAR(__Pyx_CyFunction_GetClassObj(m));
#else
    /* Clear mm_class manually: it is a PyTypeObject*, so Py_CLEAR's
       PyObject** type-punning would be invalid here. */
    {
        PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class;
        ((PyCMethodObject *) (m))->mm_class = NULL;
        Py_XDECREF(cls);
    }
#endif
#endif
    Py_CLEAR(m->defaults_tuple);
    Py_CLEAR(m->defaults_kwdict);
    Py_CLEAR(m->func_annotations);
    Py_CLEAR(m->func_is_coroutine);
    Py_CLEAR(m->defaults);
    return 0;
}
/* Deallocation worker: clear weakrefs, drop all references, free storage. */
static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
    if (__Pyx_CyFunction_weakreflist(m) != NULL)
        PyObject_ClearWeakRefs((PyObject *) m);
    __Pyx_CyFunction_clear(m);
    __Pyx_PyHeapTypeObject_GC_Del(m);
}
/* tp_dealloc: untrack from the GC first, then run the worker above. */
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
    PyObject_GC_UnTrack(m);
    __Pyx__CyFunction_dealloc(m);
}
/* tp_traverse: visit every owned reference for the cycle GC.
   Returns the first nonzero visitor result, else 0. */
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
    {
        /* Visit the (heap) type itself first. */
        int e = __Pyx_call_type_traverse((PyObject*)m, 1, visit, arg);
        if (e) return e;
    }
    Py_VISIT(m->func_closure);
#if CYTHON_COMPILING_IN_LIMITED_API
    Py_VISIT(m->func);
#else
    Py_VISIT(((PyCFunctionObject*)m)->m_module);
#endif
#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API
    Py_VISIT(m->func_dict);
#else
    /* 3.12+: the instance dict is managed by the interpreter. */
    {
        int e =
#if PY_VERSION_HEX < 0x030d0000
            _PyObject_VisitManagedDict
#else
            PyObject_VisitManagedDict
#endif
                ((PyObject*)m, visit, arg);
        if (e != 0) return e;
    }
#endif
    __Pyx_VISIT_CONST(m->func_name);
    __Pyx_VISIT_CONST(m->func_qualname);
    Py_VISIT(m->func_doc);
    Py_VISIT(m->func_globals);
    __Pyx_VISIT_CONST(m->func_code);
#if !CYTHON_COMPILING_IN_LIMITED_API
    Py_VISIT(__Pyx_CyFunction_GetClassObj(m));
#endif
    Py_VISIT(m->defaults_tuple);
    Py_VISIT(m->defaults_kwdict);
    Py_VISIT(m->func_is_coroutine);
    Py_VISIT(m->defaults);
    return 0;
}
/* tp_repr: "<cyfunction NAME at 0xADDR>".  Critical section protects the
   func_qualname read on free-threaded builds. */
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
    PyObject *repr;
    __Pyx_BEGIN_CRITICAL_SECTION(op);
    repr = PyUnicode_FromFormat("<cyfunction %U at %p>",
                                op->func_qualname, (void *)op);
    __Pyx_END_CRITICAL_SECTION();
    return repr;
}
/* Dispatch a tp_call-style invocation (args tuple + kwargs dict) to the
   underlying C method pointer according to its METH_* flags, enforcing the
   argument-count and no-keywords rules for METH_NOARGS/METH_O.
   Returns a new reference or NULL with an exception set. */
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
#if CYTHON_COMPILING_IN_LIMITED_API
    /* Limited API: query the wrapped PyCFunction via public accessors. */
    PyObject *f = ((__pyx_CyFunctionObject*)func)->func;
    PyCFunction meth;
    int flags;
    meth = PyCFunction_GetFunction(f);
    if (unlikely(!meth)) return NULL;
    flags = PyCFunction_GetFlags(f);
    if (unlikely(flags < 0)) return NULL;
#else
    PyCFunctionObject* f = (PyCFunctionObject*)func;
    PyCFunction meth = f->m_ml->ml_meth;
    int flags = f->m_ml->ml_flags;
#endif
    Py_ssize_t size;
    switch (flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
    case METH_VARARGS:
        /* Keywords not accepted: fall through to the type error below. */
        if (likely(kw == NULL || PyDict_Size(kw) == 0))
            return (*meth)(self, arg);
        break;
    case METH_VARARGS | METH_KEYWORDS:
        return (*(PyCFunctionWithKeywords)(void(*)(void))meth)(self, arg, kw);
    case METH_NOARGS:
        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
#if CYTHON_ASSUME_SAFE_SIZE
            size = PyTuple_GET_SIZE(arg);
#else
            size = PyTuple_Size(arg);
            if (unlikely(size < 0)) return NULL;
#endif
            if (likely(size == 0))
                return (*meth)(self, NULL);
            __Pyx_CyFunction_raise_argument_count_error(
                (__pyx_CyFunctionObject*)func,
                "takes no arguments", size);
            return NULL;
        }
        break;
    case METH_O:
        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
#if CYTHON_ASSUME_SAFE_SIZE
            size = PyTuple_GET_SIZE(arg);
#else
            size = PyTuple_Size(arg);
            if (unlikely(size < 0)) return NULL;
#endif
            if (likely(size == 1)) {
                PyObject *result, *arg0;
                #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
                arg0 = PyTuple_GET_ITEM(arg, 0);
                #else
                arg0 = __Pyx_PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL;
                #endif
                result = (*meth)(self, arg0);
                #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
                Py_DECREF(arg0);
                #endif
                return result;
            }
            __Pyx_CyFunction_raise_argument_count_error(
                (__pyx_CyFunctionObject*)func,
                "takes exactly one argument", size);
            return NULL;
        }
        break;
    default:
        PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction");
        return NULL;
    }
    /* Reached only when keywords were supplied to a no-keywords method. */
    __Pyx_CyFunction_raise_type_error(
        (__pyx_CyFunctionObject*)func, "takes no keyword arguments");
    return NULL;
}
/* Call the CyFunction as a plain function: self is the function object's own
   m_self (the CyFunction itself). */
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *self, *result;
#if CYTHON_COMPILING_IN_LIMITED_API
    self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)func)->func);
    if (unlikely(!self) && PyErr_Occurred()) return NULL;
#else
    self = ((PyCFunctionObject*)func)->m_self;
#endif
    result = __Pyx_CyFunction_CallMethod(func, self, arg, kw);
    return result;
}
/* tp_call entry point.  Prefers the vectorcall fast path when one was
   selected at init time; otherwise, for bound cdef-class methods, splits
   args[0] off as self before dispatching. */
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
    PyObject *result;
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL
     __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc);
    if (vc) {
#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
        return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw);
#else
        (void) &__Pyx_PyVectorcall_FastCallDict;
        return PyVectorcall_Call(func, args, kw);
#endif
    }
#endif
    if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
        /* Unbound cdef-class method: first positional argument is self. */
        Py_ssize_t argc;
        PyObject *new_args;
        PyObject *self;
#if CYTHON_ASSUME_SAFE_SIZE
        argc = PyTuple_GET_SIZE(args);
#else
        argc = PyTuple_Size(args);
        if (unlikely(argc < 0)) return NULL;
#endif
        new_args = PyTuple_GetSlice(args, 1, argc);
        if (unlikely(!new_args))
            return NULL;
        self = PyTuple_GetItem(args, 0);
        if (unlikely(!self)) {
            Py_DECREF(new_args);
            PyErr_Format(PyExc_TypeError,
                         "unbound method %.200S() needs an argument",
                         cyfunc->func_qualname);
            return NULL;
        }
        result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
        Py_DECREF(new_args);
    } else {
        result = __Pyx_CyFunction_Call(func, args, kw);
    }
    return result;
}
#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL
/* Common argument validation for the vectorcall fast paths.
   Returns 1 when args[0] must be consumed as self (bound cdef-class method),
   0 when self comes from the function object, and -1 with an exception set
   when no self is available or unexpected keywords were passed. */
static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames)
{
    int ret = 0;
    if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
        if (unlikely(nargs < 1)) {
            __Pyx_CyFunction_raise_type_error(
                cyfunc, "needs an argument");
            return -1;
        }
        ret = 1;
    }
    if (unlikely(kwnames) && unlikely(__Pyx_PyTuple_GET_SIZE(kwnames))) {
        __Pyx_CyFunction_raise_type_error(
            cyfunc, "takes no keyword arguments");
        return -1;
    }
    return ret;
}
/* Vectorcall fast path for METH_NOARGS methods: resolves self, rejects
   keywords and any positional arguments, then calls meth(self, NULL). */
static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func;
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    PyObject *self;
#if CYTHON_COMPILING_IN_LIMITED_API
    PyCFunction meth = PyCFunction_GetFunction(cyfunc->func);
    if (unlikely(!meth)) return NULL;
#else
    PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth;
#endif
    switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) {
    case 1:
        /* Bound cdef-class method: consume args[0] as self. */
        self = args[0];
        args += 1;
        nargs -= 1;
        break;
    case 0:
#if CYTHON_COMPILING_IN_LIMITED_API
        self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func);
        if (unlikely(!self) && PyErr_Occurred()) return NULL;
#else
        self = ((PyCFunctionObject*)cyfunc)->m_self;
#endif
        break;
    default:
        return NULL;
    }
    if (unlikely(nargs != 0)) {
        __Pyx_CyFunction_raise_argument_count_error(
            cyfunc, "takes no arguments", nargs);
        return NULL;
    }
    return meth(self, NULL);
}
/* Vectorcall fast path for METH_O methods: resolves self, rejects keywords,
   requires exactly one positional argument, then calls meth(self, arg). */
static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func;
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    PyObject *self;
#if CYTHON_COMPILING_IN_LIMITED_API
    PyCFunction meth = PyCFunction_GetFunction(cyfunc->func);
    if (unlikely(!meth)) return NULL;
#else
    PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth;
#endif
    switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) {
    case 1:
        /* Bound cdef-class method: consume args[0] as self. */
        self = args[0];
        args += 1;
        nargs -= 1;
        break;
    case 0:
#if CYTHON_COMPILING_IN_LIMITED_API
        self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func);
        if (unlikely(!self) && PyErr_Occurred()) return NULL;
#else
        self = ((PyCFunctionObject*)cyfunc)->m_self;
#endif
        break;
    default:
        return NULL;
    }
    if (unlikely(nargs != 1)) {
        __Pyx_CyFunction_raise_argument_count_error(
            cyfunc, "takes exactly one argument", nargs);
        return NULL;
    }
    return meth(self, args[0]);
}
/* Vectorcall fast path for METH_FASTCALL|METH_KEYWORDS methods: resolves
   self, then forwards args/nargs/kwnames directly to the C implementation.
   Keywords are allowed here, so CheckArgs is called with NULL kwnames. */
static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func;
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    PyObject *self;
#if CYTHON_COMPILING_IN_LIMITED_API
    PyCFunction meth = PyCFunction_GetFunction(cyfunc->func);
    if (unlikely(!meth)) return NULL;
#else
    PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth;
#endif
    switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) {
    case 1:
        /* Bound cdef-class method: consume args[0] as self. */
        self = args[0];
        args += 1;
        nargs -= 1;
        break;
    case 0:
#if CYTHON_COMPILING_IN_LIMITED_API
        self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func);
        if (unlikely(!self) && PyErr_Occurred()) return NULL;
#else
        self = ((PyCFunctionObject*)cyfunc)->m_self;
#endif
        break;
    default:
        return NULL;
    }
    return ((__Pyx_PyCFunctionFastWithKeywords)(void(*)(void))meth)(self, args, nargs, kwnames);
}
/* Vectorcall fast path for METH_METHOD|METH_FASTCALL|METH_KEYWORDS: like the
   variant above but also passes the defining class to the implementation. */
static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func;
    PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc);
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    PyObject *self;
#if CYTHON_COMPILING_IN_LIMITED_API
    PyCFunction meth = PyCFunction_GetFunction(cyfunc->func);
    if (unlikely(!meth)) return NULL;
#else
    PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth;
#endif
    switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) {
    case 1:
        /* Bound cdef-class method: consume args[0] as self. */
        self = args[0];
        args += 1;
        nargs -= 1;
        break;
    case 0:
#if CYTHON_COMPILING_IN_LIMITED_API
        self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func);
        if (unlikely(!self) && PyErr_Occurred()) return NULL;
#else
        self = ((PyCFunctionObject*)cyfunc)->m_self;
#endif
        break;
    default:
        return NULL;
    }
    /* 3.14.0a6 changed the PyCMethod nargs parameter type. */
    #if PY_VERSION_HEX < 0x030e00A6
    size_t nargs_value = (size_t) nargs;
    #else
    Py_ssize_t nargs_value = nargs;
    #endif
    return ((__Pyx_PyCMethod)(void(*)(void))meth)(self, cls, args, nargs_value, kwnames);
}
#endif
/* Slot table for the heap-type definition of cython_function_or_method. */
static PyType_Slot __pyx_CyFunctionType_slots[] = {
    {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc},
    {Py_tp_repr, (void *)__Pyx_CyFunction_repr},
    {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod},
    {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse},
    {Py_tp_clear, (void *)__Pyx_CyFunction_clear},
    {Py_tp_methods, (void *)__pyx_CyFunction_methods},
    {Py_tp_members, (void *)__pyx_CyFunction_members},
    {Py_tp_getset, (void *)__pyx_CyFunction_getsets},
    {Py_tp_descr_get, (void *)__Pyx_PyMethod_New},  /* binds like a Python function */
    {0, 0},
};
/* Type spec; flags are assembled from the preprocessor configuration
   (vectorcall support, managed dict on 3.12+, etc.). */
static PyType_Spec __pyx_CyFunctionType_spec = {
    __PYX_TYPE_MODULE_PREFIX "cython_function_or_method",
    sizeof(__pyx_CyFunctionObject),
    0,
#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR
    Py_TPFLAGS_METHOD_DESCRIPTOR |
#endif
#if CYTHON_METH_FASTCALL
#if defined(Py_TPFLAGS_HAVE_VECTORCALL)
    Py_TPFLAGS_HAVE_VECTORCALL |
#elif defined(_Py_TPFLAGS_HAVE_VECTORCALL)
    _Py_TPFLAGS_HAVE_VECTORCALL |
#endif
#endif // CYTHON_METH_FASTCALL
#if PY_VERSION_HEX >= 0x030C0000 && !CYTHON_COMPILING_IN_LIMITED_API
    Py_TPFLAGS_MANAGED_DICT |
#endif
    Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_DISALLOW_INSTANTIATION |
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE,
    __pyx_CyFunctionType_slots
};
/* Create the CyFunction type and store it in this module's state.
   Returns 0 on success, -1 on failure. */
static int __pyx_CyFunction_init(PyObject *module) {
    __pyx_mstatetype *mstate = __Pyx_PyModule_GetState(module);
    mstate->__pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(
        mstate->__pyx_CommonTypesMetaclassType, module, &__pyx_CyFunctionType_spec, NULL);
    if (unlikely(mstate->__pyx_CyFunctionType == NULL)) {
        return -1;
    }
    return 0;
}
/* Allocate the per-function defaults storage object (an instance of the
   generated defaults_type).  Returns a borrowed view of m->defaults, or
   NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_CyFunction_InitDefaults(PyObject *func, PyTypeObject *defaults_type) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults = PyObject_CallObject((PyObject*)defaults_type, NULL); // _PyObject_New(defaults_type);
    if (unlikely(!m->defaults))
        return NULL;
    return m->defaults;
}
/* Setters below store a new owned reference; they assume the field was NULL
   (called once, right after __Pyx_CyFunction_Init). */
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults_tuple = tuple;
    Py_INCREF(tuple);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults_kwdict = dict;
    Py_INCREF(dict);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->func_annotations = dict;
    Py_INCREF(dict);
}

/* CythonFunction */
/* Public constructor: allocate a GC-tracked CyFunction, initialize it, and
   start GC tracking.  Returns a new reference or NULL with an exception set
   (__Pyx_CyFunction_Init handles the NULL-allocation case). */
static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname,
                                      PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
    PyObject *op = __Pyx_CyFunction_Init(
        PyObject_GC_New(__pyx_CyFunctionObject, __pyx_mstate_global->__pyx_CyFunctionType),
        ml, flags, qualname, closure, module, globals, code
    );
    if (likely(op)) {
        PyObject_GC_Track(op);
    }
    return op;
}

/* GetNameInClass */
/* Look up `name` in a class namespace, falling back to the module globals
   (and builtins, via the uncached helper) when the class dict lookup fails.
   Returns a new reference or NULL with an exception set. */
static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) {
    PyObject *result;
    PyObject *dict;
    assert(PyType_Check(nmspace));
#if CYTHON_USE_TYPE_SLOTS
    dict = ((PyTypeObject*)nmspace)->tp_dict;
    Py_XINCREF(dict);
#else
    dict = PyObject_GetAttr(nmspace, __pyx_mstate_global->__pyx_n_u_dict);
#endif
    if (likely(dict)) {
        result = PyObject_GetItem(dict, name);
        Py_DECREF(dict);
        if (result) {
            return result;
        }
    }
    /* Swallow the KeyError/AttributeError and retry in module scope. */
    PyErr_Clear();
    __Pyx_GetModuleGlobalNameUncached(result, name);
    return result;
}

/* CLineInTraceback (used by AddTraceback) */
#if CYTHON_CLINE_IN_TRACEBACK && CYTHON_CLINE_IN_TRACEBACK_RUNTIME
/* __Pyx_PyProbablyModule_GetDict(o): return a NEW reference to o's __dict__
   (o is normally the cython_runtime module), or NULL.  Three strategies
   depending on the build configuration. */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
#define __Pyx_PyProbablyModule_GetDict(o) __Pyx_XNewRef(PyModule_GetDict(o))
#elif !CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
/* NOTE: the original expansion carried a stray trailing semicolon, which
   only compiled because the macro is used in statement position; removed
   so the macro behaves as an expression like its siblings. */
#define __Pyx_PyProbablyModule_GetDict(o) PyObject_GenericGetDict(o, NULL)
#else
PyObject* __Pyx_PyProbablyModule_GetDict(PyObject *o) {
    PyObject **dict_ptr = _PyObject_GetDictPtr(o);
    return dict_ptr ? __Pyx_XNewRef(*dict_ptr) : NULL;
}
#endif
/* Decide whether the C source line should appear in a traceback entry.
   Consults cython_runtime.cline_in_traceback (defaulting it to False);
   returns c_line when enabled, 0 otherwise.  Preserves any pending
   exception across the lookup. */
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
    PyObject *use_cline = NULL;
    PyObject *ptype, *pvalue, *ptraceback;
    PyObject *cython_runtime_dict;
    CYTHON_MAYBE_UNUSED_VAR(tstate);
    if (unlikely(!__pyx_mstate_global->__pyx_cython_runtime)) {
        return c_line;
    }
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
    cython_runtime_dict = __Pyx_PyProbablyModule_GetDict(__pyx_mstate_global->__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, cython_runtime_dict,
            __Pyx_PyDict_SetDefault(cython_runtime_dict, __pyx_mstate_global->__pyx_n_u_cline_in_traceback, Py_False))
    }
    /* Treat missing, False, or any falsy value as "disabled". */
    if (use_cline == NULL || use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    Py_XDECREF(use_cline);
    Py_XDECREF(cython_runtime_dict);
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif

/* CodeObjectCache (used by AddTraceback) */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
             start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Cache lookup worker: return a NEW reference to the cached code object for
   code_line, or NULL on a miss (code_line 0 and an empty cache always miss). */
static __Pyx_CachedCodeObjectType *__pyx__find_code_object(struct __Pyx_CodeObjectCache *code_cache, int code_line) {
    __Pyx_CachedCodeObjectType* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!code_cache->entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(code_cache->entries, code_cache->count, code_line);
    if (unlikely(pos >= code_cache->count) || unlikely(code_cache->entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = code_cache->entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Thread-safe wrapper around the lookup.  On free-threaded builds, readers
   register via an atomic accessor count; a negative count means a writer
   holds the cache, in which case we just report a miss. */
static __Pyx_CachedCodeObjectType *__pyx_find_code_object(int code_line) {
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && !CYTHON_ATOMICS
    (void)__pyx__find_code_object;
    return NULL; // Most implementation should have atomics. But otherwise, don't make it thread-safe, just miss.
#else
    struct __Pyx_CodeObjectCache *code_cache = &__pyx_mstate_global->__pyx_code_cache;
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_nonatomic_int_type old_count = __pyx_atomic_incr_acq_rel(&code_cache->accessor_count);
    if (old_count < 0) {
        __pyx_atomic_decr_acq_rel(&code_cache->accessor_count);
        return NULL;
    }
#endif
    __Pyx_CachedCodeObjectType *result = __pyx__find_code_object(code_cache, code_line);
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_atomic_decr_acq_rel(&code_cache->accessor_count);
#endif
    return result;
#endif
}
/* Cache insert worker: insert (code_line, code_object) keeping the entry
   array sorted, replacing an existing entry for the same line.  Takes its
   own reference to code_object.  Allocation failures are silently ignored
   (the cache is a best-effort optimization). */
static void __pyx__insert_code_object(struct __Pyx_CodeObjectCache *code_cache, int code_line, __Pyx_CachedCodeObjectType* code_object)
{
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = code_cache->entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First insert: allocate the initial 64-entry array. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            code_cache->entries = entries;
            code_cache->max_count = 64;
            code_cache->count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(code_cache->entries, code_cache->count, code_line);
    if ((pos < code_cache->count) && unlikely(code_cache->entries[pos].code_line == code_line)) {
        /* Same line already cached: swap in the new code object. */
        __Pyx_CachedCodeObjectType* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_INCREF(code_object);
        Py_DECREF(tmp);
        return;
    }
    if (code_cache->count == code_cache->max_count) {
        /* Grow linearly by 64 entries. */
        int new_max = code_cache->max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            code_cache->entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        code_cache->entries = entries;
        code_cache->max_count = new_max;
    }
    /* Shift the tail up and insert at the bisected position. */
    for (i=code_cache->count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    code_cache->count++;
    Py_INCREF(code_object);
}
/* Thread-safe wrapper around the insert.  On free-threaded builds the writer
   takes exclusive ownership by CAS-ing the accessor count from 0 to INT_MIN;
   if any reader or writer is active, the insert is simply skipped. */
static void __pyx_insert_code_object(int code_line, __Pyx_CachedCodeObjectType* code_object) {
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && !CYTHON_ATOMICS
    (void)__pyx__insert_code_object;
    return; // Most implementation should have atomics. But otherwise, don't make it thread-safe, just fail.
#else
    struct __Pyx_CodeObjectCache *code_cache = &__pyx_mstate_global->__pyx_code_cache;
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_nonatomic_int_type expected = 0;
    if (!__pyx_atomic_int_cmp_exchange(&code_cache->accessor_count, &expected, INT_MIN)) {
        return;
    }
#endif
    __pyx__insert_code_object(code_cache, code_line, code_object);
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_atomic_sub(&code_cache->accessor_count, INT_MIN);
#endif
#endif
}

/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API && !defined(PYPY_VERSION)
  #ifndef Py_BUILD_CORE
    #define Py_BUILD_CORE 1
  #endif
  #include "internal/pycore_frame.h"
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
/* Limited-API helper: call code.replace(co_firstlineno=..., co_name=...)
   using scratch_dict as the keyword dict.  Returns the new code object, or
   NULL (with the error cleared when `replace` itself was missing). */
static PyObject *__Pyx_PyCode_Replace_For_AddTraceback(PyObject *code, PyObject *scratch_dict,
                                                       PyObject *firstlineno, PyObject *name) {
    PyObject *replace = NULL;
    if (unlikely(PyDict_SetItemString(scratch_dict, "co_firstlineno", firstlineno))) return NULL;
    if (unlikely(PyDict_SetItemString(scratch_dict, "co_name", name))) return NULL;
    replace = PyObject_GetAttrString(code, "replace");
    if (likely(replace)) {
        PyObject *result = PyObject_Call(replace, __pyx_mstate_global->__pyx_empty_tuple, scratch_dict);
        Py_DECREF(replace);
        return result;
    }
    PyErr_Clear();
    return NULL;
}
/* Limited-API traceback injection: synthesize a frame that carries the given
   function name, filename and line number, then add it to the current
   exception's traceback via PyTraceBack_Here.
   Strategy: compile the tiny expression "_getframe()" with the target
   filename, patch its co_name/co_firstlineno via code.replace(), cache the
   resulting code object, and evaluate it with sys._getframe injected into
   its globals so the evaluation returns a live frame.  The pending exception
   is preserved across all of this; every failure path just gives up silently
   (a traceback entry is best-effort). */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyObject *code_object = NULL, *py_py_line = NULL, *py_funcname = NULL, *dict = NULL;
    PyObject *getframe = NULL, *frame = NULL;
    PyObject *exc_type, *exc_value, *exc_traceback;
    int success = 0;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line);
    }
    PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
    /* C lines are cached under their negative value to avoid clashing with
       Python line numbers. */
    code_object = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!code_object) {
        code_object = Py_CompileString("_getframe()", filename, Py_eval_input);
        if (unlikely(!code_object)) goto bad;
        py_py_line = PyLong_FromLong(py_line);
        if (unlikely(!py_py_line)) goto bad;
        if (c_line) {
            py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        } else {
            py_funcname = PyUnicode_FromString(funcname);
        }
        if (unlikely(!py_funcname)) goto bad;
        dict = PyDict_New();
        if (unlikely(!dict)) goto bad;
        {
            PyObject *old_code_object = code_object;
            code_object = __Pyx_PyCode_Replace_For_AddTraceback(code_object, dict, py_py_line, py_funcname);
            Py_DECREF(old_code_object);
        }
        if (unlikely(!code_object)) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, code_object);
    } else {
        dict = PyDict_New();
        /* BUGFIX: the allocation was previously unchecked; a NULL dict would
           have been passed to PyDict_SetItemString below. */
        if (unlikely(!dict)) goto bad;
    }
    getframe = PySys_GetObject("_getframe");
    if (unlikely(!getframe)) goto bad;
    if (unlikely(PyDict_SetItemString(dict, "_getframe", getframe))) goto bad;
    frame = PyEval_EvalCode(code_object, dict, dict);
    if (unlikely(!frame) || frame == Py_None) goto bad;
    success = 1;
  bad:
    PyErr_Restore(exc_type, exc_value, exc_traceback);
    Py_XDECREF(code_object);
    Py_XDECREF(py_py_line);
    Py_XDECREF(py_funcname);
    Py_XDECREF(dict);
    if (success) {
        PyTraceBack_Here(
            (struct _frame*)frame);
    }
    Py_XDECREF(frame);
}
#else
/* Build an empty code object carrying filename/funcname/py_line for a
   synthetic traceback frame.  When a C line is given, the function name is
   decorated as "funcname (cfile:cline)".  Returns a new reference or NULL. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = NULL;
    PyObject *py_funcname = NULL;
    if (c_line) {
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        if (!py_funcname) goto bad;
        /* Borrowed UTF-8 buffer stays valid while py_funcname is alive. */
        funcname = PyUnicode_AsUTF8(py_funcname);
        if (!funcname) goto bad;
    }
    py_code = PyCode_NewEmpty(filename, funcname, py_line);
    Py_XDECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Full-API traceback injection: create (or fetch from the cache) an empty
   code object for this location, wrap it in a frame, set its line number,
   and record it in the current exception's traceback.  Best-effort: all
   failures return silently, leaving the pending exception intact. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject *ptype, *pvalue, *ptraceback;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    /* C lines are cached under their negative value to avoid clashing with
       Python line numbers. */
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) {
            /* If the code object creation fails, then we should clear the
               fetched exception references and propagate the new exception */
            Py_XDECREF(ptype);
            Py_XDECREF(pvalue);
            Py_XDECREF(ptraceback);
            goto bad;
        }
        __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,            /*PyThreadState *tstate,*/
        py_code,           /*PyCodeObject *code,*/
        __pyx_mstate_global->__pyx_d,    /*PyObject *globals,*/
        0                  /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#endif

/* MemviewRefcount */
#include <stdio.h>
#ifndef _Py_NO_RETURN
#define _Py_NO_RETURN
#endif
/* printf-style wrapper around Py_FatalError (which never returns).  Used for
   unrecoverable memoryview refcount corruption; message truncated to 200
   bytes by vsnprintf. */
_Py_NO_RETURN
static void __pyx_fatalerror(const char *fmt, ...) {
    va_list vargs;
    char msg[200];
#if PY_VERSION_HEX >= 0x030A0000 || defined(HAVE_STDARG_PROTOTYPES)
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    vsnprintf(msg, 200, fmt, vargs);
    va_end(vargs);
    Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count,
                                   PyThread_type_lock lock)
{
    /* Serialised post-decrement of the acquisition counter.
       Returns the value the counter held *before* the decrement. */
    int previous;
    PyThread_acquire_lock(lock, 1);  /* 1 == blocking wait */
    previous = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return previous;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    /* Register one more acquisition of the slice.  On the 0 -> 1 transition
       also take a real Python reference on the owning memoryview object,
       acquiring the GIL first if the caller does not hold it. */
    __pyx_nonatomic_int_type old_acquisition_count;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (unlikely(!memview || (PyObject *) memview == Py_None)) {
        return;  /* empty/None slice: nothing to retain */
    }
    old_acquisition_count = __pyx_add_acquisition_count(memview);
    if (unlikely(old_acquisition_count <= 0)) {
        if (likely(old_acquisition_count == 0)) {
            /* First acquisition: pin the memoryview with a refcount. */
            if (have_gil) {
                Py_INCREF((PyObject *) memview);
            } else {
                PyGILState_STATE _gilstate = PyGILState_Ensure();
                Py_INCREF((PyObject *) memview);
                PyGILState_Release(_gilstate);
            }
        } else {
            /* Negative count: internal refcounting bug -- abort hard. */
            __pyx_fatalerror("Acquisition count is %d (line %d)",
                             old_acquisition_count+1, lineno);
        }
    }
}
static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    /* Release one acquisition of the slice and clear its fields.  On the
       1 -> 0 transition, also drop the Python reference held on the owning
       memoryview (acquiring the GIL if the caller lacks it). */
    __pyx_nonatomic_int_type old_acquisition_count;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (unlikely(!memview || (PyObject *) memview == Py_None)) {
        memslice->memview = NULL;  /* already empty; just normalise */
        return;
    }
    old_acquisition_count = __pyx_sub_acquisition_count(memview);
    memslice->data = NULL;
    if (likely(old_acquisition_count > 1)) {
        memslice->memview = NULL;  /* other acquisitions still pin the object */
    } else if (likely(old_acquisition_count == 1)) {
        /* Last acquisition: release the Python reference. */
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        /* Count was already <= 0: refcounting bug -- abort hard. */
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         old_acquisition_count-1, lineno);
    }
}

/* MemviewSliceIsContig */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
    /* Return 1 when the slice is contiguous in the requested order
       ('F' = Fortran/column-major, anything else = C/row-major) and fully
       direct (no indirect dimensions), 0 otherwise. */
    int dim;
    const int fortran_order = (order == 'F');
    Py_ssize_t expected_stride = mvs.memview->view.itemsize;
    for (dim = 0; dim < ndim; dim++) {
        /* Walk dimensions innermost-first for the requested layout. */
        const int index = fortran_order ? dim : (ndim - 1 - dim);
        if (mvs.suboffsets[index] >= 0)
            return 0;  /* indirect dimension: not simply contiguous */
        if (mvs.strides[index] != expected_stride)
            return 0;
        expected_stride *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    /* Compute the half-open byte range [start, end) spanned by the slice.
       Positive strides push the upper bound up; negative strides pull the
       lower bound down.  A zero-extent dimension means no memory at all,
       in which case start == end. */
    char *lo = slice->data;
    char *hi = slice->data;
    int dim;
    for (dim = 0; dim < ndim; dim++) {
        const Py_ssize_t stride = slice->strides[dim];
        const Py_ssize_t extent = slice->shape[dim];
        if (extent == 0) {
            *out_start = lo;
            *out_end = lo;
            return;
        }
        if (stride > 0)
            hi += stride * (extent - 1);
        else
            lo += stride * (extent - 1);
    }
    *out_start = lo;
    *out_end = hi + itemsize;  /* past the last element */
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    /* Two half-open byte ranges overlap iff each one starts before the
       other ends. */
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    if (start1 >= end2)
        return 0;
    if (start2 >= end1)
        return 0;
    return 1;
}

/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    /* Populate `memviewslice` (shape/strides/suboffsets/data) from the
       memoryview's Py_buffer and register one acquisition on `memview`.
       Returns 0 on success, -1 with a Python exception set on failure. */
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (unlikely(memviewslice->memview || memviewslice->data)) {
        PyErr_SetString(PyExc_ValueError,
            "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* No stride info: assume C-contiguous and derive strides from shape. */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i]   = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;  /* -1 marks a direct dimension */
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    /* First acquisition pins the memoryview -- unless the caller hands us a
       reference it already owns (memview_is_new_reference). */
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF((PyObject*)memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}

/* CheckUnpickleChecksum */
static void __Pyx_RaiseUnpickleChecksumError(long checksum, long checksum1, long checksum2, long checksum3, const char *members) {
    PyObject *pickle_module = PyImport_ImportModule("pickle");
    if (unlikely(!pickle_module)) return;
    PyObject *pickle_error = PyObject_GetAttrString(pickle_module, "PickleError");
    Py_DECREF(pickle_module);
    if (unlikely(!pickle_error)) return;
    if (checksum2 == checksum1) {
        PyErr_Format(pickle_error, "Incompatible checksums (0x%x vs (0x%x) = (%s))",
            checksum, checksum1, members);
    } else if (checksum3 == checksum2) {
        PyErr_Format(pickle_error, "Incompatible checksums (0x%x vs (0x%x, 0x%x) = (%s))",
            checksum, checksum1, checksum2, members);
    } else {
        PyErr_Format(pickle_error, "Incompatible checksums (0x%x vs (0x%x, 0x%x, 0x%x) = (%s))",
            checksum, checksum1, checksum2, checksum3, members);
    }
    Py_DECREF(pickle_error);
}
static int __Pyx_CheckUnpickleChecksum(long checksum, long checksum1, long checksum2, long checksum3, const char *members) {
    /* Return 0 when `checksum` equals any of the expected checksums;
       otherwise raise pickle.PickleError (via the helper) and return -1. */
    const int matches = (checksum == checksum1) ||
                        (checksum == checksum2) ||
                        (checksum == checksum3);
    if (likely(matches))
        return 0;
    __Pyx_RaiseUnpickleChecksumError(checksum, checksum1, checksum2, checksum3, members);
    return -1;
}

/* CIntFromPyVerify */
/* Helpers for the CIntFromPy conversion functions: evaluate `func_value`
   once and return it as `target_type`, range-checking when the target type
   is narrower than the intermediate `func_type`.  The _EXC variant also
   propagates a pending Python exception signalled by a -1 return.  On
   overflow they jump to `raise_overflow` / `raise_neg_overflow` labels that
   the enclosing conversion function must define; `is_unsigned` must also be
   in scope at the expansion site. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }

static PyObject* __pyx_convert__to_py_nvmlDramEncryptionInfo_v1_t(nvmlDramEncryptionInfo_v1_t s) {
  /* Build the dict {"version": ..., "encryptionState": ...} from the struct.
     Returns a new reference, or NULL with an exception set. */
  PyObject* result = __Pyx_PyDict_NewPresized(2);
  PyObject* item = NULL;
  if (unlikely(!result)) return NULL;
  item = __Pyx_PyLong_From_unsigned_int(s.version);
  if (unlikely(!item)) goto error;
  if (unlikely(PyDict_SetItem(result, __pyx_mstate_global->__pyx_n_u_version, item) < 0)) goto error;
  Py_CLEAR(item);
  item = __Pyx_PyLong_From_nvmlEnableState_t(s.encryptionState);
  if (unlikely(!item)) goto error;
  if (unlikely(PyDict_SetItem(result, __pyx_mstate_global->__pyx_n_u_encryptionState, item) < 0)) goto error;
  Py_CLEAR(item);
  return result;
error:
  Py_XDECREF(item);
  Py_DECREF(result);
  return NULL;
}
static PyObject* __pyx_convert__to_py_nvmlValue_t(nvmlValue_t s) {
  /* Convert an nvmlValue_t to a dict exposing every member
     (dVal/siVal/uiVal/ulVal/ullVal/sllVal/usVal).
     NOTE(review): nvmlValue_t looks like an NVML union, in which case the
     keys are alternative interpretations of the same bytes -- confirm
     against nvml.h.  Returns a new reference, or NULL with an exception
     set.  `member` is released after each successful insertion since
     PyDict_SetItem does not steal references. */
  PyObject* res;
  PyObject* member;
  res = __Pyx_PyDict_NewPresized(7); if (unlikely(!res)) return NULL;
  member = PyFloat_FromDouble(s.dVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_dVal, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_int(s.siVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_siVal, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_int(s.uiVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_uiVal, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_long(s.ulVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_ulVal, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(s.ullVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_ullVal, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_PY_LONG_LONG(s.sllVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_sllVal, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_short(s.usVal); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_usVal, member) < 0)) goto bad;
  Py_DECREF(member);
  return res;
  bad:
  /* `member` may be NULL (creation failed) or still owned (SetItem failed). */
  Py_XDECREF(member);
  Py_DECREF(res);
  return NULL;
}
static PyObject* __pyx_convert__to_py_nvmlVgpuInstanceUtilizationSample_t(nvmlVgpuInstanceUtilizationSample_t s) {
  /* Convert an nvmlVgpuInstanceUtilizationSample_t to a 6-key dict; the four
     utilization members are nested dicts produced by
     __pyx_convert__to_py_nvmlValue_t.  Returns a new reference, or NULL with
     an exception set. */
  PyObject* res;
  PyObject* member;
  res = __Pyx_PyDict_NewPresized(6); if (unlikely(!res)) return NULL;
  member = __Pyx_PyLong_From_unsigned_int(s.vgpuInstance); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_vgpuInstance, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(s.timeStamp); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_timeStamp, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __pyx_convert__to_py_nvmlValue_t(s.smUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_smUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __pyx_convert__to_py_nvmlValue_t(s.memUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_memUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __pyx_convert__to_py_nvmlValue_t(s.encUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_encUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __pyx_convert__to_py_nvmlValue_t(s.decUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_decUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  return res;
  bad:
  /* `member` may be NULL (creation failed) or still owned (SetItem failed). */
  Py_XDECREF(member);
  Py_DECREF(res);
  return NULL;
}
static PyObject* __pyx_convert__to_py_nvmlVgpuProcessUtilizationSample_t(nvmlVgpuProcessUtilizationSample_t s) {
  /* Convert an nvmlVgpuProcessUtilizationSample_t to an 8-key dict.
     processName is decoded from the struct's char buffer via
     __Pyx_PyObject_FromString; all other fields become ints.
     Returns a new reference, or NULL with an exception set. */
  PyObject* res;
  PyObject* member;
  res = __Pyx_PyDict_NewPresized(8); if (unlikely(!res)) return NULL;
  member = __Pyx_PyLong_From_unsigned_int(s.vgpuInstance); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_vgpuInstance, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_int(s.pid); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_pid, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyObject_FromString(s.processName); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_processName, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(s.timeStamp); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_timeStamp, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_int(s.smUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_smUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_int(s.memUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_memUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_int(s.encUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_encUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  member = __Pyx_PyLong_From_unsigned_int(s.decUtil); if (unlikely(!member)) goto bad;
  if (unlikely(PyDict_SetItem(res, __pyx_mstate_global->__pyx_n_u_decUtil, member) < 0)) goto bad;
  Py_DECREF(member);
  return res;
  bad:
  /* `member` may be NULL (creation failed) or still owned (SetItem failed). */
  Py_XDECREF(member);
  Py_DECREF(res);
  return NULL;
}
/* MemviewSliceCopy */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    /* Allocate a new contiguous array of the same shape as `from_mvs`,
       wrap it in a memoryview, and copy the source contents into it.
       Returns the new slice; on failure its memview/data are NULL and a
       Python exception is set. */
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = __Pyx_MEMSLICE_INIT;
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    /* Indirect (pointer-chasing) dimensions cannot be copied this way. */
    for (i = 0; i < ndim; i++) {
        if (unlikely(from_mvs->suboffsets[i] >= 0)) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* Build the shape tuple for the new array. */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyLong_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
#if CYTHON_ASSUME_SAFE_MACROS
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
#else
            if (PyTuple_SetItem(shape_tuple, i, temp_int) < 0) {
                goto fail;
            }
#endif
            temp_int = NULL;  /* reference stolen by the tuple */
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                    (PyObject *) array_obj, contig_flag,
                                    dtype_is_object,
                                    from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    /* memview_is_new_reference=1: the slice takes over our new reference. */
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;
    goto no_fail;
fail:
    __Pyx_XDECREF((PyObject *) new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    /* array_obj's reference is owned by the memoryview on success, so only
       the temporaries are released here. */
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF((PyObject *) array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlBridgeChipType_t(nvmlBridgeChipType_t value) {
    /* Convert a nvmlBridgeChipType_t to a Python int.  Signedness is probed
       at compile time via (T)-1 > 0; values that fit a C (unsigned)
       long/long long use the direct PyLong_From* constructors, and anything
       wider falls back to a raw-bytes conversion. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const nvmlBridgeChipType_t neg_one = (nvmlBridgeChipType_t) -1, const_zero = (nvmlBridgeChipType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlBridgeChipType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlBridgeChipType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlBridgeChipType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlBridgeChipType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlBridgeChipType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: type wider than long long (or PyPy) -- go via raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one;  /* endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(nvmlBridgeChipType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, order, signed=...) instead. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlBridgeChipType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkUtilizationCountUnits_t(nvmlNvLinkUtilizationCountUnits_t value) {
    /* Convert a nvmlNvLinkUtilizationCountUnits_t to a Python int.
       Signedness is probed at compile time via (T)-1 > 0; values that fit a
       C (unsigned) long/long long use the direct PyLong_From* constructors,
       and anything wider falls back to a raw-bytes conversion. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const nvmlNvLinkUtilizationCountUnits_t neg_one = (nvmlNvLinkUtilizationCountUnits_t) -1, const_zero = (nvmlNvLinkUtilizationCountUnits_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlNvLinkUtilizationCountUnits_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkUtilizationCountUnits_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlNvLinkUtilizationCountUnits_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlNvLinkUtilizationCountUnits_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkUtilizationCountUnits_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: type wider than long long (or PyPy) -- go via raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one;  /* endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(nvmlNvLinkUtilizationCountUnits_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, order, signed=...) instead. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlNvLinkUtilizationCountUnits_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkUtilizationCountPktTypes_t(nvmlNvLinkUtilizationCountPktTypes_t value) {
    /* Convert a nvmlNvLinkUtilizationCountPktTypes_t to a Python int.
       Signedness is probed at compile time via (T)-1 > 0; values that fit a
       C (unsigned) long/long long use the direct PyLong_From* constructors,
       and anything wider falls back to a raw-bytes conversion. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const nvmlNvLinkUtilizationCountPktTypes_t neg_one = (nvmlNvLinkUtilizationCountPktTypes_t) -1, const_zero = (nvmlNvLinkUtilizationCountPktTypes_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlNvLinkUtilizationCountPktTypes_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkUtilizationCountPktTypes_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlNvLinkUtilizationCountPktTypes_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlNvLinkUtilizationCountPktTypes_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkUtilizationCountPktTypes_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: type wider than long long (or PyPy) -- go via raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one;  /* endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(nvmlNvLinkUtilizationCountPktTypes_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, order, signed=...) instead. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlNvLinkUtilizationCountPktTypes_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkCapability_t(nvmlNvLinkCapability_t value) {
    /* Convert a nvmlNvLinkCapability_t to a Python int.  Signedness is
       probed at compile time via (T)-1 > 0; values that fit a C (unsigned)
       long/long long use the direct PyLong_From* constructors, and anything
       wider falls back to a raw-bytes conversion. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const nvmlNvLinkCapability_t neg_one = (nvmlNvLinkCapability_t) -1, const_zero = (nvmlNvLinkCapability_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlNvLinkCapability_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkCapability_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlNvLinkCapability_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlNvLinkCapability_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkCapability_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: type wider than long long (or PyPy) -- go via raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one;  /* endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(nvmlNvLinkCapability_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, order, signed=...) instead. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlNvLinkCapability_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvLinkErrorCounter_t(nvmlNvLinkErrorCounter_t value) {
    /* Convert a nvmlNvLinkErrorCounter_t to a Python int.  Signedness is
       probed at compile time via (T)-1 > 0; values that fit a C (unsigned)
       long/long long use the direct PyLong_From* constructors, and anything
       wider falls back to a raw-bytes conversion. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const nvmlNvLinkErrorCounter_t neg_one = (nvmlNvLinkErrorCounter_t) -1, const_zero = (nvmlNvLinkErrorCounter_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlNvLinkErrorCounter_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkErrorCounter_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlNvLinkErrorCounter_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlNvLinkErrorCounter_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvLinkErrorCounter_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: type wider than long long (or PyPy) -- go via raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one;  /* endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(nvmlNvLinkErrorCounter_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, order, signed=...) instead. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlNvLinkErrorCounter_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlIntNvLinkDeviceType_t` (an NVML enum) to a new Python
 * `int` object.  Cython-generated instance of the generic CIntToPy template.
 * Returns a new reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlIntNvLinkDeviceType_t(nvmlIntNvLinkDeviceType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Signedness probe: (T)-1 > (T)0 holds iff T has an unsigned
     * representation; the comparison folds to a constant at compile time. */
    const nvmlIntNvLinkDeviceType_t neg_one = (nvmlIntNvLinkDeviceType_t) -1, const_zero = (nvmlIntNvLinkDeviceType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: widen into the smallest C integer type that has a
         * direct PyLong_From* constructor.  All sizeof() comparisons are
         * compile-time constants, so dead branches are eliminated. */
        if (sizeof(nvmlIntNvLinkDeviceType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlIntNvLinkDeviceType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlIntNvLinkDeviceType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlIntNvLinkDeviceType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlIntNvLinkDeviceType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Slow path: the type is wider than (unsigned) long long, so build
         * the Python int from the raw native bytes of `value`. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors (-1 endianness
         * means "native byte order"). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private byte-array constructor; the one-byte
         * probe detects host endianness at runtime. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlIntNvLinkDeviceType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level byte constructors are available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlIntNvLinkDeviceType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs the keyword argument signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup: release temporaries; `result` is NULL on error. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlGpuTopologyLevel_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuTopologyLevel_t(nvmlGpuTopologyLevel_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlGpuTopologyLevel_t neg_one = (nvmlGpuTopologyLevel_t) -1, const_zero = (nvmlGpuTopologyLevel_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlGpuTopologyLevel_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuTopologyLevel_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpuTopologyLevel_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpuTopologyLevel_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuTopologyLevel_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpuTopologyLevel_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpuTopologyLevel_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlGpuP2PStatus_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuP2PStatus_t(nvmlGpuP2PStatus_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlGpuP2PStatus_t neg_one = (nvmlGpuP2PStatus_t) -1, const_zero = (nvmlGpuP2PStatus_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlGpuP2PStatus_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuP2PStatus_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpuP2PStatus_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpuP2PStatus_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuP2PStatus_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpuP2PStatus_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpuP2PStatus_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlGpuP2PCapsIndex_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuP2PCapsIndex_t(nvmlGpuP2PCapsIndex_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlGpuP2PCapsIndex_t neg_one = (nvmlGpuP2PCapsIndex_t) -1, const_zero = (nvmlGpuP2PCapsIndex_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlGpuP2PCapsIndex_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuP2PCapsIndex_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpuP2PCapsIndex_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpuP2PCapsIndex_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuP2PCapsIndex_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpuP2PCapsIndex_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpuP2PCapsIndex_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlSamplingType_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlSamplingType_t(nvmlSamplingType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlSamplingType_t neg_one = (nvmlSamplingType_t) -1, const_zero = (nvmlSamplingType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlSamplingType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlSamplingType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlSamplingType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlSamplingType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlSamplingType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlSamplingType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlSamplingType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlPcieUtilCounter_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPcieUtilCounter_t(nvmlPcieUtilCounter_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlPcieUtilCounter_t neg_one = (nvmlPcieUtilCounter_t) -1, const_zero = (nvmlPcieUtilCounter_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlPcieUtilCounter_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPcieUtilCounter_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlPcieUtilCounter_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlPcieUtilCounter_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPcieUtilCounter_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlPcieUtilCounter_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlPcieUtilCounter_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlValueType_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlValueType_t(nvmlValueType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlValueType_t neg_one = (nvmlValueType_t) -1, const_zero = (nvmlValueType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlValueType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlValueType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlValueType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlValueType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlValueType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlValueType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlValueType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlPerfPolicyType_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPerfPolicyType_t(nvmlPerfPolicyType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlPerfPolicyType_t neg_one = (nvmlPerfPolicyType_t) -1, const_zero = (nvmlPerfPolicyType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlPerfPolicyType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPerfPolicyType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlPerfPolicyType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlPerfPolicyType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPerfPolicyType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlPerfPolicyType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlPerfPolicyType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlThermalTarget_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlThermalTarget_t(nvmlThermalTarget_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlThermalTarget_t neg_one = (nvmlThermalTarget_t) -1, const_zero = (nvmlThermalTarget_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlThermalTarget_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlThermalTarget_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlThermalTarget_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlThermalTarget_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlThermalTarget_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlThermalTarget_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlThermalTarget_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlThermalController_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlThermalController_t(nvmlThermalController_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlThermalController_t neg_one = (nvmlThermalController_t) -1, const_zero = (nvmlThermalController_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlThermalController_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlThermalController_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlThermalController_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlThermalController_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlThermalController_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlThermalController_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlThermalController_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `nvmlCoolerControl_t` (an NVML enum) to a new Python `int`.
 * Cython-generated CIntToPy template instance.  Returns a new reference, or
 * NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlCoolerControl_t(nvmlCoolerControl_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 iff T is unsigned; folds to a compile-time constant. */
    const nvmlCoolerControl_t neg_one = (nvmlCoolerControl_t) -1, const_zero = (nvmlCoolerControl_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the smallest direct PyLong_From* constructor;
         * sizeof comparisons are compile-time constants. */
        if (sizeof(nvmlCoolerControl_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlCoolerControl_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlCoolerControl_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlCoolerControl_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlCoolerControl_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ native-bytes API (-1 = native byte order). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Pre-3.13 CPython: private _PyLong_FromByteArray; one-byte probe
         * detects host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlCoolerControl_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) through vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlCoolerControl_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* Signed conversion needs keyword signed=True. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Cleanup; result is NULL on any failure above. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlCoolerTarget_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlCoolerTarget_t(nvmlCoolerTarget_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlCoolerTarget_t neg_one = (nvmlCoolerTarget_t) -1, const_zero = (nvmlCoolerTarget_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlCoolerTarget_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlCoolerTarget_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlCoolerTarget_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlCoolerTarget_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlCoolerTarget_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlCoolerTarget_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlCoolerTarget_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlUUIDType_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlUUIDType_t(nvmlUUIDType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlUUIDType_t neg_one = (nvmlUUIDType_t) -1, const_zero = (nvmlUUIDType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlUUIDType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlUUIDType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlUUIDType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlUUIDType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlUUIDType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlUUIDType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlUUIDType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlEnableState_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlEnableState_t(nvmlEnableState_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlEnableState_t neg_one = (nvmlEnableState_t) -1, const_zero = (nvmlEnableState_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlEnableState_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlEnableState_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlEnableState_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlEnableState_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlEnableState_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlEnableState_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlEnableState_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlBrandType_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlBrandType_t(nvmlBrandType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlBrandType_t neg_one = (nvmlBrandType_t) -1, const_zero = (nvmlBrandType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlBrandType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlBrandType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlBrandType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlBrandType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlBrandType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlBrandType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlBrandType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlTemperatureThresholds_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlTemperatureThresholds_t(nvmlTemperatureThresholds_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlTemperatureThresholds_t neg_one = (nvmlTemperatureThresholds_t) -1, const_zero = (nvmlTemperatureThresholds_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlTemperatureThresholds_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlTemperatureThresholds_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlTemperatureThresholds_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlTemperatureThresholds_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlTemperatureThresholds_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlTemperatureThresholds_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlTemperatureThresholds_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlTemperatureSensors_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlTemperatureSensors_t(nvmlTemperatureSensors_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlTemperatureSensors_t neg_one = (nvmlTemperatureSensors_t) -1, const_zero = (nvmlTemperatureSensors_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlTemperatureSensors_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlTemperatureSensors_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlTemperatureSensors_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlTemperatureSensors_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlTemperatureSensors_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlTemperatureSensors_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlTemperatureSensors_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlComputeMode_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlComputeMode_t(nvmlComputeMode_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlComputeMode_t neg_one = (nvmlComputeMode_t) -1, const_zero = (nvmlComputeMode_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlComputeMode_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlComputeMode_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlComputeMode_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlComputeMode_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlComputeMode_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlComputeMode_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlComputeMode_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlMemoryErrorType_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlMemoryErrorType_t(nvmlMemoryErrorType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlMemoryErrorType_t neg_one = (nvmlMemoryErrorType_t) -1, const_zero = (nvmlMemoryErrorType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlMemoryErrorType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlMemoryErrorType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlMemoryErrorType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlMemoryErrorType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlMemoryErrorType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlMemoryErrorType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlMemoryErrorType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlNvlinkVersion_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlNvlinkVersion_t(nvmlNvlinkVersion_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlNvlinkVersion_t neg_one = (nvmlNvlinkVersion_t) -1, const_zero = (nvmlNvlinkVersion_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlNvlinkVersion_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvlinkVersion_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlNvlinkVersion_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlNvlinkVersion_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlNvlinkVersion_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlNvlinkVersion_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlNvlinkVersion_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy: convert a C nvmlEccCounterType_t value to a Python int.
 * Cython-generated helper (one copy is emitted per integer type in this
 * module).  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlEccCounterType_t(nvmlEccCounterType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > 0 iff T is unsigned; constant-folds at compile time. */
    const nvmlEccCounterType_t neg_one = (nvmlEccCounterType_t) -1, const_zero = (nvmlEccCounterType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: narrowest PyLong constructor that fits the type. */
        if (sizeof(nvmlEccCounterType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlEccCounterType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlEccCounterType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlEccCounterType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlEccCounterType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for wider types: convert the raw in-memory bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private _PyLong_FromByteArray with a runtime
           endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlEccCounterType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(...) via the vectorcall builder;
           the `signed=True` keyword is added only for signed types. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlEccCounterType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result remains NULL on error. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlClockType_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlClockType_t(nvmlClockType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlClockType_t)-1 compares above zero only for unsigned types. */
    const nvmlClockType_t neg_one = (nvmlClockType_t) -1, const_zero = (nvmlClockType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlClockType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlClockType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlClockType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlClockType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlClockType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlClockType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlClockType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlClockId_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlClockId_t(nvmlClockId_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlClockId_t)-1 compares above zero only for unsigned types. */
    const nvmlClockId_t neg_one = (nvmlClockId_t) -1, const_zero = (nvmlClockId_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlClockId_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlClockId_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlClockId_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlClockId_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlClockId_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlClockId_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlClockId_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlDriverModel_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDriverModel_t(nvmlDriverModel_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlDriverModel_t)-1 compares above zero only for unsigned types. */
    const nvmlDriverModel_t neg_one = (nvmlDriverModel_t) -1, const_zero = (nvmlDriverModel_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlDriverModel_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDriverModel_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlDriverModel_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlDriverModel_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDriverModel_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlDriverModel_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlDriverModel_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlPstates_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPstates_t(nvmlPstates_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlPstates_t)-1 compares above zero only for unsigned types. */
    const nvmlPstates_t neg_one = (nvmlPstates_t) -1, const_zero = (nvmlPstates_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlPstates_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPstates_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlPstates_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlPstates_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPstates_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlPstates_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlPstates_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlGpuOperationMode_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuOperationMode_t(nvmlGpuOperationMode_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlGpuOperationMode_t)-1 compares above zero only for unsigned types. */
    const nvmlGpuOperationMode_t neg_one = (nvmlGpuOperationMode_t) -1, const_zero = (nvmlGpuOperationMode_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlGpuOperationMode_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuOperationMode_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpuOperationMode_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpuOperationMode_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuOperationMode_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpuOperationMode_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpuOperationMode_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlInforomObject_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlInforomObject_t(nvmlInforomObject_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlInforomObject_t)-1 compares above zero only for unsigned types. */
    const nvmlInforomObject_t neg_one = (nvmlInforomObject_t) -1, const_zero = (nvmlInforomObject_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlInforomObject_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlInforomObject_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlInforomObject_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlInforomObject_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlInforomObject_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlInforomObject_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlInforomObject_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlReturn_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlReturn_t(nvmlReturn_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlReturn_t)-1 compares above zero only for unsigned types. */
    const nvmlReturn_t neg_one = (nvmlReturn_t) -1, const_zero = (nvmlReturn_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlReturn_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlReturn_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlReturn_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlReturn_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlReturn_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlReturn_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlReturn_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlMemoryLocation_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlMemoryLocation_t(nvmlMemoryLocation_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlMemoryLocation_t)-1 compares above zero only for unsigned types. */
    const nvmlMemoryLocation_t neg_one = (nvmlMemoryLocation_t) -1, const_zero = (nvmlMemoryLocation_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlMemoryLocation_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlMemoryLocation_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlMemoryLocation_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlMemoryLocation_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlMemoryLocation_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlMemoryLocation_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlMemoryLocation_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlPageRetirementCause_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPageRetirementCause_t(nvmlPageRetirementCause_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlPageRetirementCause_t)-1 compares above zero only for unsigned types. */
    const nvmlPageRetirementCause_t neg_one = (nvmlPageRetirementCause_t) -1, const_zero = (nvmlPageRetirementCause_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlPageRetirementCause_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPageRetirementCause_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlPageRetirementCause_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlPageRetirementCause_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPageRetirementCause_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlPageRetirementCause_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlPageRetirementCause_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlRestrictedAPI_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlRestrictedAPI_t(nvmlRestrictedAPI_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlRestrictedAPI_t)-1 compares above zero only for unsigned types. */
    const nvmlRestrictedAPI_t neg_one = (nvmlRestrictedAPI_t) -1, const_zero = (nvmlRestrictedAPI_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlRestrictedAPI_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlRestrictedAPI_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlRestrictedAPI_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlRestrictedAPI_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlRestrictedAPI_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlRestrictedAPI_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlRestrictedAPI_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlGpuUtilizationDomainId_t value to a new Python int.
 * Cython-generated "CIntToPy" template: compile-time sizeof/signedness
 * checks pick the cheapest PyLong constructor; values that fit none of
 * them are converted from their raw in-memory bytes.  Returns a new
 * reference, or NULL with a Python exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuUtilizationDomainId_t(nvmlGpuUtilizationDomainId_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (nvmlGpuUtilizationDomainId_t)-1 compares above zero only for unsigned types. */
    const nvmlGpuUtilizationDomainId_t neg_one = (nvmlGpuUtilizationDomainId_t) -1, const_zero = (nvmlGpuUtilizationDomainId_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* The sizeof() conditions are compile-time constants, so the
         * optimizer retains exactly one of these return paths. */
        if (sizeof(nvmlGpuUtilizationDomainId_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuUtilizationDomainId_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpuUtilizationDomainId_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpuUtilizationDomainId_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuUtilizationDomainId_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: build the Python int from the value's raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+ public native-bytes API; -1 selects the host
         * byte order. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older non-limited CPython: private helper; host endianness is
         * detected at run time via the `one` probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpuUtilizationDomainId_t),
                                     little, !is_unsigned);
#else
        /* Limited API: no C-level helper available, so call
         * int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpuUtilizationDomainId_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            /* args[0] is the reserved vectorcall "self" slot; the extra
             * slot (when CYTHON_VECTORCALL) carries the "signed" kwarg. */
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL if any step above failed. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlGpuVirtualizationMode_t` value to a
   Python int. Signedness is detected at compile time via the `neg_one` trick
   below; the cascade of constant `sizeof` comparisons is folded away by the
   compiler so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpuVirtualizationMode_t(nvmlGpuVirtualizationMode_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlGpuVirtualizationMode_t neg_one = (nvmlGpuVirtualizationMode_t) -1, const_zero = (nvmlGpuVirtualizationMode_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlGpuVirtualizationMode_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuVirtualizationMode_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpuVirtualizationMode_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpuVirtualizationMode_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpuVirtualizationMode_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpuVirtualizationMode_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpuVirtualizationMode_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlHostVgpuMode_t` value to a Python
   int. Signedness is detected at compile time via the `neg_one` trick below;
   the cascade of constant `sizeof` comparisons is folded away by the compiler
   so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlHostVgpuMode_t(nvmlHostVgpuMode_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlHostVgpuMode_t neg_one = (nvmlHostVgpuMode_t) -1, const_zero = (nvmlHostVgpuMode_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlHostVgpuMode_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlHostVgpuMode_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlHostVgpuMode_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlHostVgpuMode_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlHostVgpuMode_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlHostVgpuMode_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlHostVgpuMode_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlVgpuVmIdType_t` value to a Python
   int. Signedness is detected at compile time via the `neg_one` trick below;
   the cascade of constant `sizeof` comparisons is folded away by the compiler
   so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuVmIdType_t(nvmlVgpuVmIdType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlVgpuVmIdType_t neg_one = (nvmlVgpuVmIdType_t) -1, const_zero = (nvmlVgpuVmIdType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlVgpuVmIdType_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuVmIdType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlVgpuVmIdType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlVgpuVmIdType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuVmIdType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlVgpuVmIdType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlVgpuVmIdType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlVgpuGuestInfoState_t` value to a
   Python int. Signedness is detected at compile time via the `neg_one` trick
   below; the cascade of constant `sizeof` comparisons is folded away by the
   compiler so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuGuestInfoState_t(nvmlVgpuGuestInfoState_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlVgpuGuestInfoState_t neg_one = (nvmlVgpuGuestInfoState_t) -1, const_zero = (nvmlVgpuGuestInfoState_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlVgpuGuestInfoState_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuGuestInfoState_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlVgpuGuestInfoState_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlVgpuGuestInfoState_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuGuestInfoState_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlVgpuGuestInfoState_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlVgpuGuestInfoState_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlGridLicenseFeatureCode_t` value to a
   Python int. Signedness is detected at compile time via the `neg_one` trick
   below; the cascade of constant `sizeof` comparisons is folded away by the
   compiler so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGridLicenseFeatureCode_t(nvmlGridLicenseFeatureCode_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlGridLicenseFeatureCode_t neg_one = (nvmlGridLicenseFeatureCode_t) -1, const_zero = (nvmlGridLicenseFeatureCode_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlGridLicenseFeatureCode_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGridLicenseFeatureCode_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGridLicenseFeatureCode_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGridLicenseFeatureCode_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGridLicenseFeatureCode_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGridLicenseFeatureCode_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGridLicenseFeatureCode_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlVgpuCapability_t` value to a Python
   int. Signedness is detected at compile time via the `neg_one` trick below;
   the cascade of constant `sizeof` comparisons is folded away by the compiler
   so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuCapability_t(nvmlVgpuCapability_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlVgpuCapability_t neg_one = (nvmlVgpuCapability_t) -1, const_zero = (nvmlVgpuCapability_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlVgpuCapability_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuCapability_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlVgpuCapability_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlVgpuCapability_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuCapability_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlVgpuCapability_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlVgpuCapability_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlVgpuDriverCapability_t` value to a
   Python int. Signedness is detected at compile time via the `neg_one` trick
   below; the cascade of constant `sizeof` comparisons is folded away by the
   compiler so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuDriverCapability_t(nvmlVgpuDriverCapability_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlVgpuDriverCapability_t neg_one = (nvmlVgpuDriverCapability_t) -1, const_zero = (nvmlVgpuDriverCapability_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlVgpuDriverCapability_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuDriverCapability_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlVgpuDriverCapability_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlVgpuDriverCapability_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuDriverCapability_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlVgpuDriverCapability_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlVgpuDriverCapability_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlDeviceVgpuCapability_t` value to a
   Python int. Signedness is detected at compile time via the `neg_one` trick
   below; the cascade of constant `sizeof` comparisons is folded away by the
   compiler so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDeviceVgpuCapability_t(nvmlDeviceVgpuCapability_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlDeviceVgpuCapability_t neg_one = (nvmlDeviceVgpuCapability_t) -1, const_zero = (nvmlDeviceVgpuCapability_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlDeviceVgpuCapability_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDeviceVgpuCapability_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlDeviceVgpuCapability_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlDeviceVgpuCapability_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDeviceVgpuCapability_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlDeviceVgpuCapability_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlDeviceVgpuCapability_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlDeviceGpuRecoveryAction_t` value to a
   Python int. Signedness is detected at compile time via the `neg_one` trick
   below; the cascade of constant `sizeof` comparisons is folded away by the
   compiler so exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDeviceGpuRecoveryAction_t(nvmlDeviceGpuRecoveryAction_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlDeviceGpuRecoveryAction_t neg_one = (nvmlDeviceGpuRecoveryAction_t) -1, const_zero = (nvmlDeviceGpuRecoveryAction_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlDeviceGpuRecoveryAction_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDeviceGpuRecoveryAction_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlDeviceGpuRecoveryAction_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlDeviceGpuRecoveryAction_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDeviceGpuRecoveryAction_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlDeviceGpuRecoveryAction_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlDeviceGpuRecoveryAction_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Generated by Cython: convert a C `nvmlFanState_t` value to a Python int.
   Signedness is detected at compile time via the `neg_one` trick below; the
   cascade of constant `sizeof` comparisons is folded away by the compiler so
   exactly one conversion path survives. Do not hand-edit. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlFanState_t(nvmlFanState_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 compares greater than (T)0 iff T is an unsigned type. */
    const nvmlFanState_t neg_one = (nvmlFanState_t) -1, const_zero = (nvmlFanState_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(nvmlFanState_t) < sizeof(long)) {
            /* Strictly smaller unsigned type always fits in a signed long. */
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlFanState_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlFanState_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlFanState_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlFanState_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback for types wider than long long: build from raw native bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13.0a4+: public native-endian byte constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlFanState_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) from C. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlFanState_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed source types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Single cleanup path: Py_XDECREF tolerates NULL for unset temporaries. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlLedColor_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlLedColor_t(nvmlLedColor_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlLedColor_t neg_one = (nvmlLedColor_t) -1, const_zero = (nvmlLedColor_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlLedColor_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlLedColor_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlLedColor_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlLedColor_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlLedColor_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlLedColor_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlLedColor_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlEncoderType_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlEncoderType_t(nvmlEncoderType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlEncoderType_t neg_one = (nvmlEncoderType_t) -1, const_zero = (nvmlEncoderType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlEncoderType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlEncoderType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlEncoderType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlEncoderType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlEncoderType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlEncoderType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlEncoderType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlFBCSessionType_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlFBCSessionType_t(nvmlFBCSessionType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlFBCSessionType_t neg_one = (nvmlFBCSessionType_t) -1, const_zero = (nvmlFBCSessionType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlFBCSessionType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlFBCSessionType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlFBCSessionType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlFBCSessionType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlFBCSessionType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlFBCSessionType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlFBCSessionType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlDetachGpuState_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDetachGpuState_t(nvmlDetachGpuState_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlDetachGpuState_t neg_one = (nvmlDetachGpuState_t) -1, const_zero = (nvmlDetachGpuState_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlDetachGpuState_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDetachGpuState_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlDetachGpuState_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlDetachGpuState_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDetachGpuState_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlDetachGpuState_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlDetachGpuState_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlPcieLinkState_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPcieLinkState_t(nvmlPcieLinkState_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlPcieLinkState_t neg_one = (nvmlPcieLinkState_t) -1, const_zero = (nvmlPcieLinkState_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlPcieLinkState_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPcieLinkState_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlPcieLinkState_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlPcieLinkState_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPcieLinkState_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlPcieLinkState_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlPcieLinkState_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlClockLimitId_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlClockLimitId_t(nvmlClockLimitId_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlClockLimitId_t neg_one = (nvmlClockLimitId_t) -1, const_zero = (nvmlClockLimitId_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlClockLimitId_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlClockLimitId_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlClockLimitId_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlClockLimitId_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlClockLimitId_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlClockLimitId_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlClockLimitId_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlVgpuVmCompatibility_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuVmCompatibility_t(nvmlVgpuVmCompatibility_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlVgpuVmCompatibility_t neg_one = (nvmlVgpuVmCompatibility_t) -1, const_zero = (nvmlVgpuVmCompatibility_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlVgpuVmCompatibility_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuVmCompatibility_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlVgpuVmCompatibility_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlVgpuVmCompatibility_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuVmCompatibility_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlVgpuVmCompatibility_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlVgpuVmCompatibility_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlVgpuPgpuCompatibilityLimitCode_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlVgpuPgpuCompatibilityLimitCode_t(nvmlVgpuPgpuCompatibilityLimitCode_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlVgpuPgpuCompatibilityLimitCode_t neg_one = (nvmlVgpuPgpuCompatibilityLimitCode_t) -1, const_zero = (nvmlVgpuPgpuCompatibilityLimitCode_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlVgpuPgpuCompatibilityLimitCode_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlGpmMetricId_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlGpmMetricId_t(nvmlGpmMetricId_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlGpmMetricId_t neg_one = (nvmlGpmMetricId_t) -1, const_zero = (nvmlGpmMetricId_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlGpmMetricId_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpmMetricId_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlGpmMetricId_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlGpmMetricId_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlGpmMetricId_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlGpmMetricId_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlGpmMetricId_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlPowerProfileType_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlPowerProfileType_t(nvmlPowerProfileType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlPowerProfileType_t neg_one = (nvmlPowerProfileType_t) -1, const_zero = (nvmlPowerProfileType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlPowerProfileType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPowerProfileType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlPowerProfileType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlPowerProfileType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlPowerProfileType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlPowerProfileType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlPowerProfileType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C nvmlDeviceAddressingModeType_t value to a new Python int.
   Returns a new reference, or NULL with a Python exception set.
   Cython emits one such converter per C integer/enum type. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_nvmlDeviceAddressingModeType_t(nvmlDeviceAddressingModeType_t value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Casting -1 detects whether the underlying type is unsigned
       (enum representation is implementation-defined). */
    const nvmlDeviceAddressingModeType_t neg_one = (nvmlDeviceAddressingModeType_t) -1, const_zero = (nvmlDeviceAddressingModeType_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Fast paths: value fits a native long / long long, so use the direct
       CPython constructors. The sizeof comparisons are compile-time
       constants, so dead branches are eliminated. */
    if (is_unsigned) {
        if (sizeof(nvmlDeviceAddressingModeType_t) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDeviceAddressingModeType_t) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(nvmlDeviceAddressingModeType_t) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(nvmlDeviceAddressingModeType_t) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(nvmlDeviceAddressingModeType_t) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback: type is wider than the native constructors handle;
       build the Python int from the value's raw in-memory bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython >= 3.13.0a4: public native-endianness byte APIs. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array constructor; detect host
           endianness at runtime via the first byte of int 1. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(nvmlDeviceAddressingModeType_t),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...)
           through the vectorcall protocol. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(nvmlDeviceAddressingModeType_t));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only passed for signed types. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup; result stays NULL on every failure path. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntFromPy */
/* Cython-generated converter: extract a C `int` from a Python object.
 *
 * Returns the converted value, or (int)-1 with a Python exception set on
 * failure (callers must check PyErr_Occurred() since -1 is also a valid
 * result).  Raises OverflowError when the value does not fit.
 *
 * Strategy:
 *   1. Non-int objects are coerced via __index__/__int__ and retried.
 *   2. With CPython internals available, small multi-digit PyLongs
 *      (2-4 digits) are assembled directly from the digit array.
 *   3. Otherwise fall through to PyLong_AsLong(Long) when `int` fits.
 *   4. Last resort: raw-byte extraction, picking the best API for the
 *      target CPython version / Limited API mode.
 *
 * NOTE: machine-generated (Cython 3.2.2); do not hand-edit logic here.
 */
static CYTHON_INLINE int __Pyx_PyLong_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 detects at compile time whether T is unsigned. */
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        /* Coerce to a real Python int, then recurse once. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyLong_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            /* Single-digit ("compact") PyLong: direct value read. */
            __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            /* Multi-digit PyLong: assemble from 15/30-bit digits when the
             * result provably fits (all size checks fold at compile time). */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
        /* Reject negative values for unsigned targets. */
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* Portable negativity test: x < False (i.e. x < 0). */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (int) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(int) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            /* Signed multi-digit path: negative cases carry a negated
             * digit count and produce a negated assembled value. */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(int) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
        } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        /* Generic fallback: extract the raw bytes of the value. */
        int val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: public PyLong_AsNativeBytes with overflow and
         * sign-rejection flags. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Pre-3.13 CPython: internal byte-array extractor. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited API / PyPy: extract chunk_size bits at a time using
         * Python-level &, >> operations, then verify the remainder fits.
         * Negative inputs are handled via two's complement (~x, then ~val). */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (int) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (int) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~x (non-negative), invert back at the end. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (int) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (int) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((int) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Final partial chunk: must fit in the remaining bits (one
             * fewer for signed targets, reserving the sign bit). */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((int) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (int) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}

/* CIntFromPy */
/* Cython-generated converter: extract a C `size_t` from a Python object.
 *
 * Returns the converted value, or (size_t)-1 with a Python exception set
 * on failure (callers must check PyErr_Occurred(), since (size_t)-1 is
 * also a valid result).  Raises OverflowError for out-of-range or (since
 * size_t is unsigned) negative values.
 *
 * Strategy mirrors the other CIntFromPy helpers:
 *   1. Coerce non-int objects via __index__/__int__ and retry.
 *   2. With CPython internals: assemble small multi-digit PyLongs
 *      (2-4 digits) directly from the digit array.
 *   3. Otherwise PyLong_AsUnsignedLong(Long) when size_t fits.
 *   4. Last resort: raw-byte extraction per target CPython version.
 *
 * NOTE: machine-generated (Cython 3.2.2); do not hand-edit logic here.
 */
static CYTHON_INLINE size_t __Pyx_PyLong_As_size_t(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* (T)-1 > (T)0 detects at compile time whether T is unsigned
     * (always true for size_t; kept for template uniformity). */
    const size_t neg_one = (size_t) -1, const_zero = (size_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        /* Coerce to a real Python int, then recurse once. */
        size_t val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (size_t) -1;
        val = __Pyx_PyLong_As_size_t(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            /* Single-digit ("compact") PyLong: direct value read. */
            __PYX_VERIFY_RETURN_INT(size_t, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            /* Multi-digit PyLong: assemble from 15/30-bit digits when the
             * result provably fits (size checks fold at compile time). */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(size_t) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) >= 2 * PyLong_SHIFT)) {
                            return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(size_t) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) >= 3 * PyLong_SHIFT)) {
                            return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(size_t) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) >= 4 * PyLong_SHIFT)) {
                            return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
        /* Reject negative values for the unsigned target. */
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* Portable negativity test: x < False (i.e. x < 0). */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (size_t) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(size_t) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
        /* Signed branch: dead for size_t (always unsigned) but emitted by
         * the shared Cython template; the compiler discards it. */
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(size_t, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT)) {
                            return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(size_t) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT)) {
                            return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT)) {
                            return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(size_t) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT)) {
                            return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT)) {
                            return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(size_t) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT)) {
                            return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(size_t) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x))
        } else if ((sizeof(size_t) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        /* Generic fallback: extract the raw bytes of the value. */
        size_t val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: public PyLong_AsNativeBytes with overflow and
         * sign-rejection flags. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Pre-3.13 CPython: internal byte-array extractor. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited API / PyPy: extract chunk_size bits at a time using
         * Python-level &, >> operations, then verify the remainder fits.
         * Negative inputs are handled via two's complement (~x, then ~val). */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (size_t) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (size_t) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~x (non-negative), invert back at the end. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (size_t) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (size_t) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(size_t) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((size_t) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Final partial chunk: must fit in the remaining bits (one
             * fewer for signed targets, reserving the sign bit). */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(size_t) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((size_t) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((size_t) 1) << (sizeof(size_t) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (size_t) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to size_t");
    return (size_t) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to size_t");
    return (size_t) -1;
}

/* CIntFromPy */
static CYTHON_INLINE unsigned int __Pyx_PyLong_As_unsigned_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        unsigned int val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (unsigned int) -1;
        val = __Pyx_PyLong_As_unsigned_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(unsigned int) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT)) {
                            return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned int) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT)) {
                            return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned int) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT)) {
                            return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (unsigned int) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(unsigned int) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(unsigned int) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned int) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned int) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(unsigned int) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x))
        } else if ((sizeof(unsigned int) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        unsigned int val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (unsigned int) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (unsigned int) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (unsigned int) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (unsigned int) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(unsigned int) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((unsigned int) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(unsigned int) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((unsigned int) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((unsigned int) 1) << (sizeof(unsigned int) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (unsigned int) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to unsigned int");
    return (unsigned int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to unsigned int");
    return (unsigned int) -1;
}

/* CIntFromPy */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyLong_As_unsigned_PY_LONG_LONG(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) -1, const_zero = (unsigned PY_LONG_LONG) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        unsigned PY_LONG_LONG val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (unsigned PY_LONG_LONG) -1;
        val = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) >= 2 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) >= 3 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) >= 4 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (unsigned PY_LONG_LONG) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) ((((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) ((((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned PY_LONG_LONG) ((((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(unsigned PY_LONG_LONG) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, long, PyLong_AsLong(x))
        } else if ((sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        unsigned PY_LONG_LONG val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (unsigned PY_LONG_LONG) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (unsigned PY_LONG_LONG) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (unsigned PY_LONG_LONG) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (unsigned PY_LONG_LONG) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(unsigned PY_LONG_LONG) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((unsigned PY_LONG_LONG) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(unsigned PY_LONG_LONG) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((unsigned PY_LONG_LONG) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((unsigned PY_LONG_LONG) 1) << (sizeof(unsigned PY_LONG_LONG) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (unsigned PY_LONG_LONG) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to unsigned PY_LONG_LONG");
    return (unsigned PY_LONG_LONG) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to unsigned PY_LONG_LONG");
    return (unsigned PY_LONG_LONG) -1;
}

/* CIntFromPy */
static CYTHON_INLINE nvmlTemperatureSensors_t __Pyx_PyLong_As_nvmlTemperatureSensors_t(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const nvmlTemperatureSensors_t neg_one = (nvmlTemperatureSensors_t) -1, const_zero = (nvmlTemperatureSensors_t) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        nvmlTemperatureSensors_t val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (nvmlTemperatureSensors_t) -1;
        val = __Pyx_PyLong_As_nvmlTemperatureSensors_t(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) >= 2 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) (((((nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) >= 3 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) (((((((nvmlTemperatureSensors_t)digits[2]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) >= 4 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) (((((((((nvmlTemperatureSensors_t)digits[3]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[2]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (nvmlTemperatureSensors_t) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(nvmlTemperatureSensors_t) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(nvmlTemperatureSensors_t, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(nvmlTemperatureSensors_t) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(nvmlTemperatureSensors_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 2 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) (((nvmlTemperatureSensors_t)-1)*(((((nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 2 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) ((((((nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 3 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) (((nvmlTemperatureSensors_t)-1)*(((((((nvmlTemperatureSensors_t)digits[2]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 3 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) ((((((((nvmlTemperatureSensors_t)digits[2]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 4 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) (((nvmlTemperatureSensors_t)-1)*(((((((((nvmlTemperatureSensors_t)digits[3]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[2]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(nvmlTemperatureSensors_t) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(nvmlTemperatureSensors_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(nvmlTemperatureSensors_t) - 1 > 4 * PyLong_SHIFT)) {
                            return (nvmlTemperatureSensors_t) ((((((((((nvmlTemperatureSensors_t)digits[3]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[2]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[1]) << PyLong_SHIFT) | (nvmlTemperatureSensors_t)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(nvmlTemperatureSensors_t) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(nvmlTemperatureSensors_t, long, PyLong_AsLong(x))
        } else if ((sizeof(nvmlTemperatureSensors_t) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(nvmlTemperatureSensors_t, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        nvmlTemperatureSensors_t val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        PyErr_SetString(PyExc_RuntimeError,
                        "_PyLong_AsByteArray() or PyLong_AsNativeBytes() not available, cannot convert large enums");
        val = (nvmlTemperatureSensors_t) -1;
#endif
        if (unlikely(ret))
            return (nvmlTemperatureSensors_t) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to nvmlTemperatureSensors_t");
    return (nvmlTemperatureSensors_t) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to nvmlTemperatureSensors_t");
    return (nvmlTemperatureSensors_t) -1;
}

/* CIntFromPy */
/* Convert a Python object to a C `long` (Cython-generated CIntFromPy helper).
 * Strategy, in decreasing order of speed:
 *   1. Non-int objects are coerced via __Pyx_PyNumber_Long and the function
 *      recurses on the result.
 *   2. With CYTHON_USE_PYLONG_INTERNALS, the PyLong's internal digit array
 *      is read directly: compact values first, then explicit unrolled cases
 *      for 2..4 digits (positive and negative digit counts handled
 *      separately in the signed branch).
 *   3. Fallback to PyLong_AsLong / PyLong_AsLongLong with overflow checks
 *      done by the __PYX_VERIFY_RETURN_INT* macros.
 *   4. Last resort: PyLong_AsNativeBytes (CPython >= 3.13a6),
 *      _PyLong_AsByteArray (< 3.13), or a portable mask-and-shift loop that
 *      uses Python-level arithmetic (Limited API / PyPy).
 * On error or overflow an exception is set and (long) -1 is returned; callers
 * must distinguish via PyErr_Occurred() since -1 is also a valid result. */
static CYTHON_INLINE long __Pyx_PyLong_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe; always 0 for `long` */
    if (unlikely(!PyLong_Check(x))) {
        /* Not an int: coerce with __index__/__int__ semantics, then recurse. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyLong_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
        /* Unsigned target: negative inputs always overflow. */
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            /* Multi-digit PyLong: assemble up to 4 base-2^PyLong_SHIFT digits. */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        /* Pre-3.12: the sign lives in ob_size. */
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* Portable negativity check: x < False (i.e. x < 0). */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (long) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(long) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
        /* Signed target. */
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            /* Multi-digit: signed digit count encodes the sign (negative cases
             * assemble the magnitude and negate). */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(long) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
        } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        /* Slow path: value did not fit any of the native fast paths above. */
        long val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: single-call native-bytes conversion with endianness
         * and sign handling done by the interpreter. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
            /* conversion error: ret stays -1, exception already set */
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Pre-3.13 CPython: private byte-array conversion. */
        int one = 1; int is_little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited API / PyPy: extract the value chunk_size bits at a time
         * using Python-level &, >> arithmetic; negative values are handled
         * by converting via one's complement (~v) and inverting at the end. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (long) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (long) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* work on ~v (non-negative) and undo with ~val below */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (long) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (long) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((long) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Final partial chunk: anything beyond the remaining bit budget
             * (minus the sign bit for signed targets) is an overflow. */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((long) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val; /* undo the PyNumber_Invert above */
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (long) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}

/* CIntToPy */
/* Convert a C `int` to a new Python int (Cython-generated CIntToPy helper).
 * Fast paths pick PyLong_FromLong / PyLong_FromUnsignedLong(Long) based on
 * compile-time size/signedness comparisons (dead branches are folded away).
 * Otherwise the raw bytes of `value` are converted via
 * PyLong_From(Unsigned)NativeBytes (CPython >= 3.13a4), _PyLong_FromByteArray
 * (< 3.13), or a Limited-API fallback that calls int.from_bytes().
 * Returns a new reference, or NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe */
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* No native-width constructor fits: convert the raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
#else
        /* Limited API: emulate via int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(int));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `unsigned int` to a new Python int (Cython CIntToPy helper).
 * Same template as the other From_* helpers: compile-time size checks choose
 * a direct PyLong_From* constructor; otherwise the raw bytes are converted
 * via PyLong_From(Unsigned)NativeBytes (3.13+), _PyLong_FromByteArray
 * (< 3.13), or int.from_bytes() under the Limited API.
 * Returns a new reference, or NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_int(unsigned int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe; 1 here */
    if (is_unsigned) {
        if (sizeof(unsigned int) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(unsigned int) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* No native-width constructor fits: convert the raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(unsigned int),
                                     little, !is_unsigned);
#else
        /* Limited API: emulate via int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(unsigned int));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `unsigned PY_LONG_LONG` (unsigned long long) to a new Python
 * int (Cython CIntToPy helper). Compile-time size checks choose a direct
 * PyLong_From* constructor; otherwise the raw bytes are converted via
 * PyLong_From(Unsigned)NativeBytes (3.13+), _PyLong_FromByteArray (< 3.13),
 * or int.from_bytes() under the Limited API.
 * Returns a new reference, or NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) -1, const_zero = (unsigned PY_LONG_LONG) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe; 1 here */
    if (is_unsigned) {
        if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* No native-width constructor fits: convert the raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(unsigned PY_LONG_LONG),
                                     little, !is_unsigned);
#else
        /* Limited API: emulate via int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(unsigned PY_LONG_LONG));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `long` to a new Python int (Cython CIntToPy helper).
 * Compile-time size checks choose a direct PyLong_From* constructor
 * (for `long` this always resolves to PyLong_FromLong); the byte-level
 * fallbacks below are dead code kept by the generator for template
 * uniformity. Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe; 0 here */
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Unreachable for `long`, kept for template uniformity. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
#else
        /* Limited API: emulate via int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(long));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a value of an anonymous C enum (represented as `int`) to a new
 * Python int (Cython CIntToPy helper). Identical template to
 * __Pyx_PyLong_From_int: compile-time size checks pick a direct
 * PyLong_From* constructor, with byte-level fallbacks for exotic builds.
 * Returns a new reference, or NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From___pyx_anon_enum(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe */
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* No native-width constructor fits: convert the raw bytes. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
#else
        /* Limited API: emulate via int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(int));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntToPy */
/* Convert a C `unsigned long` to a new Python int (Cython CIntToPy helper).
 * For `unsigned long` the fast path always resolves to
 * PyLong_FromUnsignedLong; the byte-level fallbacks below are dead code
 * kept by the generator for template uniformity.
 * Returns a new reference, or NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_long(unsigned long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned long neg_one = (unsigned long) -1, const_zero = (unsigned long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero; /* compile-time signedness probe; 1 here */
    if (is_unsigned) {
        if (sizeof(unsigned long) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(unsigned long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(unsigned long) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Unreachable for `unsigned long`, kept for template uniformity. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        int one = 1; int little = (int)*(unsigned char *)&one; /* runtime endianness probe */
        return _PyLong_FromByteArray(bytes, sizeof(unsigned long),
                                     little, !is_unsigned);
#else
        /* Limited API: emulate via int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(unsigned long));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntFromPy */
static CYTHON_INLINE unsigned long __Pyx_PyLong_As_unsigned_long(PyObject *x) {
    /* Convert a Python object to a C 'unsigned long'.
       On failure returns (unsigned long)-1 with a Python exception set;
       since -1 is also a valid result, callers disambiguate via PyErr_Occurred(). */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned long neg_one = (unsigned long) -1, const_zero = (unsigned long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time signedness probe: true iff the target type is unsigned
       (this generated helper is shared between signed/unsigned instantiations). */
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        /* Not an int: coerce with int(x) and retry on the temporary. */
        unsigned long val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (unsigned long) -1;
        val = __Pyx_PyLong_As_unsigned_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        /* Fast path: read the PyLong's internal base-2^PyLong_SHIFT digits directly. */
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            /* Single-digit ("compact") value: direct extraction. */
            __PYX_VERIFY_RETURN_INT(unsigned long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            /* Multi-digit value: assemble from digits when the compile-time
               size checks prove the result fits in the target type. */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(unsigned long) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) >= 2 * PyLong_SHIFT)) {
                            return (unsigned long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) >= 3 * PyLong_SHIFT)) {
                            return (unsigned long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) >= 4 * PyLong_SHIFT)) {
                            return (unsigned long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        /* Pre-3.12 CPython: the sign lives in ob_size. */
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* Portable negativity check: x < False (i.e. x < 0). */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (unsigned long) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        /* Delegate to the narrowest C-API conversion that can hold the type.
           (The first comparison is a tautology for this instantiation; the
           code generator emits the same ladder for every integer type.) */
        if ((sizeof(unsigned long) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned long, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(unsigned long) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
        /* Signed-target branch (dead for 'unsigned long'; kept by the generator). */
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            /* Negative cases carry the sign in the digit count. */
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(unsigned long) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned long) (((unsigned long)-1)*(((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(unsigned long) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned long) ((((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(unsigned long) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned long) (((unsigned long)-1)*(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned long) ((((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(unsigned long) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned long) (((unsigned long)-1)*(((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned long) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned long) ((((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(unsigned long) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned long, long, PyLong_AsLong(x))
        } else if ((sizeof(unsigned long) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned long, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        /* Generic fallback when no fast path returned: copy the value out
           byte-by-byte (or, under the Limited API, chunk-by-chunk). */
        unsigned long val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: public native-bytes API; flags reject negatives for
           unsigned targets, and a copied size > sizeof(val) signals overflow. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
            /* error already set by PyLong_AsNativeBytes; ret stays -1 */
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Older CPython: private byte-array export with runtime endianness probe. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited-API / PyPy path: extract the magnitude in chunk_size-bit
           chunks using only Python-level arithmetic (&, >>), then fix the sign. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        /* Each chunk must fit a non-negative C long: 30 bits on 32-bit longs, 62 on 64-bit. */
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (unsigned long) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (unsigned long) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~v (non-negative) and invert back at the end: avoids
               overflow on the most negative value. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (unsigned long) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (unsigned long) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(unsigned long) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((unsigned long) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Last (partial) chunk: whatever remains must fit the leftover bits. */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(unsigned long) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((unsigned long) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((unsigned long) 1) << (sizeof(unsigned long) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (unsigned long) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to unsigned long");
    return (unsigned long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to unsigned long");
    return (unsigned long) -1;
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_PY_LONG_LONG(PY_LONG_LONG value) {
    /* Convert a C PY_LONG_LONG to a new Python int (new reference, NULL on error).
       Fast paths pick the narrowest PyLong_From* constructor that fits; the
       tail falls back to raw-byte conversion for exotic configurations. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const PY_LONG_LONG neg_one = (PY_LONG_LONG) -1, const_zero = (PY_LONG_LONG) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time signedness probe for the instantiated type. */
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(PY_LONG_LONG) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(PY_LONG_LONG) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(PY_LONG_LONG) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) {
            /* Always true for this instantiation; generic ladder kept by the generator. */
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: hand the raw in-memory bytes to the interpreter. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors (native endianness). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array import with runtime endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(PY_LONG_LONG),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(PY_LONG_LONG));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed targets. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result is NULL on any failure path. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntFromPy */
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyLong_As_PY_LONG_LONG(PyObject *x) {
    /* Convert a Python object to a C PY_LONG_LONG (signed long long).
       On failure returns (PY_LONG_LONG)-1 with a Python exception set;
       callers disambiguate via PyErr_Occurred() since -1 is a valid value. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const PY_LONG_LONG neg_one = (PY_LONG_LONG) -1, const_zero = (PY_LONG_LONG) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time signedness probe: false for PY_LONG_LONG (signed). */
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        /* Not an int: coerce with int(x) and retry on the temporary. */
        PY_LONG_LONG val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (PY_LONG_LONG) -1;
        val = __Pyx_PyLong_As_PY_LONG_LONG(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
        /* Unsigned-target branch (dead for PY_LONG_LONG; kept by the generator). */
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            /* Multi-digit fast path: assemble from base-2^PyLong_SHIFT digits,
               preferring unsigned long arithmetic when 2-4 digits fit in it. */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(PY_LONG_LONG) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) >= 2 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) (((((PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(PY_LONG_LONG) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) >= 3 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) (((((((PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(PY_LONG_LONG) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) >= 4 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) (((((((((PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        /* Pre-3.12 CPython: the sign lives in ob_size. */
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* Portable negativity check: x < False (i.e. x < 0). */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (PY_LONG_LONG) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(PY_LONG_LONG) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        /* Signed fast path: read the PyLong's internal digits directly. */
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            /* Negative cases carry the sign in the digit count. */
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(PY_LONG_LONG) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) (((PY_LONG_LONG)-1)*(((((PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(PY_LONG_LONG) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) ((((((PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) (((PY_LONG_LONG)-1)*(((((((PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(PY_LONG_LONG) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) ((((((((PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) (((PY_LONG_LONG)-1)*(((((((((PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(PY_LONG_LONG) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT)) {
                            return (PY_LONG_LONG) ((((((((((PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        /* Delegate to the narrowest C-API conversion that can hold the type. */
        if ((sizeof(PY_LONG_LONG) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, long, PyLong_AsLong(x))
        } else if ((sizeof(PY_LONG_LONG) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        /* Generic fallback when no fast path returned: copy the value out
           byte-by-byte (or, under the Limited API, chunk-by-chunk). */
        PY_LONG_LONG val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: public native-bytes API; a copied size > sizeof(val)
           signals overflow. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
            /* error already set by PyLong_AsNativeBytes; ret stays -1 */
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Older CPython: private byte-array export with runtime endianness probe. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited-API / PyPy path: extract the magnitude in chunk_size-bit
           chunks using only Python-level arithmetic (&, >>), then fix the sign. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        /* Each chunk must fit a non-negative C long: 30 bits on 32-bit longs, 62 on 64-bit. */
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (PY_LONG_LONG) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (PY_LONG_LONG) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~v (non-negative) and invert back at the end: avoids
               overflow on the most negative value. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (PY_LONG_LONG) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (PY_LONG_LONG) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(PY_LONG_LONG) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((PY_LONG_LONG) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Last (partial) chunk: whatever remains must fit the leftover bits. */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(PY_LONG_LONG) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((PY_LONG_LONG) idigit) << bits;
        }
        if (!is_unsigned) {
            /* The magnitude must leave the sign bit clear before ~ is applied. */
            if (unlikely(val & (((PY_LONG_LONG) 1) << (sizeof(PY_LONG_LONG) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (PY_LONG_LONG) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to PY_LONG_LONG");
    return (PY_LONG_LONG) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to PY_LONG_LONG");
    return (PY_LONG_LONG) -1;
}

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_short(unsigned short value) {
    /* Convert a C 'unsigned short' to a new Python int (new reference, NULL on error).
       For this type sizeof(unsigned short) < sizeof(long) on common platforms,
       so the first PyLong_FromLong fast path normally handles everything;
       the remaining ladder and byte fallback are generic generated code. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const unsigned short neg_one = (unsigned short) -1, const_zero = (unsigned short) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time signedness probe: true for unsigned short. */
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(unsigned short) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned short) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(unsigned short) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        /* Signed branch (dead for unsigned short; kept by the generator). */
        if (sizeof(unsigned short) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned short) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: hand the raw in-memory bytes to the interpreter. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors (native endianness). */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array import with runtime endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(unsigned short),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) via vectorcall. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(unsigned short));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                /* signed=True keyword is only needed for signed targets. */
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        /* Shared cleanup; result is NULL on any failure path. */
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntFromPy */
/* Convert a Python object to a C `unsigned short`.
 *
 * Returns (unsigned short)-1 with a Python exception set on failure; since
 * (unsigned short)-1 is also a valid value, callers must check
 * PyErr_Occurred() to distinguish the error case.  Raises OverflowError for
 * values that do not fit, or for negative values. */
static CYTHON_INLINE unsigned short __Pyx_PyLong_As_unsigned_short(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe: for `unsigned short` neg_one wraps to
     * the max value, so is_unsigned is 1.  The same template is shared with
     * signed target types, hence both branches below exist. */
    const unsigned short neg_one = (unsigned short) -1, const_zero = (unsigned short) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Non-int input: coerce with int(x) equivalent, then recurse on the
     * resulting exact PyLong. */
    if (unlikely(!PyLong_Check(x))) {
        unsigned short val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (unsigned short) -1;
        val = __Pyx_PyLong_As_unsigned_short(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        /* Fast path: read the PyLong digit array directly (CPython
         * internals).  Compact values fit in a single machine word. */
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned short, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            /* Reassemble the value from N base-2^PyLong_SHIFT digits, but
             * only when the size checks prove the shifts cannot overflow the
             * intermediate type; otherwise fall through to the generic path. */
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(unsigned short) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) >= 2 * PyLong_SHIFT)) {
                            return (unsigned short) (((((unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned short) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) >= 3 * PyLong_SHIFT)) {
                            return (unsigned short) (((((((unsigned short)digits[2]) << PyLong_SHIFT) | (unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned short) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) >= 4 * PyLong_SHIFT)) {
                            return (unsigned short) (((((((((unsigned short)digits[3]) << PyLong_SHIFT) | (unsigned short)digits[2]) << PyLong_SHIFT) | (unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
        /* Negativity check for the unsigned target: cheap Py_SIZE test on
         * older CPython, otherwise a rich comparison against False (== 0). */
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (unsigned short) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        /* Portable path: delegate to the widest matching C API converter;
         * __PYX_VERIFY_RETURN_INT_EXC narrows with an overflow check. */
        if ((sizeof(unsigned short) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned short, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(unsigned short) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned short, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
        /* Signed-target branch of the shared template; dead for `unsigned
         * short` (is_unsigned is a compile-time constant 1 here). */
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned short, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            /* Negative case labels carry the sign of the PyLong; the value is
             * reassembled and then negated. */
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(unsigned short) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned short) (((unsigned short)-1)*(((((unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(unsigned short) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned short) ((((((unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(unsigned short) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned short) (((unsigned short)-1)*(((((((unsigned short)digits[2]) << PyLong_SHIFT) | (unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned short) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned short) ((((((((unsigned short)digits[2]) << PyLong_SHIFT) | (unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(unsigned short) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned short) (((unsigned short)-1)*(((((((((unsigned short)digits[3]) << PyLong_SHIFT) | (unsigned short)digits[2]) << PyLong_SHIFT) | (unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned short) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned short, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned short) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned short) ((((((((((unsigned short)digits[3]) << PyLong_SHIFT) | (unsigned short)digits[2]) << PyLong_SHIFT) | (unsigned short)digits[1]) << PyLong_SHIFT) | (unsigned short)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(unsigned short) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned short, long, PyLong_AsLong(x))
        } else if ((sizeof(unsigned short) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned short, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    /* Last-resort conversion when no fixed-width C API converter applied. */
    {
        unsigned short val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: copy the value's native bytes directly; a copy
         * larger than sizeof(val) means it did not fit. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Older CPython: private byte-array export with endianness detected
         * at runtime from a probe int. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited API / PyPy: extract the value chunk_size bits at a time
         * using only abstract-object arithmetic (And / Rshift), then verify
         * the remainder fits. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (unsigned short) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (unsigned short) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~v (non-negative) and undo the inversion at the end. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (unsigned short) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (unsigned short) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(unsigned short) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((unsigned short) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(unsigned short) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((unsigned short) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((unsigned short) 1) << (sizeof(unsigned short) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (unsigned short) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to unsigned short");
    return (unsigned short) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to unsigned short");
    return (unsigned short) -1;
}

/* CIntToPy */
/* Convert a C `unsigned char` to a new Python int object.
 * Returns a new reference, or NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_unsigned_char(unsigned char value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe; the template is shared with signed
     * types, so both branches below are generated. */
    const unsigned char neg_one = (unsigned char) -1, const_zero = (unsigned char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the narrowest PyLong_From* constructor that can
         * represent the value without truncation. */
        if (sizeof(unsigned char) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned char) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(unsigned char) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback for types wider than the C API constructors: build the int
     * from the value's raw bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array import; endianness is detected
         * at runtime from a probe int. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(unsigned char),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(py_bytes, byteorder[, signed=True])
         * through a vectorcall builder. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(unsigned char));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup for both success and failure paths. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntFromPy */
/* Convert a Python object to a C `unsigned char`.
 *
 * Returns (unsigned char)-1 with a Python exception set on failure; since
 * (unsigned char)-1 is also a valid value, callers must check
 * PyErr_Occurred() to distinguish the error case.  Raises OverflowError for
 * values that do not fit, or for negative values. */
static CYTHON_INLINE unsigned char __Pyx_PyLong_As_unsigned_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe: for `unsigned char` this is always
     * true; the template is shared with signed target types. */
    const unsigned char neg_one = (unsigned char) -1, const_zero = (unsigned char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    /* Non-int input: coerce with int(x) equivalent, then recurse. */
    if (unlikely(!PyLong_Check(x))) {
        unsigned char val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (unsigned char) -1;
        val = __Pyx_PyLong_As_unsigned_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        /* Fast path: read the PyLong digit array directly (CPython
         * internals).  Compact values fit in a single machine word. */
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned char, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            /* Reassemble from N base-2^PyLong_SHIFT digits only when the
             * size checks prove the shifts cannot overflow; otherwise fall
             * through to the generic path. */
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(unsigned char) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) >= 2 * PyLong_SHIFT)) {
                            return (unsigned char) (((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned char) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) >= 3 * PyLong_SHIFT)) {
                            return (unsigned char) (((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned char) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) >= 4 * PyLong_SHIFT)) {
                            return (unsigned char) (((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
        /* Negativity check for the unsigned target: cheap Py_SIZE test on
         * older CPython, otherwise a rich comparison against False (== 0). */
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (unsigned char) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        /* Portable path: widest matching C API converter with an overflow-
         * checked narrowing via __PYX_VERIFY_RETURN_INT_EXC. */
        if ((sizeof(unsigned char) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned char, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
        /* Signed-target branch of the shared template; dead for `unsigned
         * char` (is_unsigned is a compile-time constant 1 here). */
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(unsigned char, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            /* Negative case labels carry the sign of the PyLong; the value is
             * reassembled and then negated. */
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(unsigned char) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned char) (((unsigned char)-1)*(((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(unsigned char) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT)) {
                            return (unsigned char) ((((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned char) (((unsigned char)-1)*(((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(unsigned char) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT)) {
                            return (unsigned char) ((((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned char) (((unsigned char)-1)*(((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(unsigned char) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(unsigned char) - 1 > 4 * PyLong_SHIFT)) {
                            return (unsigned char) ((((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(unsigned char) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned char, long, PyLong_AsLong(x))
        } else if ((sizeof(unsigned char) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(unsigned char, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    /* Last-resort conversion when no fixed-width C API converter applied. */
    {
        unsigned char val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* CPython 3.13+: copy the value's native bytes directly; a copy
         * larger than sizeof(val) means it did not fit. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Older CPython: private byte-array export with runtime endianness
         * detection via a probe int. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Limited API / PyPy: extract the value chunk_size bits at a time
         * using only abstract-object arithmetic (And / Rshift), then verify
         * the remainder fits. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (unsigned char) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (unsigned char) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~v (non-negative) and undo the inversion at the end. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (unsigned char) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (unsigned char) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(unsigned char) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((unsigned char) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(unsigned char) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((unsigned char) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((unsigned char) 1) << (sizeof(unsigned char) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (unsigned char) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to unsigned char");
    return (unsigned char) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to unsigned char");
    return (unsigned char) -1;
}

/* CIntToPy */
/* Convert a C `char` to a new Python int object.
 * Returns a new reference, or NULL with an exception set on failure.
 * NOTE: plain `char` signedness is implementation-defined; the neg_one
 * probe below resolves it at compile time. */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_char(char value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe: true only when `char` is unsigned on
     * this platform. */
    const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        /* Fast paths: pick the narrowest PyLong_From* constructor that can
         * represent the value without truncation. */
        if (sizeof(char) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(char) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(char) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    /* Fallback for types wider than the C API constructors: build the int
     * from the value's raw bytes. */
    {
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        /* CPython 3.13+: public native-bytes constructors. */
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Older CPython: private byte-array import; endianness is detected
         * at runtime from a probe int. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(char),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(py_bytes, byteorder[, signed=True])
         * through a vectorcall builder. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(char));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        /* Shared cleanup for both success and failure paths. */
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* PyObjectCallMethod1 */
#if !(CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000)))
/* Call an already-resolved bound method with one argument.
   Consumes (steals) the reference to 'method'; returns a new reference
   to the call result, or NULL with an exception set. */
static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) {
    PyObject *call_result;
    call_result = __Pyx_PyObject_CallOneArg(method, arg);
    Py_DECREF(method);
    return call_result;
}
#endif
/* Call obj.method_name(arg) and return a new reference to the result,
   or NULL with an exception set. */
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
#if CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000))
    /* Fast path (3.9+ / 3.12+ limited API): resolve and call the method in
       one step without materialising a bound-method object. */
    PyObject *args[2] = {obj, arg};
    (void) __Pyx_PyObject_CallOneArg;  /* silence unused-function warnings */
    (void) __Pyx_PyObject_Call2Args;
    return PyObject_VectorcallMethod(method_name, args, 2 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
#else
    PyObject *method = NULL, *result;
    int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
    if (likely(is_method)) {
        /* Got an unbound function: pass 'obj' explicitly as the receiver. */
        result = __Pyx_PyObject_Call2Args(method, obj, arg);
        Py_DECREF(method);
        return result;
    }
    if (unlikely(!method)) return NULL;
    /* Got an already-bound callable; this consumes the 'method' reference. */
    return __Pyx__PyObject_CallMethod1(method, arg);
#endif
}

/* UpdateUnpickledDict */
/* Merge state[index] (a mapping saved by __reduce__) into obj's __dict__
   while unpickling.  Returns 0 on success (including when the saved dict
   is empty/falsy), -1 with an exception set on error. */
static int __Pyx__UpdateUnpickledDict(PyObject *obj, PyObject *state, Py_ssize_t index) {
    PyObject *state_dict = __Pyx_PySequence_ITEM(state, index);
    if (unlikely(!state_dict)) {
        return -1;
    }
    /* Skip the (common) case of an empty saved dict. */
    int non_empty = PyObject_IsTrue(state_dict);
    if (non_empty == 0) {
        Py_DECREF(state_dict);
        return 0;
    } else if (unlikely(non_empty == -1)) {
        return -1;
    }
    PyObject *dict;
    #if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
    dict = PyObject_GetAttrString(obj, "__dict__");
    #else
    /* Creates the instance dict on demand if the object supports one. */
    dict = PyObject_GenericGetDict(obj, NULL);
    #endif
    if (unlikely(!dict)) {
        Py_DECREF(state_dict);
        return -1;
    }
    int result;
    if (likely(PyDict_CheckExact(dict))) {
        result = PyDict_Update(dict, state_dict);
    } else {
        /* Non-dict mapping (e.g. a dict subclass): go through .update(). */
        PyObject *obj_result = __Pyx_PyObject_CallMethod1(dict, __pyx_mstate_global->__pyx_n_u_update, state_dict);
        if (likely(obj_result)) {
            Py_DECREF(obj_result);
            result = 0;
        } else {
            result = -1;
        }
    }
    Py_DECREF(state_dict);
    Py_DECREF(dict);
    return result;
}
/* Merge state[index] into obj's __dict__ if the state tuple is long enough;
   a short tuple is not an error.  Returns 0 on success, -1 on error. */
static int __Pyx_UpdateUnpickledDict(PyObject *obj, PyObject *state, Py_ssize_t index) {
    Py_ssize_t num_items = __Pyx_PyTuple_GET_SIZE(state);
    #if !CYTHON_ASSUME_SAFE_SIZE
    if (unlikely(num_items == -1)) return -1;
    #endif
    if (index >= num_items)
        return 0;
    return __Pyx__UpdateUnpickledDict(obj, state, index);
}

/* CIntFromPy */
/* Convert a Python integer to a C 'char'.
   Raises OverflowError when the value does not fit (or is negative while
   'char' is unsigned on this platform) and returns (char)-1 with the
   exception set; non-int input is first coerced via __Pyx_PyNumber_Long. */
static CYTHON_INLINE char __Pyx_PyLong_As_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time detection of whether 'char' is unsigned on this platform. */
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        /* Not an int: coerce and retry on the coerced value. */
        char val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (char) -1;
        val = __Pyx_PyLong_As_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        /* Fast paths reading CPython's internal 15/30-bit digit array. */
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(char, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(char) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) >= 2 * PyLong_SHIFT)) {
                            return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(char) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) >= 3 * PyLong_SHIFT)) {
                            return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(char) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) >= 4 * PyLong_SHIFT)) {
                            return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
        /* Reject negative values before the unsigned conversion below. */
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* Portable sign check: x < False (i.e. x < 0). */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (char) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(char) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(char) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        /* Signed fast paths; negative digit counts denote negative values. */
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(char, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(char) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) {
                            return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(char) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) {
                            return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) {
                            return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(char) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) {
                            return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) - 1 > 4 * PyLong_SHIFT)) {
                            return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(char) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(char) - 1 > 4 * PyLong_SHIFT)) {
                            return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(char) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
        } else if ((sizeof(char) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    /* Generic fallback: extract the raw bytes of the value. */
    {
        char val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* 3.13+: public native-bytes API. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Older CPython: private byte-array API. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Last resort (PyPy/limited API): extract the value chunk-by-chunk
           with Python-level arithmetic (&, >>), handling negatives via ~. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (char) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (char) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on the one's complement so the loop only sees >= 0. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (char) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (char) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(char) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((char) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Final chunk: whatever is left must fit the remaining bits. */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(char) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((char) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((char) 1) << (sizeof(char) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;  /* undo the one's complement taken above */
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (char) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to char");
    return (char) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to char");
    return (char) -1;
}

/* FormatTypeName */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030d0000
static __Pyx_TypeName
__Pyx_PyType_GetFullyQualifiedName(PyTypeObject* tp)
{
    PyObject *module = NULL, *name = NULL, *result = NULL;
    #if __PYX_LIMITED_VERSION_HEX < 0x030b0000
    name = __Pyx_PyObject_GetAttrStr((PyObject *)tp,
                                               __pyx_mstate_global->__pyx_n_u_qualname);
    #else
    name = PyType_GetQualName(tp);
    #endif
    if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) goto bad;
    module = __Pyx_PyObject_GetAttrStr((PyObject *)tp,
                                               __pyx_mstate_global->__pyx_n_u_module);
    if (unlikely(module == NULL) || unlikely(!PyUnicode_Check(module))) goto bad;
    if (PyUnicode_CompareWithASCIIString(module, "builtins") == 0) {
        result = name;
        name = NULL;
        goto done;
    }
    result = PyUnicode_FromFormat("%U.%U", module, name);
    if (unlikely(result == NULL)) goto bad;
  done:
    Py_XDECREF(name);
    Py_XDECREF(module);
    return result;
  bad:
    PyErr_Clear();
    if (name) {
        result = name;
        name = NULL;
    } else {
        result = __Pyx_NewRef(__pyx_mstate_global->__pyx_kp_u__8);
    }
    goto done;
}
#endif

/* GetRuntimeVersion */
#if __PYX_LIMITED_VERSION_HEX < 0x030b0000
/* Parse the interpreter's "X.Y.Z..." version string once and cache it
   globally as a packed value (major<<24 | minor<<16 | micro<<8), matching
   the layout of Py_Version on 3.11+. */
void __Pyx_init_runtime_version(void) {
    const char *version_str;
    unsigned long packed = 0;
    unsigned long place = 0x01000000UL;
    unsigned int component = 0;
    int pos = 0;
    if (__Pyx_cached_runtime_version != 0)
        return;
    version_str = Py_GetVersion();
    while (place) {
        /* Accumulate one decimal version component. */
        while (version_str[pos] >= '0' && version_str[pos] <= '9') {
            component = component * 10 + (unsigned int) (version_str[pos] - '0');
            pos++;
        }
        packed += place * component;
        /* Stop at the first non-'.' separator (e.g. the space before the build info). */
        if (version_str[pos] != '.')
            break;
        component = 0;
        place >>= 8;
        pos++;
    }
    __Pyx_cached_runtime_version = packed;
}
#endif
/* Return the cached runtime version in Py_Version layout, with the
   release-level/serial byte masked off (only major.minor.micro matter). */
static unsigned long __Pyx_get_runtime_version(void) {
#if __PYX_LIMITED_VERSION_HEX >= 0x030b0000
    /* Py_Version exists since CPython 3.11. */
    return Py_Version & ~0xFFUL;
#else
    /* Filled in by __Pyx_init_runtime_version() during module init. */
    return __Pyx_cached_runtime_version;
#endif
}

/* CheckBinaryVersion */
/* Compare the compile-time and runtime Python versions (major.minor only).
   Returns 0 on an exact match, 1 when the runtime is newer and that is
   allowed, otherwise issues a warning and returns its result (0 or -1). */
static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer) {
    const unsigned long MAJOR_MINOR = 0xFFFF0000UL;
    unsigned long ct_mm = ct_version & MAJOR_MINOR;
    unsigned long rt_mm = rt_version & MAJOR_MINOR;
    char message[200];
    if (rt_mm == ct_mm)
        return 0;
    if (likely(allow_newer && rt_mm > ct_mm))
        return 1;
    PyOS_snprintf(message, sizeof(message),
                  "compile time Python version %d.%d "
                  "of module '%.100s' "
                  "%s "
                  "runtime version %d.%d",
                   (int) (ct_version >> 24), (int) ((ct_version >> 16) & 0xFF),
                   __Pyx_MODULE_NAME,
                   (allow_newer) ? "was newer than" : "does not match",
                   (int) (rt_version >> 24), (int) ((rt_version >> 16) & 0xFF)
    );
    return PyErr_WarnEx(NULL, message, 1);
}

/* NewCodeObj */
#if CYTHON_COMPILING_IN_LIMITED_API
    /* Limited-API variant: build the code object by calling
       types.CodeType from Python, choosing the argument layout that
       matches the running minor version.  Preserves any pending
       exception across the construction. */
    static PyObject* __Pyx__PyCode_New(int a, int p, int k, int l, int s, int f,
                                       PyObject *code, PyObject *c, PyObject* n, PyObject *v,
                                       PyObject *fv, PyObject *cell, PyObject* fn,
                                       PyObject *name, int fline, PyObject *lnos) {
        PyObject *exception_table = NULL;
        PyObject *types_module=NULL, *code_type=NULL, *result=NULL;
        #if __PYX_LIMITED_VERSION_HEX < 0x030b0000
        PyObject *version_info;
        PyObject *py_minor_version = NULL;
        #endif
        long minor_version = 0;
        PyObject *type, *value, *traceback;
        /* Save any in-flight exception; restored before returning. */
        PyErr_Fetch(&type, &value, &traceback);
        #if __PYX_LIMITED_VERSION_HEX >= 0x030b0000
        minor_version = 11;
        #else
        /* Determine the runtime minor version via sys.version_info[1]. */
        if (!(version_info = PySys_GetObject("version_info"))) goto end;
        if (!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end;
        minor_version = PyLong_AsLong(py_minor_version);
        Py_DECREF(py_minor_version);
        if (minor_version == -1 && PyErr_Occurred()) goto end;
        #endif
        if (!(types_module = PyImport_ImportModule("types"))) goto end;
        if (!(code_type = PyObject_GetAttrString(types_module, "CodeType"))) goto end;
        if (minor_version <= 7) {
            /* <= 3.7: no posonlyargcount parameter. */
            (void)p;
            result = PyObject_CallFunction(code_type, "iiiiiOOOOOOiOOO", a, k, l, s, f, code,
                          c, n, v, fn, name, fline, lnos, fv, cell);
        } else if (minor_version <= 10) {
            result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOiOOO", a,p, k, l, s, f, code,
                          c, n, v, fn, name, fline, lnos, fv, cell);
        } else {
            /* 3.11+: qualname and exceptiontable were added. */
            if (!(exception_table = PyBytes_FromStringAndSize(NULL, 0))) goto end;
            result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOOiOOOO", a,p, k, l, s, f, code,
                          c, n, v, fn, name, name, fline, lnos, exception_table, fv, cell);
        }
    end:
        Py_XDECREF(code_type);
        Py_XDECREF(exception_table);
        Py_XDECREF(types_module);
        if (type) {
            PyErr_Restore(type, value, traceback);
        }
        return result;
    }
#elif PY_VERSION_HEX >= 0x030B0000
  /* CPython 3.11+: use the (unstable) C API directly. */
  static PyCodeObject* __Pyx__PyCode_New(int a, int p, int k, int l, int s, int f,
                                         PyObject *code, PyObject *c, PyObject* n, PyObject *v,
                                         PyObject *fv, PyObject *cell, PyObject* fn,
                                         PyObject *name, int fline, PyObject *lnos) {
    PyCodeObject *result;
    result =
      #if PY_VERSION_HEX >= 0x030C0000
        PyUnstable_Code_NewWithPosOnlyArgs
      #else
        PyCode_NewWithPosOnlyArgs
      #endif
        (a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, name, fline, lnos, __pyx_mstate_global->__pyx_empty_bytes);
    #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030c00A1
    if (likely(result))
        result->_co_firsttraceable = 0;
    #endif
    return result;
  }
#elif !CYTHON_COMPILING_IN_PYPY
  #define __Pyx__PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
  /* PyPy: no positional-only-argument variant of PyCode_New. */
  #define __Pyx__PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
/* Create a code object from a compact function description.
   'varnames' holds descr.nlocals interned name objects; identical varname
   tuples are deduplicated through 'tuple_dedup_map' to save memory.
   Returns a new reference, or NULL with an exception set. */
static PyObject* __Pyx_PyCode_New(
        const __Pyx_PyCode_New_function_description descr,
        PyObject * const *varnames,
        PyObject *filename,
        PyObject *funcname,
        PyObject *line_table,
        PyObject *tuple_dedup_map
) {
    PyObject *code_obj = NULL, *varnames_tuple_dedup = NULL, *code_bytes = NULL;
    Py_ssize_t var_count = (Py_ssize_t) descr.nlocals;
    PyObject *varnames_tuple = PyTuple_New(var_count);
    if (unlikely(!varnames_tuple)) return NULL;
    for (Py_ssize_t i=0; i < var_count; i++) {
        Py_INCREF(varnames[i]);
        if (__Pyx_PyTuple_SET_ITEM(varnames_tuple, i, varnames[i]) != (0)) goto done;
    }
    /* Reuse an identical varnames tuple if one was created before. */
    #if CYTHON_COMPILING_IN_LIMITED_API
    varnames_tuple_dedup = PyDict_GetItem(tuple_dedup_map, varnames_tuple);
    if (!varnames_tuple_dedup) {
        if (unlikely(PyDict_SetItem(tuple_dedup_map, varnames_tuple, varnames_tuple) < 0)) goto done;
        varnames_tuple_dedup = varnames_tuple;
    }
    #else
    varnames_tuple_dedup = PyDict_SetDefault(tuple_dedup_map, varnames_tuple, varnames_tuple);
    if (unlikely(!varnames_tuple_dedup)) goto done;
    #endif
    #if CYTHON_AVOID_BORROWED_REFS
    Py_INCREF(varnames_tuple_dedup);
    #endif
    if (__PYX_LIMITED_VERSION_HEX >= (0x030b0000) && line_table != NULL && !CYTHON_COMPILING_IN_GRAAL) {
        /* 3.11+ expects co_code to be large enough for the line table:
           allocate a zero-filled bytecode buffer of 2 bytes per entry,
           rounded up to a multiple of 4. */
        Py_ssize_t line_table_length = __Pyx_PyBytes_GET_SIZE(line_table);
        #if !CYTHON_ASSUME_SAFE_SIZE
        if (unlikely(line_table_length == -1)) goto done;
        #endif
        Py_ssize_t code_len = (line_table_length * 2 + 4) & ~3LL;
        code_bytes = PyBytes_FromStringAndSize(NULL, code_len);
        if (unlikely(!code_bytes)) goto done;
        char* c_code_bytes = PyBytes_AsString(code_bytes);
        if (unlikely(!c_code_bytes)) goto done;
        memset(c_code_bytes, 0, (size_t) code_len);
    }
    code_obj = (PyObject*) __Pyx__PyCode_New(
        (int) descr.argcount,
        (int) descr.num_posonly_args,
        (int) descr.num_kwonly_args,
        (int) descr.nlocals,
        0,
        (int) descr.flags,
        code_bytes ? code_bytes : __pyx_mstate_global->__pyx_empty_bytes,
        __pyx_mstate_global->__pyx_empty_tuple,
        __pyx_mstate_global->__pyx_empty_tuple,
        varnames_tuple_dedup,
        __pyx_mstate_global->__pyx_empty_tuple,
        __pyx_mstate_global->__pyx_empty_tuple,
        filename,
        funcname,
        (int) descr.first_line,
        (__PYX_LIMITED_VERSION_HEX >= (0x030b0000) && line_table) ? line_table : __pyx_mstate_global->__pyx_empty_bytes
    );
done:
    Py_XDECREF(code_bytes);
    #if CYTHON_AVOID_BORROWED_REFS
    Py_XDECREF(varnames_tuple_dedup);
    #endif
    Py_DECREF(varnames_tuple);
    return code_obj;
}

/* DecompressString */
/* Decompress 'length' bytes at 's' using the algorithm selected by 'algo'
   (1 = zlib, 2 = bz2, 3 = compression.zstd) by importing the matching
   stdlib module and calling its decompress() on a read-only memoryview
   over the input (no copy is made).
   Returns a new bytes object, or NULL with an exception set.
   Fix vs. generated code: the early return on Py_BuildValue failure in the
   zstd branch leaked the 'methodname' reference. */
static PyObject *__Pyx_DecompressString(const char *s, Py_ssize_t length, int algo) {
    PyObject *module, *decompress, *compressed_bytes, *decompressed;
    const char* module_name = algo == 3 ? "compression.zstd" : algo == 2 ? "bz2" : "zlib";
    PyObject *methodname = PyUnicode_FromString("decompress");
    if (unlikely(!methodname)) return NULL;
    #if __PYX_LIMITED_VERSION_HEX >= 0x030e0000
    if (algo == 3) {
        /* "compression.zstd" is a package submodule (3.14+); import it with
           a fromlist so the submodule itself is returned. */
        PyObject *fromlist = Py_BuildValue("[O]", methodname);
        if (unlikely(!fromlist)) {
            Py_DECREF(methodname);  /* was leaked here in the generated code */
            return NULL;
        }
        module = PyImport_ImportModuleLevel("compression.zstd", NULL, NULL, fromlist, 0);
        Py_DECREF(fromlist);
    } else
    #endif
        module = PyImport_ImportModule(module_name);
    if (unlikely(!module)) goto import_failed;
    decompress = PyObject_GetAttr(module, methodname);
    if (unlikely(!decompress)) goto import_failed;
    {
        /* PyMemoryView_FromMemory needs a non-const pointer even for a
           read-only view; cast away const without tripping -Wcast-qual. */
        #ifdef __cplusplus
            char *memview_bytes = const_cast<char*>(s);
        #else
            #if defined(__clang__)
              #pragma clang diagnostic push
              #pragma clang diagnostic ignored "-Wcast-qual"
            #elif !defined(__INTEL_COMPILER) && defined(__GNUC__)
              #pragma GCC diagnostic push
              #pragma GCC diagnostic ignored "-Wcast-qual"
            #endif
            char *memview_bytes = (char*) s;
            #if defined(__clang__)
              #pragma clang diagnostic pop
            #elif !defined(__INTEL_COMPILER) && defined(__GNUC__)
              #pragma GCC diagnostic pop
            #endif
        #endif
        #if CYTHON_COMPILING_IN_LIMITED_API && !defined(PyBUF_READ)
        int memview_flags = 0x100;  /* PyBUF_READ value when the macro is unavailable */
        #else
        int memview_flags = PyBUF_READ;
        #endif
        compressed_bytes = PyMemoryView_FromMemory(memview_bytes, length, memview_flags);
    }
    if (unlikely(!compressed_bytes)) {
        Py_DECREF(decompress);
        goto bad;
    }
    decompressed = PyObject_CallFunctionObjArgs(decompress, compressed_bytes, NULL);
    Py_DECREF(compressed_bytes);
    Py_DECREF(decompress);
    Py_DECREF(module);
    Py_DECREF(methodname);
    return decompressed;
import_failed:
    PyErr_Format(PyExc_ImportError,
        "Failed to import '%.20s.decompress' - cannot initialise module strings. "
        "String compression was configured with the C macro 'CYTHON_COMPRESS_STRINGS=%d'.",
        module_name, algo);
bad:
    Py_XDECREF(module);
    Py_DECREF(methodname);
    return NULL;
}

#include <string.h>
/* strlen() variant that raises OverflowError (returning -1) instead of
   silently truncating when the length does not fit in Py_ssize_t. */
static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) {
    const size_t byte_count = strlen(s);
    if (likely(byte_count <= (size_t) PY_SSIZE_T_MAX))
        return (Py_ssize_t) byte_count;
    PyErr_SetString(PyExc_OverflowError, "byte string is too long");
    return -1;
}
/* Decode a NUL-terminated C string into a Python str.
   Returns NULL (with OverflowError set) for absurdly long input. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const Py_ssize_t n = __Pyx_ssize_strlen(c_str);
    return (n < 0) ? NULL : __Pyx_PyUnicode_FromStringAndSize(c_str, n);
}
/* Copy a NUL-terminated C string into a new Python bytearray.
   Returns NULL (with OverflowError set) for absurdly long input. */
static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char* c_str) {
    const Py_ssize_t n = __Pyx_ssize_strlen(c_str);
    return (n < 0) ? NULL : PyByteArray_FromStringAndSize(c_str, n);
}
/* Like __Pyx_PyObject_AsStringAndSize but discarding the length. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t unused_len;
    return __Pyx_PyObject_AsStringAndSize(o, &unused_len);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
/* Return a pointer to the UTF-8 representation of a str object and store
   its byte length in *length.  The pointer is owned by the str object.
   Under ASCII default encoding, non-ASCII input is rejected by letting
   PyUnicode_AsASCIIString() set a UnicodeEncodeError. */
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if CYTHON_COMPILING_IN_LIMITED_API
    {
        const char* result;
        Py_ssize_t unicode_length;
        CYTHON_MAYBE_UNUSED_VAR(unicode_length); // only for __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        #if __PYX_LIMITED_VERSION_HEX < 0x030A0000
        if (unlikely(PyArg_Parse(o, "s#", &result, length) < 0)) return NULL;
        #else
        result = PyUnicode_AsUTF8AndSize(o, length);
        #endif
        #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        /* If byte length != character length the string contains
           non-ASCII characters; raise via the ASCII codec. */
        unicode_length = PyUnicode_GetLength(o);
        if (unlikely(unicode_length < 0)) return NULL;
        if (unlikely(unicode_length != *length)) {
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
        #endif
        return result;
    }
#else
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
    if (likely(PyUnicode_IS_ASCII(o))) {
        /* ASCII: UTF-8 bytes equal the code points, one byte each. */
        *length = PyUnicode_GET_LENGTH(o);
        return PyUnicode_AsUTF8(o);
    } else {
        /* Raise UnicodeEncodeError for the offending character. */
        PyUnicode_AsASCIIString(o);
        return NULL;
    }
#else
    return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
}
#endif
/* Get a (pointer, length) view of the character data of a str (when a
   default encoding is configured), bytearray, or bytes-like object.
   The pointer is owned by 'o'; returns NULL with an exception on error. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
    if (PyUnicode_Check(o)) {
        return __Pyx_PyUnicode_AsStringAndSize(o, length);
    } else
#endif
    if (PyByteArray_Check(o)) {
#if (CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS) || (CYTHON_COMPILING_IN_PYPY && (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)))
        /* Direct macro access when the implementation guarantees safety. */
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
#else
        *length = PyByteArray_Size(o);
        if (*length == -1) return NULL;
        return PyByteArray_AsString(o);
#endif
    } else
    {
        /* Fallback: bytes and bytes-like objects. */
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth test with fast paths for the three singletons True/False/None;
   anything else goes through the full protocol (may return -1 on error). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True) return 1;
    if ((x == Py_False) | (x == Py_None)) return 0;
    return PyObject_IsTrue(x);
}
/* Truth-test 'x' and release the reference; NULL input (an already-set
   exception) propagates as -1 without touching the pointer. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
    int truth;
    if (unlikely(!x))
        return -1;
    truth = __Pyx_PyObject_IsTrue(x);
    Py_DECREF(x);
    return truth;
}
/* Handle an __int__ result that is not exactly 'int'.
   For a strict int subclass: emit a DeprecationWarning and pass the value
   through (unless warnings are configured as errors, in which case the
   result is dropped and NULL returned).  For anything else: raise
   TypeError and return NULL.  Consumes the reference to 'result' on
   every failure path. */
static PyObject* __Pyx_PyNumber_LongWrongResultType(PyObject* result) {
    __Pyx_TypeName result_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(result));
    if (PyLong_Check(result)) {
        /* int subclass: deprecated but still accepted. */
        if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
                "__int__ returned non-int (type " __Pyx_FMT_TYPENAME ").  "
                "The ability to return an instance of a strict subclass of int is deprecated, "
                "and may be removed in a future version of Python.",
                result_type_name)) {
            /* Warning escalated to an exception. */
            __Pyx_DECREF_TypeName(result_type_name);
            Py_DECREF(result);
            return NULL;
        }
        __Pyx_DECREF_TypeName(result_type_name);
        return result;
    }
    PyErr_Format(PyExc_TypeError,
                 "__int__ returned non-int (type " __Pyx_FMT_TYPENAME ")",
                 result_type_name);
    __Pyx_DECREF_TypeName(result_type_name);
    Py_DECREF(result);
    return NULL;
}
/* Coerce an object to an exact int via its __int__ slot (note: unlike
   PyNumber_Long, this does not parse str/bytes).  Returns a new
   reference, or NULL with TypeError (or the slot's exception) set. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Long(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
  PyNumberMethods *m;
#endif
  PyObject *res = NULL;
  if (likely(PyLong_Check(x)))
      return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
  /* Call the nb_int slot directly to skip string parsing. */
  m = Py_TYPE(x)->tp_as_number;
  if (likely(m && m->nb_int)) {
      res = m->nb_int(x);
  }
#else
  /* No slot access: exclude str/bytes explicitly to match the slot path. */
  if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
      res = PyNumber_Long(x);
  }
#endif
  if (likely(res)) {
      if (unlikely(!PyLong_CheckExact(res))) {
          /* int subclass or wrong type: warn/raise accordingly. */
          return __Pyx_PyNumber_LongWrongResultType(res);
      }
  }
  else if (!PyErr_Occurred()) {
      PyErr_SetString(PyExc_TypeError,
                      "an integer is required");
  }
  return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
  /* Convert an index-like object to Py_ssize_t. Returns -1 with an exception
     set on error (-1 is also a valid result; callers must check PyErr). */
  Py_ssize_t ival;
  PyObject *x;
  if (likely(PyLong_CheckExact(b))) {
    #if CYTHON_USE_PYLONG_INTERNALS
    /* Fast path: read the PyLong digits directly, avoiding the generic
       PyLong_AsSsize_t call for small multi-digit values. */
    if (likely(__Pyx_PyLong_IsCompact(b))) {
        return __Pyx_PyLong_CompactValue(b);
    } else {
      const digit* digits = __Pyx_PyLong_Digits(b);
      const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b);
      /* Each case only fires when 2/3/4 digits provably fit in Py_ssize_t;
         negative case labels mirror the positive ones for negative values. */
      switch (size) {
         case 2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
      }
    }
    #endif
    /* Values too wide for the fast path (or internals unavailable). */
    return PyLong_AsSsize_t(b);
  }
  /* Not an exact int: go through the __index__ protocol. */
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyLong_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
  /* Convert an index-like object to Py_hash_t. Returns -1 with an exception
     set on error (-1 is also a valid value; callers must check PyErr). */
  if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) {
    /* Common case: Py_hash_t and Py_ssize_t have identical width. */
    return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
  } else {
    Py_ssize_t ival;
    PyObject *x;
    x = PyNumber_Index(o);
    if (!x) return -1;
    /* NOTE(review): this branch narrows through PyLong_AsLong, so values
       outside 'long' raise OverflowError even if Py_hash_t is wider —
       presumably acceptable on the rare ABIs where the sizes differ; confirm. */
    ival = PyLong_AsLong(x);
    Py_DECREF(x);
    return ival;
  }
}
static CYTHON_INLINE PyObject *__Pyx_Owned_Py_None(int b) {
    /* Return a new (owned) reference to None; 'b' is accepted for call-site
       uniformity with the bool helpers but deliberately ignored. */
    PyObject *none = Py_None;
    CYTHON_UNUSED_VAR(b);
    return __Pyx_NewRef(none);
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
  /* Map nonzero to True and zero to False, returning a new reference. */
  PyObject *singleton = b ? Py_True : Py_False;
  return __Pyx_NewRef(singleton);
}
static CYTHON_INLINE PyObject * __Pyx_PyLong_FromSize_t(size_t ival) {
    /* Thin wrapper building a Python int from an unsigned size value
       (new reference, or NULL with an exception set). */
    PyObject *as_int = PyLong_FromSize_t(ival);
    return as_int;
}


/* MultiPhaseInitModuleState */
#if CYTHON_PEP489_MULTI_PHASE_INIT && CYTHON_USE_MODULE_STATE
/* Per-interpreter module lookup table (PEP 489 multi-phase init).
   First decide whether access must be thread-safe: required on the Limited
   API and on CPython >= 3.12. */
#ifndef CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
#if (CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX >= 0x030C0000)
  #define CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE 1
#else
  #define CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE 0
#endif
#endif
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE && !CYTHON_ATOMICS
#error "Module state with PEP489 requires atomics. Currently that's one of\
 C11, C++11, gcc atomic intrinsics or MSVC atomic intrinsics"
#endif
/* Pick a mutex implementation for the lookup-table writer lock, preferring
   (in order): no-op when single-threaded access is guaranteed, PyMutex
   (CPython >= 3.13), C++11 std::mutex, C11 <threads.h>, pthreads, Win32 SRW
   locks. The readers themselves are mostly lock-free (see read counter). */
#if !CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
#define __Pyx_ModuleStateLookup_Lock()
#define __Pyx_ModuleStateLookup_Unlock()
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d0000
static PyMutex __Pyx_ModuleStateLookup_mutex = {0};
#define __Pyx_ModuleStateLookup_Lock() PyMutex_Lock(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() PyMutex_Unlock(&__Pyx_ModuleStateLookup_mutex)
#elif defined(__cplusplus) && __cplusplus >= 201103L
#include <mutex>
static std::mutex __Pyx_ModuleStateLookup_mutex;
#define __Pyx_ModuleStateLookup_Lock() __Pyx_ModuleStateLookup_mutex.lock()
#define __Pyx_ModuleStateLookup_Unlock() __Pyx_ModuleStateLookup_mutex.unlock()
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201112L) && !defined(__STDC_NO_THREADS__)
#include <threads.h>
/* C11 mtx_t has no static initializer, so initialization goes through
   call_once on first Lock(). */
static mtx_t __Pyx_ModuleStateLookup_mutex;
static once_flag __Pyx_ModuleStateLookup_mutex_once_flag = ONCE_FLAG_INIT;
static void __Pyx_ModuleStateLookup_initialize_mutex(void) {
    mtx_init(&__Pyx_ModuleStateLookup_mutex, mtx_plain);
}
#define __Pyx_ModuleStateLookup_Lock()\
  call_once(&__Pyx_ModuleStateLookup_mutex_once_flag, __Pyx_ModuleStateLookup_initialize_mutex);\
  mtx_lock(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() mtx_unlock(&__Pyx_ModuleStateLookup_mutex)
#elif defined(HAVE_PTHREAD_H)
#include <pthread.h>
static pthread_mutex_t __Pyx_ModuleStateLookup_mutex = PTHREAD_MUTEX_INITIALIZER;
#define __Pyx_ModuleStateLookup_Lock() pthread_mutex_lock(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() pthread_mutex_unlock(&__Pyx_ModuleStateLookup_mutex)
#elif defined(_WIN32)
#include <Windows.h>  // synchapi.h on its own doesn't work
static SRWLOCK __Pyx_ModuleStateLookup_mutex = SRWLOCK_INIT;
#define __Pyx_ModuleStateLookup_Lock() AcquireSRWLockExclusive(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() ReleaseSRWLockExclusive(&__Pyx_ModuleStateLookup_mutex)
#else
#error "No suitable lock available for CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE.\
 Requires C standard >= C11, or C++ standard >= C++11,\
 or pthreads, or the Windows 32 API, or Python >= 3.13."
#endif
/* One table entry: interpreter id -> this extension's module object. */
typedef struct {
    int64_t id;
    PyObject *module;
} __Pyx_InterpreterIdAndModule;
/* The lookup table itself. When 'interpreter_id_as_index' is nonzero the
   table is dense (index == interpreter id); otherwise it is a sorted sparse
   array searched by lower bound. 'table[1]' is the pre-C99 variable-length
   trailing-array idiom; 'allocated' entries are actually present. */
typedef struct {
    char interpreter_id_as_index;
    Py_ssize_t count;
    Py_ssize_t allocated;
    __Pyx_InterpreterIdAndModule table[1];
} __Pyx_ModuleStateLookupData;
/* Dense id-as-index representation is kept only for small interpreter ids. */
#define __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE 32
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
/* Count of in-flight lock-free readers; writers spin until it drops to 0. */
static __pyx_atomic_int_type __Pyx_ModuleStateLookup_read_counter = 0;
#endif
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
static __pyx_atomic_ptr_type __Pyx_ModuleStateLookup_data = 0;
#else
static __Pyx_ModuleStateLookupData* __Pyx_ModuleStateLookup_data = NULL;
#endif
static __Pyx_InterpreterIdAndModule* __Pyx_State_FindModuleStateLookupTableLowerBound(
        __Pyx_InterpreterIdAndModule* table,
        Py_ssize_t count,
        int64_t interpreterId) {
    /* Lower-bound search over a table sorted by ascending id: returns the
       first entry with id >= interpreterId, or table+count if none exists.
       Binary search narrows the range down to a small window, then a linear
       scan finishes the job. */
    __Pyx_InterpreterIdAndModule* begin = table;
    __Pyx_InterpreterIdAndModule* end = begin + count;
    /* Fast path for the lowest-id entry (typically the main interpreter). */
    if (begin->id == interpreterId) {
        return begin;
    }
    while ((end - begin) > __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE) {
        __Pyx_InterpreterIdAndModule* halfway = begin + (end - begin)/2;
        if (halfway->id == interpreterId) {
            return halfway;
        }
        if (halfway->id < interpreterId) {
            begin = halfway;
        } else {
            end = halfway;
        }
    }
    /* Linear finish over the remaining small window. */
    for (; begin < end; ++begin) {
        if (begin->id >= interpreterId) return begin;
    }
    return begin;
}
static PyObject *__Pyx_State_FindModule(CYTHON_UNUSED void* dummy) {
    /* Look up this extension's module object for the current interpreter.
       Returns a borrowed reference, or NULL if not registered / on error. */
    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
    if (interpreter_id == -1) return NULL;
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    /* Mostly lock-free read: bump the reader counter, then re-load the table
       pointer to confirm a writer did not swap it in between. If the pointer
       changed (or was NULL), fall back to taking the writer lock, which
       guarantees a stable snapshot. Writers wait for the counter to drain. */
    __Pyx_ModuleStateLookupData* data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_relaxed(&__Pyx_ModuleStateLookup_data);
    {
        __pyx_atomic_incr_acq_rel(&__Pyx_ModuleStateLookup_read_counter);
        if (likely(data)) {
            __Pyx_ModuleStateLookupData* new_data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_acquire(&__Pyx_ModuleStateLookup_data);
            if (likely(data == new_data)) {
                goto read_finished;
            }
        }
        /* Slow path: drop our reader claim, then retry under the lock. */
        __pyx_atomic_decr_acq_rel(&__Pyx_ModuleStateLookup_read_counter);
        __Pyx_ModuleStateLookup_Lock();
        __pyx_atomic_incr_relaxed(&__Pyx_ModuleStateLookup_read_counter);
        data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_relaxed(&__Pyx_ModuleStateLookup_data);
        __Pyx_ModuleStateLookup_Unlock();
    }
  read_finished:;
#else
    __Pyx_ModuleStateLookupData* data = __Pyx_ModuleStateLookup_data;
#endif
    __Pyx_InterpreterIdAndModule* found = NULL;
    if (unlikely(!data)) goto end;
    if (data->interpreter_id_as_index) {
        /* Dense table: the interpreter id doubles as the array index. */
        if (interpreter_id < data->count) {
            found = data->table+interpreter_id;
        }
    } else {
        /* Sparse sorted table: lower-bound search. */
        found = __Pyx_State_FindModuleStateLookupTableLowerBound(
            data->table, data->count, interpreter_id);
    }
  end:
    {
        PyObject *result=NULL;
        /* Lower bound may point past or at a different id; verify the match. */
        if (found && found->id == interpreter_id) {
            result = found->module;
        }
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
        __pyx_atomic_decr_acq_rel(&__Pyx_ModuleStateLookup_read_counter);
#endif
        return result;
    }
}
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
/* Writer-side spin-wait: block until every in-flight lock-free reader has
   released its claim on the current table (read counter back to zero). */
static void __Pyx_ModuleStateLookup_wait_until_no_readers(void) {
    while (__pyx_atomic_load(&__Pyx_ModuleStateLookup_read_counter) != 0);
}
#else
#define __Pyx_ModuleStateLookup_wait_until_no_readers()
#endif
static int __Pyx_State_AddModuleInterpIdAsIndex(__Pyx_ModuleStateLookupData **old_data, PyObject* module, int64_t interpreter_id) {
    /* Insert 'module' into a dense (id-as-index) table, growing it if needed.
       On success *old_data is updated to the (possibly reallocated) table and
       0 is returned; on allocation failure *old_data is left valid and -1 is
       returned with MemoryError set. */
    Py_ssize_t to_allocate = (*old_data)->allocated;
    /* Double capacity until the interpreter id fits as an index. */
    while (to_allocate <= interpreter_id) {
        if (to_allocate == 0) to_allocate = 1;
        else to_allocate *= 2;
    }
    __Pyx_ModuleStateLookupData *new_data = *old_data;
    if (to_allocate != (*old_data)->allocated) {
         new_data = (__Pyx_ModuleStateLookupData *)realloc(
            *old_data,
            sizeof(__Pyx_ModuleStateLookupData)+(to_allocate-1)*sizeof(__Pyx_InterpreterIdAndModule));
        if (!new_data) {
            /* *old_data is still the previous valid allocation. */
            PyErr_NoMemory();
            return -1;
        }
        /* Initialize the newly added slots: id == index, no module yet. */
        for (Py_ssize_t i = new_data->allocated; i < to_allocate; ++i) {
            new_data->table[i].id = i;
            new_data->table[i].module = NULL;
        }
        new_data->allocated = to_allocate;
    }
    new_data->table[interpreter_id].module = module;
    /* count tracks one past the highest populated index. */
    if (new_data->count < interpreter_id+1) {
        new_data->count = interpreter_id+1;
    }
    *old_data = new_data;
    return 0;
}
static void __Pyx_State_ConvertFromInterpIdAsIndex(__Pyx_ModuleStateLookupData *data) {
    /* Convert a dense id-as-index table into the sparse sorted form by
       compacting populated entries to the front (ids remain ascending, so
       the result is already sorted) and clearing the leftover tail. */
    __Pyx_InterpreterIdAndModule *read = data->table;
    __Pyx_InterpreterIdAndModule *write = data->table;
    __Pyx_InterpreterIdAndModule *end = read + data->count;
    for (; read<end; ++read) {
        if (read->module) {
            write->id = read->id;
            write->module = read->module;
            ++write;
        }
    }
    data->count = write - data->table;
    /* Zero the now-unused tail slots. */
    for (; write<end; ++write) {
        write->id = 0;
        write->module = NULL;
    }
    data->interpreter_id_as_index = 0;
}
static int __Pyx_State_AddModule(PyObject* module, CYTHON_UNUSED void* dummy) {
    /* Register 'module' as this extension's module for the current
       interpreter. Returns 0 on success, -1 on failure (exception set).
       Stores a borrowed pointer; lifetime is managed by the caller. */
    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
    if (interpreter_id == -1) return -1;
    int result = 0;
    __Pyx_ModuleStateLookup_Lock();
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    /* Swap the global pointer to NULL while mutating so lock-free readers
       see either the old table or nothing, never a half-updated one. */
    __Pyx_ModuleStateLookupData *old_data = (__Pyx_ModuleStateLookupData *)
            __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, 0);
#else
    __Pyx_ModuleStateLookupData *old_data = __Pyx_ModuleStateLookup_data;
#endif
    __Pyx_ModuleStateLookupData *new_data = old_data;
    if (!new_data) {
        /* First registration: start with a 1-slot dense table. */
        new_data = (__Pyx_ModuleStateLookupData *)calloc(1, sizeof(__Pyx_ModuleStateLookupData));
        if (!new_data) {
            result = -1;
            PyErr_NoMemory();
            goto end;
        }
        new_data->allocated = 1;
        new_data->interpreter_id_as_index = 1;
    }
    /* Drain lock-free readers before touching the table contents. */
    __Pyx_ModuleStateLookup_wait_until_no_readers();
    if (new_data->interpreter_id_as_index) {
        if (interpreter_id < __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE) {
            result = __Pyx_State_AddModuleInterpIdAsIndex(&new_data, module, interpreter_id);
            goto end;
        }
        /* Id too large for the dense form: switch to the sparse sorted form. */
        __Pyx_State_ConvertFromInterpIdAsIndex(new_data);
    }
    {
        Py_ssize_t insert_at = 0;
        {
            /* Find where the new entry belongs in the sorted table. */
            __Pyx_InterpreterIdAndModule* lower_bound = __Pyx_State_FindModuleStateLookupTableLowerBound(
                new_data->table, new_data->count, interpreter_id);
            assert(lower_bound);
            insert_at = lower_bound - new_data->table;
            if (unlikely(insert_at < new_data->count && lower_bound->id == interpreter_id)) {
                lower_bound->module = module;
                goto end;  // already in table, nothing more to do
            }
        }
        /* Grow (geometrically) if there is no room for one more entry. */
        if (new_data->count+1 >= new_data->allocated) {
            Py_ssize_t to_allocate = (new_data->count+1)*2;
            new_data =
                (__Pyx_ModuleStateLookupData*)realloc(
                    new_data,
                    sizeof(__Pyx_ModuleStateLookupData) +
                    (to_allocate-1)*sizeof(__Pyx_InterpreterIdAndModule));
            if (!new_data) {
                /* realloc failed: restore the untouched old table. */
                result = -1;
                new_data = old_data;
                PyErr_NoMemory();
                goto end;
            }
            new_data->allocated = to_allocate;
        }
        ++new_data->count;
        /* Shift entries right from insert_at, rippling the new pair in. */
        int64_t last_id = interpreter_id;
        PyObject *last_module = module;
        for (Py_ssize_t i=insert_at; i<new_data->count; ++i) {
            int64_t current_id = new_data->table[i].id;
            new_data->table[i].id = last_id;
            last_id = current_id;
            PyObject *current_module = new_data->table[i].module;
            new_data->table[i].module = last_module;
            last_module = current_module;
        }
    }
  end:
    /* Publish the (possibly new) table pointer for readers again. */
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, new_data);
#else
    __Pyx_ModuleStateLookup_data = new_data;
#endif
    __Pyx_ModuleStateLookup_Unlock();
    return result;
}
static int __Pyx_State_RemoveModule(CYTHON_UNUSED void* dummy) {
    /* Unregister the current interpreter's entry from the lookup table,
       freeing the table when it becomes empty. Returns 0 on success,
       -1 if the interpreter id cannot be obtained. */
    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
    if (interpreter_id == -1) return -1;
    __Pyx_ModuleStateLookup_Lock();
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    /* Hide the table from lock-free readers while mutating it. */
    __Pyx_ModuleStateLookupData *data = (__Pyx_ModuleStateLookupData *)
            __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, 0);
#else
    __Pyx_ModuleStateLookupData *data = __Pyx_ModuleStateLookup_data;
#endif
    /* NOTE(review): 'data' is dereferenced without a NULL check — this
       presumably relies on RemoveModule only ever running after a successful
       AddModule; confirm against the generated callers. */
    if (data->interpreter_id_as_index) {
        /* Dense table: just clear the slot; count/allocation are kept. */
        if (interpreter_id < data->count) {
            data->table[interpreter_id].module = NULL;
        }
        goto done;
    }
    {
        /* Sparse sorted table: locate the entry and shift the tail left. */
        __Pyx_ModuleStateLookup_wait_until_no_readers();
        __Pyx_InterpreterIdAndModule* lower_bound = __Pyx_State_FindModuleStateLookupTableLowerBound(
            data->table, data->count, interpreter_id);
        if (!lower_bound) goto done;
        if (lower_bound->id != interpreter_id) goto done;
        __Pyx_InterpreterIdAndModule *end = data->table+data->count;
        for (;lower_bound<end-1; ++lower_bound) {
            lower_bound->id = (lower_bound+1)->id;
            lower_bound->module = (lower_bound+1)->module;
        }
    }
    --data->count;
    if (data->count == 0) {
        /* Last entry removed: release the whole table. */
        free(data);
        data = NULL;
    }
  done:
    /* Re-publish the table pointer (possibly NULL) for readers. */
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, data);
#else
    __Pyx_ModuleStateLookup_data = data;
#endif
    __Pyx_ModuleStateLookup_Unlock();
    return 0;
}
#endif

/* #### Code section: utility_code_pragmas_end ### */
#ifdef _MSC_VER
#pragma warning( pop )
#endif



/* #### Code section: end ### */
#endif /* Py_PYTHON_H */
